@@ -103,31 +103,24 @@ static size_t compute_elp(uint16_t *sigma, const uint16_t *syndromes) { | |||
* @param[in] codeword Array of size VEC_N1_SIZE_BYTES storing the codeword | |||
*/ | |||
static void message_from_codeword(uint64_t *message, const uint64_t *codeword) { | |||
int32_t val = PARAM_N1 - PARAM_K; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << val % 64); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - val % 64)); | |||
size_t index = val / 64; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << ((PARAM_N1 - PARAM_K) % 64)); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - (PARAM_N1 - PARAM_K) % 64)); | |||
size_t index = (PARAM_N1 - PARAM_K) / 64; | |||
for (size_t i = 0; i < VEC_K_SIZE_64 - 1; ++i) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[i] = message1 | message2; | |||
message[i] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
message[i] |= (codeword[++index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
// Last byte (8-val % 8 is the number of bits given by message1) | |||
if ((PARAM_K % 64 == 0) || (64 - val % 64 < PARAM_K % 64)) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[VEC_K_SIZE_64 - 1] = message1 | message2; | |||
} else { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
message[VEC_K_SIZE_64 - 1] = message1; | |||
message[VEC_K_SIZE_64 - 1] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
++index; | |||
if (index < VEC_N1_SIZE_64) { | |||
message[VEC_K_SIZE_64 - 1] |= (codeword[index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
} | |||
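For reference, the rewritten extraction above is a standard mask-and-shift window copy: the message bits sit in the codeword starting at bit offset PARAM_N1 - PARAM_K, so each 64-bit output word is stitched together from the tail of one input word and the head of the next. A minimal standalone sketch of the same idea follows; extract_bit_window(), its parameters, and its size assumptions are hypothetical and not part of the PQClean API.

#include <stdint.h>
#include <stddef.h>

static void extract_bit_window(uint64_t *out, const uint64_t *in,
                               size_t offset, size_t n_bits) {
    /* Sketch only: assumes offset % 64 != 0 and that `in` has one word of
     * slack after the last word read, so no bounds check is shown here. */
    size_t shift = offset % 64;           /* bits dropped from the first word */
    size_t index = offset / 64;           /* first input word touched         */
    size_t n_words = (n_bits + 63) / 64;  /* output words to produce          */

    for (size_t i = 0; i < n_words; ++i) {
        uint64_t lo = in[index] >> shift;             /* tail of current word */
        uint64_t hi = in[index + 1] << (64 - shift);  /* head of next word    */
        out[i] = lo | hi;
        ++index;
    }
}

A real implementation must also guard the final read, which is exactly what the index < VEC_N1_SIZE_64 check in the new code above does.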
/** | |||
* @brief Computes the 2^PARAM_DELTA syndromes from the received vector | |||
* | |||
@@ -52,7 +52,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
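The loop above fills the table by doubling: after set[i] has been processed, entries 0 through (1 << (i + 1)) - 1 hold the XOR of every subset of {set[0], ..., set[i]}, because index (1 << i) + j is simply index j with bit i switched on. A small self-contained illustration with a hypothetical three-element set (the values are made up; in the library the entries are GF(2^m) field elements):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint16_t set[3] = {0x0011, 0x0022, 0x0044};  /* hypothetical example set */
    uint16_t subset_sums[1 << 3];

    subset_sums[0] = 0;
    for (size_t i = 0; i < 3; ++i) {
        for (size_t j = 0; j < (1U << i); ++j) {
            /* Index (1 << i) + j has bit i set, so XOR in set[i]. */
            subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j];
        }
    }

    /* subset_sums[b] is the XOR of set[i] over the bits i set in b. */
    printf("%#x\n", (unsigned) subset_sums[5]);  /* set[0] ^ set[2] == 0x55 */
    return 0;
}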
@@ -179,6 +179,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -187,10 +188,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -199,7 +202,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC128_AVX2_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC128_AVX2_gf_mul(beta_m_pow, f[i]); | |||
} | |||
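Step 2 above avoids an exponentiation per coefficient by keeping a running power: after i iterations beta_m_pow equals betas[m - 1] raised to the i-th power, and hoisting x = 1 << m_f out of the loop header keeps the bound in a size_t instead of re-evaluating the shift on every comparison. The same running-power pattern in plain integer arithmetic (ordinary multiplication stands in for the library's gf_mul; all names are placeholders):

#include <stdint.h>
#include <stddef.h>

/* Scale coeff[i] by base^i for i = 1 .. n-1 using a running power. */
static void scale_by_powers(uint64_t *coeff, size_t n, uint64_t base) {
    uint64_t pow = 1;
    for (size_t i = 1; i < n; ++i) {
        pow *= base;      /* pow == base^i (wrapping uint64_t arithmetic) */
        coeff[i] *= pow;
    }
}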
@@ -189,26 +189,20 @@ static size_t compute_elp(uint16_t *sigma, const uint16_t *syndromes) { | |||
* @param[in] codeword Array of size VEC_N1_SIZE_BYTES storing the codeword | |||
*/ | |||
static void message_from_codeword(uint64_t *message, const uint64_t *codeword) { | |||
int32_t val = PARAM_N1 - PARAM_K; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << val % 64); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - val % 64)); | |||
size_t index = val / 64; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << ((PARAM_N1 - PARAM_K) % 64)); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - (PARAM_N1 - PARAM_K) % 64)); | |||
size_t index = (PARAM_N1 - PARAM_K) / 64; | |||
for (size_t i = 0; i < VEC_K_SIZE_64 - 1; ++i) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[i] = message1 | message2; | |||
message[i] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
message[i] |= (codeword[++index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
// Last byte (8-val % 8 is the number of bits given by message1) | |||
if ((PARAM_K % 64 == 0) || (64 - val % 64 < PARAM_K % 64)) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[VEC_K_SIZE_64 - 1] = message1 | message2; | |||
} else { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
message[VEC_K_SIZE_64 - 1] = message1; | |||
message[VEC_K_SIZE_64 - 1] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
++index; | |||
if (index < VEC_N1_SIZE_64) { | |||
message[VEC_K_SIZE_64 - 1] |= (codeword[index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
} | |||
@@ -55,7 +55,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -182,21 +182,25 @@ static void fft_t_rec(uint16_t *f, const uint16_t *w, size_t f_coeffs, uint8_t m | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
f[0] = 0; | |||
for (i = 0; i < (1U << m); ++i) { | |||
x = 1 << m; | |||
for (i = 0; i < x; ++i) { | |||
f[0] ^= w[i]; | |||
} | |||
f[1] = 0; | |||
betas_sums[0] = 0; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
betas_sums[(1 << j) + k] = betas_sums[k] ^ betas[j]; | |||
f[1] ^= PQCLEAN_HQC128_CLEAN_gf_mul(betas_sums[(1 << j) + k], w[(1 << j) + k]); | |||
for (k = 0; k < x; ++k) { | |||
betas_sums[x + k] = betas_sums[k] ^ betas[j]; | |||
f[1] ^= PQCLEAN_HQC128_CLEAN_gf_mul(betas_sums[x + k], w[x + k]); | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -248,7 +252,8 @@ static void fft_t_rec(uint16_t *f, const uint16_t *w, size_t f_coeffs, uint8_t m | |||
// Step 2: compute f from g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC128_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC128_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -436,6 +441,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -444,10 +450,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -456,7 +464,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC128_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC128_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -82,7 +82,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
size_t i, j; | |||
for (i = 0; i < 16; i++) { | |||
permuted_table[i] = i; | |||
permuted_table[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_table, 16 * sizeof(uint16_t)); | |||
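The only functional content here is the identity initialization; the new explicit (uint16_t) cast acknowledges that storing the size_t counter i into a uint16_t element is a narrowing conversion, which builds with warnings such as -Wconversion would otherwise flag even though i never exceeds 15 in this loop. A tiny sketch of the pattern with made-up names:

#include <stdint.h>
#include <stddef.h>

static void init_identity_table(uint16_t table[16]) {
    for (size_t i = 0; i < 16; i++) {
        /* size_t -> uint16_t narrows; the cast marks it as intentional. */
        table[i] = (uint16_t) i;
    }
}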
@@ -108,7 +108,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
} | |||
for (i = 0; i < weight; i++) { | |||
permuted_sparse_vect[i] = i; | |||
permuted_sparse_vect[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_sparse_vect, weight * sizeof(uint16_t)); | |||
@@ -103,31 +103,24 @@ static size_t compute_elp(uint16_t *sigma, const uint16_t *syndromes) { | |||
* @param[in] codeword Array of size VEC_N1_SIZE_BYTES storing the codeword | |||
*/ | |||
static void message_from_codeword(uint64_t *message, const uint64_t *codeword) { | |||
int32_t val = PARAM_N1 - PARAM_K; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << val % 64); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - val % 64)); | |||
size_t index = val / 64; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << ((PARAM_N1 - PARAM_K) % 64)); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - (PARAM_N1 - PARAM_K) % 64)); | |||
size_t index = (PARAM_N1 - PARAM_K) / 64; | |||
for (size_t i = 0; i < VEC_K_SIZE_64 - 1; ++i) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[i] = message1 | message2; | |||
message[i] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
message[i] |= (codeword[++index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
// Last byte (8-val % 8 is the number of bits given by message1) | |||
if ((PARAM_K % 64 == 0) || (64 - val % 64 < PARAM_K % 64)) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[VEC_K_SIZE_64 - 1] = message1 | message2; | |||
} else { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
message[VEC_K_SIZE_64 - 1] = message1; | |||
message[VEC_K_SIZE_64 - 1] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
++index; | |||
if (index < VEC_N1_SIZE_64) { | |||
message[VEC_K_SIZE_64 - 1] |= (codeword[index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
} | |||
/** | |||
* @brief Computes the 2^PARAM_DELTA syndromes from the received vector | |||
* | |||
@@ -52,7 +52,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -179,6 +179,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -187,10 +188,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -199,7 +202,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC192_AVX2_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC192_AVX2_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -189,26 +189,20 @@ static size_t compute_elp(uint16_t *sigma, const uint16_t *syndromes) { | |||
* @param[in] codeword Array of size VEC_N1_SIZE_BYTES storing the codeword | |||
*/ | |||
static void message_from_codeword(uint64_t *message, const uint64_t *codeword) { | |||
int32_t val = PARAM_N1 - PARAM_K; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << val % 64); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - val % 64)); | |||
size_t index = val / 64; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << ((PARAM_N1 - PARAM_K) % 64)); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - (PARAM_N1 - PARAM_K) % 64)); | |||
size_t index = (PARAM_N1 - PARAM_K) / 64; | |||
for (size_t i = 0; i < VEC_K_SIZE_64 - 1; ++i) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[i] = message1 | message2; | |||
message[i] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
message[i] |= (codeword[++index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
// Last byte (8-val % 8 is the number of bits given by message1) | |||
if ((PARAM_K % 64 == 0) || (64 - val % 64 < PARAM_K % 64)) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[VEC_K_SIZE_64 - 1] = message1 | message2; | |||
} else { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
message[VEC_K_SIZE_64 - 1] = message1; | |||
message[VEC_K_SIZE_64 - 1] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
++index; | |||
if (index < VEC_N1_SIZE_64) { | |||
message[VEC_K_SIZE_64 - 1] |= (codeword[index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
} | |||
@@ -55,7 +55,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -182,21 +182,25 @@ static void fft_t_rec(uint16_t *f, const uint16_t *w, size_t f_coeffs, uint8_t m | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
f[0] = 0; | |||
for (i = 0; i < (1U << m); ++i) { | |||
x = 1 << m; | |||
for (i = 0; i < x; ++i) { | |||
f[0] ^= w[i]; | |||
} | |||
f[1] = 0; | |||
betas_sums[0] = 0; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
betas_sums[(1 << j) + k] = betas_sums[k] ^ betas[j]; | |||
f[1] ^= PQCLEAN_HQC192_CLEAN_gf_mul(betas_sums[(1 << j) + k], w[(1 << j) + k]); | |||
for (k = 0; k < x; ++k) { | |||
betas_sums[x + k] = betas_sums[k] ^ betas[j]; | |||
f[1] ^= PQCLEAN_HQC192_CLEAN_gf_mul(betas_sums[x + k], w[x + k]); | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -248,7 +252,8 @@ static void fft_t_rec(uint16_t *f, const uint16_t *w, size_t f_coeffs, uint8_t m | |||
// Step 2: compute f from g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC192_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC192_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -436,6 +441,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -444,10 +450,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -456,7 +464,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC192_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC192_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -82,7 +82,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
size_t i, j; | |||
for (i = 0; i < 16; i++) { | |||
permuted_table[i] = i; | |||
permuted_table[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_table, 16 * sizeof(uint16_t)); | |||
@@ -108,7 +108,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
} | |||
for (i = 0; i < weight; i++) { | |||
permuted_sparse_vect[i] = i; | |||
permuted_sparse_vect[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_sparse_vect, weight * sizeof(uint16_t)); | |||
@@ -103,31 +103,24 @@ static size_t compute_elp(uint16_t *sigma, const uint16_t *syndromes) { | |||
* @param[in] codeword Array of size VEC_N1_SIZE_BYTES storing the codeword | |||
*/ | |||
static void message_from_codeword(uint64_t *message, const uint64_t *codeword) { | |||
int32_t val = PARAM_N1 - PARAM_K; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << val % 64); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - val % 64)); | |||
size_t index = val / 64; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << ((PARAM_N1 - PARAM_K) % 64)); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - (PARAM_N1 - PARAM_K) % 64)); | |||
size_t index = (PARAM_N1 - PARAM_K) / 64; | |||
for (size_t i = 0; i < VEC_K_SIZE_64 - 1; ++i) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[i] = message1 | message2; | |||
message[i] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
message[i] |= (codeword[++index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
// Last byte (8-val % 8 is the number of bits given by message1) | |||
if ((PARAM_K % 64 == 0) || (64 - val % 64 < PARAM_K % 64)) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[VEC_K_SIZE_64 - 1] = message1 | message2; | |||
} else { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
message[VEC_K_SIZE_64 - 1] = message1; | |||
message[VEC_K_SIZE_64 - 1] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
++index; | |||
if (index < VEC_N1_SIZE_64) { | |||
message[VEC_K_SIZE_64 - 1] |= (codeword[index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
} | |||
/** | |||
* @brief Computes the 2^PARAM_DELTA syndromes from the received vector | |||
* | |||
@@ -52,7 +52,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -179,6 +179,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -187,10 +188,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -199,7 +202,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC256_AVX2_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC256_AVX2_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -189,26 +189,20 @@ static size_t compute_elp(uint16_t *sigma, const uint16_t *syndromes) { | |||
* @param[in] codeword Array of size VEC_N1_SIZE_BYTES storing the codeword | |||
*/ | |||
static void message_from_codeword(uint64_t *message, const uint64_t *codeword) { | |||
int32_t val = PARAM_N1 - PARAM_K; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << val % 64); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - val % 64)); | |||
size_t index = val / 64; | |||
uint64_t mask1 = (uint64_t) (0xffffffffffffffff << ((PARAM_N1 - PARAM_K) % 64)); | |||
uint64_t mask2 = (uint64_t) (0xffffffffffffffff >> (64 - (PARAM_N1 - PARAM_K) % 64)); | |||
size_t index = (PARAM_N1 - PARAM_K) / 64; | |||
for (size_t i = 0; i < VEC_K_SIZE_64 - 1; ++i) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[i] = message1 | message2; | |||
message[i] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
message[i] |= (codeword[++index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
// Last byte (8-val % 8 is the number of bits given by message1) | |||
if ((PARAM_K % 64 == 0) || (64 - val % 64 < PARAM_K % 64)) { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
uint64_t message2 = (codeword[++index] & mask2) << (64 - val % 64); | |||
message[VEC_K_SIZE_64 - 1] = message1 | message2; | |||
} else { | |||
uint64_t message1 = (codeword[index] & mask1) >> val % 64; | |||
message[VEC_K_SIZE_64 - 1] = message1; | |||
message[VEC_K_SIZE_64 - 1] = (codeword[index] & mask1) >> ((PARAM_N1 - PARAM_K) % 64); | |||
++index; | |||
if (index < VEC_N1_SIZE_64) { | |||
message[VEC_K_SIZE_64 - 1] |= (codeword[index] & mask2) << (64 - (PARAM_N1 - PARAM_K) % 64); | |||
} | |||
} | |||
@@ -55,7 +55,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -182,21 +182,25 @@ static void fft_t_rec(uint16_t *f, const uint16_t *w, size_t f_coeffs, uint8_t m | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
f[0] = 0; | |||
for (i = 0; i < (1U << m); ++i) { | |||
x = 1 << m; | |||
for (i = 0; i < x; ++i) { | |||
f[0] ^= w[i]; | |||
} | |||
f[1] = 0; | |||
betas_sums[0] = 0; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
betas_sums[(1 << j) + k] = betas_sums[k] ^ betas[j]; | |||
f[1] ^= PQCLEAN_HQC256_CLEAN_gf_mul(betas_sums[(1 << j) + k], w[(1 << j) + k]); | |||
for (k = 0; k < x; ++k) { | |||
betas_sums[x + k] = betas_sums[k] ^ betas[j]; | |||
f[1] ^= PQCLEAN_HQC256_CLEAN_gf_mul(betas_sums[x + k], w[x + k]); | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -248,7 +252,8 @@ static void fft_t_rec(uint16_t *f, const uint16_t *w, size_t f_coeffs, uint8_t m | |||
// Step 2: compute f from g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC256_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC256_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -436,6 +441,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -444,10 +450,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -456,7 +464,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQC256_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQC256_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -82,7 +82,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
size_t i, j; | |||
for (i = 0; i < 16; i++) { | |||
permuted_table[i] = i; | |||
permuted_table[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_table, 16 * sizeof(uint16_t)); | |||
@@ -108,7 +108,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
} | |||
for (i = 0; i < weight; i++) { | |||
permuted_sparse_vect[i] = i; | |||
permuted_sparse_vect[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_sparse_vect, weight * sizeof(uint16_t)); | |||
@@ -51,7 +51,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -178,6 +178,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -186,10 +187,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -198,7 +201,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQCRMRS128_AVX2_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQCRMRS128_AVX2_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -51,7 +51,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -178,6 +178,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -186,10 +187,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -198,7 +201,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQCRMRS128_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQCRMRS128_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -82,7 +82,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
size_t i, j; | |||
for (i = 0; i < 16; i++) { | |||
permuted_table[i] = i; | |||
permuted_table[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_table, 16 * sizeof(uint16_t)); | |||
@@ -108,7 +108,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
} | |||
for (i = 0; i < weight; i++) { | |||
permuted_sparse_vect[i] = i; | |||
permuted_sparse_vect[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_sparse_vect, weight * sizeof(uint16_t)); | |||
@@ -51,7 +51,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -178,6 +178,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -186,10 +187,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -198,7 +201,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQCRMRS192_AVX2_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQCRMRS192_AVX2_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -51,7 +51,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -178,6 +178,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -186,10 +187,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -198,7 +201,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQCRMRS192_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQCRMRS192_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -82,7 +82,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
size_t i, j; | |||
for (i = 0; i < 16; i++) { | |||
permuted_table[i] = i; | |||
permuted_table[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_table, 16 * sizeof(uint16_t)); | |||
@@ -108,7 +108,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
} | |||
for (i = 0; i < weight; i++) { | |||
permuted_sparse_vect[i] = i; | |||
permuted_sparse_vect[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_sparse_vect, weight * sizeof(uint16_t)); | |||
@@ -51,7 +51,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -178,6 +178,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -186,10 +187,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -198,7 +201,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQCRMRS256_AVX2_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQCRMRS256_AVX2_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -51,7 +51,7 @@ static void compute_subset_sums(uint16_t *subset_sums, const uint16_t *set, uint | |||
subset_sums[0] = 0; | |||
for (i = 0; i < set_size; ++i) { | |||
for (j = 0; j < (1U << i); ++j) { | |||
for (j = 0; j < (1 << i); ++j) { | |||
subset_sums[(1 << i) + j] = set[i] ^ subset_sums[j]; | |||
} | |||
} | |||
@@ -178,6 +178,7 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
uint16_t beta_m_pow; | |||
size_t i, j, k; | |||
size_t x; | |||
// Step 1 | |||
if (m_f == 1) { | |||
@@ -186,10 +187,12 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
} | |||
w[0] = f[0]; | |||
x = 1; | |||
for (j = 0; j < m; ++j) { | |||
for (k = 0; k < (1U << j); ++k) { | |||
w[(1 << j) + k] = w[k] ^ tmp[j]; | |||
for (k = 0; k < x; ++k) { | |||
w[x + k] = w[k] ^ tmp[j]; | |||
} | |||
x <<= 1; | |||
} | |||
return; | |||
@@ -198,7 +201,8 @@ static void fft_rec(uint16_t *w, uint16_t *f, size_t f_coeffs, uint8_t m, uint32 | |||
// Step 2: compute g | |||
if (betas[m - 1] != 1) { | |||
beta_m_pow = 1; | |||
for (i = 1; i < (1U << m_f); ++i) { | |||
x = 1 << m_f; | |||
for (i = 1; i < x; ++i) { | |||
beta_m_pow = PQCLEAN_HQCRMRS256_CLEAN_gf_mul(beta_m_pow, betas[m - 1]); | |||
f[i] = PQCLEAN_HQCRMRS256_CLEAN_gf_mul(beta_m_pow, f[i]); | |||
} | |||
@@ -82,7 +82,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
size_t i, j; | |||
for (i = 0; i < 16; i++) { | |||
permuted_table[i] = i; | |||
permuted_table[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_table, 16 * sizeof(uint16_t)); | |||
@@ -108,7 +108,7 @@ static void fast_convolution_mult(uint64_t *o, const uint32_t *a1, const uint64_ | |||
} | |||
for (i = 0; i < weight; i++) { | |||
permuted_sparse_vect[i] = i; | |||
permuted_sparse_vect[i] = (uint16_t) i; | |||
} | |||
seedexpander(ctx, (uint8_t *) permutation_sparse_vect, weight * sizeof(uint16_t)); | |||
@@ -12,6 +12,7 @@ consistency_checks: | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-192 | |||
implementation: clean | |||
@@ -21,9 +22,9 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-192 | |||
implementation: avx2 | |||
@@ -35,7 +36,6 @@ consistency_checks: | |||
- hqc.h | |||
- parsing.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- vector.h | |||
- code.c | |||
- fft.c | |||
@@ -44,6 +44,7 @@ consistency_checks: | |||
- kem.c | |||
- parsing.c | |||
- reed_muller.c | |||
- reed_solomon.c | |||
- vector.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
@@ -57,6 +58,7 @@ consistency_checks: | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
implementation: avx2 | |||
@@ -68,7 +70,6 @@ consistency_checks: | |||
- hqc.h | |||
- parsing.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- vector.h | |||
- code.c | |||
- fft.c | |||
@@ -77,4 +78,5 @@ consistency_checks: | |||
- kem.c | |||
- parsing.c | |||
- reed_muller.c | |||
- reed_solomon.c | |||
- vector.c |
@@ -12,6 +12,7 @@ consistency_checks: | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-192 | |||
implementation: clean | |||
@@ -23,7 +24,6 @@ consistency_checks: | |||
- hqc.h | |||
- parsing.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- vector.h | |||
- code.c | |||
- fft.c | |||
@@ -33,6 +33,7 @@ consistency_checks: | |||
- kem.c | |||
- parsing.c | |||
- reed_muller.c | |||
- reed_solomon.c | |||
- vector.c | |||
- source: | |||
scheme: hqc-rmrs-192 | |||
@@ -43,9 +44,9 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
implementation: clean | |||
@@ -67,6 +68,7 @@ consistency_checks: | |||
- kem.c | |||
- parsing.c | |||
- reed_muller.c | |||
- reed_solomon.c | |||
- vector.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
@@ -77,6 +79,6 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c |
@@ -12,6 +12,7 @@ consistency_checks: | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
implementation: clean | |||
@@ -21,9 +22,9 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
implementation: avx2 | |||
@@ -35,7 +36,6 @@ consistency_checks: | |||
- hqc.h | |||
- parsing.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- vector.h | |||
- code.c | |||
- fft.c | |||
@@ -44,4 +44,5 @@ consistency_checks: | |||
- kem.c | |||
- parsing.c | |||
- reed_muller.c | |||
- reed_solomon.c | |||
- vector.c |
@@ -12,6 +12,7 @@ consistency_checks: | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
implementation: clean | |||
@@ -23,7 +24,6 @@ consistency_checks: | |||
- hqc.h | |||
- parsing.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- vector.h | |||
- code.c | |||
- fft.c | |||
@@ -33,6 +33,7 @@ consistency_checks: | |||
- kem.c | |||
- parsing.c | |||
- reed_muller.c | |||
- reed_solomon.c | |||
- vector.c | |||
- source: | |||
scheme: hqc-rmrs-256 | |||
@@ -43,6 +44,6 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c |
@@ -9,6 +9,6 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c |
@@ -9,6 +9,6 @@ consistency_checks: | |||
- gf.h | |||
- hqc.h | |||
- reed_muller.h | |||
- reed_solomon.h | |||
- code.c | |||
- fft.c | |||
- reed_solomon.c |