move code out of headers
This commit is contained in:
parent
24014ae255
commit
64413d2bab
@@ -2,8 +2,8 @@

 LIB=librainbowIa-classic_clean.a

-HEADERS = api.h blas_comm.h blas.h blas_u32.h gf16.h hash_len_config.h parallel_matrix_op.h rainbow_blas.h rainbow_config.h rainbow.h rainbow_keypair_computation.h rainbow_keypair.h utils_hash.h utils_prng.h
-OBJECTS = blas_comm.o parallel_matrix_op.o rainbow.o rainbow_keypair.o rainbow_keypair_computation.o sign.o utils_hash.o utils_prng.o
+HEADERS = api.h blas_comm.h blas.h blas_u32.h gf.h parallel_matrix_op.h rainbow_blas.h rainbow_config.h rainbow.h rainbow_keypair_computation.h rainbow_keypair.h utils_hash.h utils_prng.h
+OBJECTS = blas_comm.o parallel_matrix_op.o rainbow.o rainbow_keypair.o rainbow_keypair_computation.o sign.o utils_hash.o utils_prng.o blas_u32.o gf.o

 CFLAGS=-O3 -Wall -Wconversion -Wextra -Wpedantic -Wvla -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS)

@@ -2,7 +2,7 @@

 # nmake /f Makefile.Microsoft_nmake

 LIBRARY=librainbowIa-classic_clean.lib
-OBJECTS = blas_comm.obj parallel_matrix_op.obj rainbow.obj rainbow_keypair.obj rainbow_keypair_computation.obj sign.obj utils_hash.obj utils_prng.obj
+OBJECTS = blas_comm.obj parallel_matrix_op.obj rainbow.obj rainbow_keypair.obj rainbow_keypair_computation.obj sign.obj utils_hash.obj utils_prng.obj blas_u32.obj gf.obj

 CFLAGS=/nologo /I ..\..\..\common /W4 /WX

@@ -8,15 +8,16 @@
 #include "blas_comm.h"
 #include "blas_u32.h"

-#define gf16v_mul_scalar _gf16v_mul_scalar_u32
-#define gf16v_madd _gf16v_madd_u32
+//TODO remove the gf16v/gf256v if they are not used in the parameter sets
+#define gf16v_mul_scalar PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_scalar_u32
+#define gf16v_madd PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_madd_u32

-#define gf256v_add _gf256v_add_u32
-#define gf256v_mul_scalar _gf256v_mul_scalar_u32
-#define gf256v_madd _gf256v_madd_u32
+#define gf256v_add PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_add_u32
+#define gf256v_mul_scalar PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_scalar_u32
+#define gf256v_madd PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_madd_u32

-#define gf256v_predicated_add _gf256v_predicated_add_u32
-#define gf16v_dot _gf16v_dot_u32
+#define gf256v_predicated_add PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add_u32
+#define gf16v_dot PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_dot_u32


 #endif // _BLAS_H_
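The define block above is what keeps the call sites short while every exported symbol carries the PQCLEAN_RAINBOWIACLASSIC_CLEAN_ prefix. A minimal standalone sketch of the same aliasing pattern; the names below are invented for illustration and are not part of the PQClean tree:

```c
#include <stdint.h>
#include <stdio.h>

/* Namespaced implementation, as a library would export it. */
static void DEMO_NAMESPACE_xor_bytes(uint8_t *acc, const uint8_t *a, unsigned n) {
    for (unsigned i = 0; i < n; i++) {
        acc[i] ^= a[i];   /* addition in GF(2^8) is XOR */
    }
}

/* Short alias used at call sites, mirroring the #define style in blas.h. */
#define xor_bytes DEMO_NAMESPACE_xor_bytes

int main(void) {
    uint8_t acc[4] = {1, 2, 3, 4};
    uint8_t a[4]   = {4, 3, 2, 1};
    xor_bytes(acc, a, 4);   /* expands to the namespaced symbol */
    printf("%u %u %u %u\n", acc[0], acc[1], acc[2], acc[3]);   /* 5 1 1 5 */
    return 0;
}
```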
@@ -4,11 +4,51 @@

 #include "blas_comm.h"
 #include "blas.h"
+#include "gf.h"

 #include <assert.h> // FIXME(js): don't use assert() and don't deal with NDEBUG
 #include <stdint.h>
 #include <string.h>

+/// @brief get an element from GF(16) vector .
+///
+/// @param[in] a - the input vector a.
+/// @param[in] i - the index in the vector a.
+/// @return the value of the element.
+///
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(const uint8_t *a, unsigned i) {
+    uint8_t r = a[i >> 1];
+    uint8_t r0 = r & 0xf;
+    uint8_t r1 = r >> 4;
+    uint8_t m = (uint8_t)(-((int8_t)i & 1));
+    return (uint8_t)((r1 & m) | ((~m)&r0));
+}
+
+/// @brief set an element for a GF(16) vector .
+///
+/// @param[in,out] a - the vector a.
+/// @param[in] i - the index in the vector a.
+/// @param[in] v - the value for the i-th element in vector a.
+/// @return the value of the element.
+///
+static uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_set_ele(uint8_t *a, unsigned i, uint8_t v) {
+    uint8_t m = (uint8_t) (0xf ^ (-((int8_t)i & 1))); /// 1--> 0xf0 , 0--> 0x0f
+    uint8_t ai_remaining = (uint8_t) (a[i >> 1] & (~m)); /// erase
+    a[i >> 1] = (uint8_t) (ai_remaining | (m & (v << 4)) | (m & v & 0xf)); /// set
+    return v;
+}
+
+
+/// @brief get an element from GF(256) vector .
+///
+/// @param[in] a - the input vector a.
+/// @param[in] i - the index in the vector a.
+/// @return the value of the element.
+///
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele(const uint8_t *a, unsigned i) {
+    return a[i];
+}
+

 void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero(uint8_t *b, unsigned _num_byte) {
     gf256v_add(b, b, _num_byte);
 }
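Two GF(16) elements share one byte: even indices live in the low nibble, odd indices in the high nibble, and the mask m selects between them without branching. A self-contained check of that packing convention, with the helpers adapted from the hunk above:

```c
#include <assert.h>
#include <stdint.h>

static uint8_t gf16v_get_ele(const uint8_t *a, unsigned i) {
    uint8_t r = a[i >> 1];
    uint8_t m = (uint8_t)(-((int8_t)i & 1));           /* 0x00 for even i, 0xff for odd i */
    return (uint8_t)(((r >> 4) & m) | ((~m) & (r & 0xf)));
}

static void gf16v_set_ele(uint8_t *a, unsigned i, uint8_t v) {
    uint8_t m = (uint8_t)(0xf ^ (-((int8_t)i & 1)));   /* 0x0f for even i, 0xf0 for odd i */
    uint8_t rest = (uint8_t)(a[i >> 1] & (~m));        /* erase the target nibble */
    a[i >> 1] = (uint8_t)(rest | (m & (v << 4)) | (m & v & 0xf));
}

int main(void) {
    uint8_t vec[2] = {0};            /* room for 4 GF(16) elements */
    gf16v_set_ele(vec, 0, 0x3);      /* low nibble of byte 0 */
    gf16v_set_ele(vec, 1, 0xa);      /* high nibble of byte 0 */
    gf16v_set_ele(vec, 2, 0x7);      /* low nibble of byte 1 */
    assert(vec[0] == 0xa3 && vec[1] == 0x07);
    assert(gf16v_get_ele(vec, 1) == 0xa);
    return 0;
}
```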
@@ -39,7 +79,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_polymul(uint8_t *c, const uint8_t *a,
 static void gf16mat_prod_ref(uint8_t *c, const uint8_t *matA, unsigned n_A_vec_byte, unsigned n_A_width, const uint8_t *b) {
     PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero(c, n_A_vec_byte);
     for (unsigned i = 0; i < n_A_width; i++) {
-        uint8_t bb = gf16v_get_ele(b, i);
+        uint8_t bb = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(b, i);
         gf16v_madd(c, matA, bb, n_A_vec_byte);
         matA += n_A_vec_byte;
     }
@@ -62,7 +102,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16mat_mul(uint8_t *c, const uint8_t *a, co
         PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero(c, n_vec_byte);
         const uint8_t *bk = b + n_vec_byte * k;
         for (unsigned i = 0; i < len_vec; i++) {
-            uint8_t bb = gf16v_get_ele(bk, i);
+            uint8_t bb = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(bk, i);
             gf16v_madd(c, a + n_vec_byte * i, bb, n_vec_byte);
         }
         c += n_vec_byte;
@@ -85,7 +125,6 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256mat_mul(uint8_t *c, const uint8_t *a, c

 static
 unsigned gf16mat_gauss_elim_ref(uint8_t *mat, unsigned h, unsigned w) {
     /// assert( 0==(w&1) ); w must be even !!!
     unsigned n_w_byte = (w + 1) / 2;
     unsigned r8 = 1;
     for (unsigned i = 0; i < h; i++) {
@@ -93,11 +132,11 @@ unsigned gf16mat_gauss_elim_ref(uint8_t *mat, unsigned h, unsigned w) {
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            gf256v_predicated_add(ai + offset_byte, !gf16_is_nonzero(gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            gf256v_predicated_add(ai + offset_byte, !PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
-        uint8_t pivot = gf16v_get_ele(ai, i);
-        r8 &= gf16_is_nonzero(pivot);
-        pivot = gf16_inv(pivot);
+        uint8_t pivot = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i);
+        r8 &= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(pivot);
+        pivot = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_inv(pivot);
         offset_byte = (i + 1) >> 1;
         gf16v_mul_scalar(ai + offset_byte, pivot, n_w_byte - offset_byte);
         for (unsigned j = 0; j < h; j++) {
@@ -105,7 +144,7 @@ unsigned gf16mat_gauss_elim_ref(uint8_t *mat, unsigned h, unsigned w) {
                 continue;
             }
             uint8_t *aj = mat + n_w_byte * j;
-            gf16v_madd(aj + offset_byte, ai + offset_byte, gf16v_get_ele(aj, i), n_w_byte - offset_byte);
+            gf16v_madd(aj + offset_byte, ai + offset_byte, PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(aj, i), n_w_byte - offset_byte);
         }
     }
     return r8;
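The elimination step above never branches on the pivot value: gf256v_predicated_add folds row j into row i unconditionally, and the predicate only decides whether the XOR mask is all ones or all zeros. A stripped-down sketch of that idea on plain bytes; the real code derives the predicate from gf16_is_nonzero on packed nibbles, this is only the shape of the trick:

```c
#include <assert.h>
#include <stdint.h>

/* Constant-time "add row b into row a if predicate is 1", in the spirit of
 * gf256v_predicated_add in the hunk above, written out for one small row. */
static void predicated_add(uint8_t *a, uint8_t predicate, const uint8_t *b, unsigned n) {
    uint8_t mask = (uint8_t)(-predicate);   /* 0x00 or 0xff, no branch */
    for (unsigned i = 0; i < n; i++) {
        a[i] ^= (uint8_t)(b[i] & mask);
    }
}

int main(void) {
    uint8_t row_i[4] = {0x00, 0x12, 0x34, 0x56};    /* leading element is zero ... */
    uint8_t row_j[4] = {0x07, 0xaa, 0xbb, 0xcc};
    predicated_add(row_i, row_i[0] == 0, row_j, 4); /* ... so row_j gets folded in */
    assert(row_i[0] == 0x07);
    return 0;
}
```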
@@ -113,16 +152,15 @@ unsigned gf16mat_gauss_elim_ref(uint8_t *mat, unsigned h, unsigned w) {

 static
 unsigned gf16mat_solve_linear_eq_ref(uint8_t *sol, const uint8_t *inp_mat, const uint8_t *c_terms, unsigned n) {
     assert(64 >= n);
     uint8_t mat[64 * 33];
     unsigned n_byte = (n + 1) >> 1;
     for (unsigned i = 0; i < n; i++) {
         memcpy(mat + i * (n_byte + 1), inp_mat + i * n_byte, n_byte);
-        mat[i * (n_byte + 1) + n_byte] = gf16v_get_ele(c_terms, i);
+        mat[i * (n_byte + 1) + n_byte] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(c_terms, i);
     }
     unsigned r8 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16mat_gauss_elim(mat, n, n + 2); /// XXX: this function is ``defined'' in blas.h
     for (unsigned i = 0; i < n; i++) {
-        gf16v_set_ele(sol, i, mat[i * (n_byte + 1) + n_byte]);
+        PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_set_ele(sol, i, mat[i * (n_byte + 1) + n_byte]);
     }
     return r8;
 }
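For reference, the augmented-matrix layout used here: each of the n rows is n_byte packed coefficient bytes followed by one byte holding the constant term, and elimination is run over n + 2 nibble columns so that the extra column rounds up to a whole byte. A tiny arithmetic check of that bookkeeping; the value n = 3 is only an illustration:

```c
#include <assert.h>

int main(void) {
    unsigned n = 3;                         /* toy system with 3 unknowns over GF(16) */
    unsigned n_byte = (n + 1) >> 1;         /* 2 bytes hold the 3 packed coefficients */
    unsigned row_stride = n_byte + 1;       /* plus 1 byte for the constant term */
    assert(row_stride == 3);
    assert((n + 2 + 1) / 2 == row_stride);  /* width n+2 nibbles == 3 bytes per row */
    return 0;
}
```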
@@ -146,7 +184,7 @@ unsigned PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16mat_inv(uint8_t *inv_a, const uint8_
         uint8_t *ai = aa + i * 2 * n_w_byte;
         PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero(ai, 2 * n_w_byte);
         gf256v_add(ai, a + i * n_w_byte, n_w_byte);
-        gf16v_set_ele(ai + n_w_byte, i, 1);
+        PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_set_ele(ai + n_w_byte, i, 1);
     }
     unsigned r8 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16mat_gauss_elim(aa, H, 2 * H); /// XXX: would 2*H fail if H is odd ???
     gf16mat_submat(inv_a, H, H, aa, 2 * H, H);
@@ -166,20 +204,17 @@ unsigned gf256mat_gauss_elim_ref( uint8_t *mat, unsigned h, unsigned w ) {

         for (unsigned j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            // gf256v_predicated_add( ai + i , !gf256_is_nonzero(ai[i]) , aj + i , w-i );
-            gf256v_predicated_add( ai + skip_len_align4, !gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4 );
+            gf256v_predicated_add( ai + skip_len_align4, !PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4 );
         }
-        r8 &= gf256_is_nonzero(ai[i]);
+        r8 &= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
-        pivot = gf256_inv( pivot );
-        // gf256v_mul_scalar( ai + (i+1) , pivot , w - (i+1) );
+        pivot = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_inv( pivot );
         gf256v_mul_scalar( ai + skip_len_align4, pivot, w - skip_len_align4 );
         for (unsigned j = 0; j < h; j++) {
             if (i == j) {
                 continue;
             }
             uint8_t *aj = mat + w * j;
-            // gf256v_madd( aj + (i+1) , ai + (i+1) , aj[i] , w - (i+1) );
             gf256v_madd( aj + skip_len_align4, ai + skip_len_align4, aj[i], w - skip_len_align4 );
         }
     }
@@ -189,13 +224,12 @@ unsigned gf256mat_gauss_elim_ref( uint8_t *mat, unsigned h, unsigned w ) {

 static
 unsigned gf256mat_solve_linear_eq_ref( uint8_t *sol, const uint8_t *inp_mat, const uint8_t *c_terms, unsigned n ) {
     assert( 63 >= n );
     uint8_t mat[ 64 * 64 ];
     for (unsigned i = 0; i < n; i++) {
         memcpy( mat + i * (n + 1), inp_mat + i * n, n );
         mat[i * (n + 1) + n] = c_terms[i];
     }
-    unsigned r8 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256mat_gauss_elim( mat, n, n + 1 ); /// XXX: this function is ``defined'' in blas.h
+    unsigned r8 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256mat_gauss_elim( mat, n, n + 1 );
     for (unsigned i = 0; i < n; i++) {
         sol[i] = mat[i * (n + 1) + n];
     }
@@ -246,8 +280,6 @@ unsigned PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256mat_inv( uint8_t *inv_a, const uint



 void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16mat_prod(uint8_t *c, const uint8_t *matA, unsigned n_A_vec_byte, unsigned n_A_width, const uint8_t *b) {
     gf16mat_prod_impl( c, matA, n_A_vec_byte, n_A_width, b);
 }
@@ -274,3 +306,5 @@ unsigned PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256mat_gauss_elim( uint8_t *mat, unsig
 unsigned PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256mat_solve_linear_eq( uint8_t *sol, const uint8_t *inp_mat, const uint8_t *c_terms, unsigned n ) {
     return gf256mat_solve_linear_eq_impl( sol, inp_mat, c_terms, n );
 }

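As a usage sketch for the public wrapper, the call below multiplies a 4x4 GF(16) matrix, stored as 4 columns of 2 packed bytes each, by a packed vector. It assumes the example is built inside crypto_sign/rainbowIa-classic/clean/ and that the prototype is available via blas_comm.h, as elsewhere in this implementation; the numeric values are arbitrary:

```c
#include <stdint.h>
#include <stdio.h>

#include "blas_comm.h"   /* assumed location of the gf16mat_prod prototype */

int main(void) {
    /* 4x4 matrix over GF(16): 4 columns, each column is 4 nibbles = 2 bytes. */
    uint8_t matA[8] = {0x21, 0x43, 0x65, 0x87, 0xa9, 0xcb, 0xed, 0x0f};
    uint8_t b[2]    = {0x31, 0x00};   /* vector (1, 3, 0, 0) packed two per byte */
    uint8_t c[2]    = {0};

    /* c = matA * b: each column is scaled by its vector nibble and accumulated. */
    PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16mat_prod(c, matA, 2, 4, b);
    printf("%02x %02x\n", c[0], c[1]);
    return 0;
}
```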
@ -13,28 +13,7 @@
|
||||
/// @param[in] i - the index in the vector a.
|
||||
/// @return the value of the element.
|
||||
///
|
||||
static inline uint8_t gf16v_get_ele(const uint8_t *a, unsigned i) {
|
||||
uint8_t r = a[i >> 1];
|
||||
uint8_t r0 = r & 0xf;
|
||||
uint8_t r1 = r >> 4;
|
||||
uint8_t m = (uint8_t)(-((int8_t)i & 1));
|
||||
return (uint8_t)((r1 & m) | ((~m)&r0));
|
||||
}
|
||||
|
||||
/// @brief set an element for a GF(16) vector .
|
||||
///
|
||||
/// @param[in,out] a - the vector a.
|
||||
/// @param[in] i - the index in the vector a.
|
||||
/// @param[in] v - the value for the i-th element in vector a.
|
||||
/// @return the value of the element.
|
||||
///
|
||||
static inline uint8_t gf16v_set_ele(uint8_t *a, unsigned i, uint8_t v) {
|
||||
uint8_t m = (uint8_t) (0xf ^ (-((int8_t)i & 1))); /// 1--> 0xf0 , 0--> 0x0f
|
||||
uint8_t ai_remaining = (uint8_t) (a[i >> 1] & (~m)); /// erase
|
||||
a[i >> 1] = (uint8_t) (ai_remaining | (m & (v << 4)) | (m & v & 0xf)); /// set
|
||||
return v;
|
||||
}
|
||||
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(const uint8_t *a, unsigned i);
|
||||
|
||||
/// @brief get an element from GF(256) vector .
|
||||
///
|
||||
@ -42,25 +21,7 @@ static inline uint8_t gf16v_set_ele(uint8_t *a, unsigned i, uint8_t v) {
|
||||
/// @param[in] i - the index in the vector a.
|
||||
/// @return the value of the element.
|
||||
///
|
||||
static inline uint8_t gf256v_get_ele(const uint8_t *a, unsigned i) {
|
||||
return a[i];
|
||||
}
|
||||
|
||||
|
||||
/// @brief set an element for a GF(256) vector .
|
||||
///
|
||||
/// @param[in,out] a - the vector a.
|
||||
/// @param[in] i - the index in the vector a.
|
||||
/// @param[in] v - the value for the i-th element in vector a.
|
||||
/// @return the value of the element.
|
||||
///
|
||||
static inline uint8_t gf256v_set_ele(uint8_t *a, unsigned i, uint8_t v) {
|
||||
a[i] = v;
|
||||
return v;
|
||||
}
|
||||
|
||||
|
||||
/////////////////////////////////////
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele(const uint8_t *a, unsigned i);
|
||||
|
||||
|
||||
/// @brief set a vector to 0.
|
||||
|
crypto_sign/rainbowIa-classic/clean/blas_u32.c (new file, 165 lines)
@ -0,0 +1,165 @@
|
||||
#include "blas_u32.h"
|
||||
#include "gf.h"
|
||||
|
||||
//TODO remove the gf16v/gf256v if they are not used in the parameter sets
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_add_u32(uint8_t *accu_b, const uint8_t *a, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *b_u32 = (uint32_t *) accu_b;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
b_u32[i] ^= a_u32[i];
|
||||
}
|
||||
|
||||
a += (n_u32 << 2);
|
||||
accu_b += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_b[i] ^= a[i];
|
||||
}
|
||||
}
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add_u32(uint8_t *accu_b, uint8_t predicate, const uint8_t *a, unsigned _num_byte) {
|
||||
uint32_t pr_u32 = ((uint32_t) 0) - ((uint32_t) predicate);
|
||||
uint8_t pr_u8 = pr_u32 & 0xff;
|
||||
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *b_u32 = (uint32_t *) accu_b;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
b_u32[i] ^= (a_u32[i] & pr_u32);
|
||||
}
|
||||
|
||||
a += (n_u32 << 2);
|
||||
accu_b += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_b[i] ^= (a[i] & pr_u8);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_scalar_u32(uint8_t *a, uint8_t gf16_b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *a_u32 = (uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
a_u32[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(a_u32[i], gf16_b);
|
||||
}
|
||||
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(t.u32, gf16_b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
a[i] = t.u8[i];
|
||||
}
|
||||
}
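All of the _u32 routines in this new file share the same tail handling: full 32-bit words go through the word-wide GF kernel, and the last 1 to 3 bytes are staged through a zero-padded word. A standalone sketch of that pattern; demo_mul_word merely stands in for gf16v_mul_u32 and is not a real field multiply, and memcpy is used instead of the pointer casts in the original, which sidesteps alignment questions:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the word-wide kernel; any per-word transform shows the shape. */
static uint32_t demo_mul_word(uint32_t w) {
    return w ^ 0xffffffffu;
}

/* Whole words first, then the last 1-3 bytes staged through a padded word,
 * mirroring gf16v_mul_scalar_u32 above. */
static void demo_transform(uint8_t *a, unsigned num_byte) {
    unsigned n_u32 = num_byte >> 2;
    for (unsigned i = 0; i < n_u32; i++) {
        uint32_t w;
        memcpy(&w, a + 4 * i, 4);
        w = demo_mul_word(w);
        memcpy(a + 4 * i, &w, 4);
    }
    unsigned rem = num_byte & 3;
    if (rem) {
        uint8_t tail[4] = {0};
        uint32_t w;
        memcpy(tail, a + 4 * n_u32, rem);
        memcpy(&w, tail, 4);
        w = demo_mul_word(w);
        memcpy(tail, &w, 4);
        memcpy(a + 4 * n_u32, tail, rem);
    }
}

int main(void) {
    uint8_t buf[7] = {0, 1, 2, 3, 4, 5, 6};
    demo_transform(buf, 7);
    assert(buf[6] == (uint8_t)(6 ^ 0xff));
    return 0;
}
```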
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_scalar_u32(uint8_t *a, uint8_t b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *a_u32 = (uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
a_u32[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32(a_u32[i], b);
|
||||
}
|
||||
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32(t.u32, b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
a[i] = t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_madd_u32(uint8_t *accu_c, const uint8_t *a, uint8_t gf16_b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *c_u32 = (uint32_t *) accu_c;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
c_u32[i] ^= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(a_u32[i], gf16_b);
|
||||
}
|
||||
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
accu_c += (n_u32 << 2);
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(t.u32, gf16_b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_c[i] ^= t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_madd_u32(uint8_t *accu_c, const uint8_t *a, uint8_t gf256_b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *c_u32 = (uint32_t *) accu_c;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
c_u32[i] ^= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32(a_u32[i], gf256_b);
|
||||
}
|
||||
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
accu_c += (n_u32 << 2);
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32(t.u32, gf256_b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_c[i] ^= t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_dot_u32(const uint8_t *a, const uint8_t *b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
const uint32_t *b_u32 = (const uint32_t *) b;
|
||||
uint32_t r = 0;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
r ^= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32_u32(a_u32[i], b_u32[i]);
|
||||
}
|
||||
|
||||
unsigned rem = _num_byte & 3;
|
||||
if (rem) {
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} ta, tb;
|
||||
ta.u32 = 0;
|
||||
tb.u32 = 0;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
ta.u8[i] = a[(n_u32 << 2) + i];
|
||||
}
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
tb.u8[i] = b[(n_u32 << 2) + i];
|
||||
}
|
||||
r ^= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32_u32(ta.u32, tb.u32);
|
||||
}
|
||||
return PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_reduce_u32(r);
|
||||
}
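The final reduce step folds the eight GF(16) lanes of the 32-bit accumulator down to a single nibble; since addition in GF(16) is XOR, that is just XOR-folding halves. A minimal standalone version of the fold, equivalent in effect to gf256v_reduce_u32 followed by gf16v_reduce_u32:

```c
#include <assert.h>
#include <stdint.h>

static uint8_t fold_lanes(uint32_t r) {
    uint16_t r16 = (uint16_t)(r ^ (r >> 16));   /* 8 nibble lanes -> 4 */
    uint8_t  r8  = (uint8_t)(r16 ^ (r16 >> 8)); /* 4 lanes -> 2 */
    return (uint8_t)((r8 & 0xf) ^ (r8 >> 4));   /* 2 lanes -> 1 */
}

int main(void) {
    /* lanes 1,2,3,4,5,6,7,8 -> 1^2^3^4^5^6^7^8 = 8 */
    assert(fold_lanes(0x87654321u) == 0x8);
    return 0;
}
```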
|
@ -4,174 +4,16 @@
|
||||
/// @brief Inlined functions for implementing basic linear algebra functions for uint32 arch.
|
||||
///
|
||||
|
||||
|
||||
#include "gf16.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
static inline void _gf256v_add_u32(uint8_t *accu_b, const uint8_t *a, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *b_u32 = (uint32_t *) accu_b;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
b_u32[i] ^= a_u32[i];
|
||||
}
|
||||
|
||||
a += (n_u32 << 2);
|
||||
accu_b += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_b[i] ^= a[i];
|
||||
}
|
||||
}
|
||||
|
||||
static inline void _gf256v_predicated_add_u32(uint8_t *accu_b, uint8_t predicate, const uint8_t *a, unsigned _num_byte) {
|
||||
uint32_t pr_u32 = ((uint32_t) 0) - ((uint32_t) predicate);
|
||||
uint8_t pr_u8 = pr_u32 & 0xff;
|
||||
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *b_u32 = (uint32_t *) accu_b;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
b_u32[i] ^= (a_u32[i] & pr_u32);
|
||||
}
|
||||
|
||||
a += (n_u32 << 2);
|
||||
accu_b += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_b[i] ^= (a[i] & pr_u8);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static inline void _gf16v_mul_scalar_u32(uint8_t *a, uint8_t gf16_b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *a_u32 = (uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
a_u32[i] = gf16v_mul_u32(a_u32[i], gf16_b);
|
||||
}
|
||||
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = gf16v_mul_u32(t.u32, gf16_b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
a[i] = t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
static inline void _gf256v_mul_scalar_u32(uint8_t *a, uint8_t b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *a_u32 = (uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
a_u32[i] = gf256v_mul_u32(a_u32[i], b);
|
||||
}
|
||||
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = gf256v_mul_u32(t.u32, b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
a[i] = t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static inline void _gf16v_madd_u32(uint8_t *accu_c, const uint8_t *a, uint8_t gf16_b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *c_u32 = (uint32_t *) accu_c;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
c_u32[i] ^= gf16v_mul_u32(a_u32[i], gf16_b);
|
||||
}
|
||||
|
||||
// TODO: this will certainly not work on Big Endian
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
accu_c += (n_u32 << 2);
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = gf16v_mul_u32(t.u32, gf16_b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_c[i] ^= t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
static inline void _gf256v_madd_u32(uint8_t *accu_c, const uint8_t *a, uint8_t gf256_b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
uint32_t *c_u32 = (uint32_t *) accu_c;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
c_u32[i] ^= gf256v_mul_u32(a_u32[i], gf256_b);
|
||||
}
|
||||
|
||||
// TODO: this will certainly not work on Big Endian
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} t;
|
||||
t.u32 = 0;
|
||||
accu_c += (n_u32 << 2);
|
||||
a += (n_u32 << 2);
|
||||
unsigned rem = _num_byte & 3;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
t.u8[i] = a[i];
|
||||
}
|
||||
t.u32 = gf256v_mul_u32(t.u32, gf256_b);
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
accu_c[i] ^= t.u8[i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static inline uint8_t _gf16v_dot_u32(const uint8_t *a, const uint8_t *b, unsigned _num_byte) {
|
||||
unsigned n_u32 = _num_byte >> 2;
|
||||
const uint32_t *a_u32 = (const uint32_t *) a;
|
||||
const uint32_t *b_u32 = (const uint32_t *) b;
|
||||
uint32_t r = 0;
|
||||
for (unsigned i = 0; i < n_u32; i++) {
|
||||
r ^= gf16v_mul_u32_u32(a_u32[i], b_u32[i]);
|
||||
}
|
||||
|
||||
unsigned rem = _num_byte & 3;
|
||||
if (rem) {
|
||||
union tmp_32 {
|
||||
uint8_t u8[4];
|
||||
uint32_t u32;
|
||||
} ta, tb;
|
||||
ta.u32 = 0;
|
||||
tb.u32 = 0;
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
ta.u8[i] = a[(n_u32 << 2) + i];
|
||||
}
|
||||
for (unsigned i = 0; i < rem; i++) {
|
||||
tb.u8[i] = b[(n_u32 << 2) + i];
|
||||
}
|
||||
r ^= gf16v_mul_u32_u32(ta.u32, tb.u32);
|
||||
}
|
||||
return gf16v_reduce_u32(r);
|
||||
}
|
||||
//TODO remove the gf16v/gf256v if they are not used in the parameter sets
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_add_u32(uint8_t *accu_b, const uint8_t *a, unsigned _num_byte);
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add_u32(uint8_t *accu_b, uint8_t predicate, const uint8_t *a, unsigned _num_byte);
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_scalar_u32(uint8_t *a, uint8_t gf16_b, unsigned _num_byte);
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_scalar_u32(uint8_t *a, uint8_t b, unsigned _num_byte);
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_madd_u32(uint8_t *accu_c, const uint8_t *a, uint8_t gf16_b, unsigned _num_byte);
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_madd_u32(uint8_t *accu_c, const uint8_t *a, uint8_t gf256_b, unsigned _num_byte);
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_dot_u32(const uint8_t *a, const uint8_t *b, unsigned _num_byte);
|
||||
|
||||
#endif // _BLAS_U32_H_
|
||||
|
||||
|
@ -1,13 +1,9 @@
|
||||
#ifndef _GF16_H_
|
||||
#define _GF16_H_
|
||||
/// @file gf16.h
|
||||
/// @brief Library for arithmetics in GF(16) and GF(256)
|
||||
///
|
||||
#include "gf.h"
|
||||
|
||||
//TODO remove the gf16v/gf256v if they are not used in the parameter sets
|
||||
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
// gf4 := gf2[x]/x^2+x+1
|
||||
//// gf4 := gf2[x]/x^2+x+1
|
||||
static inline uint8_t gf4_mul_2(uint8_t a) {
|
||||
uint8_t r = (uint8_t) (a << 1);
|
||||
r ^= (uint8_t) ((a >> 1) * 7);
|
||||
@ -28,10 +24,6 @@ static inline uint8_t gf4_squ(uint8_t a) {
|
||||
return a ^ (a >> 1);
|
||||
}
|
||||
|
||||
static inline uint8_t gf4_inv(uint8_t a) {
|
||||
return a ^ (a >> 1);
|
||||
}
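A quick standalone check of the GF(4) helpers, where GF(4) = GF(2)[x]/(x^2 + x + 1) and the encodings 0..3 stand for 0, 1, x, x+1. Squaring doubles as inversion for nonzero elements because a^3 = 1. The & 3 mask is mine, added only so the demo is obviously range-safe; the original relies on the input being a 2-bit value:

```c
#include <assert.h>
#include <stdint.h>

static uint8_t gf4_mul_2(uint8_t a) {       /* multiply by x, as in the hunk above */
    return (uint8_t)(((a << 1) ^ ((a >> 1) * 7)) & 3);
}

static uint8_t gf4_squ(uint8_t a) {         /* squaring; also inversion for nonzero a */
    return (uint8_t)(a ^ (a >> 1));
}

int main(void) {
    assert(gf4_mul_2(2) == 3);   /* x * x     = x + 1 */
    assert(gf4_mul_2(3) == 1);   /* x * (x+1) = 1     */
    assert(gf4_squ(2) == 3);     /* x^2       = x + 1 */
    assert(gf4_squ(3) == 2);     /* (x+1)^2   = x     */
    return 0;
}
```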
|
||||
|
||||
static inline uint32_t gf4v_mul_2_u32(uint32_t a) {
|
||||
uint32_t bit0 = a & 0x55555555;
|
||||
uint32_t bit1 = a & 0xaaaaaaaa;
|
||||
@ -57,30 +49,14 @@ static inline uint32_t _gf4v_mul_u32_u32(uint32_t a0, uint32_t a1, uint32_t b0,
|
||||
return ((c1_ ^ c0) << 1) ^ c0 ^ c2;
|
||||
}
|
||||
|
||||
static inline uint32_t gf4v_mul_u32_u32(uint32_t a, uint32_t b) {
|
||||
uint32_t a0 = a & 0x55555555;
|
||||
uint32_t a1 = (a >> 1) & 0x55555555;
|
||||
uint32_t b0 = b & 0x55555555;
|
||||
uint32_t b1 = (b >> 1) & 0x55555555;
|
||||
|
||||
return _gf4v_mul_u32_u32(a0, a1, b0, b1);
|
||||
}
|
||||
|
||||
static inline uint32_t gf4v_squ_u32(uint32_t a) {
|
||||
uint32_t bit1 = a & 0xaaaaaaaa;
|
||||
return a ^ (bit1 >> 1);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static inline uint8_t gf16_is_nonzero(uint8_t a) {
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(uint8_t a) {
|
||||
unsigned a4 = a & 0xf;
|
||||
unsigned r = ((unsigned) 0) - a4;
|
||||
r >>= 4;
|
||||
return r & 1;
|
||||
}
|
||||
|
||||
// gf16 := gf4[y]/y^2+y+x
|
||||
//// gf16 := gf4[y]/y^2+y+x
|
||||
static inline uint8_t gf16_mul(uint8_t a, uint8_t b) {
|
||||
uint8_t a0 = a & 3;
|
||||
uint8_t a1 = (a >> 2);
|
||||
@ -101,7 +77,7 @@ static inline uint8_t gf16_squ(uint8_t a) {
|
||||
return (uint8_t)((a1 << 2) ^ a1squ_x2 ^ gf4_squ(a0));
|
||||
}
|
||||
|
||||
static inline uint8_t gf16_inv(uint8_t a) {
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_inv(uint8_t a) {
|
||||
uint8_t a2 = gf16_squ(a);
|
||||
uint8_t a4 = gf16_squ(a2);
|
||||
uint8_t a8 = gf16_squ(a4);
|
||||
@ -109,21 +85,15 @@ static inline uint8_t gf16_inv(uint8_t a) {
|
||||
return gf16_mul(a8, a6);
|
||||
}
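The inversion above is Fermat-style exponentiation: every nonzero a in GF(16) satisfies a^15 = 1, so

a^(-1) = a^14 = a^8 * a^4 * a^2.

The hunk hides where a6 comes from; presumably it is gf16_mul(a2, a4) in the elided lines, which would make the returned gf16_mul(a8, a6) exactly a^14.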
|
||||
|
||||
static inline uint8_t gf16_mul_4(uint8_t a) {
|
||||
return (uint8_t)((((a << 2) ^ a) & (8 + 4)) ^ gf4_mul_2(a >> 2));
|
||||
}
|
||||
|
||||
static inline uint8_t gf16_mul_8(uint8_t a) {
|
||||
uint8_t a0 = a & 3;
|
||||
uint8_t a1 = a >> 2;
|
||||
return (uint8_t)(gf4_mul_2(a0 ^ a1) << 2 | gf4_mul_3(a1));
|
||||
}
|
||||
|
||||
////////////
|
||||
|
||||
// gf16 := gf4[y]/y^2+y+x
|
||||
|
||||
static inline uint32_t gf16v_mul_u32(uint32_t a, uint8_t b) {
|
||||
uint32_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(uint32_t a, uint8_t b) {
|
||||
uint32_t axb0 = gf4v_mul_u32(a, b);
|
||||
uint32_t axb1 = gf4v_mul_u32(a, b >> 2);
|
||||
uint32_t a0b1 = (axb1 << 2) & 0xcccccccc;
|
||||
@ -142,12 +112,11 @@ static inline uint32_t _gf16v_mul_u32_u32(uint32_t a0, uint32_t a1, uint32_t a2,
|
||||
uint32_t c2_1_ = (a2 ^ a3) & (b2 ^ b3);
|
||||
uint32_t c2_r0 = c2_0 ^ c2_2;
|
||||
uint32_t c2_r1 = c2_0 ^ c2_1_;
|
||||
//uint32_t c2 = c2_r0^(c2_r1<<1);
|
||||
// GF(4) x2: (bit0<<1)^bit1^(bit1>>1);
|
||||
return ((c1_ ^ c0) << 2) ^ c0 ^ (c2_r0 << 1) ^ c2_r1 ^ (c2_r1 << 1);
|
||||
}
|
||||
|
||||
static inline uint32_t gf16v_mul_u32_u32(uint32_t a, uint32_t b) {
|
||||
uint32_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32_u32(uint32_t a, uint32_t b) {
|
||||
uint32_t a0 = a & 0x11111111;
|
||||
uint32_t a1 = (a >> 1) & 0x11111111;
|
||||
uint32_t a2 = (a >> 2) & 0x11111111;
|
||||
@ -168,24 +137,18 @@ static inline uint8_t gf256v_reduce_u32(uint32_t a) {
|
||||
return rr[0] ^ rr[1];
|
||||
}
|
||||
|
||||
static inline uint8_t gf16v_reduce_u32(uint32_t a) {
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_reduce_u32(uint32_t a) {
|
||||
uint8_t r256 = gf256v_reduce_u32(a);
|
||||
return (uint8_t)((r256 & 0xf) ^ (r256 >> 4));
|
||||
}
|
||||
|
||||
static inline uint32_t gf16v_squ_u32(uint32_t a) {
|
||||
uint32_t a2 = gf4v_squ_u32(a);
|
||||
|
||||
return a2 ^ gf4v_mul_2_u32((a2 >> 2) & 0x33333333);
|
||||
}
|
||||
|
||||
static inline uint32_t gf16v_mul_8_u32(uint32_t a) {
|
||||
uint32_t a1 = a & 0xcccccccc;
|
||||
uint32_t a0 = (a << 2) & 0xcccccccc;
|
||||
return gf4v_mul_2_u32(a0 ^ a1) | gf4v_mul_3_u32(a1 >> 2);
|
||||
}
|
||||
|
||||
static inline uint8_t gf256_is_nonzero(uint8_t a) {
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_is_nonzero(uint8_t a) {
|
||||
unsigned a8 = a;
|
||||
unsigned r = ((unsigned) 0) - a8;
|
||||
r >>= 8;
|
||||
@ -205,15 +168,6 @@ static inline uint8_t gf256_mul(uint8_t a, uint8_t b) {
|
||||
return (uint8_t)((a0b1_a1b0 ^ a1b1) << 4 ^ a0b0 ^ a1b1_x8);
|
||||
}
|
||||
|
||||
static inline uint8_t gf256_mul_gf16(uint8_t a, uint8_t gf16_b) {
|
||||
uint8_t a0 = a & 15;
|
||||
uint8_t a1 = (a >> 4);
|
||||
uint8_t b0 = gf16_b & 15;
|
||||
uint8_t a0b0 = gf16_mul(a0, b0);
|
||||
uint8_t a1b0 = gf16_mul(a1, b0);
|
||||
return (uint8_t) (a0b0 ^ (a1b0 << 4));
|
||||
}
|
||||
|
||||
static inline uint8_t gf256_squ(uint8_t a) {
|
||||
uint8_t a0 = a & 15;
|
||||
uint8_t a1 = (a >> 4);
|
||||
@ -222,7 +176,7 @@ static inline uint8_t gf256_squ(uint8_t a) {
|
||||
return (uint8_t)((a1 << 4) ^ a1squ_x8 ^ gf16_squ(a0));
|
||||
}
|
||||
|
||||
static inline uint8_t gf256_inv(uint8_t a) {
|
||||
uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_inv(uint8_t a) {
|
||||
// 128+64+32+16+8+4+2 = 254
|
||||
uint8_t a2 = gf256_squ(a);
|
||||
uint8_t a4 = gf256_squ(a2);
|
||||
@ -237,26 +191,12 @@ static inline uint8_t gf256_inv(uint8_t a) {
|
||||
return gf256_mul(a2, a128_);
|
||||
}
|
||||
|
||||
static inline uint32_t gf256v_mul_u32(uint32_t a, uint8_t b) {
|
||||
uint32_t axb0 = gf16v_mul_u32(a, b);
|
||||
uint32_t axb1 = gf16v_mul_u32(a, b >> 4);
|
||||
uint32_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32(uint32_t a, uint8_t b) {
|
||||
uint32_t axb0 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(a, b);
|
||||
uint32_t axb1 = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(a, b >> 4);
|
||||
uint32_t a0b1 = (axb1 << 4) & 0xf0f0f0f0;
|
||||
uint32_t a1b1 = axb1 & 0xf0f0f0f0;
|
||||
uint32_t a1b1_4 = a1b1 >> 4;
|
||||
|
||||
return axb0 ^ a0b1 ^ a1b1 ^ gf16v_mul_8_u32(a1b1_4);
|
||||
}
|
||||
|
||||
static inline uint32_t gf256v_squ_u32(uint32_t a) {
|
||||
uint32_t a2 = gf16v_squ_u32(a);
|
||||
uint32_t ar = (a2 >> 4) & 0x0f0f0f0f;
|
||||
|
||||
return a2 ^ gf16v_mul_8_u32(ar);
|
||||
}
|
||||
|
||||
static inline uint32_t gf256v_mul_gf16_u32(uint32_t a, uint8_t gf16_b) {
|
||||
return gf16v_mul_u32(a, gf16_b);
|
||||
}
|
||||
|
||||
#endif // _GF16_H_
|
||||
|
crypto_sign/rainbowIa-classic/clean/gf.h (new file, 19 lines)
@@ -0,0 +1,19 @@
+#ifndef _GF16_H_
+#define _GF16_H_
+
+#include <stdint.h>
+
+/// @file gf16.h
+/// @brief Library for arithmetics in GF(16) and GF(256)
+///
+
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(uint8_t a);
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_inv(uint8_t a);
+uint32_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32(uint32_t a, uint8_t b);
+uint32_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_mul_u32_u32(uint32_t a, uint32_t b);
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_reduce_u32(uint32_t a);
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_is_nonzero(uint8_t a);
+uint8_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_inv(uint8_t a);
+uint32_t PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32(uint32_t a, uint8_t b);
+
+#endif // _GF16_H_
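A small usage sketch for the new header: it assumes the declarations are backed by a gf.c compiled into the gf.o object that the Makefile change above adds, and checks one multiplicative inverse in GF(256) plus the nonzero predicate. The constant 0x53 is arbitrary:

```c
#include <assert.h>
#include <stdint.h>

#include "gf.h"   /* the new header from this commit */

int main(void) {
    /* Scalar inverse exported by gf.h: a * a^-1 == 1 in GF(256). */
    uint8_t a = 0x53;
    uint8_t inv = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256_inv(a);

    /* gf256v_mul_u32 multiplies four packed GF(256) lanes by the scalar inv;
     * only the low lane is populated here, so the low byte must come out as 1. */
    uint32_t prod = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_mul_u32((uint32_t)a, inv);
    assert((prod & 0xff) == 0x01);

    assert(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(0x0) == 0);
    assert(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(0x7) == 1);
    return 0;
}
```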
@@ -1,12 +0,0 @@
-#ifndef _HASH_LEN_CONFIG_H_
-#define _HASH_LEN_CONFIG_H_
-/// @file hash_len_config.h
-/// @brief defining the lenght of outputs of the internel hash functions.
-///
-
-
-// TODO: remove this file
-/// defining the lenght of outputs of the internel hash functions.
-//#define _HASH_LEN (32)
-
-#endif
@ -12,13 +12,42 @@
|
||||
|
||||
//////////////// Section: triangle matrix <-> rectangle matrix ///////////////////////////////////
|
||||
|
||||
///
|
||||
/// @brief Calculate the corresponding index in an array for an upper-triangle(UT) matrix.
|
||||
///
|
||||
/// @param[in] i_row - the i-th row in an upper-triangle matrix.
|
||||
/// @param[in] j_col - the j-th column in an upper-triangle matrix.
|
||||
/// @param[in] dim - the dimension of the upper-triangle matrix, i.e., an dim x dim matrix.
|
||||
/// @return the corresponding index in an array storage.
|
||||
///
|
||||
unsigned PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat( unsigned i_row, unsigned j_col, unsigned dim ) {
|
||||
return (dim + dim - i_row + 1 ) * i_row / 2 + j_col - i_row;
|
||||
}
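A worked check of the triangular indexing formula, with the helper reproduced locally so the snippet compiles on its own; dim = 4 is only an illustration:

```c
#include <assert.h>

/* Same formula as PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat, reproduced here. */
static unsigned idx_of_trimat(unsigned i_row, unsigned j_col, unsigned dim) {
    return (dim + dim - i_row + 1) * i_row / 2 + j_col - i_row;
}

int main(void) {
    /* For dim = 4 the upper triangle is stored row by row:
     * row 0 -> indices 0..3, row 1 -> 4..6, row 2 -> 7..8, row 3 -> 9. */
    assert(idx_of_trimat(0, 3, 4) == 3);
    assert(idx_of_trimat(1, 2, 4) == 5);
    assert(idx_of_trimat(2, 3, 4) == 8);
    assert(idx_of_trimat(3, 3, 4) == 9);
    return 0;
}
```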
|
||||
|
||||
///
|
||||
/// @brief Calculate the corresponding index in an array for an upper-triangle or lower-triangle matrix.
|
||||
///
|
||||
/// @param[in] i_row - the i-th row in a triangle matrix.
|
||||
/// @param[in] j_col - the j-th column in a triangle matrix.
|
||||
/// @param[in] dim - the dimension of the triangle matrix, i.e., an dim x dim matrix.
|
||||
/// @return the corresponding index in an array storage.
|
||||
///
|
||||
static inline
|
||||
unsigned idx_of_2trimat( unsigned i_row, unsigned j_col, unsigned n_var ) {
|
||||
if ( i_row > j_col ) {
|
||||
return PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(j_col, i_row, n_var);
|
||||
}
|
||||
return PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i_row, j_col, n_var);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_UpperTrianglize( unsigned char *btriC, const unsigned char *bA, unsigned Awidth, unsigned size_batch ) {
|
||||
unsigned char *runningC = btriC;
|
||||
unsigned Aheight = Awidth;
|
||||
for (unsigned i = 0; i < Aheight; i++) {
|
||||
for (unsigned j = 0; j < i; j++) {
|
||||
unsigned idx = idx_of_trimat(j, i, Aheight);
|
||||
unsigned idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(j, i, Aheight);
|
||||
gf256v_add( btriC + idx * size_batch, bA + size_batch * (i * Awidth + j), size_batch );
|
||||
}
|
||||
gf256v_add( runningC, bA + size_batch * (i * Awidth + i), size_batch * (Aheight - i) );
|
||||
@ -43,7 +72,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_trimat_madd_gf16( unsigned char *bC, c
|
||||
if (k < i) {
|
||||
continue;
|
||||
}
|
||||
gf16v_madd( bC, & btriA[ (k - i)*size_batch ], gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf16v_madd( bC, & btriA[ (k - i)*size_batch ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -61,7 +90,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_trimat_madd_gf256( unsigned char *bC,
|
||||
if (k < i) {
|
||||
continue;
|
||||
}
|
||||
gf256v_madd( bC, & btriA[ (k - i)*size_batch ], gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf256v_madd( bC, & btriA[ (k - i)*size_batch ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -82,7 +111,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_trimatTr_madd_gf16( unsigned char *bC,
|
||||
if (i < k) {
|
||||
continue;
|
||||
}
|
||||
gf16v_madd( bC, & btriA[ size_batch * (idx_of_trimat(k, i, Aheight)) ], gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf16v_madd( bC, & btriA[ size_batch * (PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(k, i, Aheight)) ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -98,7 +127,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_trimatTr_madd_gf256( unsigned char *bC
|
||||
if (i < k) {
|
||||
continue;
|
||||
}
|
||||
gf256v_madd( bC, & btriA[ size_batch * (idx_of_trimat(k, i, Aheight)) ], gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf256v_madd( bC, & btriA[ size_batch * (PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(k, i, Aheight)) ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -117,7 +146,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_2trimat_madd_gf16( unsigned char *bC,
|
||||
if (i == k) {
|
||||
continue;
|
||||
}
|
||||
gf16v_madd( bC, & btriA[ size_batch * (idx_of_2trimat(i, k, Aheight)) ], gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf16v_madd( bC, & btriA[ size_batch * (idx_of_2trimat(i, k, Aheight)) ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -133,7 +162,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_2trimat_madd_gf256( unsigned char *bC,
|
||||
if (i == k) {
|
||||
continue;
|
||||
}
|
||||
gf256v_madd( bC, & btriA[ size_batch * (idx_of_2trimat(i, k, Aheight)) ], gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf256v_madd( bC, & btriA[ size_batch * (idx_of_2trimat(i, k, Aheight)) ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -149,7 +178,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_matTr_madd_gf16( unsigned char *bC, co
|
||||
unsigned Atr_width = Aheight;
|
||||
for (unsigned i = 0; i < Atr_height; i++) {
|
||||
for (unsigned j = 0; j < Atr_width; j++) {
|
||||
gf16v_madd( bC, & bB[ j * Bwidth * size_batch ], gf16v_get_ele( &A_to_tr[size_Acolvec * i], j ), size_batch * Bwidth );
|
||||
gf16v_madd( bC, & bB[ j * Bwidth * size_batch ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( &A_to_tr[size_Acolvec * i], j ), size_batch * Bwidth );
|
||||
}
|
||||
bC += size_batch * Bwidth;
|
||||
}
|
||||
@ -161,7 +190,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_matTr_madd_gf256( unsigned char *bC, c
|
||||
unsigned Atr_width = Aheight;
|
||||
for (unsigned i = 0; i < Atr_height; i++) {
|
||||
for (unsigned j = 0; j < Atr_width; j++) {
|
||||
gf256v_madd( bC, & bB[ j * Bwidth * size_batch ], gf256v_get_ele( &A_to_tr[size_Acolvec * i], j ), size_batch * Bwidth );
|
||||
gf256v_madd( bC, & bB[ j * Bwidth * size_batch ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( &A_to_tr[size_Acolvec * i], j ), size_batch * Bwidth );
|
||||
}
|
||||
bC += size_batch * Bwidth;
|
||||
}
|
||||
@ -177,7 +206,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_bmatTr_madd_gf16( unsigned char *bC, c
|
||||
for (unsigned i = 0; i < Aheight; i++) {
|
||||
for (unsigned j = 0; j < Bwidth; j++) {
|
||||
for (unsigned k = 0; k < Bheight; k++) {
|
||||
gf16v_madd( bC, & bA[ size_batch * (i + k * Aheight) ], gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf16v_madd( bC, & bA[ size_batch * (i + k * Aheight) ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -191,7 +220,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_bmatTr_madd_gf256( unsigned char *bC,
|
||||
for (unsigned i = 0; i < Aheight; i++) {
|
||||
for (unsigned j = 0; j < Bwidth; j++) {
|
||||
for (unsigned k = 0; k < Bheight; k++) {
|
||||
gf256v_madd( bC, & bA[ size_batch * (i + k * Aheight) ], gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf256v_madd( bC, & bA[ size_batch * (i + k * Aheight) ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -208,7 +237,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_mat_madd_gf16( unsigned char *bC, cons
|
||||
for (unsigned i = 0; i < Aheight; i++) {
|
||||
for (unsigned j = 0; j < Bwidth; j++) {
|
||||
for (unsigned k = 0; k < Bheight; k++) {
|
||||
gf16v_madd( bC, & bA[ k * size_batch ], gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf16v_madd( bC, & bA[ k * size_batch ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -222,7 +251,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_mat_madd_gf256( unsigned char *bC, con
|
||||
for (unsigned i = 0; i < Aheight; i++) {
|
||||
for (unsigned j = 0; j < Bwidth; j++) {
|
||||
for (unsigned k = 0; k < Bheight; k++) {
|
||||
gf256v_madd( bC, & bA[ k * size_batch ], gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
gf256v_madd( bC, & bA[ k * size_batch ], PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( &B[j * size_Bcolvec], k ), size_batch );
|
||||
}
|
||||
bC += size_batch;
|
||||
}
|
||||
@ -239,14 +268,11 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_mat_madd_gf256( unsigned char *bC, con
|
||||
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_trimat_eval_gf16( unsigned char *y, const unsigned char *trimat, const unsigned char *x, unsigned dim, unsigned size_batch ) {
|
||||
///
|
||||
/// assert( dim <= 128 );
|
||||
/// assert( size_batch <= 128 );
|
||||
unsigned char tmp[256];
|
||||
|
||||
unsigned char _x[256];
|
||||
for (unsigned i = 0; i < dim; i++) {
|
||||
_x[i] = gf16v_get_ele( x, i );
|
||||
_x[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( x, i );
|
||||
}
|
||||
|
||||
PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero( y, size_batch );
|
||||
@ -261,14 +287,11 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_trimat_eval_gf16( unsigned char *
|
||||
}
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_trimat_eval_gf256( unsigned char *y, const unsigned char *trimat, const unsigned char *x, unsigned dim, unsigned size_batch ) {
|
||||
///
|
||||
/// assert( dim <= 256 );
|
||||
/// assert( size_batch <= 256 );
|
||||
unsigned char tmp[256];
|
||||
|
||||
unsigned char _x[256];
|
||||
for (unsigned i = 0; i < dim; i++) {
|
||||
_x[i] = gf256v_get_ele( x, i );
|
||||
_x[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( x, i );
|
||||
}
|
||||
|
||||
PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero( y, size_batch );
|
||||
@ -290,19 +313,15 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_trimat_eval_gf256( unsigned char
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_recmat_eval_gf16( unsigned char *z, const unsigned char *y, unsigned dim_y, const unsigned char *mat,
|
||||
const unsigned char *x, unsigned dim_x, unsigned size_batch ) {
|
||||
///
|
||||
/// assert( dim_x <= 128 );
|
||||
/// assert( dim_y <= 128 );
|
||||
/// assert( size_batch <= 128 );
|
||||
unsigned char tmp[128];
|
||||
|
||||
unsigned char _x[128];
|
||||
for (unsigned i = 0; i < dim_x; i++) {
|
||||
_x[i] = gf16v_get_ele( x, i );
|
||||
_x[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( x, i );
|
||||
}
|
||||
unsigned char _y[128];
|
||||
for (unsigned i = 0; i < dim_y; i++) {
|
||||
_y[i] = gf16v_get_ele( y, i );
|
||||
_y[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele( y, i );
|
||||
}
|
||||
|
||||
PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero( z, size_batch );
|
||||
@ -319,19 +338,15 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_recmat_eval_gf16( unsigned char *
|
||||
|
||||
void PQCLEAN_RAINBOWIACLASSIC_CLEAN_batch_quad_recmat_eval_gf256( unsigned char *z, const unsigned char *y, unsigned dim_y, const unsigned char *mat,
|
||||
const unsigned char *x, unsigned dim_x, unsigned size_batch ) {
|
||||
///
|
||||
/// assert( dim_x <= 128 );
|
||||
/// assert( dim_y <= 128 );
|
||||
/// assert( size_batch <= 128 );
|
||||
unsigned char tmp[128];
|
||||
|
||||
unsigned char _x[128];
|
||||
for (unsigned i = 0; i < dim_x; i++) {
|
||||
_x[i] = gf256v_get_ele( x, i );
|
||||
_x[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( x, i );
|
||||
}
|
||||
unsigned char _y[128];
|
||||
for (unsigned i = 0; i < dim_y; i++) {
|
||||
_y[i] = gf256v_get_ele( y, i );
|
||||
_y[i] = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_get_ele( y, i );
|
||||
}
|
||||
|
||||
PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_set_zero( z, size_batch );
|
||||
|
@ -5,15 +5,6 @@
|
||||
///
|
||||
///
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
/// Librarys for batched matrix operations.
|
||||
/// A batched matrix is a matrix which each element of the matrix
|
||||
/// contains size_batch GF elements.
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
|
||||
|
||||
//////////////// Section: triangle matrix <-> rectangle matrix ///////////////////////////////////
|
||||
|
||||
|
||||
@ -25,27 +16,7 @@
|
||||
/// @param[in] dim - the dimension of the upper-triangle matrix, i.e., an dim x dim matrix.
|
||||
/// @return the corresponding index in an array storage.
|
||||
///
|
||||
static inline
|
||||
unsigned idx_of_trimat( unsigned i_row, unsigned j_col, unsigned dim ) {
|
||||
return (dim + dim - i_row + 1 ) * i_row / 2 + j_col - i_row;
|
||||
}
|
||||
|
||||
///
|
||||
/// @brief Calculate the corresponding index in an array for an upper-triangle or lower-triangle matrix.
|
||||
///
|
||||
/// @param[in] i_row - the i-th row in a triangle matrix.
|
||||
/// @param[in] j_col - the j-th column in a triangle matrix.
|
||||
/// @param[in] dim - the dimension of the triangle matrix, i.e., an dim x dim matrix.
|
||||
/// @return the corresponding index in an array storage.
|
||||
///
|
||||
static inline
|
||||
unsigned idx_of_2trimat( unsigned i_row, unsigned j_col, unsigned n_var ) {
|
||||
if ( i_row > j_col ) {
|
||||
return idx_of_trimat(j_col, i_row, n_var);
|
||||
}
|
||||
return idx_of_trimat(i_row, j_col, n_var);
|
||||
}
|
||||
|
||||
unsigned PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat( unsigned i_row, unsigned j_col, unsigned dim );
|
||||
|
||||
///
|
||||
/// @brief Upper trianglize a rectangle matrix to the corresponding upper-trangle matrix.
|
||||
|
@ -23,7 +23,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_extcpk_to_pk( pk_t *pk, const ext_cpk_t *cpk
|
||||
const unsigned char *idx_l2 = cpk->l2_Q1;
|
||||
for (unsigned i = 0; i < _V1; i++) {
|
||||
for (unsigned j = i; j < _V1; j++) {
|
||||
unsigned pub_idx = idx_of_trimat(i, j, _PUB_N);
|
||||
unsigned pub_idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i, j, _PUB_N);
|
||||
memcpy( & pk->pk[ _PUB_M_BYTE * pub_idx ], idx_l1, _O1_BYTE );
|
||||
memcpy( (&pk->pk[ _PUB_M_BYTE * pub_idx ]) + _O1_BYTE, idx_l2, _O2_BYTE );
|
||||
idx_l1 += _O1_BYTE;
|
||||
@ -34,7 +34,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_extcpk_to_pk( pk_t *pk, const ext_cpk_t *cpk
|
||||
idx_l2 = cpk->l2_Q2;
|
||||
for (unsigned i = 0; i < _V1; i++) {
|
||||
for (unsigned j = _V1; j < _V1 + _O1; j++) {
|
||||
unsigned pub_idx = idx_of_trimat(i, j, _PUB_N);
|
||||
unsigned pub_idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i, j, _PUB_N);
|
||||
memcpy( & pk->pk[ _PUB_M_BYTE * pub_idx ], idx_l1, _O1_BYTE );
|
||||
memcpy( (&pk->pk[ _PUB_M_BYTE * pub_idx ]) + _O1_BYTE, idx_l2, _O2_BYTE );
|
||||
idx_l1 += _O1_BYTE;
|
||||
@ -45,7 +45,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_extcpk_to_pk( pk_t *pk, const ext_cpk_t *cpk
|
||||
idx_l2 = cpk->l2_Q3;
|
||||
for (unsigned i = 0; i < _V1; i++) {
|
||||
for (unsigned j = _V1 + _O1; j < _PUB_N; j++) {
|
||||
unsigned pub_idx = idx_of_trimat(i, j, _PUB_N);
|
||||
unsigned pub_idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i, j, _PUB_N);
|
||||
memcpy( & pk->pk[ _PUB_M_BYTE * pub_idx ], idx_l1, _O1_BYTE );
|
||||
memcpy( (&pk->pk[ _PUB_M_BYTE * pub_idx ]) + _O1_BYTE, idx_l2, _O2_BYTE );
|
||||
idx_l1 += _O1_BYTE;
|
||||
@ -56,7 +56,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_extcpk_to_pk( pk_t *pk, const ext_cpk_t *cpk
|
||||
idx_l2 = cpk->l2_Q5;
|
||||
for (unsigned i = _V1; i < _V1 + _O1; i++) {
|
||||
for (unsigned j = i; j < _V1 + _O1; j++) {
|
||||
unsigned pub_idx = idx_of_trimat(i, j, _PUB_N);
|
||||
unsigned pub_idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i, j, _PUB_N);
|
||||
memcpy( & pk->pk[ _PUB_M_BYTE * pub_idx ], idx_l1, _O1_BYTE );
|
||||
memcpy( (&pk->pk[ _PUB_M_BYTE * pub_idx ]) + _O1_BYTE, idx_l2, _O2_BYTE );
|
||||
idx_l1 += _O1_BYTE;
|
||||
@ -67,7 +67,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_extcpk_to_pk( pk_t *pk, const ext_cpk_t *cpk
|
||||
idx_l2 = cpk->l2_Q6;
|
||||
for (unsigned i = _V1; i < _V1 + _O1; i++) {
|
||||
for (unsigned j = _V1 + _O1; j < _PUB_N; j++) {
|
||||
unsigned pub_idx = idx_of_trimat(i, j, _PUB_N);
|
||||
unsigned pub_idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i, j, _PUB_N);
|
||||
memcpy( & pk->pk[ _PUB_M_BYTE * pub_idx ], idx_l1, _O1_BYTE );
|
||||
memcpy( (&pk->pk[ _PUB_M_BYTE * pub_idx ]) + _O1_BYTE, idx_l2, _O2_BYTE );
|
||||
idx_l1 += _O1_BYTE;
|
||||
@ -78,7 +78,7 @@ void PQCLEAN_RAINBOWIACLASSIC_CLEAN_extcpk_to_pk( pk_t *pk, const ext_cpk_t *cpk
|
||||
idx_l2 = cpk->l2_Q9;
|
||||
for (unsigned i = _V1 + _O1; i < _PUB_N; i++) {
|
||||
for (unsigned j = i; j < _PUB_N; j++) {
|
||||
unsigned pub_idx = idx_of_trimat(i, j, _PUB_N);
|
||||
unsigned pub_idx = PQCLEAN_RAINBOWIACLASSIC_CLEAN_idx_of_trimat(i, j, _PUB_N);
|
||||
memcpy( & pk->pk[ _PUB_M_BYTE * pub_idx ], idx_l1, _O1_BYTE );
|
||||
memcpy( (&pk->pk[ _PUB_M_BYTE * pub_idx ]) + _O1_BYTE, idx_l2, _O2_BYTE );
|
||||
idx_l1 += _O1_BYTE;
|
||||
|
@@ -3,7 +3,6 @@
 ///
 ///

-#include "hash_len_config.h"
 #include "rainbow_config.h"
 #include "sha2.h"
 #include "utils_hash.h"

@@ -3,10 +3,7 @@
 /// @file utils_hash.h
 /// @brief the interface for adapting hash functions.
 ///
 ///

-// for the definition of _HASH_LEN.
-#include "hash_len_config.h"
 #include <stddef.h>

 int PQCLEAN_RAINBOWIACLASSIC_CLEAN_hash_msg( unsigned char *digest, size_t len_digest, const unsigned char *m, size_t mlen );
