@@ -14,4 +14,13 @@ principal-submitters: | |||
- Frederik Vercauteren | |||
implementations: | |||
- name: clean | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/14ede83f1ff3bcc41f0464543542366c68b55871 | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/509cc5ec3a7e12a751ccdd2ef5bd6e54e00bd350 | |||
- name: avx2 | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/509cc5ec3a7e12a751ccdd2ef5bd6e54e00bd350 | |||
supported_platforms: | |||
- architecture: x86_64 | |||
operating_systems: | |||
- Linux | |||
- Darwin | |||
required_flags: | |||
- avx2 |
@@ -0,0 +1 @@ | |||
Public Domain |
@@ -0,0 +1,22 @@ | |||
# This Makefile can be used with GNU Make or BSD Make

LIB=libfiresaber_avx2.a
HEADERS=api.h cbd.h kem.h pack_unpack.h poly.h SABER_indcpa.h SABER_params.h verify.h
OBJECTS=cbd.o kem.o pack_unpack.o SABER_indcpa.o verify.o

CFLAGS=-O3 -mavx2 -Wall -Wextra -Wpedantic -Wvla -Werror -Wredundant-decls -Wmissing-prototypes -std=c99 -I../../../common $(EXTRAFLAGS)

all: $(LIB)

%.o: %.s $(HEADERS)
	$(AS) -o $@ $<

%.o: %.c $(HEADERS)
	$(CC) $(CFLAGS) -c -o $@ $<

$(LIB): $(OBJECTS)
	$(AR) -r $@ $(OBJECTS)

clean:
	$(RM) $(OBJECTS)
	$(RM) $(LIB)
@@ -0,0 +1,416 @@ | |||
#include "./polymul/toom-cook_4way.c" | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "randombytes.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
#include <string.h> | |||
#define h1 4 //2^(EQ-EP-1) | |||
#define h2 ( (1<<(SABER_EP-2)) - (1<<(SABER_EP-SABER_ET-1)) + (1<<(SABER_EQ-SABER_EP-1)) ) | |||
static void POL2MSG(uint8_t *message_dec, const uint16_t *message_dec_unpacked) { | |||
int32_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
message_dec[j] = 0; | |||
for (i = 0; i < 8; i++) { | |||
message_dec[j] = message_dec[j] | (message_dec_unpacked[j * 8 + i] << i); | |||
} | |||
} | |||
} | |||
/*----------------------------------------------------------------------------------- | |||
This routine generates a=[Matrix K x K] of 256-coefficient polynomials | |||
-------------------------------------------------------------------------------------*/ | |||
static void GenMatrix(polyvec *a, const uint8_t *seed) { | |||
uint8_t buf[SABER_K * SABER_K * 13 * SABER_N / 8]; | |||
uint16_t temp_ar[SABER_N]; | |||
int i, j, k; | |||
uint16_t mod = (SABER_Q - 1); | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_FIRESABER_AVX2_BS2POLq(temp_ar, buf + (i * SABER_K + j) * 13 * SABER_N / 8); | |||
for (k = 0; k < SABER_N; k++) { | |||
a[i].vec[j].coeffs[k] = temp_ar[k] & mod;
} | |||
} | |||
} | |||
} | |||
static void GenSecret(uint16_t r[SABER_K][SABER_N], const uint8_t *seed) { | |||
uint32_t i; | |||
uint8_t buf[SABER_MU * SABER_N * SABER_K / 8]; | |||
shake128(buf, sizeof(buf), seed, SABER_NOISESEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
PQCLEAN_FIRESABER_AVX2_cbd(r[i], buf + i * SABER_MU * SABER_N / 8); | |||
} | |||
} | |||
//********************************matrix-vector mul routines***************************************************** | |||
static void matrix_vector_mul(__m256i a1_avx_combined[NUM_POLY][NUM_POLY][AVX_N1], __m256i b_bucket[NUM_POLY][SCHB_N * 4], __m256i res_avx[NUM_POLY][AVX_N1], int isTranspose) { | |||
int64_t i, j; | |||
__m256i c_bucket[2 * SCM_SIZE * 4]; //Holds results for 9 Karatsuba at a time | |||
for (i = 0; i < NUM_POLY; i++) { | |||
for (j = 0; j < NUM_POLY; j++) { | |||
if (isTranspose == 0) { | |||
toom_cook_4way_avx_n1(a1_avx_combined[i][j], b_bucket[j], c_bucket, j); | |||
} else { | |||
toom_cook_4way_avx_n1(a1_avx_combined[j][i], b_bucket[j], c_bucket, j); | |||
} | |||
} | |||
TC_interpol(c_bucket, res_avx[i]); | |||
} | |||
} | |||
static void vector_vector_mul(__m256i a_avx[NUM_POLY][AVX_N1], __m256i b_bucket[NUM_POLY][SCHB_N * 4], __m256i res_avx[AVX_N1]) { | |||
int64_t i; | |||
__m256i c_bucket[2 * SCM_SIZE * 4]; //Holds results for 9 Karatsuba at a time | |||
for (i = 0; i < NUM_POLY; i++) { | |||
toom_cook_4way_avx_n1(a_avx[i], b_bucket[i], c_bucket, i); | |||
} | |||
TC_interpol(c_bucket, res_avx); | |||
} | |||
//********************************matrix-vector mul routines***************************************************** | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
polyvec a[SABER_K]; | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint8_t seed[SABER_SEEDBYTES]; | |||
uint8_t noiseseed[SABER_COINBYTES]; | |||
int32_t i, j, k; | |||
//--------------AVX declaration------------------ | |||
__m256i sk_avx[SABER_K][SABER_N / 16]; | |||
__m256i mod; | |||
__m256i res_avx[SABER_K][SABER_N / 16]; | |||
__m256i a_avx[SABER_K][SABER_K][SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
mod = _mm256_set1_epi16(SABER_Q - 1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
randombytes(seed, SABER_SEEDBYTES); | |||
shake128(seed, SABER_SEEDBYTES, seed, SABER_SEEDBYTES); // hash the seed so that the raw system RNG output is not revealed
randombytes(noiseseed, SABER_COINBYTES); | |||
GenMatrix(a, seed); //sample matrix A | |||
GenSecret(skpv1, noiseseed); | |||
// Load sk into avx vectors | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sk_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&skpv1[i][j * 16])); | |||
} | |||
} | |||
// Load a into avx vectors | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
a_avx[i][j][k] = _mm256_loadu_si256 ((__m256i const *) (&a[i].vec[j].coeffs[k * 16])); | |||
} | |||
} | |||
} | |||
//------------------------do the matrix vector multiplication and rounding------------ | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sk_avx[j], b_bucket[j]); | |||
} | |||
matrix_vector_mul(a_avx, b_bucket, res_avx, 1);// Matrix-vector multiplication; Matrix in transposed order | |||
// Now truncation | |||
for (i = 0; i < SABER_K; i++) { //shift right EQ-EP bits | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
res_avx[i][j] = _mm256_add_epi16 (res_avx[i][j], _mm256_set1_epi16(h1)); | |||
res_avx[i][j] = _mm256_srli_epi16 (res_avx[i][j], (SABER_EQ - SABER_EP) ); | |||
res_avx[i][j] = _mm256_and_si256 (res_avx[i][j], mod); | |||
} | |||
} | |||
//------------------Pack sk into byte string------- | |||
PQCLEAN_FIRESABER_AVX2_POLVEC2BS(sk, (const uint16_t (*)[SABER_N])skpv1, SABER_Q); | |||
//------------------Pack pk into byte string------- | |||
for (i = 0; i < SABER_K; i++) { // reuses skpv1[] for unpacking avx of public-key | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *) (skpv1[i] + j * 16), _mm256_set1_epi32(-1), res_avx[i][j]); | |||
} | |||
} | |||
PQCLEAN_FIRESABER_AVX2_POLVEC2BS(pk, (const uint16_t (*)[SABER_N])skpv1, SABER_P); // load the public-key into pk byte string | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { // now load the seedbytes in PK. Easy since seed bytes are kept in byte format. | |||
pk[SABER_POLYVECCOMPRESSEDBYTES + i] = seed[i]; | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t noiseseed[SABER_NOISESEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]) { | |||
uint32_t i, j, k; | |||
polyvec a[SABER_K]; // skpv; | |||
uint8_t seed[SABER_SEEDBYTES]; | |||
uint16_t pkcl[SABER_K][SABER_N]; // public key received by the client
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint16_t temp[SABER_K][SABER_N]; | |||
uint16_t message[SABER_KEYBYTES * 8]; | |||
uint8_t msk_c[SABER_SCALEBYTES_KEM]; | |||
//--------------AVX declaration------------------ | |||
__m256i sk_avx[SABER_K][SABER_N / 16]; | |||
__m256i mod, mod_p; | |||
__m256i res_avx[SABER_K][SABER_N / 16]; | |||
__m256i vprime_avx[SABER_N / 16]; | |||
__m256i a_avx[SABER_K][SABER_K][SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
__m256i pkcl_avx[SABER_K][SABER_N / 16]; | |||
__m256i message_avx[SABER_N / 16]; | |||
mod = _mm256_set1_epi16(SABER_Q - 1); | |||
mod_p = _mm256_set1_epi16(SABER_P - 1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { // Load the seedbytes in the client seed from PK. | |||
seed[i] = pk[ SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
GenMatrix(a, seed); | |||
GenSecret(skpv1, noiseseed); | |||
// ----------- Load skpv1 into avx vectors ---------- | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sk_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&skpv1[i][j * 16])); | |||
} | |||
} | |||
// ----------- Load skpv1 into avx vectors ---------- | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
a_avx[i][j][k] = _mm256_loadu_si256 ((__m256i const *) (&a[i].vec[j].coeffs[k * 16])); | |||
} | |||
} | |||
} | |||
//-----------------matrix-vector multiplication and rounding | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sk_avx[j], b_bucket[j]); | |||
} | |||
matrix_vector_mul(a_avx, b_bucket, res_avx, 0);// Matrix-vector multiplication; Matrix in normal order | |||
// Now truncation | |||
for (i = 0; i < SABER_K; i++) { //shift right EQ-EP bits | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
res_avx[i][j] = _mm256_add_epi16 (res_avx[i][j], _mm256_set1_epi16(h1)); | |||
res_avx[i][j] = _mm256_srli_epi16 (res_avx[i][j], (SABER_EQ - SABER_EP) ); | |||
res_avx[i][j] = _mm256_and_si256 (res_avx[i][j], mod); | |||
} | |||
} | |||
//-----this result is stored in b_prime for later use by the server
for (i = 0; i < SABER_K; i++) { // first store in 16 bit arrays | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *)(temp[i] + j * 16), _mm256_set1_epi32(-1), res_avx[i][j]); | |||
} | |||
} | |||
PQCLEAN_FIRESABER_AVX2_POLVEC2BS(ciphertext, (const uint16_t (*)[SABER_N])temp, SABER_P); // Pack b_prime into ciphertext byte string | |||
//**************client matrix-vector multiplication ends******************// | |||
//------now calculate the v' | |||
//-------unpack the public_key | |||
PQCLEAN_FIRESABER_AVX2_BS2POLVEC(pkcl, pk, SABER_P); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
pkcl_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&pkcl[i][j * 16])); | |||
} | |||
} | |||
// InnerProduct | |||
//for(k=0;k<SABER_N/16;k++){ | |||
// vprime_avx[k]=_mm256_xor_si256(vprime_avx[k],vprime_avx[k]); | |||
//} | |||
// vector-vector scalar multiplication with mod p | |||
vector_vector_mul(pkcl_avx, b_bucket, vprime_avx); | |||
// Computation of v'+h1 | |||
for (i = 0; i < SABER_N / 16; i++) { //adding h1 | |||
vprime_avx[i] = _mm256_add_epi16(vprime_avx[i], _mm256_set1_epi16(h1)); | |||
} | |||
// unpack m; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
message[8 * j + i] = ((m[j] >> i) & 0x01); | |||
} | |||
} | |||
// message encoding | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
message_avx[i] = _mm256_loadu_si256 ((__m256i const *) (&message[i * 16])); | |||
message_avx[i] = _mm256_slli_epi16 (message_avx[i], (SABER_EP - 1) ); | |||
} | |||
// SHIFTRIGHT(v'+h1-m mod p, EP-ET) | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
vprime_avx[k] = _mm256_sub_epi16(vprime_avx[k], message_avx[k]); | |||
vprime_avx[k] = _mm256_and_si256(vprime_avx[k], mod_p); | |||
vprime_avx[k] = _mm256_srli_epi16 (vprime_avx[k], (SABER_EP - SABER_ET) ); | |||
} | |||
// Unpack avx | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *) (temp[0] + j * 16), _mm256_set1_epi32(-1), vprime_avx[j]); | |||
} | |||
PQCLEAN_FIRESABER_AVX2_SABER_pack_6bit(msk_c, temp[0]); | |||
for (j = 0; j < SABER_SCALEBYTES_KEM; j++) { | |||
ciphertext[SABER_CIPHERTEXTBYTES + j] = msk_c[j]; | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]) { | |||
uint32_t i, j; | |||
uint16_t sksv[SABER_K][SABER_N]; //secret key of the server | |||
uint16_t pksv[SABER_K][SABER_N]; | |||
uint16_t message_dec_unpacked[SABER_KEYBYTES * 8]; // each element contains one decrypted message bit
uint8_t scale_ar[SABER_SCALEBYTES_KEM]; | |||
uint16_t op[SABER_N]; | |||
//--------------AVX declaration------------------ | |||
//__m256i mod_p; | |||
__m256i v_avx[SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
__m256i sksv_avx[SABER_K][SABER_N / 16]; | |||
__m256i pksv_avx[SABER_K][SABER_N / 16]; | |||
//mod_p=_mm256_set1_epi16(SABER_P-1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
//-------unpack the secret key and the ciphertext
PQCLEAN_FIRESABER_AVX2_BS2POLVEC(sksv, sk, SABER_Q); //sksv is the secret-key | |||
PQCLEAN_FIRESABER_AVX2_BS2POLVEC(pksv, ciphertext, SABER_P); //pksv is the ciphertext | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sksv_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&sksv[i][j * 16])); | |||
pksv_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&pksv[i][j * 16])); | |||
} | |||
} | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
v_avx[i] = _mm256_xor_si256(v_avx[i], v_avx[i]); | |||
} | |||
// InnerProduct(b', s, mod p) | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sksv_avx[j], b_bucket[j]); | |||
} | |||
vector_vector_mul(pksv_avx, b_bucket, v_avx); | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
_mm256_maskstore_epi32 ((int *)(message_dec_unpacked + i * 16), _mm256_set1_epi32(-1), v_avx[i]); | |||
} | |||
for (i = 0; i < SABER_SCALEBYTES_KEM; i++) { | |||
scale_ar[i] = ciphertext[SABER_CIPHERTEXTBYTES + i]; | |||
} | |||
PQCLEAN_FIRESABER_AVX2_SABER_un_pack6bit(op, scale_ar); | |||
// decode: add h2, subtract the re-scaled ciphertext component, reduce mod p, and extract the message bit
for (i = 0; i < SABER_N; i++) { | |||
message_dec_unpacked[i] = ( ( message_dec_unpacked[i] + h2 - (op[i] << (SABER_EP - SABER_ET)) ) & (SABER_P - 1) ) >> (SABER_EP - 1); | |||
} | |||
POL2MSG(m, message_dec_unpacked); | |||
} |
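The AVX2 rounding above is easier to follow in scalar form. The following is an editor's illustrative sketch, not part of the patch, of what the intrinsic sequences compute per 16-bit coefficient; with the FireSaber parameters (SABER_EQ = 13, SABER_EP = 10, SABER_ET = 6) the constants evaluate to h1 = 1 << 2 = 4 and h2 = 256 - 8 + 4 = 252.

#include <stdint.h>

/* Scalar model of the _mm256_add_epi16 / _mm256_srli_epi16 / _mm256_and_si256
   sequence used in keypair and enc: add the rounding constant h1 and drop the
   low (EQ - EP) bits. The code masks with q - 1 = 8191; the reduction to
   p = 2^10 happens later, when POLVEC2BS packs only 10 bits per coefficient. */
static uint16_t round_q_to_p(uint16_t c) {
    return (uint16_t)(((c + 4u) >> (13 - 10)) & (8192 - 1));
}

/* Scalar model of the decode loop in indcpa_kem_dec: v is one coefficient of
   the inner product b'^T s, cm the corresponding 6-bit ciphertext value. */
static uint16_t decode_bit(uint16_t v, uint16_t cm) {
    return (uint16_t)((((v + 252u) - (uint16_t)(cm << (10 - 6))) & (1024 - 1)) >> (10 - 1));
}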
@@ -0,0 +1,13 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]); | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t noiseseed[SABER_NOISESEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]); | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]); | |||
#endif |
@@ -0,0 +1,45 @@ | |||
#ifndef PARAMS_H | |||
#define PARAMS_H | |||
#include "api.h" | |||
#define SABER_K 4 | |||
#define SABER_MU 6 | |||
#define SABER_ET 6 | |||
#define SABER_EQ 13 | |||
#define SABER_EP 10 | |||
#define SABER_N 256 | |||
#define SABER_Q 8192 //2^13 | |||
#define SABER_P 1024 | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISESEEDBYTES 32 | |||
#define SABER_COINBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYBYTES 416 //13*256/8 | |||
#define SABER_POLYVECBYTES (SABER_K * SABER_POLYBYTES) | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_K * 320) //10*256/8 NOTE : changed till here due to parameter adaptation | |||
#define SABER_CIPHERTEXTBYTES (SABER_POLYVECCOMPRESSEDBYTES) | |||
#define SABER_SCALEBYTES_KEM ((SABER_ET)*SABER_N/8) | |||
#define SABER_INDCPA_PUBLICKEYBYTES (SABER_POLYVECCOMPRESSEDBYTES + SABER_SEEDBYTES) | |||
#define SABER_INDCPA_SECRETKEYBYTES (SABER_POLYVECBYTES) | |||
#define SABER_PUBLICKEYBYTES (SABER_INDCPA_PUBLICKEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) /* Second part is for Targhi-Unruh */ | |||
#endif |
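As a cross-check of the sizes above against api.h, the derived quantities work out to SABER_POLYBYTES = 416, SABER_POLYVECBYTES = 1664, SABER_POLYVECCOMPRESSEDBYTES = 1280, SABER_SCALEBYTES_KEM = 192, SABER_BYTES_CCA_DEC = 1472, SABER_INDCPA_PUBLICKEYBYTES = 1312 and SABER_SECRETKEYBYTES = 3040. A minimal stand-alone compile-time check, an editor's sketch rather than part of the patch (the C99 negative-array-size idiom stands in for C11 _Static_assert), could look as follows.

#include "SABER_params.h"

#define SABER_SIZE_CHECK(name, cond) typedef char name[(cond) ? 1 : -1]

SABER_SIZE_CHECK(check_polyvec_q, SABER_POLYVECBYTES == 4 * 416);                 /* 1664 */
SABER_SIZE_CHECK(check_polyvec_p, SABER_POLYVECCOMPRESSEDBYTES == 4 * 320);       /* 1280 */
SABER_SIZE_CHECK(check_ciphertext, SABER_BYTES_CCA_DEC == 1280 + 192);            /* 1472, matches api.h */
SABER_SIZE_CHECK(check_publickey, SABER_INDCPA_PUBLICKEYBYTES == 1280 + 32);      /* 1312, matches api.h */
SABER_SIZE_CHECK(check_secretkey, SABER_SECRETKEYBYTES == 1664 + 1312 + 32 + 32); /* 3040, matches api.h */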
@@ -0,0 +1,18 @@ | |||
#ifndef PQCLEAN_FIRESABER_AVX2_API_H | |||
#define PQCLEAN_FIRESABER_AVX2_API_H | |||
#define PQCLEAN_FIRESABER_AVX2_CRYPTO_ALGNAME "FireSaber" | |||
#define PQCLEAN_FIRESABER_AVX2_CRYPTO_BYTES 32 | |||
#define PQCLEAN_FIRESABER_AVX2_CRYPTO_CIPHERTEXTBYTES 1472 | |||
#define PQCLEAN_FIRESABER_AVX2_CRYPTO_PUBLICKEYBYTES 1312 | |||
#define PQCLEAN_FIRESABER_AVX2_CRYPTO_SECRETKEYBYTES 3040 | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_enc(unsigned char *ct, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_dec(unsigned char *k, const unsigned char *ct, const unsigned char *sk); | |||
#endif /* PQCLEAN_FIRESABER_AVX2_API_H */ |
@@ -0,0 +1,52 @@ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
int i; | |||
uint64_t r = x[0]; | |||
for (i = 1; i < bytes; i++) { | |||
r |= (uint64_t)x[i] << (8 * i); | |||
} | |||
return r; | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_cbd(uint16_t *r, const unsigned char *buf) { | |||
uint16_t Qmod_minus1 = SABER_Q - 1; | |||
uint32_t t, d, a[4], b[4]; | |||
int i, j; | |||
for (i = 0; i < SABER_N / 4; i++) { | |||
t = load_littleendian(buf + 3 * i, 3); | |||
d = 0; | |||
for (j = 0; j < 3; j++) { | |||
d += (t >> j) & 0x249249; | |||
} | |||
a[0] = d & 0x7; | |||
b[0] = (d >> 3) & 0x7; | |||
a[1] = (d >> 6) & 0x7; | |||
b[1] = (d >> 9) & 0x7; | |||
a[2] = (d >> 12) & 0x7; | |||
b[2] = (d >> 15) & 0x7; | |||
a[3] = (d >> 18) & 0x7; | |||
b[3] = (d >> 21); | |||
r[4 * i + 0] = (uint16_t)(a[0] - b[0]) & Qmod_minus1; | |||
r[4 * i + 1] = (uint16_t)(a[1] - b[1]) & Qmod_minus1; | |||
r[4 * i + 2] = (uint16_t)(a[2] - b[2]) & Qmod_minus1; | |||
r[4 * i + 3] = (uint16_t)(a[3] - b[3]) & Qmod_minus1; | |||
} | |||
} |
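The bit trick above sums the 24 input bits in groups of three: after the loop, every 3-bit field of d holds the Hamming weight of the corresponding three bits of t, so each output coefficient is HW(a) - HW(b) in [-3, 3], a centered binomial sample with mu = 6 stored modulo q. A plain scalar reference for one 24-bit chunk, as an editor's sketch under the same conventions, would be:

#include <stdint.h>

/* Reference model of one iteration of PQCLEAN_FIRESABER_AVX2_cbd: t packs 24
   random bits, and each output coefficient is the difference of the Hamming
   weights of two 3-bit groups, reduced modulo q = 8192. */
static void cbd_chunk_reference(uint16_t r[4], uint32_t t) {
    for (int k = 0; k < 4; k++) {
        uint32_t bits = (t >> (6 * k)) & 0x3F;                      /* 6 random bits per coefficient */
        uint32_t hw_a = (bits & 1) + ((bits >> 1) & 1) + ((bits >> 2) & 1);
        uint32_t hw_b = ((bits >> 3) & 1) + ((bits >> 4) & 1) + ((bits >> 5) & 1);
        r[k] = (uint16_t)(hw_a - hw_b) & (8192 - 1);                /* value in [-3, 3] mod q */
    }
}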
@@ -0,0 +1,16 @@ | |||
#ifndef CBD_H | |||
#define CBD_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "poly.h" | |||
#include <stdint.h> | |||
void PQCLEAN_FIRESABER_AVX2_cbd(uint16_t *r, const unsigned char *buf); | |||
#endif |
@@ -0,0 +1,79 @@ | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "fips202.h" | |||
#include "randombytes.h" | |||
#include "verify.h" | |||
#include <immintrin.h> | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
#include <string.h> | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
int i; | |||
PQCLEAN_FIRESABER_AVX2_indcpa_kem_keypair(pk, sk); // sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
for (i = 0; i < SABER_INDCPA_PUBLICKEYBYTES; i++) { | |||
sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; // sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_PUBLICKEYBYTES-1] <-- pk
} | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); // Then hash(pk) is appended. | |||
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES); // Remaining part of sk contains a pseudo-random number. | |||
// This is output when check in PQCLEAN_FIRESABER_AVX2_crypto_kem_dec() fails. | |||
return (0); | |||
} | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_enc(uint8_t *c, uint8_t *k, const uint8_t *pk) { | |||
uint8_t kr[64]; // Will contain key, coins | |||
uint8_t buf[64]; | |||
randombytes(buf, 32); | |||
sha3_256(buf, buf, 32); // BUF[0:31] <-- random message (will be used as the key for the client); hashing ensures the raw system RNG output is not revealed
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); // BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_512(kr, buf, 64); // kr[0:63] <-- Hash(buf[0:63]); | |||
// K^ <-- kr[0:31] | |||
// noiseseed (r) <-- kr[32:63]; | |||
PQCLEAN_FIRESABER_AVX2_indcpa_kem_enc(c, buf, (const uint8_t *) (kr + 32), pk); // buf[0:31] contains message; kr[32:63] contains randomness r; | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_dec(uint8_t *k, const uint8_t *c, const uint8_t *sk) { | |||
int i; | |||
uint8_t fail; | |||
uint8_t cmp[SABER_BYTES_CCA_DEC]; | |||
uint8_t buf[64]; | |||
uint8_t kr[64]; // Will contain key, coins | |||
const uint8_t *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
PQCLEAN_FIRESABER_AVX2_indcpa_kem_dec(buf, sk, c); // buf[0:31] <-- message | |||
// Multitarget countermeasure for coins + contributory KEM | |||
for (i = 0; i < 32; i++) { // Save hash by storing h(pk) in sk | |||
buf[32 + i] = sk[SABER_SECRETKEYBYTES - 64 + i]; | |||
} | |||
sha3_512(kr, buf, 64); | |||
PQCLEAN_FIRESABER_AVX2_indcpa_kem_enc(cmp, buf, (const uint8_t *) (kr + 32), pk); | |||
fail = PQCLEAN_FIRESABER_AVX2_verify(c, cmp, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); // overwrite coins in kr with h(c) | |||
PQCLEAN_FIRESABER_AVX2_cmov(kr, sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES, fail); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} |
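For context, a minimal round trip of the CCA-secure KEM through the api.h interface might look as follows. This is an editor's usage sketch, not part of the patch; it assumes the program is linked against libfiresaber_avx2.a and a randombytes implementation such as the one in the PQClean common directory.

#include "api.h"
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    uint8_t pk[PQCLEAN_FIRESABER_AVX2_CRYPTO_PUBLICKEYBYTES];
    uint8_t sk[PQCLEAN_FIRESABER_AVX2_CRYPTO_SECRETKEYBYTES];
    uint8_t ct[PQCLEAN_FIRESABER_AVX2_CRYPTO_CIPHERTEXTBYTES];
    uint8_t key_a[PQCLEAN_FIRESABER_AVX2_CRYPTO_BYTES];
    uint8_t key_b[PQCLEAN_FIRESABER_AVX2_CRYPTO_BYTES];

    PQCLEAN_FIRESABER_AVX2_crypto_kem_keypair(pk, sk);
    PQCLEAN_FIRESABER_AVX2_crypto_kem_enc(ct, key_a, pk);   /* encapsulate against pk */
    PQCLEAN_FIRESABER_AVX2_crypto_kem_dec(key_b, ct, sk);   /* decapsulate with sk    */

    printf("shared secrets %s\n",
           memcmp(key_a, key_b, PQCLEAN_FIRESABER_AVX2_CRYPTO_BYTES) == 0 ? "match" : "differ");
    return 0;
}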
@@ -0,0 +1,35 @@ | |||
#ifndef KEM_H
#define KEM_H
#include <stdint.h> | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_keypair(uint8_t *pk, uint8_t *sk); | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_client(uint8_t *pk, uint8_t *b_prime, uint8_t *c, uint8_t *key); | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_server(uint8_t *pk, uint8_t *b_prime, uint8_t *c, uint8_t *key); | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_keypair(uint8_t *pk, uint8_t *sk); | |||
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_enc(uint8_t *ciphertext, const uint8_t *m, const uint8_t *noiseseed, const uint8_t *pk);
void PQCLEAN_FIRESABER_AVX2_indcpa_kem_dec(uint8_t *m, const uint8_t *sk, const uint8_t *ciphertext);
int PQCLEAN_FIRESABER_AVX2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_enc(unsigned char *c, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_FIRESABER_AVX2_crypto_kem_dec(unsigned char *k, const unsigned char *c, const unsigned char *sk); | |||
//uint64_t clock1,clock2; | |||
//uint64_t clock_kp_kex, clock_enc_kex, clock_dec_kex; | |||
#endif |
@@ -0,0 +1,502 @@ | |||
#include "pack_unpack.h" | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack_3bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x7) | ( (data[offset_data + 1] & 0x7) << 3 ) | ((data[offset_data + 2] & 0x3) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 2] >> 2 ) & 0x01) | ( (data[offset_data + 3] & 0x7) << 1 ) | ( (data[offset_data + 4] & 0x7) << 4 ) | (((data[offset_data + 5]) & 0x01) << 7); | |||
bytes[offset_byte + 2] = ((data[offset_data + 5] >> 1 ) & 0x03) | ( (data[offset_data + 6] & 0x7) << 2 ) | ( (data[offset_data + 7] & 0x7) << 5 ); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack3bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0]) & 0x07; | |||
data[offset_data + 1] = ( (bytes[offset_byte + 0]) >> 3 ) & 0x07; | |||
data[offset_data + 2] = ( ( (bytes[offset_byte + 0]) >> 6 ) & 0x03) | ( ( (bytes[offset_byte + 1]) & 0x01) << 2 ); | |||
data[offset_data + 3] = ( (bytes[offset_byte + 1]) >> 1 ) & 0x07; | |||
data[offset_data + 4] = ( (bytes[offset_byte + 1]) >> 4 ) & 0x07; | |||
data[offset_data + 5] = ( ( (bytes[offset_byte + 1]) >> 7 ) & 0x01) | ( ( (bytes[offset_byte + 2]) & 0x03) << 1 ); | |||
data[offset_data + 6] = ( (bytes[offset_byte + 2] >> 2) & 0x07 ); | |||
data[offset_data + 7] = ( (bytes[offset_byte + 2] >> 5) & 0x07 ); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack_4bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
bytes[j] = (data[offset_data] & 0x0f) | ( (data[offset_data + 1] & 0x0f) << 4 ); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack4bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
data[offset_data] = bytes[j] & 0x0f; | |||
data[offset_data + 1] = (bytes[j] >> 4) & 0x0f; | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack_6bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x3f) | ((data[offset_data + 1] & 0x03) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 1] >> 2) & 0x0f) | ((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 2] = ((data[offset_data + 2] >> 4) & 0x03) | ((data[offset_data + 3] & 0x3f) << 2); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack6bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 4 * j; | |||
data[offset_data + 0] = bytes[offset_byte + 0] & 0x3f; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0] >> 6) & 0x03) | ((bytes[offset_byte + 1] & 0x0f) << 2) ; | |||
data[offset_data + 2] = ((bytes[offset_byte + 1] & 0xff) >> 4) | ((bytes[offset_byte + 2] & 0x03) << 4) ; | |||
data[offset_data + 3] = ((bytes[offset_byte + 2] & 0xff) >> 2); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack10bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x03 ) | ((data[i][ offset_data + 1 ] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 6) & 0x0f ) | ( (data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 4) & 0x3f ) | ((data[i][ offset_data + 3 ] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 3 ] >> 2) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_POLVECp2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x03 ) | ((data[i][ offset_data + 1 ] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 6) & 0x0f ) | ( (data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 4) & 0x3f ) | ((data[i][ offset_data + 3 ] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 3 ] >> 2) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_POLVECq2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x1f ) | ((data[i][ offset_data + 1 ] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 3) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 11) & 0x03 ) | ((data[i][ offset_data + 2 ] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 6) & 0x7f ) | ( (data[i][ offset_data + 3 ] & 0x01) << 7 ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 9) & 0x0f ) | ( (data[i][ offset_data + 4 ] & 0x0f) << 4 ); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 4] >> 4) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 4 ] >> 12) & 0x01 ) | ( (data[i][ offset_data + 5 ] & 0x7f) << 1 ); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 5 ] >> 7) & 0x3f ) | ( (data[i][ offset_data + 6 ] & 0x03) << 6 ); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 6 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 11] = ( (data[i][ offset_data + 6 ] >> 10) & 0x07 ) | ( (data[i][ offset_data + 7 ] & 0x1f) << 3 ); | |||
bytes[offset_byte + 12] = ( (data[i][ offset_data + 7 ] >> 5) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLq(uint16_t data[SABER_N], const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLVECp(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[ offset_byte + 1 ] & 0x03) << 8); | |||
data[i][offset_data + 1] = ( (bytes[ offset_byte + 1 ] >> 2) & (0x3f)) | ((bytes[ offset_byte + 2 ] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ( (bytes[ offset_byte + 2 ] >> 4) & (0x0f)) | ((bytes[ offset_byte + 3 ] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ( (bytes[ offset_byte + 3 ] >> 6) & (0x03)) | ((bytes[ offset_byte + 4 ] & 0xff) << 2); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLVECq(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack10bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[ offset_byte + 1 ] & 0x03) << 8); | |||
data[i][offset_data + 1] = ( (bytes[ offset_byte + 1 ] >> 2) & (0x3f)) | ((bytes[ offset_byte + 2 ] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ( (bytes[ offset_byte + 2 ] >> 4) & (0x0f)) | ((bytes[ offset_byte + 3 ] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ( (bytes[ offset_byte + 3 ] >> 6) & (0x03)) | ((bytes[ offset_byte + 4 ] & 0xff) << 2); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack13bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x1f ) | ((data[i][ offset_data + 1 ] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 3) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 11) & 0x03 ) | ((data[i][ offset_data + 2 ] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 6) & 0x7f ) | ( (data[i][ offset_data + 3 ] & 0x01) << 7 ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 9) & 0x0f ) | ( (data[i][ offset_data + 4 ] & 0x0f) << 4 ); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 4] >> 4) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 4 ] >> 12) & 0x01 ) | ( (data[i][ offset_data + 5 ] & 0x7f) << 1 ); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 5 ] >> 7) & 0x3f ) | ( (data[i][ offset_data + 6 ] & 0x03) << 6 ); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 6 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 11] = ( (data[i][ offset_data + 6 ] >> 10) & 0x07 ) | ( (data[i][ offset_data + 7 ] & 0x1f) << 3 ); | |||
bytes[offset_byte + 12] = ( (data[i][ offset_data + 7 ] >> 5) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack13bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_poly_un_pack13bit(uint16_t data[SABER_N], const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
//for(i=0;i<SABER_K;i++){ | |||
//i=0; | |||
//offset_byte1=i*(SABER_N*13)/8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
//offset_byte=offset_byte1+13*j; | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
//} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack11bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
/* This function packs an 11-bit data stream into bytes. */
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 11) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 11 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x07 ) | ((data[i][ offset_data + 1 ] & 0x1f) << 3); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 5) & 0x3f ) | ((data[i][ offset_data + 2 ] & 0x03) << 6); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 10) & 0x01 ) | ((data[i][ offset_data + 3 ] & 0x7f) << 1); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 7) & 0x0f ) | ((data[i][ offset_data + 4 ] & 0x0f) << 4); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 4 ] >> 4) & 0x7f ) | ((data[i][ offset_data + 5 ] & 0x01) << 7); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 5 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 5 ] >> 9) & 0x03 ) | ((data[i][ offset_data + 6 ] & 0x3f) << 2); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 6 ] >> 6) & 0x1f ) | ((data[i][ offset_data + 7 ] & 0x07) << 5); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 7 ] >> 3) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack11bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 11) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 11 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0]) | ( (bytes[offset_byte + 1] & 0x07) << 8 ); | |||
data[i][offset_data + 1] = ( (bytes[offset_byte + 1] >> 3) & 0x1f) | ( (bytes[offset_byte + 2] & 0x3f) << 5 ); | |||
data[i][offset_data + 2] = ( (bytes[offset_byte + 2] >> 6) & 0x03) | ( (bytes[offset_byte + 3] & 0xff) << 2 ) | ( (bytes[offset_byte + 4] & 0x01) << 10 ); | |||
data[i][offset_data + 3] = ( (bytes[offset_byte + 4] >> 1) & 0x7f) | ( (bytes[offset_byte + 5] & 0x0f) << 7 ); | |||
data[i][offset_data + 4] = ( (bytes[offset_byte + 5] >> 4) & 0x0f) | ( (bytes[offset_byte + 6] & 0x7f) << 4 ); | |||
data[i][offset_data + 5] = ( (bytes[offset_byte + 6] >> 7) & 0x01) | ( (bytes[offset_byte + 7] & 0xff) << 1 ) | ( (bytes[offset_byte + 8] & 0x03) << 9 ); | |||
data[i][offset_data + 6] = ( (bytes[offset_byte + 8] >> 2) & 0x3f) | ( (bytes[offset_byte + 9] & 0x1f) << 6 ); | |||
data[i][offset_data + 7] = ( (bytes[offset_byte + 9] >> 5) & 0x07) | ( (bytes[offset_byte + 10] & 0xff) << 3 ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack14bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 14) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 7 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x3f ) | ((data[i][ offset_data + 1 ] & 0x03) << 6); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 10) & 0x0f ) | ((data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 4) & 0xff ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 2 ] >> 12) & 0x03 ) | ((data[i][ offset_data + 3 ] & 0x3f) << 2); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 6) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack14bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 14) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 7 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & 0xff) | ( (bytes[offset_byte + 1] & 0x3f) << 8 ); | |||
data[i][offset_data + 1] = ( (bytes[offset_byte + 1] >> 6) & 0x03) | ((bytes[offset_byte + 2] & 0xff) << 2 ) | ( (bytes[offset_byte + 3] & 0x0f) << 10 ); | |||
data[i][offset_data + 2] = ( (bytes[offset_byte + 3] >> 4) & 0x0f) | ( (bytes[offset_byte + 4] ) << 4 ) | ( (bytes[offset_byte + 5] & 0x03) << 12 ); | |||
data[i][offset_data + 3] = ( (bytes[offset_byte + 5] >> 2) & 0x3f) | ( (bytes[offset_byte + 6] ) << 6 ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_POLVEC2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
PQCLEAN_FIRESABER_AVX2_POLVECp2BS(bytes, data); | |||
} else if (modulus == 8192) { | |||
PQCLEAN_FIRESABER_AVX2_POLVECq2BS(bytes, data); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLVEC(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes, uint16_t modulus) { | |||
if (modulus == 1024) { | |||
PQCLEAN_FIRESABER_AVX2_BS2POLVECp(data, bytes); | |||
} else if (modulus == 8192) { | |||
PQCLEAN_FIRESABER_AVX2_BS2POLVECq(data, bytes); | |||
} | |||
} |
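Each pack/unpack pair above is an inverse on inputs that already fit the stated bit width. As an illustration, an editor's sketch rather than part of the patch, the 6-bit pair used for the ciphertext's rounded component can be checked like this:

#include "pack_unpack.h"
#include <assert.h>

/* Round-trip check: packing SABER_N values that already fit in 6 bits and
   unpacking them again must reproduce the input. */
static void check_6bit_roundtrip(void) {
    uint16_t in[SABER_N], out[SABER_N];
    uint8_t packed[3 * SABER_N / 4];          /* 6 bits per coefficient -> 192 bytes */
    for (int i = 0; i < SABER_N; i++) {
        in[i] = (uint16_t)(i & 0x3F);         /* any 6-bit pattern */
    }
    PQCLEAN_FIRESABER_AVX2_SABER_pack_6bit(packed, in);
    PQCLEAN_FIRESABER_AVX2_SABER_un_pack6bit(out, packed);
    for (int i = 0; i < SABER_N; i++) {
        assert(in[i] == out[i]);
    }
}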
@@ -0,0 +1,56 @@ | |||
#ifndef PACK_UNPACK_H | |||
#define PACK_UNPACK_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLq(uint16_t data[SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLVEC(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes, uint16_t modulus); | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLVECq(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_BS2POLVECp(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_POLVEC2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
void PQCLEAN_FIRESABER_AVX2_POLVECq2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_FIRESABER_AVX2_POLVECp2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack_3bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack_4bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack_6bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack10bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack11bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack13bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_pack14bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_poly_un_pack13bit(uint16_t data[SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack3bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack4bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack6bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack10bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack11bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack13bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_FIRESABER_AVX2_SABER_un_pack14bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
#endif |
@@ -0,0 +1,27 @@ | |||
#ifndef POLY_H | |||
#define POLY_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
typedef struct { | |||
uint16_t coeffs[SABER_N]; | |||
} poly; | |||
typedef struct { | |||
poly vec[SABER_K]; | |||
} polyvec; | |||
void PQCLEAN_FIRESABER_AVX2_poly_getnoise(uint16_t *r, const unsigned char *seed, unsigned char nonce); | |||
void PQCLEAN_FIRESABER_AVX2_poly_getnoise4x(uint16_t *r0, uint16_t *r1, uint16_t *r2, const unsigned char *seed, unsigned char nonce0, unsigned char nonce1, unsigned char nonce2, unsigned char nonce3); | |||
#endif |
@@ -0,0 +1,20 @@ | |||
#include "../SABER_params.h" | |||
#define AVX_N (SABER_N >> 4) | |||
#define small_len_avx (AVX_N >> 2) | |||
#define SCHB_N 16 | |||
#define N_SB (SABER_N >> 2) | |||
#define N_SB_RES (2*N_SB-1) | |||
#define N_SB_16 (N_SB >> 2) | |||
#define N_SB_16_RES (2*N_SB_16-1) | |||
#define AVX_N1 16 /*N/16*/ | |||
#define SCM_SIZE 16 | |||
// NUM_POLY is the module dimension: a vector has NUM_POLY polynomials and the matrix has NUM_POLY x NUM_POLY entries
#define NUM_POLY SABER_K | |||
//int NUM_POLY=2; |
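/* Editor's note (illustrative, not part of the patch): for FireSaber
   (SABER_N = 256, SABER_K = 4) the constants above evaluate to
     AVX_N         = 256 >> 4 = 16   AVX2 vectors of 16 coefficients per polynomial
     small_len_avx = 16 >> 2  = 4
     N_SB          = 256 >> 2 = 64   limb size of the 4-way Toom-Cook split
     N_SB_RES      = 2*64 - 1 = 127
     N_SB_16       = 64 >> 2  = 16   leaf size of the Karatsuba layer
     N_SB_16_RES   = 2*16 - 1 = 31
     NUM_POLY      = SABER_K  = 4
   and AVX_N1 coincides with AVX_N. */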
@@ -0,0 +1,303 @@ | |||
#include <immintrin.h> | |||
static void transpose_n1(__m256i *M) | |||
{ | |||
//int i; | |||
register __m256i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; | |||
register __m256i temp, temp0, temp1, temp2; | |||
//for(i=0; i<8; i=i+1) | |||
//{ | |||
r0 = _mm256_unpacklo_epi16(M[0], M[1]); | |||
r1 = _mm256_unpacklo_epi16(M[2], M[3]); | |||
r2 = _mm256_unpacklo_epi16(M[4], M[5]); | |||
r3 = _mm256_unpacklo_epi16(M[6], M[7]); | |||
r4 = _mm256_unpacklo_epi16(M[8], M[9]); | |||
r5 = _mm256_unpacklo_epi16(M[10], M[11]); | |||
r6 = _mm256_unpacklo_epi16(M[12], M[13]); | |||
r7 = _mm256_unpacklo_epi16(M[14], M[15]); | |||
temp = _mm256_unpacklo_epi32(r0, r1); | |||
temp0 = _mm256_unpacklo_epi32(r2, r3); | |||
temp1 = _mm256_unpacklo_epi32(r4, r5); | |||
temp2 = _mm256_unpacklo_epi32(r6, r7); | |||
r8 = _mm256_unpackhi_epi32(r0, r1); | |||
r9 = _mm256_unpackhi_epi32(r2, r3); | |||
r10 = _mm256_unpackhi_epi32(r4, r5); | |||
r11 = _mm256_unpackhi_epi32(r6, r7); | |||
r0 = _mm256_unpacklo_epi64(temp, temp0); | |||
r2 = _mm256_unpackhi_epi64(temp, temp0); | |||
r1 = _mm256_unpacklo_epi64(temp1, temp2); | |||
r3 = _mm256_unpackhi_epi64(temp1, temp2); | |||
temp = _mm256_unpackhi_epi16(M[0], M[1]); | |||
temp0 = _mm256_unpackhi_epi16(M[2], M[3]); | |||
temp1 = _mm256_unpackhi_epi16(M[4], M[5]); | |||
temp2 = _mm256_unpackhi_epi16(M[6], M[7]); | |||
r4 = _mm256_unpackhi_epi16(M[8], M[9]); | |||
M[0] = _mm256_permute2f128_si256(r0, r1, 0x20); | |||
M[8] = _mm256_permute2f128_si256(r0, r1, 0x31); | |||
M[1] = _mm256_permute2f128_si256(r2, r3, 0x20); | |||
M[9] = _mm256_permute2f128_si256(r2, r3, 0x31); | |||
r5 = _mm256_unpackhi_epi16(M[10], M[11]); | |||
r6 = _mm256_unpackhi_epi16(M[12], M[13]); | |||
r7 = _mm256_unpackhi_epi16(M[14], M[15]); | |||
r0 = _mm256_unpacklo_epi64(r8, r9); | |||
r1 = _mm256_unpacklo_epi64(r10, r11); | |||
r2 = _mm256_unpackhi_epi64(r8, r9); | |||
r3 = _mm256_unpackhi_epi64(r10, r11); | |||
M[3] = _mm256_permute2f128_si256(r2, r3, 0x20); | |||
M[11] = _mm256_permute2f128_si256(r2, r3, 0x31); | |||
M[2] = _mm256_permute2f128_si256(r0, r1, 0x20); | |||
M[10] = _mm256_permute2f128_si256(r0, r1, 0x31); | |||
//for(i=0; i<4; i=i+1) | |||
//{ | |||
r0 = _mm256_unpacklo_epi32(temp, temp0); | |||
r1 = _mm256_unpacklo_epi32(temp1, temp2); | |||
r2 = _mm256_unpacklo_epi32(r4, r5); | |||
r3 = _mm256_unpacklo_epi32(r6, r7); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
r8 = _mm256_unpacklo_epi64(r0, r1); | |||
r10 = _mm256_unpackhi_epi64(r0, r1); | |||
r9 = _mm256_unpacklo_epi64(r2, r3); | |||
r11 = _mm256_unpackhi_epi64(r2, r3); | |||
M[4] = _mm256_permute2f128_si256(r8, r9, 0x20); | |||
M[12] = _mm256_permute2f128_si256(r8, r9, 0x31); | |||
M[5] = _mm256_permute2f128_si256(r10, r11, 0x20); | |||
M[13] = _mm256_permute2f128_si256(r10, r11, 0x31); | |||
r0 = _mm256_unpackhi_epi32(temp, temp0); | |||
r1 = _mm256_unpackhi_epi32(temp1, temp2); | |||
r2 = _mm256_unpackhi_epi32(r4, r5); | |||
r3 = _mm256_unpackhi_epi32(r6, r7); | |||
//} | |||
// for(i=0; i<2; i=i+1) | |||
// { | |||
r4 = _mm256_unpacklo_epi64(r0, r1); | |||
r6 = _mm256_unpackhi_epi64(r0, r1); | |||
r5 = _mm256_unpacklo_epi64(r2, r3); | |||
r7 = _mm256_unpackhi_epi64(r2, r3); | |||
// } | |||
//------------------------------------------------------- | |||
M[6] = _mm256_permute2f128_si256(r4, r5, 0x20); | |||
M[14] = _mm256_permute2f128_si256(r4, r5, 0x31); | |||
M[7] = _mm256_permute2f128_si256(r6, r7, 0x20); | |||
M[15] = _mm256_permute2f128_si256(r6, r7, 0x31); | |||
} | |||
/* | |||
void transpose_unrolled(__m256i *M) | |||
{ | |||
int i; | |||
__m256i tL[8], tH[8]; | |||
__m256i bL[4], bH[4], cL[4], cH[4]; | |||
__m256i dL[2], dH[2], eL[2], eH[2], fL[2], fH[2], gL[2], gH[2]; | |||
__m256i r0, r1, r2, r3, r4, r5, r6, r7; | |||
//for(i=0; i<8; i=i+1) | |||
//{ | |||
tL[0] = _mm256_unpacklo_epi16(M[0], M[1]); | |||
tH[0] = _mm256_unpackhi_epi16(M[0], M[1]); | |||
tL[1] = _mm256_unpacklo_epi16(M[2], M[3]); | |||
tH[1] = _mm256_unpackhi_epi16(M[2], M[3]); | |||
tL[2] = _mm256_unpacklo_epi16(M[4], M[5]); | |||
tH[2] = _mm256_unpackhi_epi16(M[4], M[5]); | |||
tL[3] = _mm256_unpacklo_epi16(M[6], M[7]); | |||
tH[3] = _mm256_unpackhi_epi16(M[6], M[7]); | |||
tL[4] = _mm256_unpacklo_epi16(M[8], M[9]); | |||
tH[4] = _mm256_unpackhi_epi16(M[8], M[9]); | |||
tL[5] = _mm256_unpacklo_epi16(M[10], M[11]); | |||
tH[5] = _mm256_unpackhi_epi16(M[10], M[11]); | |||
tL[6] = _mm256_unpacklo_epi16(M[12], M[13]); | |||
tH[6] = _mm256_unpackhi_epi16(M[12], M[13]); | |||
tL[7] = _mm256_unpacklo_epi16(M[14], M[15]); | |||
tH[7] = _mm256_unpackhi_epi16(M[14], M[15]); | |||
//} | |||
//------------------------------------------------------- | |||
//for(i=0; i<4; i=i+1) | |||
//{ | |||
bL[0] = _mm256_unpacklo_epi32(tL[0], tL[1]); | |||
bH[0] = _mm256_unpackhi_epi32(tL[0], tL[1]); | |||
bL[1] = _mm256_unpacklo_epi32(tL[2], tL[3]); | |||
bH[1] = _mm256_unpackhi_epi32(tL[2], tL[3]); | |||
bL[2] = _mm256_unpacklo_epi32(tL[4], tL[5]); | |||
bH[2] = _mm256_unpackhi_epi32(tL[4], tL[5]); | |||
bL[3] = _mm256_unpacklo_epi32(tL[6], tL[7]); | |||
bH[3] = _mm256_unpackhi_epi32(tL[6], tL[7]); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
dL[0] = _mm256_unpacklo_epi64(bL[0], bL[1]); | |||
dH[0] = _mm256_unpackhi_epi64(bL[0], bL[1]); | |||
dL[1] = _mm256_unpacklo_epi64(bL[2], bL[3]); | |||
dH[1] = _mm256_unpackhi_epi64(bL[2], bL[3]); | |||
M[0] = _mm256_permute2f128_si256(dL[0], dL[1], 0x20); | |||
M[8] = _mm256_permute2f128_si256(dL[0], dL[1], 0x31); | |||
M[1] = _mm256_permute2f128_si256(dH[0], dH[1], 0x20); | |||
M[9] = _mm256_permute2f128_si256(dH[0], dH[1], 0x31); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
eL[0] = _mm256_unpacklo_epi64(bH[0], bH[1]); | |||
eH[0] = _mm256_unpackhi_epi64(bH[0], bH[1]); | |||
eL[1] = _mm256_unpacklo_epi64(bH[2], bH[3]); | |||
eH[1] = _mm256_unpackhi_epi64(bH[2], bH[3]); | |||
//} | |||
//------------------------------------------------------- | |||
//------------------------------------------------------- | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
cL[i] = _mm256_unpacklo_epi32(tH[2*i], tH[2*i+1]); | |||
cH[i] = _mm256_unpackhi_epi32(tH[2*i], tH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
fL[i] = _mm256_unpacklo_epi64(cL[2*i], cL[2*i+1]); | |||
fH[i] = _mm256_unpackhi_epi64(cL[2*i], cL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
gL[i] = _mm256_unpacklo_epi64(cH[2*i], cH[2*i+1]); | |||
gH[i] = _mm256_unpackhi_epi64(cH[2*i], cH[2*i+1]); | |||
} | |||
//------------------------------------------------------- | |||
M[2] = _mm256_permute2f128_si256(eL[0], eL[1], 0x20); | |||
M[10] = _mm256_permute2f128_si256(eL[0], eL[1], 0x31); | |||
M[3] = _mm256_permute2f128_si256(eH[0], eH[1], 0x20); | |||
M[11] = _mm256_permute2f128_si256(eH[0], eH[1], 0x31); | |||
M[4] = _mm256_permute2f128_si256(fL[0], fL[1], 0x20); | |||
M[12] = _mm256_permute2f128_si256(fL[0], fL[1], 0x31); | |||
M[5] = _mm256_permute2f128_si256(fH[0], fH[1], 0x20); | |||
M[13] = _mm256_permute2f128_si256(fH[0], fH[1], 0x31); | |||
M[6] = _mm256_permute2f128_si256(gL[0], gL[1], 0x20); | |||
M[14] = _mm256_permute2f128_si256(gL[0], gL[1], 0x31); | |||
M[7] = _mm256_permute2f128_si256(gH[0], gH[1], 0x20); | |||
M[15] = _mm256_permute2f128_si256(gH[0], gH[1], 0x31); | |||
} | |||
void transpose1(__m256i *M) | |||
{ | |||
int i; | |||
__m256i tL[8], tH[8]; | |||
__m256i bL[4], bH[4], cL[4], cH[4]; | |||
__m256i dL[2], dH[2], eL[2], eH[2], fL[2], fH[2], gL[2], gH[2]; | |||
for(i=0; i<8; i=i+1) | |||
{ | |||
tL[i] = _mm256_unpacklo_epi16(M[2*i], M[2*i+1]); | |||
tH[i] = _mm256_unpackhi_epi16(M[2*i], M[2*i+1]); | |||
} | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
bL[i] = _mm256_unpacklo_epi32(tL[2*i], tL[2*i+1]); | |||
bH[i] = _mm256_unpackhi_epi32(tL[2*i], tL[2*i+1]); | |||
} | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
cL[i] = _mm256_unpacklo_epi32(tH[2*i], tH[2*i+1]); | |||
cH[i] = _mm256_unpackhi_epi32(tH[2*i], tH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
dL[i] = _mm256_unpacklo_epi64(bL[2*i], bL[2*i+1]); | |||
dH[i] = _mm256_unpackhi_epi64(bL[2*i], bL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
eL[i] = _mm256_unpacklo_epi64(bH[2*i], bH[2*i+1]); | |||
eH[i] = _mm256_unpackhi_epi64(bH[2*i], bH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
fL[i] = _mm256_unpacklo_epi64(cL[2*i], cL[2*i+1]); | |||
fH[i] = _mm256_unpackhi_epi64(cL[2*i], cL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
gL[i] = _mm256_unpacklo_epi64(cH[2*i], cH[2*i+1]); | |||
gH[i] = _mm256_unpackhi_epi64(cH[2*i], cH[2*i+1]); | |||
} | |||
M[0] = _mm256_permute2f128_si256(dL[0], dL[1], 0x20); | |||
M[8] = _mm256_permute2f128_si256(dL[0], dL[1], 0x31); | |||
M[1] = _mm256_permute2f128_si256(dH[0], dH[1], 0x20); | |||
M[9] = _mm256_permute2f128_si256(dH[0], dH[1], 0x31); | |||
M[2] = _mm256_permute2f128_si256(eL[0], eL[1], 0x20); | |||
M[10] = _mm256_permute2f128_si256(eL[0], eL[1], 0x31); | |||
M[3] = _mm256_permute2f128_si256(eH[0], eH[1], 0x20); | |||
M[11] = _mm256_permute2f128_si256(eH[0], eH[1], 0x31); | |||
M[4] = _mm256_permute2f128_si256(fL[0], fL[1], 0x20); | |||
M[12] = _mm256_permute2f128_si256(fL[0], fL[1], 0x31); | |||
M[5] = _mm256_permute2f128_si256(fH[0], fH[1], 0x20); | |||
M[13] = _mm256_permute2f128_si256(fH[0], fH[1], 0x31); | |||
M[6] = _mm256_permute2f128_si256(gL[0], gL[1], 0x20); | |||
M[14] = _mm256_permute2f128_si256(gL[0], gL[1], 0x31); | |||
M[7] = _mm256_permute2f128_si256(gH[0], gH[1], 0x20); | |||
M[15] = _mm256_permute2f128_si256(gH[0], gH[1], 0x31); | |||
} | |||
*/ |
@@ -0,0 +1,753 @@ | |||
//#define SCM_SIZE 16 | |||
//#pragma STDC FP_CONTRACT ON | |||
#include <immintrin.h> | |||
inline __m256i mul_add(__m256i a, __m256i b, __m256i c) { | |||
return _mm256_add_epi16(_mm256_mullo_epi16(a, b), c); | |||
} | |||
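/*
 * Note (added for clarity, not part of the original source): mul_add()
 * computes a*b + c independently in each of the sixteen 16-bit lanes, so all
 * products wrap modulo 2^16. For Saber this wraparound is harmless because
 * the moduli q = 2^13 and p = 2^10 are powers of two, and the final
 * coefficients are recovered later by masking and shifting.
 */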
static void schoolbook_avx_new3_acc(__m256i* a, __m256i* b, __m256i* c_avx) // 8 coefficients of a and b have been prefetched
// the c_avx entries are accumulated cumulatively (c_avx += a*b)
{ | |||
register __m256i a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7; | |||
register __m256i temp; | |||
a0=a[0]; | |||
a1=a[1]; | |||
a2=a[2]; | |||
a3=a[3]; | |||
a4=a[4]; | |||
a5=a[5]; | |||
a6=a[6]; | |||
a7=a[7]; | |||
b0=b[0]; | |||
b1=b[1]; | |||
b2=b[2]; | |||
b3=b[3]; | |||
b4=b[4]; | |||
b5=b[5]; | |||
b6=b[6]; | |||
b7=b[7]; | |||
// New Unrolled first triangle | |||
//otherwise accumulate | |||
c_avx[0] = mul_add(a0, b0, c_avx[0]); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
temp=mul_add(a1, b0, temp); | |||
c_avx[1] = _mm256_add_epi16(temp, c_avx[1]); | |||
temp = _mm256_mullo_epi16 (a0, b2); | |||
temp = mul_add(a1, b1, temp); | |||
temp=mul_add(a2, b0, temp); | |||
c_avx[2] = _mm256_add_epi16(temp, c_avx[2]); | |||
temp = _mm256_mullo_epi16 (a0, b3); | |||
temp = mul_add(a1, b2, temp); | |||
temp = mul_add(a2, b1, temp); | |||
temp=mul_add(a3, b0, temp); | |||
c_avx[3] = _mm256_add_epi16(temp, c_avx[3]); | |||
temp = _mm256_mullo_epi16 (a0, b4); | |||
temp = mul_add(a1, b3, temp); | |||
temp = mul_add(a3, b1, temp); | |||
temp = mul_add(a4, b0, temp); | |||
temp=mul_add(a2, b2, temp); | |||
c_avx[4] = _mm256_add_epi16(temp, c_avx[4]); | |||
temp = _mm256_mullo_epi16 (a0, b5); | |||
temp = mul_add(a1, b4 , temp); | |||
temp = mul_add(a2, b3, temp); | |||
temp = mul_add(a3, b2, temp); | |||
temp = mul_add( a4, b1, temp); | |||
temp=mul_add(a5, b0, temp); | |||
c_avx[5] = _mm256_add_epi16(temp, c_avx[5]); | |||
temp = _mm256_mullo_epi16 (a0, b6); | |||
temp = mul_add(a1, b5, temp); | |||
temp = mul_add(a5, b1, temp); | |||
temp = mul_add(a6, b0, temp); | |||
temp = mul_add(a2, b4, temp); | |||
temp = mul_add(a3, b3, temp); | |||
temp=mul_add(a4, b2, temp); | |||
c_avx[6] = _mm256_add_epi16(temp, c_avx[6]); | |||
temp = _mm256_mullo_epi16 (a0, b7); | |||
temp = mul_add(a1, b6, temp); | |||
temp = mul_add (a6, b1, temp); | |||
temp = mul_add (a7, b0, temp); | |||
temp = mul_add(a2, b5, temp); | |||
temp = mul_add (a3, b4, temp); | |||
temp = mul_add (a4, b3, temp); | |||
temp=mul_add(a5, b2, temp); | |||
c_avx[7] = _mm256_add_epi16(temp, c_avx[7]); | |||
temp = _mm256_mullo_epi16 (a0, b[8]); | |||
temp = mul_add (a1, b7, temp); | |||
temp = mul_add (a7, b1, temp); | |||
temp = mul_add (a[8], b0, temp); | |||
temp = mul_add (a2, b6,temp); | |||
temp = mul_add(a3, b5, temp); | |||
temp = mul_add (a4, b4,temp); | |||
temp = mul_add (a5, b3, temp); | |||
temp=mul_add(a6, b2, temp); | |||
c_avx[8] = _mm256_add_epi16(temp, c_avx[8]); | |||
temp = _mm256_mullo_epi16 (a0, b[9]); | |||
temp = mul_add (a1, b[8], temp); | |||
temp = mul_add (a[8], b1, temp); | |||
temp = mul_add (a[9], b0, temp); | |||
temp = mul_add (a2, b7, temp); | |||
temp = mul_add (a3, b6, temp); | |||
temp = mul_add (a4, b5, temp); | |||
temp = mul_add (a5, b4, temp); | |||
temp = mul_add (a6, b3, temp); | |||
temp=mul_add(a7, b2, temp); | |||
c_avx[9] = _mm256_add_epi16(temp, c_avx[9]); | |||
temp= _mm256_mullo_epi16 (a0, b[10]); | |||
temp = mul_add (a1, b[9], temp); | |||
temp = mul_add (a[9], b1, temp); | |||
temp = mul_add (a[10], b0, temp); | |||
temp = mul_add (a2, b[8], temp); | |||
temp = mul_add (a3, b7, temp); | |||
temp = mul_add (a4, b6, temp); | |||
temp = mul_add (a5, b5, temp); | |||
temp = mul_add (a6, b4, temp); | |||
temp = mul_add (a7, b3, temp); | |||
temp=mul_add(a[8], b2, temp); | |||
c_avx[10] = _mm256_add_epi16(temp, c_avx[10]); | |||
temp = _mm256_mullo_epi16 (a0, b[11]); | |||
temp = mul_add (a1, b[10], temp ); | |||
temp = mul_add (a[10], b1, temp ); | |||
temp = mul_add (a[11], b0, temp ); | |||
temp = mul_add (a2, b[9], temp ); | |||
temp = mul_add (a3, b[8], temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a[8], b3, temp ); | |||
temp=mul_add(a[9], b2, temp); | |||
c_avx[11] = _mm256_add_epi16(temp, c_avx[11]); | |||
temp = _mm256_mullo_epi16 (a0, b[12]); | |||
temp = mul_add (a1, b[11], temp); | |||
temp = mul_add (a[11], b1, temp); | |||
temp = mul_add (a[12], b0, temp); | |||
temp = mul_add (a2, b[10], temp); | |||
temp = mul_add (a3, b[9], temp); | |||
temp = mul_add (a4, b[8], temp); | |||
temp = mul_add (a5, b7, temp); | |||
temp = mul_add (a6, b6, temp); | |||
temp = mul_add (a7, b5, temp); | |||
temp = mul_add (a[8], b4, temp); | |||
temp = mul_add (a[9], b3, temp); | |||
temp=mul_add(a[10], b2, temp); | |||
c_avx[12] = _mm256_add_epi16(temp, c_avx[12]); | |||
temp = _mm256_mullo_epi16 (a0, b[13]); | |||
temp = mul_add (a1, b[12], temp ); | |||
temp = mul_add (a[12], b1, temp ); | |||
temp = mul_add (a[13], b0, temp ); | |||
temp = mul_add (a2, b[11], temp ); | |||
temp = mul_add (a3, b[10], temp ); | |||
temp = mul_add (a4, b[9], temp ); | |||
temp = mul_add (a5, b[8], temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a[8], b5, temp ); | |||
temp = mul_add (a[9], b4, temp ); | |||
temp = mul_add (a[10], b3, temp ); | |||
temp=mul_add(a[11], b2, temp); | |||
c_avx[13] = _mm256_add_epi16(temp, c_avx[13]); | |||
temp = _mm256_mullo_epi16 (a0, b[14]); | |||
temp = mul_add (a1, b[13], temp ); | |||
temp = mul_add (a[13], b1, temp ); | |||
temp = mul_add (a[14], b0, temp ); | |||
temp = mul_add (a2, b[12], temp ); | |||
temp = mul_add (a3, b[11], temp ); | |||
temp = mul_add (a4, b[10], temp ); | |||
temp = mul_add (a5, b[9], temp ); | |||
temp = mul_add (a6, b[8], temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a[8], b6, temp ); | |||
temp = mul_add (a[9], b5, temp ); | |||
temp = mul_add (a[10], b4, temp ); | |||
temp = mul_add (a[11], b3, temp ); | |||
temp=mul_add(a[12], b2, temp); | |||
c_avx[14] = _mm256_add_epi16(temp, c_avx[14]); | |||
temp = _mm256_mullo_epi16 (a0, b[15]); | |||
temp = mul_add (a1, b[14], temp ); | |||
temp = mul_add (a[14], b1, temp ); | |||
temp = mul_add (a[15], b0, temp ); | |||
temp = mul_add (a2, b[13], temp ); | |||
temp = mul_add (a3, b[12], temp ); | |||
temp = mul_add (a4, b[11], temp ); | |||
temp = mul_add (a5, b[10], temp ); | |||
temp = mul_add (a6, b[9], temp ); | |||
temp = mul_add (a7, b[8], temp ); | |||
temp = mul_add (a[8], b7, temp ); | |||
temp = mul_add (a[9], b6, temp ); | |||
temp = mul_add (a[10], b5, temp ); | |||
temp = mul_add (a[11], b4, temp ); | |||
temp = mul_add (a[12], b3, temp ); | |||
temp=mul_add(a[13], b2, temp); | |||
c_avx[15] = _mm256_add_epi16(temp, c_avx[15]); | |||
// unrolled second triangle | |||
a0=a[14]; | |||
a1=a[15]; | |||
a2=a[13]; | |||
a3=a[12]; | |||
a4=a[11]; | |||
a5=a[10]; | |||
a6=a[9]; | |||
a7=a[8]; | |||
b0=b[14]; | |||
b1=b[15]; | |||
b2=b[13]; | |||
b3=b[12]; | |||
b4=b[11]; | |||
b5=b[10]; | |||
b6=b[9]; | |||
b7=b[8]; | |||
temp = _mm256_mullo_epi16 (a[1], b1); | |||
temp = mul_add (a[2], b0, temp ); | |||
temp = mul_add (a[3], b2, temp ); | |||
temp = mul_add (a[4], b3, temp ); | |||
temp = mul_add (a[5], b4, temp ); | |||
temp = mul_add (a[6], b5, temp ); | |||
temp = mul_add (a[7], b6, temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a6, b[7], temp ); | |||
temp = mul_add (a5, b[6], temp ); | |||
temp = mul_add (a4, b[5], temp ); | |||
temp = mul_add (a3, b[4], temp ); | |||
temp = mul_add (a2, b[3], temp ); | |||
temp = mul_add (a0, b[2], temp ); | |||
temp=mul_add(a1, b[1], temp); | |||
c_avx[16] = _mm256_add_epi16(temp, c_avx[16]); | |||
temp = _mm256_mullo_epi16 (a[2], b1); | |||
temp = mul_add (a[3], b0, temp ); | |||
temp = mul_add (a[4], b2, temp ); | |||
temp = mul_add (a[5], b3, temp ); | |||
temp = mul_add (a[6], b4, temp ); | |||
temp = mul_add (a[7], b5, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a5, b[7], temp ); | |||
temp = mul_add (a4, b[6], temp ); | |||
temp = mul_add (a3, b[5], temp ); | |||
temp = mul_add (a2, b[4], temp ); | |||
temp = mul_add (a0, b[3], temp ); | |||
temp=mul_add(a1, b[2], temp); | |||
c_avx[17] = _mm256_add_epi16(temp, c_avx[17]); | |||
temp = _mm256_mullo_epi16 (a[3], b1); | |||
temp = mul_add (a[4], b0, temp ); | |||
temp = mul_add (a[5], b2, temp ); | |||
temp = mul_add (a[6], b3, temp ); | |||
temp = mul_add (a[7], b4, temp ); | |||
temp = mul_add (a7, b5, temp ); | |||
temp = mul_add (a6, b6, temp ); | |||
temp = mul_add (a5, b7, temp ); | |||
temp = mul_add (a4, b[7], temp ); | |||
temp = mul_add (a3, b[6], temp ); | |||
temp = mul_add (a2, b[5], temp ); | |||
temp = mul_add (a0, b[4], temp ); | |||
temp=mul_add(a1, b[3], temp); | |||
c_avx[18] = _mm256_add_epi16(temp, c_avx[18]); | |||
temp = _mm256_mullo_epi16 (a[4], b1); | |||
temp = mul_add (a[5], b0, temp ); | |||
temp = mul_add (a[6], b2, temp ); | |||
temp = mul_add (a[7], b3, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a3, b[7], temp ); | |||
temp = mul_add (a2, b[6], temp ); | |||
temp = mul_add (a0, b[5], temp ); | |||
temp=mul_add(a1, b[4], temp); | |||
c_avx[19] = _mm256_add_epi16(temp, c_avx[19]); | |||
temp = _mm256_mullo_epi16 (a[5], b1); | |||
temp = mul_add (a[6], b0, temp ); | |||
temp = mul_add (a[7], b2, temp ); | |||
temp = mul_add (a7, b3, temp ); | |||
temp = mul_add (a6, b4, temp ); | |||
temp = mul_add (a5, b5, temp ); | |||
temp = mul_add (a4, b6, temp ); | |||
temp = mul_add (a3, b7, temp ); | |||
temp = mul_add (a2, b[7], temp ); | |||
temp = mul_add (a0, b[6], temp ); | |||
temp=mul_add(a1, b[5], temp); | |||
c_avx[20] = _mm256_add_epi16(temp, c_avx[20]); | |||
temp = _mm256_mullo_epi16 (a[6], b1); | |||
temp = mul_add (a[7], b0, temp ); | |||
temp = mul_add (a7, b2, temp ); | |||
temp = mul_add (a6, b3, temp ); | |||
temp = mul_add (a5, b4, temp ); | |||
temp = mul_add (a4, b5, temp ); | |||
temp = mul_add (a3, b6, temp ); | |||
temp = mul_add (a2, b7, temp ); | |||
temp = mul_add (a0, b[7], temp ); | |||
temp=mul_add(a1, b[6], temp); | |||
c_avx[21] = _mm256_add_epi16(temp, c_avx[21]); | |||
temp = _mm256_mullo_epi16 (a[7], b1); | |||
temp = mul_add (a7, b0, temp ); | |||
temp = mul_add (a6, b2, temp ); | |||
temp = mul_add (a5, b3, temp ); | |||
temp = mul_add (a4, b4, temp ); | |||
temp = mul_add (a3, b5, temp ); | |||
temp = mul_add (a2, b6, temp ); | |||
temp = mul_add (a0, b7, temp ); | |||
temp=mul_add(a1, b[7], temp); | |||
c_avx[22] = _mm256_add_epi16(temp, c_avx[22]); | |||
temp = _mm256_mullo_epi16 (a7, b1); | |||
temp = mul_add (a6, b0, temp ); | |||
temp = mul_add (a5, b2, temp ); | |||
temp = mul_add (a4, b3, temp ); | |||
temp = mul_add (a3, b4, temp ); | |||
temp = mul_add (a2, b5, temp ); | |||
temp = mul_add (a0, b6, temp ); | |||
temp=mul_add(a1, b7, temp); | |||
c_avx[23] = _mm256_add_epi16(temp, c_avx[23]); | |||
temp = _mm256_mullo_epi16 (a6, b1); | |||
temp = mul_add (a5, b0, temp ); | |||
temp = mul_add (a4, b2, temp ); | |||
temp = mul_add (a3, b3, temp ); | |||
temp = mul_add (a2, b4, temp ); | |||
temp = mul_add (a0, b5, temp ); | |||
temp=mul_add(a1, b6, temp); | |||
c_avx[24] = _mm256_add_epi16(temp, c_avx[24]); | |||
temp = _mm256_mullo_epi16 (a5, b1); | |||
temp = mul_add (a4, b0, temp ); | |||
temp = mul_add (a3, b2, temp ); | |||
temp = mul_add (a2, b3, temp ); | |||
temp = mul_add (a0, b4, temp ); | |||
temp=mul_add(a1, b5, temp); | |||
c_avx[25] = _mm256_add_epi16(temp, c_avx[25]); | |||
temp = _mm256_mullo_epi16 (a4, b1); | |||
temp = mul_add (a3, b0, temp ); | |||
temp = mul_add (a2, b2, temp ); | |||
temp = mul_add (a0, b3, temp ); | |||
temp=mul_add(a1, b4, temp); | |||
c_avx[26] = _mm256_add_epi16(temp, c_avx[26]); | |||
temp = _mm256_mullo_epi16 (a3, b1); | |||
temp = mul_add (a2, b0, temp ); | |||
temp = mul_add (a0, b2, temp ); | |||
temp=mul_add(a1, b3, temp); | |||
c_avx[27] = _mm256_add_epi16(temp, c_avx[27]); | |||
temp = _mm256_mullo_epi16 (a2, b1); | |||
temp = mul_add (a0, b0, temp ); | |||
temp=mul_add(a1, b2, temp); | |||
c_avx[28] = _mm256_add_epi16(temp, c_avx[28]); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
temp=mul_add(a1, b0, temp); | |||
c_avx[29] = _mm256_add_epi16(temp, c_avx[29]); | |||
c_avx[30] = mul_add(a1, b1, c_avx[30]); | |||
c_avx[2*SCM_SIZE-1] = _mm256_set_epi64x(0, 0, 0, 0); | |||
} | |||
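/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * per 16-bit lane, schoolbook_avx_new3_acc() above computes the full 16x16
 * schoolbook product of two degree-15 polynomials and adds it onto the
 * accumulators c_avx[0..30], then clears c_avx[31]. The schoolbook_avx_new2()
 * routine below is the same computation except that it overwrites c_avx
 * instead of accumulating. A scalar reference model of the accumulating
 * variant, assuming SCM_SIZE == 16, is kept inside a comment:
 */
/*
static void schoolbook_acc_scalar(const uint16_t a[16], const uint16_t b[16], uint16_t c[32]) {
    for (int i = 0; i < 16; i++) {
        for (int j = 0; j < 16; j++) {
            // accumulate, wrapping modulo 2^16 like the epi16 lanes
            c[i + j] = (uint16_t)(c[i + j] + (uint32_t)a[i] * b[j]);
        }
    }
    c[31] = 0; // mirrors c_avx[2*SCM_SIZE-1] being zeroed above
}
*/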
static void schoolbook_avx_new2(__m256i* a, __m256i* b, __m256i* c_avx) // 8 coefficients of a and b have been prefetched
// the c_avx entries are not accumulated (c_avx = a*b)
{ | |||
__m256i a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7; | |||
__m256i temp; | |||
a0=a[0]; | |||
a1=a[1]; | |||
a2=a[2]; | |||
a3=a[3]; | |||
a4=a[4]; | |||
a5=a[5]; | |||
a6=a[6]; | |||
a7=a[7]; | |||
b0=b[0]; | |||
b1=b[1]; | |||
b2=b[2]; | |||
b3=b[3]; | |||
b4=b[4]; | |||
b5=b[5]; | |||
b6=b[6]; | |||
b7=b[7]; | |||
// New Unrolled first triangle | |||
c_avx[0] = _mm256_mullo_epi16 (a0, b0); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
c_avx[1]=mul_add(a1, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b2); | |||
temp = mul_add(a1, b1, temp); | |||
c_avx[2]= mul_add(a2, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b3); | |||
temp = mul_add(a1, b2, temp); | |||
temp = mul_add(a2, b1, temp); | |||
c_avx[3]= mul_add(a3, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b4); | |||
temp = mul_add(a1, b3, temp); | |||
temp = mul_add(a3, b1, temp); | |||
temp = mul_add(a4, b0, temp); | |||
c_avx[4]= mul_add(a2, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b5); | |||
temp = mul_add(a1, b4 , temp); | |||
temp = mul_add(a2, b3, temp); | |||
temp = mul_add(a3, b2, temp); | |||
temp = mul_add( a4, b1, temp); | |||
c_avx[5] = mul_add(a5, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b6); | |||
temp = mul_add(a1, b5, temp); | |||
temp = mul_add(a5, b1, temp); | |||
temp = mul_add(a6, b0, temp); | |||
temp = mul_add(a2, b4, temp); | |||
temp = mul_add(a3, b3, temp); | |||
c_avx[6] = mul_add(a4, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b7); | |||
temp = mul_add(a1, b6, temp); | |||
temp = mul_add (a6, b1, temp); | |||
temp = mul_add (a7, b0, temp); | |||
temp = mul_add(a2, b5, temp); | |||
temp = mul_add (a3, b4, temp); | |||
temp = mul_add (a4, b3, temp); | |||
c_avx[7] = mul_add (a5, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[8]); | |||
temp = mul_add (a1, b7, temp); | |||
temp = mul_add (a7, b1, temp); | |||
temp = mul_add (a[8], b0, temp); | |||
temp = mul_add (a2, b6,temp); | |||
temp = mul_add(a3, b5, temp); | |||
temp = mul_add (a4, b4,temp); | |||
temp = mul_add (a5, b3, temp); | |||
c_avx[8] = mul_add (a6, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[9]); | |||
temp = mul_add (a1, b[8], temp); | |||
temp = mul_add (a[8], b1, temp); | |||
temp = mul_add (a[9], b0, temp); | |||
temp = mul_add (a2, b7, temp); | |||
temp = mul_add (a3, b6, temp); | |||
temp = mul_add (a4, b5, temp); | |||
temp = mul_add (a5, b4, temp); | |||
temp = mul_add (a6, b3, temp); | |||
c_avx[9] = mul_add (a7, b2, temp); | |||
temp= _mm256_mullo_epi16 (a0, b[10]); | |||
temp = mul_add (a1, b[9], temp); | |||
temp = mul_add (a[9], b1, temp); | |||
temp = mul_add (a[10], b0, temp); | |||
temp = mul_add (a2, b[8], temp); | |||
temp = mul_add (a3, b7, temp); | |||
temp = mul_add (a4, b6, temp); | |||
temp = mul_add (a5, b5, temp); | |||
temp = mul_add (a6, b4, temp); | |||
temp = mul_add (a7, b3, temp); | |||
c_avx[10] = mul_add (a[8], b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[11]); | |||
temp = mul_add (a1, b[10], temp ); | |||
temp = mul_add (a[10], b1, temp ); | |||
temp = mul_add (a[11], b0, temp ); | |||
temp = mul_add (a2, b[9], temp ); | |||
temp = mul_add (a3, b[8], temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a[8], b3, temp ); | |||
c_avx[11] = mul_add (a[9], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[12]); | |||
temp = mul_add (a1, b[11], temp); | |||
temp = mul_add (a[11], b1, temp); | |||
temp = mul_add (a[12], b0, temp); | |||
temp = mul_add (a2, b[10], temp); | |||
temp = mul_add (a3, b[9], temp); | |||
temp = mul_add (a4, b[8], temp); | |||
temp = mul_add (a5, b7, temp); | |||
temp = mul_add (a6, b6, temp); | |||
temp = mul_add (a7, b5, temp); | |||
temp = mul_add (a[8], b4, temp); | |||
temp = mul_add (a[9], b3, temp); | |||
c_avx[12] = mul_add (a[10], b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[13]); | |||
temp = mul_add (a1, b[12], temp ); | |||
temp = mul_add (a[12], b1, temp ); | |||
temp = mul_add (a[13], b0, temp ); | |||
temp = mul_add (a2, b[11], temp ); | |||
temp = mul_add (a3, b[10], temp ); | |||
temp = mul_add (a4, b[9], temp ); | |||
temp = mul_add (a5, b[8], temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a[8], b5, temp ); | |||
temp = mul_add (a[9], b4, temp ); | |||
temp = mul_add (a[10], b3, temp ); | |||
c_avx[13] = mul_add (a[11], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[14]); | |||
temp = mul_add (a1, b[13], temp ); | |||
temp = mul_add (a[13], b1, temp ); | |||
temp = mul_add (a[14], b0, temp ); | |||
temp = mul_add (a2, b[12], temp ); | |||
temp = mul_add (a3, b[11], temp ); | |||
temp = mul_add (a4, b[10], temp ); | |||
temp = mul_add (a5, b[9], temp ); | |||
temp = mul_add (a6, b[8], temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a[8], b6, temp ); | |||
temp = mul_add (a[9], b5, temp ); | |||
temp = mul_add (a[10], b4, temp ); | |||
temp = mul_add (a[11], b3, temp ); | |||
c_avx[14] = mul_add (a[12], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[15]); | |||
temp = mul_add (a1, b[14], temp ); | |||
temp = mul_add (a[14], b1, temp ); | |||
temp = mul_add (a[15], b0, temp ); | |||
temp = mul_add (a2, b[13], temp ); | |||
temp = mul_add (a3, b[12], temp ); | |||
temp = mul_add (a4, b[11], temp ); | |||
temp = mul_add (a5, b[10], temp ); | |||
temp = mul_add (a6, b[9], temp ); | |||
temp = mul_add (a7, b[8], temp ); | |||
temp = mul_add (a[8], b7, temp ); | |||
temp = mul_add (a[9], b6, temp ); | |||
temp = mul_add (a[10], b5, temp ); | |||
temp = mul_add (a[11], b4, temp ); | |||
temp = mul_add (a[12], b3, temp ); | |||
c_avx[15] = mul_add (a[13], b2, temp ); | |||
// unrolled second triangle | |||
a0=a[14]; | |||
a1=a[15]; | |||
a2=a[13]; | |||
a3=a[12]; | |||
a4=a[11]; | |||
a5=a[10]; | |||
a6=a[9]; | |||
a7=a[8]; | |||
b0=b[14]; | |||
b1=b[15]; | |||
b2=b[13]; | |||
b3=b[12]; | |||
b4=b[11]; | |||
b5=b[10]; | |||
b6=b[9]; | |||
b7=b[8]; | |||
temp = _mm256_mullo_epi16 (a[1], b1); | |||
temp = mul_add (a[2], b0, temp ); | |||
temp = mul_add (a[3], b2, temp ); | |||
temp = mul_add (a[4], b3, temp ); | |||
temp = mul_add (a[5], b4, temp ); | |||
temp = mul_add (a[6], b5, temp ); | |||
temp = mul_add (a[7], b6, temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a6, b[7], temp ); | |||
temp = mul_add (a5, b[6], temp ); | |||
temp = mul_add (a4, b[5], temp ); | |||
temp = mul_add (a3, b[4], temp ); | |||
temp = mul_add (a2, b[3], temp ); | |||
temp = mul_add (a0, b[2], temp ); | |||
c_avx[16] = mul_add (a1, b[1], temp ); | |||
temp = _mm256_mullo_epi16 (a[2], b1); | |||
temp = mul_add (a[3], b0, temp ); | |||
temp = mul_add (a[4], b2, temp ); | |||
temp = mul_add (a[5], b3, temp ); | |||
temp = mul_add (a[6], b4, temp ); | |||
temp = mul_add (a[7], b5, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a5, b[7], temp ); | |||
temp = mul_add (a4, b[6], temp ); | |||
temp = mul_add (a3, b[5], temp ); | |||
temp = mul_add (a2, b[4], temp ); | |||
temp = mul_add (a0, b[3], temp ); | |||
c_avx[17] = mul_add (a1, b[2], temp ); | |||
temp = _mm256_mullo_epi16 (a[3], b1); | |||
temp = mul_add (a[4], b0, temp ); | |||
temp = mul_add (a[5], b2, temp ); | |||
temp = mul_add (a[6], b3, temp ); | |||
temp = mul_add (a[7], b4, temp ); | |||
temp = mul_add (a7, b5, temp ); | |||
temp = mul_add (a6, b6, temp ); | |||
temp = mul_add (a5, b7, temp ); | |||
temp = mul_add (a4, b[7], temp ); | |||
temp = mul_add (a3, b[6], temp ); | |||
temp = mul_add (a2, b[5], temp ); | |||
temp = mul_add (a0, b[4], temp ); | |||
c_avx[18] = mul_add (a1, b[3], temp ); | |||
temp = _mm256_mullo_epi16 (a[4], b1); | |||
temp = mul_add (a[5], b0, temp ); | |||
temp = mul_add (a[6], b2, temp ); | |||
temp = mul_add (a[7], b3, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a3, b[7], temp ); | |||
temp = mul_add (a2, b[6], temp ); | |||
temp = mul_add (a0, b[5], temp ); | |||
c_avx[19] = mul_add (a1, b[4], temp ); | |||
temp = _mm256_mullo_epi16 (a[5], b1); | |||
temp = mul_add (a[6], b0, temp ); | |||
temp = mul_add (a[7], b2, temp ); | |||
temp = mul_add (a7, b3, temp ); | |||
temp = mul_add (a6, b4, temp ); | |||
temp = mul_add (a5, b5, temp ); | |||
temp = mul_add (a4, b6, temp ); | |||
temp = mul_add (a3, b7, temp ); | |||
temp = mul_add (a2, b[7], temp ); | |||
temp = mul_add (a0, b[6], temp ); | |||
c_avx[20] = mul_add (a1, b[5], temp ); | |||
temp = _mm256_mullo_epi16 (a[6], b1); | |||
temp = mul_add (a[7], b0, temp ); | |||
temp = mul_add (a7, b2, temp ); | |||
temp = mul_add (a6, b3, temp ); | |||
temp = mul_add (a5, b4, temp ); | |||
temp = mul_add (a4, b5, temp ); | |||
temp = mul_add (a3, b6, temp ); | |||
temp = mul_add (a2, b7, temp ); | |||
temp = mul_add (a0, b[7], temp ); | |||
c_avx[21] = mul_add (a1, b[6], temp ); | |||
temp = _mm256_mullo_epi16 (a[7], b1); | |||
temp = mul_add (a7, b0, temp ); | |||
temp = mul_add (a6, b2, temp ); | |||
temp = mul_add (a5, b3, temp ); | |||
temp = mul_add (a4, b4, temp ); | |||
temp = mul_add (a3, b5, temp ); | |||
temp = mul_add (a2, b6, temp ); | |||
temp = mul_add (a0, b7, temp ); | |||
c_avx[22] = mul_add (a1, b[7], temp ); | |||
temp = _mm256_mullo_epi16 (a7, b1); | |||
temp = mul_add (a6, b0, temp ); | |||
temp = mul_add (a5, b2, temp ); | |||
temp = mul_add (a4, b3, temp ); | |||
temp = mul_add (a3, b4, temp ); | |||
temp = mul_add (a2, b5, temp ); | |||
temp = mul_add (a0, b6, temp ); | |||
c_avx[23] = mul_add (a1, b7, temp ); | |||
temp = _mm256_mullo_epi16 (a6, b1); | |||
temp = mul_add (a5, b0, temp ); | |||
temp = mul_add (a4, b2, temp ); | |||
temp = mul_add (a3, b3, temp ); | |||
temp = mul_add (a2, b4, temp ); | |||
temp = mul_add (a0, b5, temp ); | |||
c_avx[24] = mul_add (a1, b6, temp ); | |||
temp = _mm256_mullo_epi16 (a5, b1); | |||
temp = mul_add (a4, b0, temp ); | |||
temp = mul_add (a3, b2, temp ); | |||
temp = mul_add (a2, b3, temp ); | |||
temp = mul_add (a0, b4, temp ); | |||
c_avx[25] = mul_add (a1, b5, temp ); | |||
temp = _mm256_mullo_epi16 (a4, b1); | |||
temp = mul_add (a3, b0, temp ); | |||
temp = mul_add (a2, b2, temp ); | |||
temp = mul_add (a0, b3, temp ); | |||
c_avx[26] = mul_add (a1, b4, temp ); | |||
temp = _mm256_mullo_epi16 (a3, b1); | |||
temp = mul_add (a2, b0, temp ); | |||
temp = mul_add (a0, b2, temp ); | |||
c_avx[27] = mul_add (a1, b3, temp ); | |||
temp = _mm256_mullo_epi16 (a2, b1); | |||
temp = mul_add (a0, b0, temp ); | |||
c_avx[28] = mul_add (a1, b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
c_avx[29] = mul_add (a1, b0, temp); | |||
c_avx[30] = _mm256_mullo_epi16 (a1, b1); | |||
c_avx[2*SCM_SIZE-1] = _mm256_set_epi64x(0, 0, 0, 0); | |||
} |
@@ -0,0 +1,35 @@ | |||
#include "verify.h" | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
"CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------*/ | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
uint8_t PQCLEAN_FIRESABER_AVX2_verify(const uint8_t *a, const uint8_t *b, size_t len) { | |||
uint64_t r; | |||
size_t i; | |||
r = 0; | |||
for (i = 0; i < len; i++) { | |||
r |= a[i] ^ b[i]; | |||
} | |||
r = (~r + 1); // Two's complement | |||
r >>= 63; | |||
return (uint8_t) r; | |||
} | |||
/* b = 1 means copy x into r, b = 0 means leave r unchanged */
void PQCLEAN_FIRESABER_AVX2_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b) { | |||
size_t i; | |||
b = -b; | |||
for (i = 0; i < len; i++) { | |||
r[i] ^= b & (x[i] ^ r[i]); | |||
} | |||
} |
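/*
 * Usage sketch (added for illustration, not part of the original source):
 * in the CCA decapsulation path, verify() compares the received ciphertext
 * with the re-encryption in constant time, and cmov() then conditionally
 * replaces the pre-key material with the secret rejection value z without
 * branching on secret data. The accompanying kem.c is expected to use these
 * helpers roughly as follows (names as in the clean kem.c shown later):
 */
/*
    fail = PQCLEAN_FIRESABER_AVX2_verify(c, cmp, SABER_BYTES_CCA_DEC);
    PQCLEAN_FIRESABER_AVX2_cmov(kr, sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES, fail);
*/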
@@ -0,0 +1,22 @@ | |||
#ifndef VERIFY_H | |||
#define VERIFY_H | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
"CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------*/ | |||
#include <stddef.h> | |||
#include <stdint.h> | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
uint8_t PQCLEAN_FIRESABER_AVX2_verify(const uint8_t *a, const uint8_t *b, size_t len); | |||
/* b = 1 means copy x into r, b = 0 means leave r unchanged */
void PQCLEAN_FIRESABER_AVX2_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b); | |||
#endif |
@@ -1,8 +1 @@ | |||
SABER_v1.1 | |||
Public domain | |||
Authors: Jan-Pieter D'Anvers, Angshuman Karmakar, Sujoy Sinha Roy, | |||
Frederik Vercauteren | |||
Public Domain |
@@ -1,10 +1,10 @@ | |||
# This Makefile can be used with GNU Make or BSD Make | |||
LIB=libfiresaber_clean.a | |||
HEADERS=api.h cbd.h poly.h poly_mul.h SABER_indcpa.h SABER_params.h verify.h pack_unpack.h | |||
HEADERS=api.h cbd.h pack_unpack.h poly.h poly_mul.h SABER_indcpa.h SABER_params.h verify.h | |||
OBJECTS=cbd.o kem.o pack_unpack.o poly.o poly_mul.o SABER_indcpa.o verify.o | |||
CFLAGS=-O3 -Wall -Wextra -Wpedantic -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) | |||
CFLAGS=-O3 -Wall -Wextra -Wpedantic -Wvla -Werror -Wredundant-decls -Wmissing-prototypes -std=c99 -I../../../common $(EXTRAFLAGS) | |||
all: $(LIB) | |||
@@ -3,296 +3,90 @@ | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "poly.h" | |||
#include "poly_mul.h" | |||
#include "randombytes.h" | |||
#include <stdint.h> | |||
#include <string.h> | |||
#define h1 (1 << (SABER_EQ - SABER_EP - 1)) | |||
#define h2 ((1 << (SABER_EP - 2)) - (1 << (SABER_EP - SABER_ET - 1)) + (1 << (SABER_EQ - SABER_EP - 1))) | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]) { | |||
uint16_t A[SABER_L][SABER_L][SABER_N]; | |||
uint16_t s[SABER_L][SABER_N]; | |||
uint16_t b[SABER_L][SABER_N] = {0}; | |||
/*----------------------------------------------------------------------------------- | |||
This routine generates a=[Matrix K x K] of 256-coefficient polynomials | |||
uint8_t seed_A[SABER_SEEDBYTES]; | |||
uint8_t seed_s[SABER_NOISE_SEEDBYTES]; | |||
int i, j; | |||
#define h1 4 //2^(EQ-EP-1) | |||
randombytes(seed_A, SABER_SEEDBYTES); | |||
shake128(seed_A, SABER_SEEDBYTES, seed_A, SABER_SEEDBYTES); // hash the seed so the raw system RNG output is not revealed
randombytes(seed_s, SABER_NOISE_SEEDBYTES); | |||
#define h2 ( (1<<(SABER_EP-2)) - (1<<(SABER_EP-SABER_ET-1)) + (1<<(SABER_EQ-SABER_EP-1)) ) | |||
PQCLEAN_FIRESABER_CLEAN_GenMatrix(A, seed_A); | |||
PQCLEAN_FIRESABER_CLEAN_GenSecret(s, seed_s); | |||
PQCLEAN_FIRESABER_CLEAN_MatrixVectorMul(b, (const uint16_t (*)[SABER_L][SABER_N])A, (const uint16_t (*)[SABER_N])s, 1); | |||
static void InnerProd(uint16_t pkcl[SABER_K][SABER_N], uint16_t skpv[SABER_K][SABER_N], uint16_t mod, uint16_t res[SABER_N]); | |||
static void MatrixVectorMul(polyvec *a, uint16_t skpv[SABER_K][SABER_N], uint16_t res[SABER_K][SABER_N], uint16_t mod, int16_t transpose); | |||
static void POL2MSG(const uint16_t *message_dec_unpacked, unsigned char *message_dec); | |||
static void GenMatrix(polyvec *a, const unsigned char *seed) { | |||
unsigned char buf[SABER_K * SABER_K * (13 * SABER_N / 8)]; | |||
uint16_t temp_ar[SABER_N]; | |||
int i, j, k; | |||
uint16_t mod = (SABER_Q - 1); | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_FIRESABER_CLEAN_BS2POL(buf + (i * SABER_K + j) * (13 * SABER_N / 8), temp_ar); | |||
for (k = 0; k < SABER_N; k++) { | |||
a[i].vec[j].coeffs[k] = (temp_ar[k])& mod ; | |||
} | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_keypair(unsigned char *pk, unsigned char *sk) { | |||
polyvec a[SABER_K]; | |||
uint16_t skpv[SABER_K][SABER_N]; | |||
unsigned char seed[SABER_SEEDBYTES]; | |||
unsigned char noiseseed[SABER_COINBYTES]; | |||
int32_t i, j; | |||
uint16_t mod_q = SABER_Q - 1; | |||
uint16_t res[SABER_K][SABER_N]; | |||
randombytes(seed, SABER_SEEDBYTES); | |||
// hash the seed so the raw system RNG output is not revealed
shake128(seed, SABER_SEEDBYTES, seed, SABER_SEEDBYTES); | |||
randombytes(noiseseed, SABER_COINBYTES); | |||
GenMatrix(a, seed); //sample matrix A | |||
// generate secret from constant-time binomial distribution | |||
PQCLEAN_FIRESABER_CLEAN_GenSecret(skpv, noiseseed); | |||
// do the matrix vector multiplication and rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = 0; | |||
b[i][j] = (b[i][j] + h1) >> (SABER_EQ - SABER_EP); | |||
} | |||
} | |||
MatrixVectorMul(a, skpv, res, SABER_Q - 1, 1); | |||
// now rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
// shift right 3 bits | |||
res[i][j] = (res[i][j] + h1) & (mod_q); | |||
res[i][j] = (res[i][j] >> (SABER_EQ - SABER_EP)); | |||
} | |||
} | |||
// unload and pack sk=3 x (256 coefficients of 14 bits) | |||
PQCLEAN_FIRESABER_CLEAN_POLVEC2BS(sk, skpv, SABER_Q); | |||
// unload and pack pk=256 bits seed and 3 x (256 coefficients of 11 bits) | |||
// load the public-key coefficients | |||
PQCLEAN_FIRESABER_CLEAN_POLVEC2BS(pk, res, SABER_P); | |||
// now load the seedbytes in PK. Easy since seed bytes are kept in byte format. | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { | |||
pk[SABER_POLYVECCOMPRESSEDBYTES + i] = seed[i]; | |||
} | |||
PQCLEAN_FIRESABER_CLEAN_POLVECq2BS(sk, (const uint16_t (*)[SABER_N])s); | |||
PQCLEAN_FIRESABER_CLEAN_POLVECp2BS(pk, (const uint16_t (*)[SABER_N])b); | |||
memcpy(pk + SABER_POLYVECCOMPRESSEDBYTES, seed_A, sizeof(seed_A)); | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(const unsigned char *message_received, unsigned char *noiseseed, const unsigned char *pk, unsigned char *ciphertext) { | |||
uint32_t i, j, k; | |||
polyvec a[SABER_K]; | |||
unsigned char seed[SABER_SEEDBYTES]; | |||
// public key received by the client
uint16_t pkcl[SABER_K][SABER_N]; | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint16_t message[SABER_KEYBYTES * 8]; | |||
uint16_t res[SABER_K][SABER_N]; | |||
uint16_t mod_p = SABER_P - 1; | |||
uint16_t mod_q = SABER_Q - 1; | |||
uint16_t vprime[SABER_N]; | |||
unsigned char msk_c[SABER_SCALEBYTES_KEM]; | |||
// extract the seedbytes from Public Key. | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { | |||
seed[i] = pk[ SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
GenMatrix(a, seed); | |||
// generate secret from constant-time binomial distribution | |||
PQCLEAN_FIRESABER_CLEAN_GenSecret(skpv1, noiseseed); | |||
// matrix-vector multiplication and rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = 0; | |||
} | |||
} | |||
MatrixVectorMul(a, skpv1, res, SABER_Q - 1, 0); | |||
// now rounding | |||
//shift right 3 bits | |||
for (i = 0; i < SABER_K; i++) { | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t seed_sp[SABER_NOISE_SEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]) { | |||
uint16_t A[SABER_L][SABER_L][SABER_N]; | |||
uint16_t sp[SABER_L][SABER_N]; | |||
uint16_t bp[SABER_L][SABER_N] = {0}; | |||
uint16_t vp[SABER_N] = {0}; | |||
uint16_t mp[SABER_N]; | |||
uint16_t b[SABER_L][SABER_N]; | |||
int i, j; | |||
const uint8_t *seed_A = pk + SABER_POLYVECCOMPRESSEDBYTES; | |||
PQCLEAN_FIRESABER_CLEAN_GenMatrix(A, seed_A); | |||
PQCLEAN_FIRESABER_CLEAN_GenSecret(sp, seed_sp); | |||
PQCLEAN_FIRESABER_CLEAN_MatrixVectorMul(bp, (const uint16_t (*)[SABER_L][SABER_N])A, (const uint16_t (*)[SABER_N])sp, 0); | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = ( res[i][j] + h1 ) & mod_q; | |||
res[i][j] = (res[i][j] >> (SABER_EQ - SABER_EP) ); | |||
bp[i][j] = (bp[i][j] + h1) >> (SABER_EQ - SABER_EP); | |||
} | |||
} | |||
PQCLEAN_FIRESABER_CLEAN_POLVEC2BS(ciphertext, res, SABER_P); | |||
PQCLEAN_FIRESABER_CLEAN_POLVECp2BS(ciphertext, (const uint16_t (*)[SABER_N])bp); | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVECp(b, pk); | |||
PQCLEAN_FIRESABER_CLEAN_InnerProd(vp, (const uint16_t (*)[SABER_N])b, (const uint16_t (*)[SABER_N])sp); | |||
// ************client matrix-vector multiplication ends************ | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLmsg(mp, m); | |||
// now calculate the v' | |||
// unpack the public_key | |||
// pkcl is the b in the protocol | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVEC(pk, pkcl, SABER_P); | |||
for (i = 0; i < SABER_N; i++) { | |||
vprime[i] = 0; | |||
} | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
skpv1[i][j] = skpv1[i][j] & (mod_p); | |||
} | |||
for (j = 0; j < SABER_N; j++) { | |||
vp[j] = (vp[j] - (mp[j] << (SABER_EP - 1)) + h1) >> (SABER_EP - SABER_ET); | |||
} | |||
// vector-vector scalar multiplication with mod p | |||
InnerProd(pkcl, skpv1, mod_p, vprime); | |||
// addition of h1 to vprime | |||
for (i = 0; i < SABER_N; i++) { | |||
vprime[i] = vprime[i] + h1; | |||
} | |||
// unpack message_received; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
message[8 * j + i] = ((message_received[j] >> i) & 0x01); | |||
} | |||
} | |||
// message encoding | |||
for (i = 0; i < SABER_N; i++) { | |||
message[i] = (message[i] << (SABER_EP - 1)); | |||
} | |||
for (k = 0; k < SABER_N; k++) { | |||
vprime[k] = ( (vprime[k] - message[k]) & (mod_p) ) >> (SABER_EP - SABER_ET); | |||
} | |||
PQCLEAN_FIRESABER_CLEAN_pack_6bit(msk_c, vprime); | |||
for (j = 0; j < SABER_SCALEBYTES_KEM; j++) { | |||
ciphertext[SABER_POLYVECCOMPRESSEDBYTES + j] = msk_c[j]; | |||
} | |||
PQCLEAN_FIRESABER_CLEAN_POLT2BS(ciphertext + SABER_POLYVECCOMPRESSEDBYTES, vp); | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]) { | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_dec(const unsigned char *sk, const unsigned char *ciphertext, unsigned char message_dec[]) { | |||
uint32_t i, j; | |||
// secret key of the server | |||
uint16_t sksv[SABER_K][SABER_N]; | |||
uint16_t pksv[SABER_K][SABER_N]; | |||
uint8_t scale_ar[SABER_SCALEBYTES_KEM]; | |||
uint16_t mod_p = SABER_P - 1; | |||
uint16_t v[SABER_N]; | |||
uint16_t op[SABER_N]; | |||
// sksv is the secret-key | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVEC(sk, sksv, SABER_Q); | |||
// pksv is the ciphertext | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVEC(ciphertext, pksv, SABER_P); | |||
// vector-vector scalar multiplication with mod p | |||
for (i = 0; i < SABER_N; i++) { | |||
v[i] = 0; | |||
} | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
sksv[i][j] = sksv[i][j] & (mod_p); | |||
} | |||
} | |||
InnerProd(pksv, sksv, mod_p, v); | |||
//Extraction | |||
for (i = 0; i < SABER_SCALEBYTES_KEM; i++) { | |||
scale_ar[i] = ciphertext[SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
uint16_t s[SABER_L][SABER_N]; | |||
uint16_t b[SABER_L][SABER_N]; | |||
uint16_t v[SABER_N] = {0}; | |||
uint16_t cm[SABER_N]; | |||
int i; | |||
PQCLEAN_FIRESABER_CLEAN_un_pack6bit(scale_ar, op); | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVECq(s, sk); | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVECp(b, ciphertext); | |||
PQCLEAN_FIRESABER_CLEAN_InnerProd(v, (const uint16_t (*)[SABER_N])b, (const uint16_t (*)[SABER_N])s); | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLT(cm, ciphertext + SABER_POLYVECCOMPRESSEDBYTES); | |||
// addition of the h2 constant and extraction of the message bit
for (i = 0; i < SABER_N; i++) { | |||
v[i] = ( ( v[i] + h2 - (op[i] << (SABER_EP - SABER_ET)) ) & (mod_p) ) >> (SABER_EP - 1); | |||
v[i] = (v[i] + h2 - (cm[i] << (SABER_EP - SABER_ET))) >> (SABER_EP - 1); | |||
} | |||
// pack decrypted message | |||
POL2MSG(v, message_dec); | |||
} | |||
static void MatrixVectorMul(polyvec *a, uint16_t skpv[SABER_K][SABER_N], uint16_t res[SABER_K][SABER_N], uint16_t mod, int16_t transpose) { | |||
uint16_t acc[SABER_N]; | |||
int32_t i, j, k; | |||
if (transpose == 1) { | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_FIRESABER_CLEAN_pol_mul((uint16_t *)&a[j].vec[i], skpv[j], acc, SABER_Q, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[i][k] = res[i][k] + acc[k]; | |||
//reduction mod p | |||
res[i][k] = (res[i][k] & mod); | |||
//clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
} | |||
} else { | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_FIRESABER_CLEAN_pol_mul((uint16_t *)&a[i].vec[j], skpv[j], acc, SABER_Q, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[i][k] = res[i][k] + acc[k]; | |||
// reduction | |||
res[i][k] = res[i][k] & mod; | |||
// clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
} | |||
} | |||
} | |||
static void POL2MSG(const uint16_t *message_dec_unpacked, unsigned char *message_dec) { | |||
int32_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
message_dec[j] = 0; | |||
for (i = 0; i < 8; i++) { | |||
message_dec[j] = message_dec[j] | (uint8_t) (message_dec_unpacked[j * 8 + i] << i); | |||
} | |||
} | |||
} | |||
static void InnerProd(uint16_t pkcl[SABER_K][SABER_N], uint16_t skpv[SABER_K][SABER_N], uint16_t mod, uint16_t res[SABER_N]) { | |||
uint32_t j, k; | |||
uint16_t acc[SABER_N]; | |||
// vector-vector scalar multiplication with mod p | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_FIRESABER_CLEAN_pol_mul(pkcl[j], skpv[j], acc, SABER_P, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[k] = res[k] + acc[k]; | |||
// reduction | |||
res[k] = res[k] & mod; | |||
// clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
PQCLEAN_FIRESABER_CLEAN_POLmsg2BS(m, v); | |||
} |
@@ -1,9 +1,13 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(const unsigned char *message, unsigned char *noiseseed, const unsigned char *pk, unsigned char *ciphertext); | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_dec(const unsigned char *sk, const unsigned char *ciphertext, unsigned char *message_dec); | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]); | |||
#endif | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t seed_sp[SABER_NOISE_SEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]); | |||
void PQCLEAN_FIRESABER_CLEAN_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]); | |||
#endif |
@@ -1,49 +1,39 @@ | |||
#ifndef PARAMS_H | |||
#define PARAMS_H | |||
#include "api.h" | |||
#define SABER_K 4 | |||
/* Change this for different security strengths */ | |||
/* Don't change anything below this line */ | |||
#define SABER_L 4 | |||
#define SABER_MU 6 | |||
#define SABER_ET 6 | |||
#define SABER_EQ 13 | |||
#define SABER_EP 10 | |||
#define SABER_N 256 | |||
#define SABER_Q 8192 | |||
#define SABER_P 1024 | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISESEEDBYTES 32 | |||
#define SABER_COINBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYBYTES 416 //13*256/8 | |||
#define SABER_POLYVECBYTES (SABER_K * SABER_POLYBYTES) | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISE_SEEDBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_K * 320) //10*256/8 NOTE : changed till here due to parameter adaptation | |||
#define SABER_POLYCOINBYTES (SABER_MU * SABER_N / 8) | |||
#define SABER_CIPHERTEXTBYTES (SABER_POLYVECCOMPRESSEDBYTES) | |||
#define SABER_POLYBYTES (SABER_EQ * SABER_N / 8) | |||
#define SABER_POLYVECBYTES (SABER_L * SABER_POLYBYTES) | |||
#define SABER_SCALEBYTES (SABER_DELTA*SABER_N/8) | |||
#define SABER_POLYCOMPRESSEDBYTES (SABER_EP * SABER_N / 8) | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_L * SABER_POLYCOMPRESSEDBYTES) | |||
#define SABER_SCALEBYTES_KEM ((SABER_ET)*SABER_N/8) | |||
#define SABER_SCALEBYTES_KEM (SABER_ET * SABER_N / 8) | |||
#define SABER_INDCPA_PUBLICKEYBYTES (SABER_POLYVECCOMPRESSEDBYTES + SABER_SEEDBYTES) | |||
#define SABER_INDCPA_SECRETKEYBYTES (SABER_POLYVECBYTES) | |||
#define SABER_PUBLICKEYBYTES (SABER_INDCPA_PUBLICKEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) /* Second part is for Targhi-Unruh */ | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) | |||
#endif | |||
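/*
 * Worked size check (added for clarity, not part of the original header):
 * with SABER_L = 4, SABER_N = 256, SABER_EQ = 13, SABER_EP = 10, SABER_ET = 6:
 *   SABER_POLYBYTES              = 13*256/8 = 416,  SABER_POLYVECBYTES = 4*416 = 1664
 *   SABER_POLYCOMPRESSEDBYTES    = 10*256/8 = 320,  SABER_POLYVECCOMPRESSEDBYTES = 4*320 = 1280
 *   SABER_SCALEBYTES_KEM         =  6*256/8 = 192
 *   SABER_INDCPA_PUBLICKEYBYTES  = 1280 + 32  = 1312
 *   SABER_BYTES_CCA_DEC          = 1280 + 192 = 1472
 *   SABER_SECRETKEYBYTES         = 1664 + 1312 + 32 + 32 = 3040
 * which matches the FireSaber constants in api.h below (1312 / 1472 / 3040).
 */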
@@ -1,14 +1,18 @@ | |||
#ifndef PQCLEAN_FIRESABER_CLEAN_API_H | |||
#define PQCLEAN_FIRESABER_CLEAN_API_H | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_ALGNAME "FireSaber" | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_SECRETKEYBYTES 3040 | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_PUBLICKEYBYTES (4*320+32) | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_BYTES 32 | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_CIPHERTEXTBYTES 1472 | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_PUBLICKEYBYTES 1312 | |||
#define PQCLEAN_FIRESABER_CLEAN_CRYPTO_SECRETKEYBYTES 3040 | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk); | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk); | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_dec(unsigned char *k, const unsigned char *ct, const unsigned char *sk); | |||
#endif /* api_h */ |
@@ -1,3 +1,7 @@ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
@@ -6,12 +10,8 @@ by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
static uint64_t load_littleendian(const uint8_t *x, int bytes) { | |||
int i; | |||
uint64_t r = x[0]; | |||
for (i = 1; i < bytes; i++) { | |||
@@ -20,33 +20,29 @@ static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
return r; | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_cbd(uint16_t *r, const unsigned char *buf) { | |||
uint16_t Qmod_minus1 = SABER_Q - 1; | |||
void PQCLEAN_FIRESABER_CLEAN_cbd(uint16_t s[SABER_N], const uint8_t buf[SABER_POLYCOINBYTES]) { | |||
uint32_t t, d, a[4], b[4]; | |||
int i, j; | |||
for (i = 0; i < SABER_N / 4; i++) { | |||
t = (uint32_t) load_littleendian(buf + 3 * i, 3); | |||
t = load_littleendian(buf + 3 * i, 3); | |||
d = 0; | |||
for (j = 0; j < 3; j++) { | |||
d += (t >> j) & 0x249249; | |||
} | |||
a[0] = d & 0x7; | |||
b[0] = (d >> 3) & 0x7; | |||
a[1] = (d >> 6) & 0x7; | |||
b[1] = (d >> 9) & 0x7; | |||
a[0] = d & 0x7; | |||
b[0] = (d >> 3) & 0x7; | |||
a[1] = (d >> 6) & 0x7; | |||
b[1] = (d >> 9) & 0x7; | |||
a[2] = (d >> 12) & 0x7; | |||
b[2] = (d >> 15) & 0x7; | |||
a[3] = (d >> 18) & 0x7; | |||
b[3] = (d >> 21); | |||
r[4 * i + 0] = (uint16_t)(a[0] - b[0]) & Qmod_minus1; | |||
r[4 * i + 1] = (uint16_t)(a[1] - b[1]) & Qmod_minus1; | |||
r[4 * i + 2] = (uint16_t)(a[2] - b[2]) & Qmod_minus1; | |||
r[4 * i + 3] = (uint16_t)(a[3] - b[3]) & Qmod_minus1; | |||
s[4 * i + 0] = (uint16_t)(a[0] - b[0]); | |||
s[4 * i + 1] = (uint16_t)(a[1] - b[1]); | |||
s[4 * i + 2] = (uint16_t)(a[2] - b[2]); | |||
s[4 * i + 3] = (uint16_t)(a[3] - b[3]); | |||
} | |||
} |
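/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * for FireSaber (SABER_MU = 6) each output coefficient is a centered binomial
 * sample HW(x) - HW(y), where x and y are three bits each and HW() is the
 * Hamming weight. The bit-trick above sums all eight 3-bit groups of a 24-bit
 * word at once using the mask 0x249249 (binary ...001001001). A direct scalar
 * equivalent for one 3-byte block, kept inside a comment:
 */
/*
static void cbd_block_scalar(uint16_t s[4], const uint8_t buf[3]) {
    // same little-endian 24-bit load as load_littleendian(buf, 3)
    uint32_t t = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16);
    for (int k = 0; k < 4; k++) {
        uint32_t x = (t >> (6 * k)) & 0x7;       // first three bits of the group
        uint32_t y = (t >> (6 * k + 3)) & 0x7;   // next three bits of the group
        int hx = (x & 1) + ((x >> 1) & 1) + ((x >> 2) & 1);
        int hy = (y & 1) + ((y >> 1) & 1) + ((y >> 2) & 1);
        s[k] = (uint16_t)(hx - hy);              // centered binomial sample
    }
}
*/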
@@ -1,6 +1,5 @@ | |||
#ifndef CBD_H | |||
#define CBD_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
@@ -8,10 +7,10 @@ of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "poly.h" | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_FIRESABER_CLEAN_cbd(uint16_t *r, const unsigned char *buf); | |||
void PQCLEAN_FIRESABER_CLEAN_cbd(uint16_t s[SABER_N], const uint8_t buf[SABER_POLYCOINBYTES]); | |||
#endif |
@@ -1,5 +1,6 @@ | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "fips202.h" | |||
#include "randombytes.h" | |||
#include "verify.h" | |||
@@ -7,90 +8,71 @@ | |||
#include <stdio.h> | |||
#include <string.h> | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_keypair(unsigned char *pk, unsigned char *sk) { | |||
int i; | |||
// sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_keypair(pk, sk); | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
int i; | |||
// sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_SECRETKEYBYTES-1] <-- pk | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_keypair(pk, sk); // sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
for (i = 0; i < SABER_INDCPA_PUBLICKEYBYTES; i++) { | |||
sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; | |||
sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; // sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_SECRETKEYBYTES-1] <-- pk | |||
} | |||
// Then hash(pk) is appended. | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); // Then hash(pk) is appended. | |||
// Remaining part of sk contains a pseudo-random number. | |||
// This is output when the check in crypto_kem_dec() fails.
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES ); | |||
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES); // Remaining part of sk contains a pseudo-random number. | |||
// This is output when the check in PQCLEAN_FIRESABER_CLEAN_crypto_kem_dec() fails.
return (0); | |||
} | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk) { | |||
// Will contain key, coins | |||
unsigned char kr[64]; | |||
unsigned char buf[64]; | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_enc(uint8_t *c, uint8_t *k, const uint8_t *pk) { | |||
randombytes(buf, 32); | |||
uint8_t kr[64]; // Will contain key, coins | |||
uint8_t buf[64]; | |||
// BUF[0:31] <-- random message (will be used as the key for the client) Note: the hash does not release system RNG output
sha3_256(buf, buf, 32); | |||
randombytes(buf, 32); | |||
// BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); | |||
sha3_256(buf, buf, 32); // BUF[0:31] <-- random message (will be used as the key for the client) Note: the hash does not release system RNG output
// kr[0:63] <-- Hash(buf[0:63]); | |||
sha3_512(kr, buf, 64); | |||
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); // BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_512(kr, buf, 64); // kr[0:63] <-- Hash(buf[0:63]); | |||
// K^ <-- kr[0:31] | |||
// noiseseed (r) <-- kr[32:63]; | |||
// buf[0:31] contains message; kr[32:63] contains randomness r; | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(buf, kr + 32, pk, ct); | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(c, buf, kr + 32, pk); // buf[0:31] contains message; kr[32:63] contains randomness r; | |||
sha3_256(kr + 32, ct, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); | |||
// hash concatenation of pre-k and h(c) to k | |||
sha3_256(ss, kr, 64); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk) { | |||
int PQCLEAN_FIRESABER_CLEAN_crypto_kem_dec(uint8_t *k, const uint8_t *c, const uint8_t *sk) { | |||
int i; | |||
unsigned char fail; | |||
unsigned char cmp[SABER_BYTES_CCA_DEC]; | |||
unsigned char buf[64]; | |||
// Will contain key, coins | |||
unsigned char kr[64]; | |||
const unsigned char *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
// buf[0:31] <-- message | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_dec(sk, ct, buf); | |||
uint8_t fail; | |||
uint8_t cmp[SABER_BYTES_CCA_DEC]; | |||
uint8_t buf[64]; | |||
uint8_t kr[64]; // Will contain key, coins | |||
const uint8_t *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_dec(buf, sk, c); // buf[0:31] <-- message | |||
// Multitarget countermeasure for coins + contributory KEM | |||
// Save hash by storing h(pk) in sk | |||
for (i = 0; i < 32; i++) { | |||
for (i = 0; i < 32; i++) { // Save hash by storing h(pk) in sk | |||
buf[32 + i] = sk[SABER_SECRETKEYBYTES - 64 + i]; | |||
} | |||
sha3_512(kr, buf, 64); | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(buf, kr + 32, pk, cmp); | |||
PQCLEAN_FIRESABER_CLEAN_indcpa_kem_enc(cmp, buf, kr + 32, pk); | |||
fail = PQCLEAN_FIRESABER_CLEAN_verify(ct, cmp, SABER_BYTES_CCA_DEC); | |||
fail = PQCLEAN_FIRESABER_CLEAN_verify(c, cmp, SABER_BYTES_CCA_DEC); | |||
// overwrite coins in kr with h(c) | |||
sha3_256(kr + 32, ct, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); // overwrite coins in kr with h(c) | |||
PQCLEAN_FIRESABER_CLEAN_cmov(kr, sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES, fail); | |||
// hash concatenation of pre-k and h(c) to k | |||
sha3_256(ss, kr, 64); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} |
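/*
 * Layout note (added for clarity, not part of the original source): the
 * FireSaber CCA secret key accessed above is a simple concatenation,
 *   sk = indcpa_sk (1664 bytes) || pk (1312 bytes) || sha3_256(pk) (32 bytes) || z (32 bytes),
 * and 1664 + 1312 + 32 + 32 = 3040 = SABER_SECRETKEYBYTES. This is why the
 * code reads pk at sk + SABER_INDCPA_SECRETKEYBYTES, h(pk) at
 * sk + SABER_SECRETKEYBYTES - 64, and the rejection value z at
 * sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES.
 */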
@@ -1,254 +1,136 @@ | |||
#include "api.h" | |||
#include "pack_unpack.h" | |||
#include <string.h> | |||
void PQCLEAN_FIRESABER_CLEAN_pack_3bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
void PQCLEAN_FIRESABER_CLEAN_POLT2BS(uint8_t bytes[SABER_SCALEBYTES_KEM], const uint16_t data[SABER_N]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x7) | | |||
((data[offset_data + 1] & 0x7) << 3) | | |||
((data[offset_data + 2] & 0x3) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 2] >> 2 ) & 0x01) | | |||
((data[offset_data + 3] & 0x7) << 1) | | |||
((data[offset_data + 4] & 0x7) << 4) | | |||
(((data[offset_data + 5]) & 0x01) << 7); | |||
bytes[offset_byte + 2] = ((data[offset_data + 5] >> 1 ) & 0x03) | | |||
((data[offset_data + 6] & 0x7) << 2) | | |||
((data[offset_data + 7] & 0x7) << 5); | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x3f) | ((data[offset_data + 1] & 0x03) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 1] >> 2) & 0x0f) | ((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 2] = ((data[offset_data + 2] >> 4) & 0x03) | ((data[offset_data + 3] & 0x3f) << 2); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_un_pack3bit(const uint8_t *bytes, uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLT(uint16_t data[SABER_N], const uint8_t bytes[SABER_SCALEBYTES_KEM]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0]) & 0x07; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0]) >> 3 ) & 0x07; | |||
data[offset_data + 2] = (((bytes[offset_byte + 0]) >> 6 ) & 0x03) | | |||
(((bytes[offset_byte + 1]) & 0x01) << 2); | |||
data[offset_data + 3] = ((bytes[offset_byte + 1]) >> 1 ) & 0x07; | |||
data[offset_data + 4] = ((bytes[offset_byte + 1]) >> 4 ) & 0x07; | |||
data[offset_data + 5] = (((bytes[offset_byte + 1]) >> 7 ) & 0x01) | | |||
(((bytes[offset_byte + 2]) & 0x03) << 1); | |||
data[offset_data + 6] = ((bytes[offset_byte + 2] >> 2) & 0x07); | |||
data[offset_data + 7] = ((bytes[offset_byte + 2] >> 5) & 0x07); | |||
offset_data = 4 * j; | |||
data[offset_data + 0] = bytes[offset_byte + 0] & 0x3f; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0] >> 6) & 0x03) | ((bytes[offset_byte + 1] & 0x0f) << 2); | |||
data[offset_data + 2] = ((bytes[offset_byte + 1] & 0xff) >> 4) | ((bytes[offset_byte + 2] & 0x03) << 4); | |||
data[offset_data + 3] = ((bytes[offset_byte + 2] & 0xff) >> 2); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_pack_4bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
bytes[j] = (data[offset_data] & 0x0f) | | |||
((data[offset_data + 1] & 0x0f) << 4); | |||
static void POLq2BS(uint8_t bytes[SABER_POLYBYTES], const uint16_t data[SABER_N]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[offset_data + 0] >> 8) & 0x1f) | ((data[offset_data + 1] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ((data[offset_data + 1] >> 3) & 0xff); | |||
bytes[offset_byte + 3] = ((data[offset_data + 1] >> 11) & 0x03) | ((data[offset_data + 2] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ((data[offset_data + 2] >> 6) & 0x7f) | ((data[offset_data + 3] & 0x01) << 7); | |||
bytes[offset_byte + 5] = ((data[offset_data + 3] >> 1) & 0xff); | |||
bytes[offset_byte + 6] = ((data[offset_data + 3] >> 9) & 0x0f) | ((data[offset_data + 4] & 0x0f) << 4); | |||
bytes[offset_byte + 7] = ((data[offset_data + 4] >> 4) & 0xff); | |||
bytes[offset_byte + 8] = ((data[offset_data + 4] >> 12) & 0x01) | ((data[offset_data + 5] & 0x7f) << 1); | |||
bytes[offset_byte + 9] = ((data[offset_data + 5] >> 7) & 0x3f) | ((data[offset_data + 6] & 0x03) << 6); | |||
bytes[offset_byte + 10] = ((data[offset_data + 6] >> 2) & 0xff); | |||
bytes[offset_byte + 11] = ((data[offset_data + 6] >> 10) & 0x07) | ((data[offset_data + 7] & 0x1f) << 3); | |||
bytes[offset_byte + 12] = ((data[offset_data + 7] >> 5) & 0xff); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_un_pack4bit(const unsigned char *bytes, uint16_t *ar) { | |||
uint32_t j; | |||
uint32_t offset_data; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
ar[offset_data] = bytes[j] & 0x0f; | |||
ar[offset_data + 1] = (bytes[j] >> 4) & 0x0f; | |||
static void BS2POLq(uint16_t data[SABER_N], const uint8_t bytes[SABER_POLYBYTES]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = (bytes[offset_byte + 1] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = (bytes[offset_byte + 3] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = (bytes[offset_byte + 4] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = (bytes[offset_byte + 6] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = (bytes[offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = (bytes[offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = (bytes[offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_pack_6bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
static void POLp2BS(uint8_t bytes[SABER_POLYCOMPRESSEDBYTES], const uint16_t data[SABER_N]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_byte = 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x3f) | | |||
((data[offset_data + 1] & 0x03) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 1] >> 2) & 0x0f) | | |||
((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 2] = ((data[offset_data + 2] >> 4) & 0x03) | | |||
((data[offset_data + 3] & 0x3f) << 2); | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[offset_data + 0] >> 8) & 0x03) | ((data[offset_data + 1] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ((data[offset_data + 1] >> 6) & 0x0f) | ((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ((data[offset_data + 2] >> 4) & 0x3f) | ((data[offset_data + 3] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ((data[offset_data + 3] >> 2) & 0xff); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_un_pack6bit(const unsigned char *bytes, uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
static void BS2POLp(uint16_t data[SABER_N], const uint8_t bytes[SABER_POLYCOMPRESSEDBYTES]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_byte = 5 * j; | |||
offset_data = 4 * j; | |||
data[offset_data + 0] = bytes[offset_byte + 0] & 0x3f; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0] >> 6) & 0x03) | | |||
((bytes[offset_byte + 1] & 0x0f) << 2); | |||
data[offset_data + 2] = ((bytes[offset_byte + 1] & 0xff) >> 4) | | |||
((bytes[offset_byte + 2] & 0x03) << 4); | |||
data[offset_data + 3] = ((bytes[offset_byte + 2] & 0xff) >> 2); | |||
data[offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | ((bytes[offset_byte + 1] & 0x03) << 8); | |||
data[offset_data + 1] = ((bytes[offset_byte + 1] >> 2) & (0x3f)) | ((bytes[offset_byte + 2] & 0x0f) << 6); | |||
data[offset_data + 2] = ((bytes[offset_byte + 2] >> 4) & (0x0f)) | ((bytes[offset_byte + 3] & 0x3f) << 4); | |||
data[offset_data + 3] = ((bytes[offset_byte + 3] >> 6) & (0x03)) | ((bytes[offset_byte + 4] & 0xff) << 2); | |||
} | |||
} | |||
static void POLVECp2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[i][offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[i][offset_data + 0] >> 8) & 0x03) | | |||
((data[i][offset_data + 1] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ((data[i][offset_data + 1] >> 6) & 0x0f) | | |||
((data[i][offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ((data[i][offset_data + 2] >> 4) & 0x3f) | | |||
((data[i][offset_data + 3] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ((data[i][offset_data + 3] >> 2) & 0xff); | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_POLVECq2BS(uint8_t bytes[SABER_POLYVECBYTES], const uint16_t data[SABER_L][SABER_N]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
POLq2BS(bytes + i * SABER_POLYBYTES, data[i]); | |||
} | |||
} | |||
static void BS2POLVECp(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | | |||
((bytes[offset_byte + 1] & 0x03) << 8); | |||
data[i][offset_data + 1] = ((bytes[offset_byte + 1] >> 2) & (0x3f)) | | |||
((bytes[offset_byte + 2] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ((bytes[offset_byte + 2] >> 4) & (0x0f)) | | |||
((bytes[offset_byte + 3] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ((bytes[offset_byte + 3] >> 6) & (0x03)) | | |||
((bytes[offset_byte + 4] & 0xff) << 2); | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLVECq(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECBYTES]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
BS2POLq(data[i], bytes + i * SABER_POLYBYTES); | |||
} | |||
} | |||
static void POLVECq2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[i][offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[i][offset_data + 0] >> 8) & 0x1f) | | |||
((data[i][offset_data + 1] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ((data[i][offset_data + 1] >> 3) & 0xff); | |||
bytes[offset_byte + 3] = ((data[i][offset_data + 1] >> 11) & 0x03) | | |||
((data[i][offset_data + 2] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ((data[i][offset_data + 2] >> 6) & 0x7f) | | |||
((data[i][offset_data + 3] & 0x01) << 7); | |||
bytes[offset_byte + 5] = ((data[i][offset_data + 3] >> 1) & 0xff); | |||
bytes[offset_byte + 6] = ((data[i][offset_data + 3] >> 9) & 0x0f) | | |||
((data[i][offset_data + 4] & 0x0f) << 4); | |||
bytes[offset_byte + 7] = ((data[i][offset_data + 4] >> 4) & 0xff); | |||
bytes[offset_byte + 8] = ((data[i][offset_data + 4] >> 12) & 0x01) | | |||
((data[i][offset_data + 5] & 0x7f) << 1); | |||
bytes[offset_byte + 9] = ((data[i][offset_data + 5] >> 7) & 0x3f) | | |||
((data[i][offset_data + 6] & 0x03) << 6); | |||
bytes[offset_byte + 10] = ((data[i][offset_data + 6] >> 2) & 0xff); | |||
bytes[offset_byte + 11] = ((data[i][offset_data + 6] >> 10) & 0x07) | | |||
((data[i][offset_data + 7] & 0x1f) << 3); | |||
bytes[offset_byte + 12] = ((data[i][offset_data + 7] >> 5) & 0xff); | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_POLVECp2BS(uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES], const uint16_t data[SABER_L][SABER_N]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
POLp2BS(bytes + i * (SABER_EP * SABER_N / 8), data[i]); | |||
} | |||
} | |||
static void BS2POLVECq(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | | |||
((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = (bytes[offset_byte + 1] >> 5 & (0x07)) | | |||
((bytes[offset_byte + 2] & 0xff) << 3) | | |||
((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = (bytes[offset_byte + 3] >> 2 & (0x3f)) | | |||
((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = (bytes[offset_byte + 4] >> 7 & (0x01)) | | |||
((bytes[offset_byte + 5] & 0xff) << 1) | | |||
((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = (bytes[offset_byte + 6] >> 4 & (0x0f)) | | |||
((bytes[offset_byte + 7] & 0xff) << 4) | | |||
((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = (bytes[offset_byte + 8] >> 1 & (0x7f)) | | |||
((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = (bytes[offset_byte + 9] >> 6 & (0x03)) | | |||
((bytes[offset_byte + 10] & 0xff) << 2) | | |||
((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = (bytes[offset_byte + 11] >> 3 & (0x1f)) | | |||
((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLVECp(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
BS2POLp(data[i], bytes + i * (SABER_EP * SABER_N / 8)); | |||
} | |||
} | |||
//only BS2POLq no BS2POLp | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POL(const unsigned char *bytes, uint16_t data[SABER_N]) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | | |||
((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = (bytes[offset_byte + 1] >> 5 & (0x07)) | | |||
((bytes[offset_byte + 2] & 0xff) << 3) | | |||
((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = (bytes[offset_byte + 3] >> 2 & (0x3f)) | | |||
((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = (bytes[offset_byte + 4] >> 7 & (0x01)) | | |||
((bytes[offset_byte + 5] & 0xff) << 1) | | |||
((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = (bytes[offset_byte + 6] >> 4 & (0x0f)) | | |||
((bytes[offset_byte + 7] & 0xff) << 4) | | |||
((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = (bytes[offset_byte + 8] >> 1 & (0x7f)) | | |||
((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = (bytes[offset_byte + 9] >> 6 & (0x03)) | | |||
((bytes[offset_byte + 10] & 0xff) << 2) | | |||
((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = (bytes[offset_byte + 11] >> 3 & (0x1f)) | | |||
((bytes[offset_byte + 12] & 0xff) << 5); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLmsg(uint16_t data[SABER_N], const uint8_t bytes[SABER_KEYBYTES]) { | |||
size_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
data[j * 8 + i] = ((bytes[j] >> i) & 0x01); | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_POLVEC2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
POLVECp2BS(bytes, data); | |||
} else if (modulus == 8192) { | |||
POLVECq2BS(bytes, data); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_POLmsg2BS(uint8_t bytes[SABER_KEYBYTES], const uint16_t data[SABER_N]) { | |||
size_t i, j; | |||
memset(bytes, 0, SABER_KEYBYTES); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLVEC(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
BS2POLVECp(bytes, data); | |||
} else if (modulus == 8192) { | |||
BS2POLVECq(bytes, data); | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
bytes[j] = bytes[j] | ((data[j * 8 + i] & 0x01) << i); | |||
} | |||
} | |||
} |
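As a quick illustration of the new 6-bit packing above (PQCLEAN_FIRESABER_CLEAN_POLT2BS / BS2POLT pack four coefficients into three bytes), here is a standalone round-trip sketch. pack6/unpack6 are local helpers that mirror the byte layout; they are illustrative only and not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Pack four 6-bit coefficients into three bytes (same bit layout as POLT2BS). */
static void pack6(uint8_t b[3], const uint16_t d[4]) {
    b[0] = (uint8_t)((d[0] & 0x3f) | ((d[1] & 0x03) << 6));
    b[1] = (uint8_t)(((d[1] >> 2) & 0x0f) | ((d[2] & 0x0f) << 4));
    b[2] = (uint8_t)(((d[2] >> 4) & 0x03) | ((d[3] & 0x3f) << 2));
}

/* Recover the four 6-bit coefficients (same bit layout as BS2POLT). */
static void unpack6(uint16_t d[4], const uint8_t b[3]) {
    d[0] = b[0] & 0x3f;
    d[1] = (uint16_t)(((b[0] >> 6) & 0x03) | ((b[1] & 0x0f) << 2));
    d[2] = (uint16_t)(((b[1] >> 4) & 0x0f) | ((b[2] & 0x03) << 4));
    d[3] = (uint16_t)((b[2] >> 2) & 0x3f);
}

int main(void) {
    uint16_t in[4] = {0x3f, 0x15, 0x2a, 0x01}, out[4];
    uint8_t bytes[3];
    int i;
    pack6(bytes, in);
    unpack6(out, bytes);
    for (i = 0; i < 4; i++) {
        assert(in[i] == out[i]); /* every 6-bit value survives the round trip */
    }
    return 0;
}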
@@ -1,28 +1,27 @@ | |||
#ifndef PACK_UNPACK_H | |||
#define PACK_UNPACK_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
void PQCLEAN_FIRESABER_CLEAN_POLT2BS(uint8_t bytes[SABER_SCALEBYTES_KEM], const uint16_t data[SABER_N]); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLT(uint16_t data[SABER_N], const uint8_t bytes[SABER_SCALEBYTES_KEM]); | |||
void PQCLEAN_FIRESABER_CLEAN_pack_3bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_FIRESABER_CLEAN_un_pack3bit(const uint8_t *bytes, uint16_t *data); | |||
void PQCLEAN_FIRESABER_CLEAN_POLVECq2BS(uint8_t bytes[SABER_POLYVECBYTES], const uint16_t data[SABER_L][SABER_N]); | |||
void PQCLEAN_FIRESABER_CLEAN_pack_4bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_FIRESABER_CLEAN_POLVECp2BS(uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES], const uint16_t data[SABER_L][SABER_N]); | |||
void PQCLEAN_FIRESABER_CLEAN_un_pack4bit(const unsigned char *bytes, uint16_t *ar); | |||
void PQCLEAN_FIRESABER_CLEAN_pack_6bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLVECq(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECBYTES]); | |||
void PQCLEAN_FIRESABER_CLEAN_un_pack6bit(const unsigned char *bytes, uint16_t *data); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLVECp(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES]); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POL(const unsigned char *bytes, uint16_t data[SABER_N]); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLmsg(uint16_t data[SABER_N], const uint8_t bytes[SABER_KEYBYTES]); | |||
void PQCLEAN_FIRESABER_CLEAN_POLVEC2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
void PQCLEAN_FIRESABER_CLEAN_POLmsg2BS(uint8_t bytes[SABER_KEYBYTES], const uint16_t data[SABER_N]); | |||
void PQCLEAN_FIRESABER_CLEAN_BS2POLVEC(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
#endif |
@@ -1,21 +1,49 @@ | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "poly.h" | |||
#include "poly_mul.h" | |||
#include <stdio.h> | |||
void PQCLEAN_FIRESABER_CLEAN_GenSecret(uint16_t r[SABER_K][SABER_N], const unsigned char *seed) { | |||
uint8_t buf[SABER_MU * SABER_N * SABER_K / 8]; | |||
void PQCLEAN_FIRESABER_CLEAN_MatrixVectorMul(uint16_t res[SABER_L][SABER_N], const uint16_t A[SABER_L][SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N], int16_t transpose) { | |||
int i, j; | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_L; j++) { | |||
if (transpose == 1) { | |||
PQCLEAN_FIRESABER_CLEAN_poly_mul_acc(A[j][i], s[j], res[i]); | |||
} else { | |||
PQCLEAN_FIRESABER_CLEAN_poly_mul_acc(A[i][j], s[j], res[i]); | |||
} | |||
} | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_InnerProd(uint16_t res[SABER_N], const uint16_t b[SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N]) { | |||
int j; | |||
for (j = 0; j < SABER_L; j++) { | |||
PQCLEAN_FIRESABER_CLEAN_poly_mul_acc(b[j], s[j], res); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_GenMatrix(uint16_t A[SABER_L][SABER_L][SABER_N], const uint8_t seed[SABER_SEEDBYTES]) { | |||
uint8_t buf[SABER_L * SABER_POLYVECBYTES]; | |||
int i; | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_L; i++) { | |||
PQCLEAN_FIRESABER_CLEAN_BS2POLVECq(A[i], buf + i * SABER_POLYVECBYTES); | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_GenSecret(uint16_t s[SABER_L][SABER_N], const uint8_t seed[SABER_NOISE_SEEDBYTES]) { | |||
uint8_t buf[SABER_L * SABER_POLYCOINBYTES]; | |||
size_t i; | |||
shake128(buf, sizeof(buf), seed, SABER_NOISESEEDBYTES); | |||
shake128(buf, sizeof(buf), seed, SABER_NOISE_SEEDBYTES); | |||
for (size_t i = 0; i < SABER_K; i++) { | |||
PQCLEAN_FIRESABER_CLEAN_cbd(r[i], buf + i * SABER_MU * SABER_N / 8); | |||
for (i = 0; i < SABER_L; i++) { | |||
PQCLEAN_FIRESABER_CLEAN_cbd(s[i], buf + i * SABER_POLYCOINBYTES); | |||
} | |||
} |
@@ -1,26 +1,15 @@ | |||
#ifndef POLY_H | |||
#define POLY_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
typedef struct { | |||
uint16_t coeffs[SABER_N]; | |||
} poly; | |||
void PQCLEAN_FIRESABER_CLEAN_MatrixVectorMul(uint16_t res[SABER_L][SABER_N], const uint16_t a[SABER_L][SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N], int16_t transpose); | |||
void PQCLEAN_FIRESABER_CLEAN_InnerProd(uint16_t res[SABER_N], const uint16_t b[SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N]); | |||
void PQCLEAN_FIRESABER_CLEAN_GenMatrix(uint16_t a[SABER_L][SABER_L][SABER_N], const uint8_t seed[SABER_SEEDBYTES]); | |||
typedef struct { | |||
poly vec[SABER_K]; | |||
} polyvec; | |||
void PQCLEAN_FIRESABER_CLEAN_GenSecret(uint16_t s[SABER_L][SABER_N], const uint8_t seed[SABER_NOISE_SEEDBYTES]); | |||
void PQCLEAN_FIRESABER_CLEAN_GenSecret(uint16_t r[SABER_K][SABER_N], const unsigned char *seed); | |||
#endif |
@@ -228,19 +228,15 @@ static void toom_cook_4way (const uint16_t *a1, const uint16_t *b1, uint16_t *re | |||
} | |||
} | |||
void PQCLEAN_FIRESABER_CLEAN_pol_mul(uint16_t *a, uint16_t *b, uint16_t *res, uint16_t p, uint32_t n) { | |||
uint32_t i; | |||
// normal multiplication | |||
uint16_t c[512]; | |||
for (i = 0; i < 512; i++) { | |||
c[i] = 0; | |||
} | |||
/* res += a*b */ | |||
void PQCLEAN_FIRESABER_CLEAN_poly_mul_acc(const uint16_t a[SABER_N], const uint16_t b[SABER_N], uint16_t res[SABER_N]) { | |||
uint16_t c[2 * SABER_N] = {0}; | |||
int i; | |||
toom_cook_4way(a, b, c); | |||
// reduction | |||
for (i = n; i < 2 * n; i++) { | |||
res[i - n] = (c[i - n] - c[i]) & (p - 1); | |||
/* reduction */ | |||
for (i = SABER_N; i < 2 * SABER_N; i++) { | |||
res[i - SABER_N] += (c[i - SABER_N] - c[i]); | |||
} | |||
} |
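The new reduction loop folds the 512-coefficient Toom-Cook product back modulo X^256 + 1 while accumulating into res. Below is a naive schoolbook reference with the same accumulate-and-reduce semantics, assuming (as the uint16_t arithmetic in the patch implies) that coefficients wrap mod 2^16 and the caller masks down to mod q or mod p afterwards; poly_mul_acc_ref and N are local names for this sketch only.

#include <stdint.h>

#define N 256 /* SABER_N */

/* res += a*b mod (X^N + 1); coefficients wrap mod 2^16. */
static void poly_mul_acc_ref(const uint16_t a[N], const uint16_t b[N], uint16_t res[N]) {
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            uint16_t prod = (uint16_t)((uint32_t)a[i] * b[j]);
            if (i + j < N) {
                res[i + j] = (uint16_t)(res[i + j] + prod);         /* contributes to X^(i+j)        */
            } else {
                res[i + j - N] = (uint16_t)(res[i + j - N] - prod); /* X^(i+j) = -X^(i+j-N) mod X^N+1 */
            }
        }
    }
}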
@@ -1,9 +1,9 @@ | |||
#ifndef POLYMUL_H | |||
#define POLYMUL_H | |||
#ifndef POLY_MUL_H | |||
#define POLY_MUL_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_FIRESABER_CLEAN_pol_mul(uint16_t *a, uint16_t *b, uint16_t *res, uint16_t p, uint32_t n); | |||
void PQCLEAN_FIRESABER_CLEAN_poly_mul_acc(const uint16_t a[SABER_N], const uint16_t b[SABER_N], uint16_t res[SABER_N]); | |||
#endif |
@@ -1,3 +1,5 @@ | |||
#include "verify.h" | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
@@ -5,26 +7,25 @@ This file has been adapted from the implementation | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------*/ | |||
#include "verify.h" | |||
#include <stdint.h> | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
unsigned char PQCLEAN_FIRESABER_CLEAN_verify(const unsigned char *a, const unsigned char *b, size_t len) { | |||
uint8_t PQCLEAN_FIRESABER_CLEAN_verify(const uint8_t *a, const uint8_t *b, size_t len) { | |||
uint64_t r; | |||
size_t i; | |||
r = 0; | |||
for (i = 0; i < len; i++) { | |||
r |= a[i] ^ b[i]; | |||
} | |||
r = (~r + 1); // Two's complement | |||
r >>= 63; | |||
return (unsigned char)r; | |||
return (uint8_t) r; | |||
} | |||
/* b = 1 means mov, b = 0 means don't mov*/ | |||
void PQCLEAN_FIRESABER_CLEAN_cmov(unsigned char *r, const unsigned char *x, size_t len, unsigned char b) { | |||
void PQCLEAN_FIRESABER_CLEAN_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b) { | |||
size_t i; | |||
b = -b; | |||
@@ -1,6 +1,5 @@ | |||
#ifndef VERIFY_H | |||
#define VERIFY_H | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
@@ -13,9 +12,11 @@ Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
#include <stdint.h> | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
unsigned char PQCLEAN_FIRESABER_CLEAN_verify(const unsigned char *a, const unsigned char *b, size_t len); | |||
uint8_t PQCLEAN_FIRESABER_CLEAN_verify(const uint8_t *a, const uint8_t *b, size_t len); | |||
/* b = 1 means mov, b = 0 means don't mov*/ | |||
void PQCLEAN_FIRESABER_CLEAN_cmov(unsigned char *r, const unsigned char *x, size_t len, unsigned char b); | |||
void PQCLEAN_FIRESABER_CLEAN_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b); | |||
#endif |
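For readers unfamiliar with this pattern: the two helpers provide a branch-free comparison and conditional move, so decapsulation behaves the same way in time whether or not the re-encryption matched. The sketch below re-implements the same semantics under local names (ct_verify/ct_cmov); it is illustrative only, not the patched code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Returns 0 if a == b and 1 otherwise, without data-dependent branches. */
static uint8_t ct_verify(const uint8_t *a, const uint8_t *b, size_t len) {
    uint64_t r = 0;
    size_t i;
    for (i = 0; i < len; i++) {
        r |= a[i] ^ b[i];
    }
    return (uint8_t)((~r + 1) >> 63); /* two's complement: 0 stays 0, anything nonzero becomes 1 */
}

/* Copies x into r when flag == 1, leaves r untouched when flag == 0. */
static void ct_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t flag) {
    uint8_t mask = (uint8_t)(-flag); /* 0x00 or 0xff */
    size_t i;
    for (i = 0; i < len; i++) {
        r[i] ^= mask & (r[i] ^ x[i]);
    }
}

int main(void) {
    uint8_t key[4] = {1, 2, 3, 4}, fallback[4] = {9, 9, 9, 9};
    uint8_t ct[4] = {5, 6, 7, 8}, ct_bad[4] = {5, 6, 0, 8};
    /* Mismatching buffers -> flag 1 -> key is replaced by the fallback value. */
    ct_cmov(key, fallback, sizeof(key), ct_verify(ct, ct_bad, sizeof(ct)));
    return memcmp(key, fallback, sizeof(key)) == 0 ? 0 : 1;
}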
@@ -14,4 +14,13 @@ principal-submitters: | |||
- Frederik Vercauteren | |||
implementations: | |||
- name: clean | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/14ede83f1ff3bcc41f0464543542366c68b55871 | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/509cc5ec3a7e12a751ccdd2ef5bd6e54e00bd350 | |||
- name: avx2 | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/509cc5ec3a7e12a751ccdd2ef5bd6e54e00bd350 | |||
supported_platforms: | |||
- architecture: x86_64 | |||
operating_systems: | |||
- Linux | |||
- Darwin | |||
required_flags: | |||
- avx2 |
@@ -0,0 +1 @@ | |||
Public Domain |
@@ -0,0 +1,22 @@ | |||
# This Makefile can be used with GNU Make or BSD Make | |||
LIB=liblightsaber_avx2.a | |||
HEADERS=api.h cbd.h kem.h pack_unpack.h poly.h SABER_indcpa.h SABER_params.h verify.h | |||
OBJECTS=cbd.o kem.o pack_unpack.o SABER_indcpa.o verify.o | |||
CFLAGS=-O3 -mavx2 -Wall -Wextra -Wpedantic -Wvla -Werror -Wredundant-decls -Wmissing-prototypes -std=c99 -I../../../common $(EXTRAFLAGS) | |||
all: $(LIB) | |||
%.o: %.s $(HEADERS) | |||
$(AS) -o $@ $< | |||
%.o: %.c $(HEADERS) | |||
$(CC) $(CFLAGS) -c -o $@ $< | |||
$(LIB): $(OBJECTS) | |||
$(AR) -r $@ $(OBJECTS) | |||
clean: | |||
$(RM) $(OBJECTS) | |||
$(RM) $(LIB) |
@@ -0,0 +1,416 @@ | |||
#include "./polymul/toom-cook_4way.c" | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "randombytes.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
#include <string.h> | |||
//#include "randombytes.h" | |||
//#include "./polymul/toom_cook_4/toom-cook_4way.c" | |||
#define h1 4 //2^(EQ-EP-1) | |||
#define h2 ( (1<<(SABER_EP-2)) - (1<<(SABER_EP-SABER_ET-1)) + (1<<(SABER_EQ-SABER_EP-1)) ) | |||
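// For the LightSaber parameters in this directory (SABER_EQ=13, SABER_EP=10, SABER_ET=3):
// h1 = 2^(13-10-1) = 4, matching the literal above, and h2 = 2^8 - 2^6 + 2^2 = 196.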
static void POL2MSG(uint8_t *message_dec, const uint16_t *message_dec_unpacked) { | |||
int32_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
message_dec[j] = 0; | |||
for (i = 0; i < 8; i++) { | |||
message_dec[j] = message_dec[j] | (message_dec_unpacked[j * 8 + i] << i); | |||
} | |||
} | |||
} | |||
/*----------------------------------------------------------------------------------- | |||
This routine generates a=[Matrix K x K] of 256-coefficient polynomials | |||
-------------------------------------------------------------------------------------*/ | |||
static void GenMatrix(polyvec *a, const uint8_t *seed) { | |||
uint8_t buf[SABER_K * SABER_K * 13 * SABER_N / 8]; | |||
uint16_t temp_ar[SABER_N]; | |||
int i, j, k; | |||
uint16_t mod = (SABER_Q - 1); | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_LIGHTSABER_AVX2_BS2POLq(temp_ar, buf + (i * SABER_K + j) * 13 * SABER_N / 8); | |||
for (k = 0; k < SABER_N; k++) { | |||
a[i].vec[j].coeffs[k] = (temp_ar[k])& mod ; | |||
} | |||
} | |||
} | |||
} | |||
static void GenSecret(uint16_t r[SABER_K][SABER_N], const uint8_t *seed) { | |||
uint32_t i; | |||
uint8_t buf[SABER_MU * SABER_N * SABER_K / 8]; | |||
shake128(buf, sizeof(buf), seed, SABER_NOISESEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
PQCLEAN_LIGHTSABER_AVX2_cbd(r[i], buf + i * SABER_MU * SABER_N / 8); | |||
} | |||
} | |||
//********************************matrix-vector mul routines***************************************************** | |||
static void matrix_vector_mul(__m256i a1_avx_combined[NUM_POLY][NUM_POLY][AVX_N1], __m256i b_bucket[NUM_POLY][SCHB_N * 4], __m256i res_avx[NUM_POLY][AVX_N1], int isTranspose) { | |||
int64_t i, j; | |||
__m256i c_bucket[2 * SCM_SIZE * 4]; //Holds results for 9 Karatsuba at a time | |||
for (i = 0; i < NUM_POLY; i++) { | |||
for (j = 0; j < NUM_POLY; j++) { | |||
if (isTranspose == 0) { | |||
toom_cook_4way_avx_n1(a1_avx_combined[i][j], b_bucket[j], c_bucket, j); | |||
} else { | |||
toom_cook_4way_avx_n1(a1_avx_combined[j][i], b_bucket[j], c_bucket, j); | |||
} | |||
} | |||
TC_interpol(c_bucket, res_avx[i]); | |||
} | |||
} | |||
static void vector_vector_mul(__m256i a_avx[NUM_POLY][AVX_N1], __m256i b_bucket[NUM_POLY][SCHB_N * 4], __m256i res_avx[AVX_N1]) { | |||
int64_t i; | |||
__m256i c_bucket[2 * SCM_SIZE * 4]; //Holds results for 9 Karatsuba at a time | |||
for (i = 0; i < NUM_POLY; i++) { | |||
toom_cook_4way_avx_n1(a_avx[i], b_bucket[i], c_bucket, i); | |||
} | |||
TC_interpol(c_bucket, res_avx); | |||
} | |||
//********************************matrix-vector mul routines***************************************************** | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
polyvec a[SABER_K]; | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint8_t seed[SABER_SEEDBYTES]; | |||
uint8_t noiseseed[SABER_COINBYTES]; | |||
int32_t i, j, k; | |||
//--------------AVX declaration------------------ | |||
__m256i sk_avx[SABER_K][SABER_N / 16]; | |||
__m256i mod; | |||
__m256i res_avx[SABER_K][SABER_N / 16]; | |||
__m256i a_avx[SABER_K][SABER_K][SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
mod = _mm256_set1_epi16(SABER_Q - 1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
randombytes(seed, SABER_SEEDBYTES); | |||
shake128(seed, SABER_SEEDBYTES, seed, SABER_SEEDBYTES); // hash the seed so that the system RNG state is not revealed | |||
randombytes(noiseseed, SABER_COINBYTES); | |||
GenMatrix(a, seed); //sample matrix A | |||
GenSecret(skpv1, noiseseed); | |||
// Load sk into avx vectors | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sk_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&skpv1[i][j * 16])); | |||
} | |||
} | |||
// Load a into avx vectors | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
a_avx[i][j][k] = _mm256_loadu_si256 ((__m256i const *) (&a[i].vec[j].coeffs[k * 16])); | |||
} | |||
} | |||
} | |||
//------------------------do the matrix vector multiplication and rounding------------ | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sk_avx[j], b_bucket[j]); | |||
} | |||
matrix_vector_mul(a_avx, b_bucket, res_avx, 1);// Matrix-vector multiplication; Matrix in transposed order | |||
// Now truncation | |||
for (i = 0; i < SABER_K; i++) { //shift right EQ-EP bits | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
res_avx[i][j] = _mm256_add_epi16 (res_avx[i][j], _mm256_set1_epi16(h1)); | |||
res_avx[i][j] = _mm256_srli_epi16 (res_avx[i][j], (SABER_EQ - SABER_EP) ); | |||
res_avx[i][j] = _mm256_and_si256 (res_avx[i][j], mod); | |||
} | |||
} | |||
//------------------Pack sk into byte string------- | |||
PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(sk, (const uint16_t (*)[SABER_N])skpv1, SABER_Q); | |||
//------------------Pack pk into byte string------- | |||
for (i = 0; i < SABER_K; i++) { // reuse skpv1[] as scratch for the rounded public-key coefficients stored back from the AVX registers | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *) (skpv1[i] + j * 16), _mm256_set1_epi32(-1), res_avx[i][j]); | |||
} | |||
} | |||
PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(pk, (const uint16_t (*)[SABER_N])skpv1, SABER_P); // load the public-key into pk byte string | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { // now copy the seed bytes into pk; easy, since the seed is already in byte format. | |||
pk[SABER_POLYVECCOMPRESSEDBYTES + i] = seed[i]; | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t noiseseed[SABER_NOISESEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]) { | |||
uint32_t i, j, k; | |||
polyvec a[SABER_K]; // skpv; | |||
uint8_t seed[SABER_SEEDBYTES]; | |||
uint16_t pkcl[SABER_K][SABER_N]; //public key received by the client | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint16_t temp[SABER_K][SABER_N]; | |||
uint16_t message[SABER_KEYBYTES * 8]; | |||
uint8_t msk_c[SABER_SCALEBYTES_KEM]; | |||
//--------------AVX declaration------------------ | |||
__m256i sk_avx[SABER_K][SABER_N / 16]; | |||
__m256i mod, mod_p; | |||
__m256i res_avx[SABER_K][SABER_N / 16]; | |||
__m256i vprime_avx[SABER_N / 16]; | |||
__m256i a_avx[SABER_K][SABER_K][SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
__m256i pkcl_avx[SABER_K][SABER_N / 16]; | |||
__m256i message_avx[SABER_N / 16]; | |||
mod = _mm256_set1_epi16(SABER_Q - 1); | |||
mod_p = _mm256_set1_epi16(SABER_P - 1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { // Load the seed bytes from pk into the client's seed. | |||
seed[i] = pk[ SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
GenMatrix(a, seed); | |||
GenSecret(skpv1, noiseseed); | |||
// ----------- Load skpv1 into avx vectors ---------- | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sk_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&skpv1[i][j * 16])); | |||
} | |||
} | |||
// ----------- Load skpv1 into avx vectors ---------- | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
a_avx[i][j][k] = _mm256_loadu_si256 ((__m256i const *) (&a[i].vec[j].coeffs[k * 16])); | |||
} | |||
} | |||
} | |||
//-----------------matrix-vector multiplication and rounding | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sk_avx[j], b_bucket[j]); | |||
} | |||
matrix_vector_mul(a_avx, b_bucket, res_avx, 0);// Matrix-vector multiplication; Matrix in normal order | |||
// Now truncation | |||
for (i = 0; i < SABER_K; i++) { //shift right EQ-EP bits | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
res_avx[i][j] = _mm256_add_epi16 (res_avx[i][j], _mm256_set1_epi16(h1)); | |||
res_avx[i][j] = _mm256_srli_epi16 (res_avx[i][j], (SABER_EQ - SABER_EP) ); | |||
res_avx[i][j] = _mm256_and_si256 (res_avx[i][j], mod); | |||
} | |||
} | |||
//-----this result is stored in b_prime (the first ciphertext component) for later use by the server. | |||
for (i = 0; i < SABER_K; i++) { // first store in 16 bit arrays | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *)(temp[i] + j * 16), _mm256_set1_epi32(-1), res_avx[i][j]); | |||
} | |||
} | |||
PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(ciphertext, (const uint16_t (*)[SABER_N])temp, SABER_P); // Pack b_prime into ciphertext byte string | |||
//**************client matrix-vector multiplication ends******************// | |||
//------now calculate the v' | |||
//-------unpack the public_key | |||
PQCLEAN_LIGHTSABER_AVX2_BS2POLVEC(pkcl, pk, SABER_P); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
pkcl_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&pkcl[i][j * 16])); | |||
} | |||
} | |||
// InnerProduct | |||
//for(k=0;k<SABER_N/16;k++){ | |||
// vprime_avx[k]=_mm256_xor_si256(vprime_avx[k],vprime_avx[k]); | |||
//} | |||
// vector-vector scalar multiplication with mod p | |||
vector_vector_mul(pkcl_avx, b_bucket, vprime_avx); | |||
// Computation of v'+h1 | |||
for (i = 0; i < SABER_N / 16; i++) { //adding h1 | |||
vprime_avx[i] = _mm256_add_epi16(vprime_avx[i], _mm256_set1_epi16(h1)); | |||
} | |||
// unpack m; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
message[8 * j + i] = ((m[j] >> i) & 0x01); | |||
} | |||
} | |||
// message encoding | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
message_avx[i] = _mm256_loadu_si256 ((__m256i const *) (&message[i * 16])); | |||
message_avx[i] = _mm256_slli_epi16 (message_avx[i], (SABER_EP - 1) ); | |||
} | |||
// SHIFTRIGHT(v'+h1-m mod p, EP-ET) | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
vprime_avx[k] = _mm256_sub_epi16(vprime_avx[k], message_avx[k]); | |||
vprime_avx[k] = _mm256_and_si256(vprime_avx[k], mod_p); | |||
vprime_avx[k] = _mm256_srli_epi16 (vprime_avx[k], (SABER_EP - SABER_ET) ); | |||
} | |||
// Unpack avx | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *) (temp[0] + j * 16), _mm256_set1_epi32(-1), vprime_avx[j]); | |||
} | |||
PQCLEAN_LIGHTSABER_AVX2_SABER_pack_3bit(msk_c, temp[0]); | |||
for (j = 0; j < SABER_SCALEBYTES_KEM; j++) { | |||
ciphertext[SABER_CIPHERTEXTBYTES + j] = msk_c[j]; | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]) { | |||
uint32_t i, j; | |||
uint16_t sksv[SABER_K][SABER_N]; //secret key of the server | |||
uint16_t pksv[SABER_K][SABER_N]; | |||
uint16_t message_dec_unpacked[SABER_KEYBYTES * 8]; // one element contains one decrypted bit | |||
uint8_t scale_ar[SABER_SCALEBYTES_KEM]; | |||
uint16_t op[SABER_N]; | |||
//--------------AVX declaration------------------ | |||
//__m256i mod_p; | |||
__m256i v_avx[SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
__m256i sksv_avx[SABER_K][SABER_N / 16]; | |||
__m256i pksv_avx[SABER_K][SABER_N / 16]; | |||
//mod_p=_mm256_set1_epi16(SABER_P-1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
//-------unpack the public_key | |||
PQCLEAN_LIGHTSABER_AVX2_BS2POLVEC(sksv, sk, SABER_Q); //sksv is the secret-key | |||
PQCLEAN_LIGHTSABER_AVX2_BS2POLVEC(pksv, ciphertext, SABER_P); //pksv is the ciphertext | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sksv_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&sksv[i][j * 16])); | |||
pksv_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&pksv[i][j * 16])); | |||
} | |||
} | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
v_avx[i] = _mm256_xor_si256(v_avx[i], v_avx[i]); | |||
} | |||
// InnerProduct(b', s, mod p) | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sksv_avx[j], b_bucket[j]); | |||
} | |||
vector_vector_mul(pksv_avx, b_bucket, v_avx); | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
_mm256_maskstore_epi32 ((int *)(message_dec_unpacked + i * 16), _mm256_set1_epi32(-1), v_avx[i]); | |||
} | |||
for (i = 0; i < SABER_SCALEBYTES_KEM; i++) { | |||
scale_ar[i] = ciphertext[SABER_CIPHERTEXTBYTES + i]; | |||
} | |||
PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack3bit(op, scale_ar); | |||
//addition of h2 | |||
for (i = 0; i < SABER_N; i++) { | |||
message_dec_unpacked[i] = ( ( message_dec_unpacked[i] + h2 - (op[i] << (SABER_EP - SABER_ET)) ) & (SABER_P - 1) ) >> (SABER_EP - 1); | |||
} | |||
POL2MSG(m, message_dec_unpacked); | |||
} |
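The final loop of the decryption is the per-coefficient message decoder: m_i = ((v_i + h2 - (cm_i << (EP-ET))) mod p) >> (EP-1). Below is a standalone worked example assuming the LightSaber parameters (EP=10, ET=3, hence p=1024 and h2=196); decode_coeff and the sample values are illustrative, not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the loop above, with p = 1024, h2 = 196, EP-ET = 7, EP-1 = 9. */
static uint16_t decode_coeff(uint16_t v, uint16_t cm) {
    return (uint16_t)((((uint16_t)(v + 196 - (cm << 7))) & 1023) >> 9);
}

int main(void) {
    /* Suppose the noiseless inner product is v = 600 on both sides.
     * Encoding bit 1: cm = ((600 + 4 - 512) & 1023) >> 7 = 0, which decodes back to 1.
     * Encoding bit 0: cm = ((600 + 4)       & 1023) >> 7 = 4, which decodes back to 0. */
    assert(decode_coeff(600, 0) == 1);
    assert(decode_coeff(600, 4) == 0);
    return 0;
}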
@@ -0,0 +1,13 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t noiseseed[SABER_NOISESEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]); | |||
#endif |
@@ -0,0 +1,46 @@ | |||
#ifndef PARAMS_H | |||
#define PARAMS_H | |||
#include "api.h" | |||
#define SABER_K 2 | |||
#define SABER_MU 10 | |||
#define SABER_ET 3 | |||
#define SABER_EQ 13 | |||
#define SABER_EP 10 | |||
#define SABER_N 256 | |||
#define SABER_Q 8192 //2^13 | |||
#define SABER_P 1024 | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISESEEDBYTES 32 | |||
#define SABER_COINBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYBYTES 416 //13*256/8 | |||
#define SABER_POLYVECBYTES (SABER_K * SABER_POLYBYTES) | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_K * 320) //10*256/8 NOTE : changed till here due to parameter adaptation | |||
#define SABER_CIPHERTEXTBYTES (SABER_POLYVECCOMPRESSEDBYTES) | |||
#define SABER_SCALEBYTES_KEM ((SABER_ET)*SABER_N/8) | |||
#define SABER_INDCPA_PUBLICKEYBYTES (SABER_POLYVECCOMPRESSEDBYTES + SABER_SEEDBYTES) | |||
#define SABER_INDCPA_SECRETKEYBYTES (SABER_POLYVECBYTES) | |||
#define SABER_PUBLICKEYBYTES (SABER_INDCPA_PUBLICKEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) /* Second part is for Targhi-Unruh */ | |||
#endif |
@@ -0,0 +1,18 @@ | |||
#ifndef PQCLEAN_LIGHTSABER_AVX2_API_H | |||
#define PQCLEAN_LIGHTSABER_AVX2_API_H | |||
#define PQCLEAN_LIGHTSABER_AVX2_CRYPTO_ALGNAME "LightSaber" | |||
#define PQCLEAN_LIGHTSABER_AVX2_CRYPTO_BYTES 32 | |||
#define PQCLEAN_LIGHTSABER_AVX2_CRYPTO_CIPHERTEXTBYTES 736 | |||
#define PQCLEAN_LIGHTSABER_AVX2_CRYPTO_PUBLICKEYBYTES 672 | |||
#define PQCLEAN_LIGHTSABER_AVX2_CRYPTO_SECRETKEYBYTES 1568 | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_enc(unsigned char *ct, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_dec(unsigned char *k, const unsigned char *ct, const unsigned char *sk); | |||
#endif /* PQCLEAN_LIGHTSABER_AVX2_API_H */ |
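The byte counts in this api.h can be re-derived from the parameter file above. The following standalone sanity-check sketch uses local variable names only (it does not include the patched headers):

#include <assert.h>

int main(void) {
    /* LightSaber: K = 2, N = 256, EQ = 13, EP = 10, ET = 3 */
    int polybytes = 13 * 256 / 8;               /* 416, SABER_POLYBYTES              */
    int polyvecbytes = 2 * polybytes;           /* 832, SABER_POLYVECBYTES           */
    int polyveccompressed = 2 * (10 * 256 / 8); /* 640, SABER_POLYVECCOMPRESSEDBYTES */
    int scalebytes = 3 * 256 / 8;               /* 96,  SABER_SCALEBYTES_KEM         */

    assert(polyveccompressed + 32 == 672);         /* CRYPTO_PUBLICKEYBYTES  */
    assert(polyveccompressed + scalebytes == 736); /* CRYPTO_CIPHERTEXTBYTES */
    assert(polyvecbytes + 672 + 32 + 32 == 1568);  /* CRYPTO_SECRETKEYBYTES  */
    return 0;
}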
@@ -0,0 +1,51 @@ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
int i; | |||
uint64_t r = x[0]; | |||
for (i = 1; i < bytes; i++) { | |||
r |= (uint64_t)x[i] << (8 * i); | |||
} | |||
return r; | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_cbd(uint16_t *r, const unsigned char *buf) { | |||
uint16_t Qmod_minus1 = SABER_Q - 1; | |||
uint64_t t, d, a[4], b[4]; | |||
int i, j; | |||
for (i = 0; i < SABER_N / 4; i++) { | |||
t = load_littleendian(buf + 5 * i, 5); | |||
d = 0; | |||
for (j = 0; j < 5; j++) { | |||
d += (t >> j) & 0x0842108421UL; | |||
} | |||
a[0] = d & 0x1f; | |||
b[0] = (d >> 5) & 0x1f; | |||
a[1] = (d >> 10) & 0x1f; | |||
b[1] = (d >> 15) & 0x1f; | |||
a[2] = (d >> 20) & 0x1f; | |||
b[2] = (d >> 25) & 0x1f; | |||
a[3] = (d >> 30) & 0x1f; | |||
b[3] = (d >> 35); | |||
r[4 * i + 0] = (uint16_t)(a[0] - b[0]) & Qmod_minus1; | |||
r[4 * i + 1] = (uint16_t)(a[1] - b[1]) & Qmod_minus1; | |||
r[4 * i + 2] = (uint16_t)(a[2] - b[2]) & Qmod_minus1; | |||
r[4 * i + 3] = (uint16_t)(a[3] - b[3]) & Qmod_minus1; | |||
} | |||
} |
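The cbd routine above uses a bit-sliced trick: the mask 0x0842108421 has one bit every 5 positions, so summing (t >> j) & mask for j = 0..4 leaves the Hamming weight of each 5-bit group of t in the corresponding 5-bit field of d. A naive reference with the same intended output (popcount of the low 5 bits minus popcount of the next 5 bits of each 10-bit chunk, reduced mod q) is sketched below; cbd_ref, bit(), N and Q are local to this sketch.

#include <stdint.h>

#define N 256   /* SABER_N */
#define Q 8192  /* SABER_Q */

/* Bit `pos` of the byte buffer, little-endian within each byte, as produced by load_littleendian. */
static unsigned bit(const uint8_t *buf, unsigned pos) {
    return (buf[pos / 8] >> (pos % 8)) & 1;
}

/* r[k] = HW(bits [10k, 10k+5)) - HW(bits [10k+5, 10k+10)) mod q: a centered binomial
 * sample with mu = 10, matching what the bit-sliced routine computes per coefficient. */
static void cbd_ref(uint16_t r[N], const uint8_t buf[10 * N / 8]) {
    unsigned k, j;
    for (k = 0; k < N; k++) {
        unsigned a = 0, b = 0;
        for (j = 0; j < 5; j++) {
            a += bit(buf, 10 * k + j);
            b += bit(buf, 10 * k + 5 + j);
        }
        r[k] = (uint16_t)(a - b) & (Q - 1);
    }
}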
@@ -0,0 +1,16 @@ | |||
#ifndef CBD_H | |||
#define CBD_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "poly.h" | |||
#include <stdint.h> | |||
void PQCLEAN_LIGHTSABER_AVX2_cbd(uint16_t *r, const unsigned char *buf); | |||
#endif |
@@ -0,0 +1,79 @@ | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "fips202.h" | |||
#include "randombytes.h" | |||
#include "verify.h" | |||
#include <immintrin.h> | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
#include <string.h> | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
int i; | |||
PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_keypair(pk, sk); // sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
for (i = 0; i < SABER_INDCPA_PUBLICKEYBYTES; i++) { | |||
sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; // sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_PUBLICKEYBYTES-1] <-- pk | |||
} | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); // Then hash(pk) is appended. | |||
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES); // Remaining part of sk contains a pseudo-random number. | |||
// It is used in place of the real pre-key (implicit rejection) when the re-encryption check in PQCLEAN_LIGHTSABER_AVX2_crypto_kem_dec() fails. | |||
return (0); | |||
} | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_enc(uint8_t *c, uint8_t *k, const uint8_t *pk) { | |||
uint8_t kr[64]; // Will contain key, coins | |||
uint8_t buf[64]; | |||
randombytes(buf, 32); | |||
sha3_256(buf, buf, 32); // BUF[0:31] <-- random message (will be used as the key by the client); note: hashing ensures no raw system RNG output is revealed | |||
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); // BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_512(kr, buf, 64); // kr[0:63] <-- Hash(buf[0:63]); | |||
// K^ <-- kr[0:31] | |||
// noiseseed (r) <-- kr[32:63]; | |||
PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_enc(c, buf, (const uint8_t *) (kr + 32), pk); // buf[0:31] contains message; kr[32:63] contains randomness r; | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_dec(uint8_t *k, const uint8_t *c, const uint8_t *sk) { | |||
int i; | |||
uint8_t fail; | |||
uint8_t cmp[SABER_BYTES_CCA_DEC]; | |||
uint8_t buf[64]; | |||
uint8_t kr[64]; // Will contain key, coins | |||
const uint8_t *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_dec(buf, sk, c); // buf[0:31] <-- message | |||
// Multitarget countermeasure for coins + contributory KEM | |||
for (i = 0; i < 32; i++) { // Save hash by storing h(pk) in sk | |||
buf[32 + i] = sk[SABER_SECRETKEYBYTES - 64 + i]; | |||
} | |||
sha3_512(kr, buf, 64); | |||
PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_enc(cmp, buf, (const uint8_t *) (kr + 32), pk); | |||
fail = PQCLEAN_LIGHTSABER_AVX2_verify(c, cmp, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); // overwrite coins in kr with h(c) | |||
PQCLEAN_LIGHTSABER_AVX2_cmov(kr, sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES, fail); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} |
@@ -0,0 +1,35 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include <stdint.h> | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_keypair(uint8_t *pk, uint8_t *sk); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_client(uint8_t *pk, uint8_t *b_prime, uint8_t *c, uint8_t *key); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_server(uint8_t *pk, uint8_t *b_prime, uint8_t *c, uint8_t *key); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_keypair(uint8_t *pk, uint8_t *sk); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_enc(uint8_t *message, uint8_t *noiseseed, uint8_t *pk, uint8_t *ciphertext); | |||
void PQCLEAN_LIGHTSABER_AVX2_indcpa_kem_dec(uint8_t *sk, uint8_t *ciphertext, uint8_t message_dec[]); | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_enc(unsigned char *c, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_LIGHTSABER_AVX2_crypto_kem_dec(unsigned char *k, const unsigned char *c, const unsigned char *sk); | |||
//uint64_t clock1,clock2; | |||
//uint64_t clock_kp_kex, clock_enc_kex, clock_dec_kex; | |||
#endif |
@@ -0,0 +1,502 @@ | |||
#include "pack_unpack.h" | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack_3bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x7) | ( (data[offset_data + 1] & 0x7) << 3 ) | ((data[offset_data + 2] & 0x3) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 2] >> 2 ) & 0x01) | ( (data[offset_data + 3] & 0x7) << 1 ) | ( (data[offset_data + 4] & 0x7) << 4 ) | (((data[offset_data + 5]) & 0x01) << 7); | |||
bytes[offset_byte + 2] = ((data[offset_data + 5] >> 1 ) & 0x03) | ( (data[offset_data + 6] & 0x7) << 2 ) | ( (data[offset_data + 7] & 0x7) << 5 ); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack3bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0]) & 0x07; | |||
data[offset_data + 1] = ( (bytes[offset_byte + 0]) >> 3 ) & 0x07; | |||
data[offset_data + 2] = ( ( (bytes[offset_byte + 0]) >> 6 ) & 0x03) | ( ( (bytes[offset_byte + 1]) & 0x01) << 2 ); | |||
data[offset_data + 3] = ( (bytes[offset_byte + 1]) >> 1 ) & 0x07; | |||
data[offset_data + 4] = ( (bytes[offset_byte + 1]) >> 4 ) & 0x07; | |||
data[offset_data + 5] = ( ( (bytes[offset_byte + 1]) >> 7 ) & 0x01) | ( ( (bytes[offset_byte + 2]) & 0x03) << 1 ); | |||
data[offset_data + 6] = ( (bytes[offset_byte + 2] >> 2) & 0x07 ); | |||
data[offset_data + 7] = ( (bytes[offset_byte + 2] >> 5) & 0x07 ); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack_4bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
bytes[j] = (data[offset_data] & 0x0f) | ( (data[offset_data + 1] & 0x0f) << 4 ); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack4bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
data[offset_data] = bytes[j] & 0x0f; | |||
data[offset_data + 1] = (bytes[j] >> 4) & 0x0f; | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack_6bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x3f) | ((data[offset_data + 1] & 0x03) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 1] >> 2) & 0x0f) | ((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 2] = ((data[offset_data + 2] >> 4) & 0x03) | ((data[offset_data + 3] & 0x3f) << 2); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack6bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 4 * j; | |||
data[offset_data + 0] = bytes[offset_byte + 0] & 0x3f; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0] >> 6) & 0x03) | ((bytes[offset_byte + 1] & 0x0f) << 2) ; | |||
data[offset_data + 2] = ((bytes[offset_byte + 1] & 0xff) >> 4) | ((bytes[offset_byte + 2] & 0x03) << 4) ; | |||
data[offset_data + 3] = ((bytes[offset_byte + 2] & 0xff) >> 2); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack10bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x03 ) | ((data[i][ offset_data + 1 ] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 6) & 0x0f ) | ( (data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 4) & 0x3f ) | ((data[i][ offset_data + 3 ] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 3 ] >> 2) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_POLVECp2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x03 ) | ((data[i][ offset_data + 1 ] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 6) & 0x0f ) | ( (data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 4) & 0x3f ) | ((data[i][ offset_data + 3 ] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 3 ] >> 2) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_POLVECq2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x1f ) | ((data[i][ offset_data + 1 ] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 3) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 11) & 0x03 ) | ((data[i][ offset_data + 2 ] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 6) & 0x7f ) | ( (data[i][ offset_data + 3 ] & 0x01) << 7 ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 9) & 0x0f ) | ( (data[i][ offset_data + 4 ] & 0x0f) << 4 ); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 4] >> 4) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 4 ] >> 12) & 0x01 ) | ( (data[i][ offset_data + 5 ] & 0x7f) << 1 ); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 5 ] >> 7) & 0x3f ) | ( (data[i][ offset_data + 6 ] & 0x03) << 6 ); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 6 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 11] = ( (data[i][ offset_data + 6 ] >> 10) & 0x07 ) | ( (data[i][ offset_data + 7 ] & 0x1f) << 3 ); | |||
bytes[offset_byte + 12] = ( (data[i][ offset_data + 7 ] >> 5) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLq(uint16_t data[SABER_N], const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLVECp(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[ offset_byte + 1 ] & 0x03) << 8); | |||
data[i][offset_data + 1] = ( (bytes[ offset_byte + 1 ] >> 2) & (0x3f)) | ((bytes[ offset_byte + 2 ] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ( (bytes[ offset_byte + 2 ] >> 4) & (0x0f)) | ((bytes[ offset_byte + 3 ] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ( (bytes[ offset_byte + 3 ] >> 6) & (0x03)) | ((bytes[ offset_byte + 4 ] & 0xff) << 2); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLVECq(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack10bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[ offset_byte + 1 ] & 0x03) << 8); | |||
data[i][offset_data + 1] = ( (bytes[ offset_byte + 1 ] >> 2) & (0x3f)) | ((bytes[ offset_byte + 2 ] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ( (bytes[ offset_byte + 2 ] >> 4) & (0x0f)) | ((bytes[ offset_byte + 3 ] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ( (bytes[ offset_byte + 3 ] >> 6) & (0x03)) | ((bytes[ offset_byte + 4 ] & 0xff) << 2); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack13bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x1f ) | ((data[i][ offset_data + 1 ] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 3) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 11) & 0x03 ) | ((data[i][ offset_data + 2 ] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 6) & 0x7f ) | ( (data[i][ offset_data + 3 ] & 0x01) << 7 ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 9) & 0x0f ) | ( (data[i][ offset_data + 4 ] & 0x0f) << 4 ); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 4] >> 4) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 4 ] >> 12) & 0x01 ) | ( (data[i][ offset_data + 5 ] & 0x7f) << 1 ); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 5 ] >> 7) & 0x3f ) | ( (data[i][ offset_data + 6 ] & 0x03) << 6 ); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 6 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 11] = ( (data[i][ offset_data + 6 ] >> 10) & 0x07 ) | ( (data[i][ offset_data + 7 ] & 0x1f) << 3 ); | |||
bytes[offset_byte + 12] = ( (data[i][ offset_data + 7 ] >> 5) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack13bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_poly_un_pack13bit(uint16_t data[SABER_N], const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
//for(i=0;i<SABER_K;i++){ | |||
//i=0; | |||
//offset_byte1=i*(SABER_N*13)/8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
//offset_byte=offset_byte1+13*j; | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
//} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack11bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
    /* This function packs a stream of 11-bit coefficients into bytes. */
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 11) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 11 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x07 ) | ((data[i][ offset_data + 1 ] & 0x1f) << 3); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 5) & 0x3f ) | ((data[i][ offset_data + 2 ] & 0x03) << 6); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 10) & 0x01 ) | ((data[i][ offset_data + 3 ] & 0x7f) << 1); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 7) & 0x0f ) | ((data[i][ offset_data + 4 ] & 0x0f) << 4); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 4 ] >> 4) & 0x7f ) | ((data[i][ offset_data + 5 ] & 0x01) << 7); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 5 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 5 ] >> 9) & 0x03 ) | ((data[i][ offset_data + 6 ] & 0x3f) << 2); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 6 ] >> 6) & 0x1f ) | ((data[i][ offset_data + 7 ] & 0x07) << 5); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 7 ] >> 3) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack11bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 11) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 11 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0]) | ( (bytes[offset_byte + 1] & 0x07) << 8 ); | |||
data[i][offset_data + 1] = ( (bytes[offset_byte + 1] >> 3) & 0x1f) | ( (bytes[offset_byte + 2] & 0x3f) << 5 ); | |||
data[i][offset_data + 2] = ( (bytes[offset_byte + 2] >> 6) & 0x03) | ( (bytes[offset_byte + 3] & 0xff) << 2 ) | ( (bytes[offset_byte + 4] & 0x01) << 10 ); | |||
data[i][offset_data + 3] = ( (bytes[offset_byte + 4] >> 1) & 0x7f) | ( (bytes[offset_byte + 5] & 0x0f) << 7 ); | |||
data[i][offset_data + 4] = ( (bytes[offset_byte + 5] >> 4) & 0x0f) | ( (bytes[offset_byte + 6] & 0x7f) << 4 ); | |||
data[i][offset_data + 5] = ( (bytes[offset_byte + 6] >> 7) & 0x01) | ( (bytes[offset_byte + 7] & 0xff) << 1 ) | ( (bytes[offset_byte + 8] & 0x03) << 9 ); | |||
data[i][offset_data + 6] = ( (bytes[offset_byte + 8] >> 2) & 0x3f) | ( (bytes[offset_byte + 9] & 0x1f) << 6 ); | |||
data[i][offset_data + 7] = ( (bytes[offset_byte + 9] >> 5) & 0x07) | ( (bytes[offset_byte + 10] & 0xff) << 3 ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack14bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 14) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 7 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x3f ) | ((data[i][ offset_data + 1 ] & 0x03) << 6); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 10) & 0x0f ) | ((data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 4) & 0xff ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 2 ] >> 12) & 0x03 ) | ((data[i][ offset_data + 3 ] & 0x3f) << 2); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 6) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack14bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 14) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 7 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & 0xff) | ( (bytes[offset_byte + 1] & 0x3f) << 8 ); | |||
data[i][offset_data + 1] = ( (bytes[offset_byte + 1] >> 6) & 0x03) | ((bytes[offset_byte + 2] & 0xff) << 2 ) | ( (bytes[offset_byte + 3] & 0x0f) << 10 ); | |||
data[i][offset_data + 2] = ( (bytes[offset_byte + 3] >> 4) & 0x0f) | ( (bytes[offset_byte + 4] ) << 4 ) | ( (bytes[offset_byte + 5] & 0x03) << 12 ); | |||
data[i][offset_data + 3] = ( (bytes[offset_byte + 5] >> 2) & 0x3f) | ( (bytes[offset_byte + 6] ) << 6 ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
PQCLEAN_LIGHTSABER_AVX2_POLVECp2BS(bytes, data); | |||
} else if (modulus == 8192) { | |||
PQCLEAN_LIGHTSABER_AVX2_POLVECq2BS(bytes, data); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLVEC(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes, uint16_t modulus) { | |||
if (modulus == 1024) { | |||
PQCLEAN_LIGHTSABER_AVX2_BS2POLVECp(data, bytes); | |||
} else if (modulus == 8192) { | |||
PQCLEAN_LIGHTSABER_AVX2_BS2POLVECq(data, bytes); | |||
} | |||
} |
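/*
 * Usage sketch (illustrative only; the variable names here are hypothetical):
 * the modulus argument selects the packing width. SABER uses q = 2^13 = 8192
 * for full-width (secret-key) vectors and p = 2^10 = 1024 for the rounded
 * public-key/ciphertext vectors, which is exactly what the two branches of
 * POLVEC2BS/BS2POLVEC above distinguish.
 */
static void polvec_pack_sketch(uint8_t *sk_bytes, uint8_t *pk_bytes,
                               const uint16_t s[SABER_K][SABER_N],
                               const uint16_t b[SABER_K][SABER_N]) {
    PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(sk_bytes, s, 8192); /* 13-bit packing */
    PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(pk_bytes, b, 1024); /* 10-bit packing */
}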
@@ -0,0 +1,56 @@ | |||
#ifndef PACK_UNPACK_H | |||
#define PACK_UNPACK_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLq(uint16_t data[SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLVEC(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes, uint16_t modulus); | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLVECq(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_BS2POLVECp(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_POLVEC2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
void PQCLEAN_LIGHTSABER_AVX2_POLVECq2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_AVX2_POLVECp2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack_3bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack_4bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack_6bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack10bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack11bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack13bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_pack14bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_poly_un_pack13bit(uint16_t data[SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack3bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack4bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack6bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack10bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack11bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack13bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_LIGHTSABER_AVX2_SABER_un_pack14bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
#endif |
@@ -0,0 +1,27 @@ | |||
#ifndef POLY_H | |||
#define POLY_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
typedef struct { | |||
uint16_t coeffs[SABER_N]; | |||
} poly; | |||
typedef struct { | |||
poly vec[SABER_K]; | |||
} polyvec; | |||
void PQCLEAN_LIGHTSABER_AVX2_poly_getnoise(uint16_t *r, const unsigned char *seed, unsigned char nonce); | |||
void PQCLEAN_LIGHTSABER_AVX2_poly_getnoise4x(uint16_t *r0, uint16_t *r1, uint16_t *r2, const unsigned char *seed, unsigned char nonce0, unsigned char nonce1, unsigned char nonce2, unsigned char nonce3); | |||
#endif |
@@ -0,0 +1,20 @@ | |||
#include "../SABER_params.h" | |||
#define AVX_N (SABER_N >> 4) | |||
#define small_len_avx (AVX_N >> 2) | |||
#define SCHB_N 16 | |||
#define N_SB (SABER_N >> 2) | |||
#define N_SB_RES (2*N_SB-1) | |||
#define N_SB_16 (N_SB >> 2) | |||
#define N_SB_16_RES (2*N_SB_16-1) | |||
#define AVX_N1 16 /*N/16*/ | |||
#define SCM_SIZE 16 | |||
// The dimension of a vector, i.e., a vector has NUM_POLY elements and a matrix has NUM_POLY x NUM_POLY elements
#define NUM_POLY SABER_K | |||
//int NUM_POLY=2; |
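/*
 * Worked values (illustrative), assuming SABER_N == 256 as in all SABER
 * parameter sets: AVX_N = 16 (one __m256i holds 16 16-bit coefficients),
 * small_len_avx = 4, N_SB = 64 (limb size after the 4-way Toom-Cook split),
 * N_SB_RES = 127, N_SB_16 = 16 (schoolbook block size), N_SB_16_RES = 31.
 * The typedef below is a C99-style compile-time check of these values.
 */
typedef char saber_avx_params_check[
    (AVX_N == 16 && small_len_avx == 4 && N_SB == 64 && N_SB_RES == 127 &&
     N_SB_16 == 16 && N_SB_16_RES == 31 && AVX_N1 == 16) ? 1 : -1];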
@@ -0,0 +1,303 @@ | |||
#include <immintrin.h> | |||
static void transpose_n1(__m256i *M) | |||
{ | |||
//int i; | |||
register __m256i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; | |||
register __m256i temp, temp0, temp1, temp2; | |||
//for(i=0; i<8; i=i+1) | |||
//{ | |||
r0 = _mm256_unpacklo_epi16(M[0], M[1]); | |||
r1 = _mm256_unpacklo_epi16(M[2], M[3]); | |||
r2 = _mm256_unpacklo_epi16(M[4], M[5]); | |||
r3 = _mm256_unpacklo_epi16(M[6], M[7]); | |||
r4 = _mm256_unpacklo_epi16(M[8], M[9]); | |||
r5 = _mm256_unpacklo_epi16(M[10], M[11]); | |||
r6 = _mm256_unpacklo_epi16(M[12], M[13]); | |||
r7 = _mm256_unpacklo_epi16(M[14], M[15]); | |||
temp = _mm256_unpacklo_epi32(r0, r1); | |||
temp0 = _mm256_unpacklo_epi32(r2, r3); | |||
temp1 = _mm256_unpacklo_epi32(r4, r5); | |||
temp2 = _mm256_unpacklo_epi32(r6, r7); | |||
r8 = _mm256_unpackhi_epi32(r0, r1); | |||
r9 = _mm256_unpackhi_epi32(r2, r3); | |||
r10 = _mm256_unpackhi_epi32(r4, r5); | |||
r11 = _mm256_unpackhi_epi32(r6, r7); | |||
r0 = _mm256_unpacklo_epi64(temp, temp0); | |||
r2 = _mm256_unpackhi_epi64(temp, temp0); | |||
r1 = _mm256_unpacklo_epi64(temp1, temp2); | |||
r3 = _mm256_unpackhi_epi64(temp1, temp2); | |||
temp = _mm256_unpackhi_epi16(M[0], M[1]); | |||
temp0 = _mm256_unpackhi_epi16(M[2], M[3]); | |||
temp1 = _mm256_unpackhi_epi16(M[4], M[5]); | |||
temp2 = _mm256_unpackhi_epi16(M[6], M[7]); | |||
r4 = _mm256_unpackhi_epi16(M[8], M[9]); | |||
M[0] = _mm256_permute2f128_si256(r0, r1, 0x20); | |||
M[8] = _mm256_permute2f128_si256(r0, r1, 0x31); | |||
M[1] = _mm256_permute2f128_si256(r2, r3, 0x20); | |||
M[9] = _mm256_permute2f128_si256(r2, r3, 0x31); | |||
r5 = _mm256_unpackhi_epi16(M[10], M[11]); | |||
r6 = _mm256_unpackhi_epi16(M[12], M[13]); | |||
r7 = _mm256_unpackhi_epi16(M[14], M[15]); | |||
r0 = _mm256_unpacklo_epi64(r8, r9); | |||
r1 = _mm256_unpacklo_epi64(r10, r11); | |||
r2 = _mm256_unpackhi_epi64(r8, r9); | |||
r3 = _mm256_unpackhi_epi64(r10, r11); | |||
M[3] = _mm256_permute2f128_si256(r2, r3, 0x20); | |||
M[11] = _mm256_permute2f128_si256(r2, r3, 0x31); | |||
M[2] = _mm256_permute2f128_si256(r0, r1, 0x20); | |||
M[10] = _mm256_permute2f128_si256(r0, r1, 0x31); | |||
//for(i=0; i<4; i=i+1) | |||
//{ | |||
r0 = _mm256_unpacklo_epi32(temp, temp0); | |||
r1 = _mm256_unpacklo_epi32(temp1, temp2); | |||
r2 = _mm256_unpacklo_epi32(r4, r5); | |||
r3 = _mm256_unpacklo_epi32(r6, r7); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
r8 = _mm256_unpacklo_epi64(r0, r1); | |||
r10 = _mm256_unpackhi_epi64(r0, r1); | |||
r9 = _mm256_unpacklo_epi64(r2, r3); | |||
r11 = _mm256_unpackhi_epi64(r2, r3); | |||
M[4] = _mm256_permute2f128_si256(r8, r9, 0x20); | |||
M[12] = _mm256_permute2f128_si256(r8, r9, 0x31); | |||
M[5] = _mm256_permute2f128_si256(r10, r11, 0x20); | |||
M[13] = _mm256_permute2f128_si256(r10, r11, 0x31); | |||
r0 = _mm256_unpackhi_epi32(temp, temp0); | |||
r1 = _mm256_unpackhi_epi32(temp1, temp2); | |||
r2 = _mm256_unpackhi_epi32(r4, r5); | |||
r3 = _mm256_unpackhi_epi32(r6, r7); | |||
//} | |||
// for(i=0; i<2; i=i+1) | |||
// { | |||
r4 = _mm256_unpacklo_epi64(r0, r1); | |||
r6 = _mm256_unpackhi_epi64(r0, r1); | |||
r5 = _mm256_unpacklo_epi64(r2, r3); | |||
r7 = _mm256_unpackhi_epi64(r2, r3); | |||
// } | |||
//------------------------------------------------------- | |||
M[6] = _mm256_permute2f128_si256(r4, r5, 0x20); | |||
M[14] = _mm256_permute2f128_si256(r4, r5, 0x31); | |||
M[7] = _mm256_permute2f128_si256(r6, r7, 0x20); | |||
M[15] = _mm256_permute2f128_si256(r6, r7, 0x31); | |||
} | |||
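/*
 * Scalar reference (illustrative) for what transpose_n1 computes when each
 * __m256i in M is viewed as one row of a 16 x 16 matrix of 16-bit values:
 * the matrix is transposed in place. uint16_t assumes <stdint.h>.
 */
static void transpose_n1_reference(uint16_t M[16][16]) {
    int i, j;
    for (i = 0; i < 16; i++) {
        for (j = i + 1; j < 16; j++) {
            uint16_t t = M[i][j];
            M[i][j] = M[j][i];
            M[j][i] = t;
        }
    }
}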
/* | |||
void transpose_unrolled(__m256i *M) | |||
{ | |||
int i; | |||
__m256i tL[8], tH[8]; | |||
__m256i bL[4], bH[4], cL[4], cH[4]; | |||
__m256i dL[2], dH[2], eL[2], eH[2], fL[2], fH[2], gL[2], gH[2]; | |||
__m256i r0, r1, r2, r3, r4, r5, r6, r7; | |||
//for(i=0; i<8; i=i+1) | |||
//{ | |||
tL[0] = _mm256_unpacklo_epi16(M[0], M[1]); | |||
tH[0] = _mm256_unpackhi_epi16(M[0], M[1]); | |||
tL[1] = _mm256_unpacklo_epi16(M[2], M[3]); | |||
tH[1] = _mm256_unpackhi_epi16(M[2], M[3]); | |||
tL[2] = _mm256_unpacklo_epi16(M[4], M[5]); | |||
tH[2] = _mm256_unpackhi_epi16(M[4], M[5]); | |||
tL[3] = _mm256_unpacklo_epi16(M[6], M[7]); | |||
tH[3] = _mm256_unpackhi_epi16(M[6], M[7]); | |||
tL[4] = _mm256_unpacklo_epi16(M[8], M[9]); | |||
tH[4] = _mm256_unpackhi_epi16(M[8], M[9]); | |||
tL[5] = _mm256_unpacklo_epi16(M[10], M[11]); | |||
tH[5] = _mm256_unpackhi_epi16(M[10], M[11]); | |||
tL[6] = _mm256_unpacklo_epi16(M[12], M[13]); | |||
tH[6] = _mm256_unpackhi_epi16(M[12], M[13]); | |||
tL[7] = _mm256_unpacklo_epi16(M[14], M[15]); | |||
tH[7] = _mm256_unpackhi_epi16(M[14], M[15]); | |||
//} | |||
//------------------------------------------------------- | |||
//for(i=0; i<4; i=i+1) | |||
//{ | |||
bL[0] = _mm256_unpacklo_epi32(tL[0], tL[1]); | |||
bH[0] = _mm256_unpackhi_epi32(tL[0], tL[1]); | |||
bL[1] = _mm256_unpacklo_epi32(tL[2], tL[3]); | |||
bH[1] = _mm256_unpackhi_epi32(tL[2], tL[3]); | |||
bL[2] = _mm256_unpacklo_epi32(tL[4], tL[5]); | |||
bH[2] = _mm256_unpackhi_epi32(tL[4], tL[5]); | |||
bL[3] = _mm256_unpacklo_epi32(tL[6], tL[7]); | |||
bH[3] = _mm256_unpackhi_epi32(tL[6], tL[7]); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
dL[0] = _mm256_unpacklo_epi64(bL[0], bL[1]); | |||
dH[0] = _mm256_unpackhi_epi64(bL[0], bL[1]); | |||
dL[1] = _mm256_unpacklo_epi64(bL[2], bL[3]); | |||
dH[1] = _mm256_unpackhi_epi64(bL[2], bL[3]); | |||
M[0] = _mm256_permute2f128_si256(dL[0], dL[1], 0x20); | |||
M[8] = _mm256_permute2f128_si256(dL[0], dL[1], 0x31); | |||
M[1] = _mm256_permute2f128_si256(dH[0], dH[1], 0x20); | |||
M[9] = _mm256_permute2f128_si256(dH[0], dH[1], 0x31); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
eL[0] = _mm256_unpacklo_epi64(bH[0], bH[1]); | |||
eH[0] = _mm256_unpackhi_epi64(bH[0], bH[1]); | |||
eL[1] = _mm256_unpacklo_epi64(bH[2], bH[3]); | |||
eH[1] = _mm256_unpackhi_epi64(bH[2], bH[3]); | |||
//} | |||
//------------------------------------------------------- | |||
//------------------------------------------------------- | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
cL[i] = _mm256_unpacklo_epi32(tH[2*i], tH[2*i+1]); | |||
cH[i] = _mm256_unpackhi_epi32(tH[2*i], tH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
fL[i] = _mm256_unpacklo_epi64(cL[2*i], cL[2*i+1]); | |||
fH[i] = _mm256_unpackhi_epi64(cL[2*i], cL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
gL[i] = _mm256_unpacklo_epi64(cH[2*i], cH[2*i+1]); | |||
gH[i] = _mm256_unpackhi_epi64(cH[2*i], cH[2*i+1]); | |||
} | |||
//------------------------------------------------------- | |||
M[2] = _mm256_permute2f128_si256(eL[0], eL[1], 0x20); | |||
M[10] = _mm256_permute2f128_si256(eL[0], eL[1], 0x31); | |||
M[3] = _mm256_permute2f128_si256(eH[0], eH[1], 0x20); | |||
M[11] = _mm256_permute2f128_si256(eH[0], eH[1], 0x31); | |||
M[4] = _mm256_permute2f128_si256(fL[0], fL[1], 0x20); | |||
M[12] = _mm256_permute2f128_si256(fL[0], fL[1], 0x31); | |||
M[5] = _mm256_permute2f128_si256(fH[0], fH[1], 0x20); | |||
M[13] = _mm256_permute2f128_si256(fH[0], fH[1], 0x31); | |||
M[6] = _mm256_permute2f128_si256(gL[0], gL[1], 0x20); | |||
M[14] = _mm256_permute2f128_si256(gL[0], gL[1], 0x31); | |||
M[7] = _mm256_permute2f128_si256(gH[0], gH[1], 0x20); | |||
M[15] = _mm256_permute2f128_si256(gH[0], gH[1], 0x31); | |||
} | |||
void transpose1(__m256i *M) | |||
{ | |||
int i; | |||
__m256i tL[8], tH[8]; | |||
__m256i bL[4], bH[4], cL[4], cH[4]; | |||
__m256i dL[2], dH[2], eL[2], eH[2], fL[2], fH[2], gL[2], gH[2]; | |||
for(i=0; i<8; i=i+1) | |||
{ | |||
tL[i] = _mm256_unpacklo_epi16(M[2*i], M[2*i+1]); | |||
tH[i] = _mm256_unpackhi_epi16(M[2*i], M[2*i+1]); | |||
} | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
bL[i] = _mm256_unpacklo_epi32(tL[2*i], tL[2*i+1]); | |||
bH[i] = _mm256_unpackhi_epi32(tL[2*i], tL[2*i+1]); | |||
} | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
cL[i] = _mm256_unpacklo_epi32(tH[2*i], tH[2*i+1]); | |||
cH[i] = _mm256_unpackhi_epi32(tH[2*i], tH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
dL[i] = _mm256_unpacklo_epi64(bL[2*i], bL[2*i+1]); | |||
dH[i] = _mm256_unpackhi_epi64(bL[2*i], bL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
eL[i] = _mm256_unpacklo_epi64(bH[2*i], bH[2*i+1]); | |||
eH[i] = _mm256_unpackhi_epi64(bH[2*i], bH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
fL[i] = _mm256_unpacklo_epi64(cL[2*i], cL[2*i+1]); | |||
fH[i] = _mm256_unpackhi_epi64(cL[2*i], cL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
gL[i] = _mm256_unpacklo_epi64(cH[2*i], cH[2*i+1]); | |||
gH[i] = _mm256_unpackhi_epi64(cH[2*i], cH[2*i+1]); | |||
} | |||
M[0] = _mm256_permute2f128_si256(dL[0], dL[1], 0x20); | |||
M[8] = _mm256_permute2f128_si256(dL[0], dL[1], 0x31); | |||
M[1] = _mm256_permute2f128_si256(dH[0], dH[1], 0x20); | |||
M[9] = _mm256_permute2f128_si256(dH[0], dH[1], 0x31); | |||
M[2] = _mm256_permute2f128_si256(eL[0], eL[1], 0x20); | |||
M[10] = _mm256_permute2f128_si256(eL[0], eL[1], 0x31); | |||
M[3] = _mm256_permute2f128_si256(eH[0], eH[1], 0x20); | |||
M[11] = _mm256_permute2f128_si256(eH[0], eH[1], 0x31); | |||
M[4] = _mm256_permute2f128_si256(fL[0], fL[1], 0x20); | |||
M[12] = _mm256_permute2f128_si256(fL[0], fL[1], 0x31); | |||
M[5] = _mm256_permute2f128_si256(fH[0], fH[1], 0x20); | |||
M[13] = _mm256_permute2f128_si256(fH[0], fH[1], 0x31); | |||
M[6] = _mm256_permute2f128_si256(gL[0], gL[1], 0x20); | |||
M[14] = _mm256_permute2f128_si256(gL[0], gL[1], 0x31); | |||
M[7] = _mm256_permute2f128_si256(gH[0], gH[1], 0x20); | |||
M[15] = _mm256_permute2f128_si256(gH[0], gH[1], 0x31); | |||
} | |||
*/ |
@@ -0,0 +1,753 @@ | |||
//#define SCM_SIZE 16 | |||
//#pragma STDC FP_CONTRACT ON | |||
#include <immintrin.h> | |||
static inline __m256i mul_add(__m256i a, __m256i b, __m256i c) {
return _mm256_add_epi16(_mm256_mullo_epi16(a, b), c); | |||
} | |||
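/*
 * Per 16-bit lane, mul_add computes a*b + c modulo 2^16; the wrap-around is
 * harmless because all SABER coefficient arithmetic is performed modulo
 * powers of two no larger than 2^13. Scalar equivalent for one lane
 * (illustrative):
 */
static inline uint16_t mul_add_lane(uint16_t a, uint16_t b, uint16_t c) {
    return (uint16_t)((uint32_t)a * b + c); /* wraps mod 2^16, like mullo + add */
}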
static void schoolbook_avx_new3_acc(__m256i *a, __m256i *b, __m256i *c_avx) // 8 coefficients of a and b have been prefetched into registers
// the results are accumulated into c_avx
{ | |||
register __m256i a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7; | |||
register __m256i temp; | |||
a0=a[0]; | |||
a1=a[1]; | |||
a2=a[2]; | |||
a3=a[3]; | |||
a4=a[4]; | |||
a5=a[5]; | |||
a6=a[6]; | |||
a7=a[7]; | |||
b0=b[0]; | |||
b1=b[1]; | |||
b2=b[2]; | |||
b3=b[3]; | |||
b4=b[4]; | |||
b5=b[5]; | |||
b6=b[6]; | |||
b7=b[7]; | |||
// New Unrolled first triangle | |||
    // accumulate into the existing c_avx values
c_avx[0] = mul_add(a0, b0, c_avx[0]); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
temp=mul_add(a1, b0, temp); | |||
c_avx[1] = _mm256_add_epi16(temp, c_avx[1]); | |||
temp = _mm256_mullo_epi16 (a0, b2); | |||
temp = mul_add(a1, b1, temp); | |||
temp=mul_add(a2, b0, temp); | |||
c_avx[2] = _mm256_add_epi16(temp, c_avx[2]); | |||
temp = _mm256_mullo_epi16 (a0, b3); | |||
temp = mul_add(a1, b2, temp); | |||
temp = mul_add(a2, b1, temp); | |||
temp=mul_add(a3, b0, temp); | |||
c_avx[3] = _mm256_add_epi16(temp, c_avx[3]); | |||
temp = _mm256_mullo_epi16 (a0, b4); | |||
temp = mul_add(a1, b3, temp); | |||
temp = mul_add(a3, b1, temp); | |||
temp = mul_add(a4, b0, temp); | |||
temp=mul_add(a2, b2, temp); | |||
c_avx[4] = _mm256_add_epi16(temp, c_avx[4]); | |||
temp = _mm256_mullo_epi16 (a0, b5); | |||
temp = mul_add(a1, b4 , temp); | |||
temp = mul_add(a2, b3, temp); | |||
temp = mul_add(a3, b2, temp); | |||
temp = mul_add( a4, b1, temp); | |||
temp=mul_add(a5, b0, temp); | |||
c_avx[5] = _mm256_add_epi16(temp, c_avx[5]); | |||
temp = _mm256_mullo_epi16 (a0, b6); | |||
temp = mul_add(a1, b5, temp); | |||
temp = mul_add(a5, b1, temp); | |||
temp = mul_add(a6, b0, temp); | |||
temp = mul_add(a2, b4, temp); | |||
temp = mul_add(a3, b3, temp); | |||
temp=mul_add(a4, b2, temp); | |||
c_avx[6] = _mm256_add_epi16(temp, c_avx[6]); | |||
temp = _mm256_mullo_epi16 (a0, b7); | |||
temp = mul_add(a1, b6, temp); | |||
temp = mul_add (a6, b1, temp); | |||
temp = mul_add (a7, b0, temp); | |||
temp = mul_add(a2, b5, temp); | |||
temp = mul_add (a3, b4, temp); | |||
temp = mul_add (a4, b3, temp); | |||
temp=mul_add(a5, b2, temp); | |||
c_avx[7] = _mm256_add_epi16(temp, c_avx[7]); | |||
temp = _mm256_mullo_epi16 (a0, b[8]); | |||
temp = mul_add (a1, b7, temp); | |||
temp = mul_add (a7, b1, temp); | |||
temp = mul_add (a[8], b0, temp); | |||
temp = mul_add (a2, b6,temp); | |||
temp = mul_add(a3, b5, temp); | |||
temp = mul_add (a4, b4,temp); | |||
temp = mul_add (a5, b3, temp); | |||
temp=mul_add(a6, b2, temp); | |||
c_avx[8] = _mm256_add_epi16(temp, c_avx[8]); | |||
temp = _mm256_mullo_epi16 (a0, b[9]); | |||
temp = mul_add (a1, b[8], temp); | |||
temp = mul_add (a[8], b1, temp); | |||
temp = mul_add (a[9], b0, temp); | |||
temp = mul_add (a2, b7, temp); | |||
temp = mul_add (a3, b6, temp); | |||
temp = mul_add (a4, b5, temp); | |||
temp = mul_add (a5, b4, temp); | |||
temp = mul_add (a6, b3, temp); | |||
temp=mul_add(a7, b2, temp); | |||
c_avx[9] = _mm256_add_epi16(temp, c_avx[9]); | |||
temp= _mm256_mullo_epi16 (a0, b[10]); | |||
temp = mul_add (a1, b[9], temp); | |||
temp = mul_add (a[9], b1, temp); | |||
temp = mul_add (a[10], b0, temp); | |||
temp = mul_add (a2, b[8], temp); | |||
temp = mul_add (a3, b7, temp); | |||
temp = mul_add (a4, b6, temp); | |||
temp = mul_add (a5, b5, temp); | |||
temp = mul_add (a6, b4, temp); | |||
temp = mul_add (a7, b3, temp); | |||
temp=mul_add(a[8], b2, temp); | |||
c_avx[10] = _mm256_add_epi16(temp, c_avx[10]); | |||
temp = _mm256_mullo_epi16 (a0, b[11]); | |||
temp = mul_add (a1, b[10], temp ); | |||
temp = mul_add (a[10], b1, temp ); | |||
temp = mul_add (a[11], b0, temp ); | |||
temp = mul_add (a2, b[9], temp ); | |||
temp = mul_add (a3, b[8], temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a[8], b3, temp ); | |||
temp=mul_add(a[9], b2, temp); | |||
c_avx[11] = _mm256_add_epi16(temp, c_avx[11]); | |||
temp = _mm256_mullo_epi16 (a0, b[12]); | |||
temp = mul_add (a1, b[11], temp); | |||
temp = mul_add (a[11], b1, temp); | |||
temp = mul_add (a[12], b0, temp); | |||
temp = mul_add (a2, b[10], temp); | |||
temp = mul_add (a3, b[9], temp); | |||
temp = mul_add (a4, b[8], temp); | |||
temp = mul_add (a5, b7, temp); | |||
temp = mul_add (a6, b6, temp); | |||
temp = mul_add (a7, b5, temp); | |||
temp = mul_add (a[8], b4, temp); | |||
temp = mul_add (a[9], b3, temp); | |||
temp=mul_add(a[10], b2, temp); | |||
c_avx[12] = _mm256_add_epi16(temp, c_avx[12]); | |||
temp = _mm256_mullo_epi16 (a0, b[13]); | |||
temp = mul_add (a1, b[12], temp ); | |||
temp = mul_add (a[12], b1, temp ); | |||
temp = mul_add (a[13], b0, temp ); | |||
temp = mul_add (a2, b[11], temp ); | |||
temp = mul_add (a3, b[10], temp ); | |||
temp = mul_add (a4, b[9], temp ); | |||
temp = mul_add (a5, b[8], temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a[8], b5, temp ); | |||
temp = mul_add (a[9], b4, temp ); | |||
temp = mul_add (a[10], b3, temp ); | |||
temp=mul_add(a[11], b2, temp); | |||
c_avx[13] = _mm256_add_epi16(temp, c_avx[13]); | |||
temp = _mm256_mullo_epi16 (a0, b[14]); | |||
temp = mul_add (a1, b[13], temp ); | |||
temp = mul_add (a[13], b1, temp ); | |||
temp = mul_add (a[14], b0, temp ); | |||
temp = mul_add (a2, b[12], temp ); | |||
temp = mul_add (a3, b[11], temp ); | |||
temp = mul_add (a4, b[10], temp ); | |||
temp = mul_add (a5, b[9], temp ); | |||
temp = mul_add (a6, b[8], temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a[8], b6, temp ); | |||
temp = mul_add (a[9], b5, temp ); | |||
temp = mul_add (a[10], b4, temp ); | |||
temp = mul_add (a[11], b3, temp ); | |||
temp=mul_add(a[12], b2, temp); | |||
c_avx[14] = _mm256_add_epi16(temp, c_avx[14]); | |||
temp = _mm256_mullo_epi16 (a0, b[15]); | |||
temp = mul_add (a1, b[14], temp ); | |||
temp = mul_add (a[14], b1, temp ); | |||
temp = mul_add (a[15], b0, temp ); | |||
temp = mul_add (a2, b[13], temp ); | |||
temp = mul_add (a3, b[12], temp ); | |||
temp = mul_add (a4, b[11], temp ); | |||
temp = mul_add (a5, b[10], temp ); | |||
temp = mul_add (a6, b[9], temp ); | |||
temp = mul_add (a7, b[8], temp ); | |||
temp = mul_add (a[8], b7, temp ); | |||
temp = mul_add (a[9], b6, temp ); | |||
temp = mul_add (a[10], b5, temp ); | |||
temp = mul_add (a[11], b4, temp ); | |||
temp = mul_add (a[12], b3, temp ); | |||
temp=mul_add(a[13], b2, temp); | |||
c_avx[15] = _mm256_add_epi16(temp, c_avx[15]); | |||
// unrolled second triangle | |||
a0=a[14]; | |||
a1=a[15]; | |||
a2=a[13]; | |||
a3=a[12]; | |||
a4=a[11]; | |||
a5=a[10]; | |||
a6=a[9]; | |||
a7=a[8]; | |||
b0=b[14]; | |||
b1=b[15]; | |||
b2=b[13]; | |||
b3=b[12]; | |||
b4=b[11]; | |||
b5=b[10]; | |||
b6=b[9]; | |||
b7=b[8]; | |||
temp = _mm256_mullo_epi16 (a[1], b1); | |||
temp = mul_add (a[2], b0, temp ); | |||
temp = mul_add (a[3], b2, temp ); | |||
temp = mul_add (a[4], b3, temp ); | |||
temp = mul_add (a[5], b4, temp ); | |||
temp = mul_add (a[6], b5, temp ); | |||
temp = mul_add (a[7], b6, temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a6, b[7], temp ); | |||
temp = mul_add (a5, b[6], temp ); | |||
temp = mul_add (a4, b[5], temp ); | |||
temp = mul_add (a3, b[4], temp ); | |||
temp = mul_add (a2, b[3], temp ); | |||
temp = mul_add (a0, b[2], temp ); | |||
temp=mul_add(a1, b[1], temp); | |||
c_avx[16] = _mm256_add_epi16(temp, c_avx[16]); | |||
temp = _mm256_mullo_epi16 (a[2], b1); | |||
temp = mul_add (a[3], b0, temp ); | |||
temp = mul_add (a[4], b2, temp ); | |||
temp = mul_add (a[5], b3, temp ); | |||
temp = mul_add (a[6], b4, temp ); | |||
temp = mul_add (a[7], b5, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a5, b[7], temp ); | |||
temp = mul_add (a4, b[6], temp ); | |||
temp = mul_add (a3, b[5], temp ); | |||
temp = mul_add (a2, b[4], temp ); | |||
temp = mul_add (a0, b[3], temp ); | |||
temp=mul_add(a1, b[2], temp); | |||
c_avx[17] = _mm256_add_epi16(temp, c_avx[17]); | |||
temp = _mm256_mullo_epi16 (a[3], b1); | |||
temp = mul_add (a[4], b0, temp ); | |||
temp = mul_add (a[5], b2, temp ); | |||
temp = mul_add (a[6], b3, temp ); | |||
temp = mul_add (a[7], b4, temp ); | |||
temp = mul_add (a7, b5, temp ); | |||
temp = mul_add (a6, b6, temp ); | |||
temp = mul_add (a5, b7, temp ); | |||
temp = mul_add (a4, b[7], temp ); | |||
temp = mul_add (a3, b[6], temp ); | |||
temp = mul_add (a2, b[5], temp ); | |||
temp = mul_add (a0, b[4], temp ); | |||
temp=mul_add(a1, b[3], temp); | |||
c_avx[18] = _mm256_add_epi16(temp, c_avx[18]); | |||
temp = _mm256_mullo_epi16 (a[4], b1); | |||
temp = mul_add (a[5], b0, temp ); | |||
temp = mul_add (a[6], b2, temp ); | |||
temp = mul_add (a[7], b3, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a3, b[7], temp ); | |||
temp = mul_add (a2, b[6], temp ); | |||
temp = mul_add (a0, b[5], temp ); | |||
temp=mul_add(a1, b[4], temp); | |||
c_avx[19] = _mm256_add_epi16(temp, c_avx[19]); | |||
temp = _mm256_mullo_epi16 (a[5], b1); | |||
temp = mul_add (a[6], b0, temp ); | |||
temp = mul_add (a[7], b2, temp ); | |||
temp = mul_add (a7, b3, temp ); | |||
temp = mul_add (a6, b4, temp ); | |||
temp = mul_add (a5, b5, temp ); | |||
temp = mul_add (a4, b6, temp ); | |||
temp = mul_add (a3, b7, temp ); | |||
temp = mul_add (a2, b[7], temp ); | |||
temp = mul_add (a0, b[6], temp ); | |||
temp=mul_add(a1, b[5], temp); | |||
c_avx[20] = _mm256_add_epi16(temp, c_avx[20]); | |||
temp = _mm256_mullo_epi16 (a[6], b1); | |||
temp = mul_add (a[7], b0, temp ); | |||
temp = mul_add (a7, b2, temp ); | |||
temp = mul_add (a6, b3, temp ); | |||
temp = mul_add (a5, b4, temp ); | |||
temp = mul_add (a4, b5, temp ); | |||
temp = mul_add (a3, b6, temp ); | |||
temp = mul_add (a2, b7, temp ); | |||
temp = mul_add (a0, b[7], temp ); | |||
temp=mul_add(a1, b[6], temp); | |||
c_avx[21] = _mm256_add_epi16(temp, c_avx[21]); | |||
temp = _mm256_mullo_epi16 (a[7], b1); | |||
temp = mul_add (a7, b0, temp ); | |||
temp = mul_add (a6, b2, temp ); | |||
temp = mul_add (a5, b3, temp ); | |||
temp = mul_add (a4, b4, temp ); | |||
temp = mul_add (a3, b5, temp ); | |||
temp = mul_add (a2, b6, temp ); | |||
temp = mul_add (a0, b7, temp ); | |||
temp=mul_add(a1, b[7], temp); | |||
c_avx[22] = _mm256_add_epi16(temp, c_avx[22]); | |||
temp = _mm256_mullo_epi16 (a7, b1); | |||
temp = mul_add (a6, b0, temp ); | |||
temp = mul_add (a5, b2, temp ); | |||
temp = mul_add (a4, b3, temp ); | |||
temp = mul_add (a3, b4, temp ); | |||
temp = mul_add (a2, b5, temp ); | |||
temp = mul_add (a0, b6, temp ); | |||
temp=mul_add(a1, b7, temp); | |||
c_avx[23] = _mm256_add_epi16(temp, c_avx[23]); | |||
temp = _mm256_mullo_epi16 (a6, b1); | |||
temp = mul_add (a5, b0, temp ); | |||
temp = mul_add (a4, b2, temp ); | |||
temp = mul_add (a3, b3, temp ); | |||
temp = mul_add (a2, b4, temp ); | |||
temp = mul_add (a0, b5, temp ); | |||
temp=mul_add(a1, b6, temp); | |||
c_avx[24] = _mm256_add_epi16(temp, c_avx[24]); | |||
temp = _mm256_mullo_epi16 (a5, b1); | |||
temp = mul_add (a4, b0, temp ); | |||
temp = mul_add (a3, b2, temp ); | |||
temp = mul_add (a2, b3, temp ); | |||
temp = mul_add (a0, b4, temp ); | |||
temp=mul_add(a1, b5, temp); | |||
c_avx[25] = _mm256_add_epi16(temp, c_avx[25]); | |||
temp = _mm256_mullo_epi16 (a4, b1); | |||
temp = mul_add (a3, b0, temp ); | |||
temp = mul_add (a2, b2, temp ); | |||
temp = mul_add (a0, b3, temp ); | |||
temp=mul_add(a1, b4, temp); | |||
c_avx[26] = _mm256_add_epi16(temp, c_avx[26]); | |||
temp = _mm256_mullo_epi16 (a3, b1); | |||
temp = mul_add (a2, b0, temp ); | |||
temp = mul_add (a0, b2, temp ); | |||
temp=mul_add(a1, b3, temp); | |||
c_avx[27] = _mm256_add_epi16(temp, c_avx[27]); | |||
temp = _mm256_mullo_epi16 (a2, b1); | |||
temp = mul_add (a0, b0, temp ); | |||
temp=mul_add(a1, b2, temp); | |||
c_avx[28] = _mm256_add_epi16(temp, c_avx[28]); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
temp=mul_add(a1, b0, temp); | |||
c_avx[29] = _mm256_add_epi16(temp, c_avx[29]); | |||
c_avx[30] = mul_add(a1, b1, c_avx[30]); | |||
    c_avx[2 * SCM_SIZE - 1] = _mm256_setzero_si256();
} | |||
static void schoolbook_avx_new2(__m256i *a, __m256i *b, __m256i *c_avx) // 8 coefficients of a and b have been prefetched into registers
// the results overwrite c_avx (no accumulation)
{ | |||
__m256i a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7; | |||
__m256i temp; | |||
a0=a[0]; | |||
a1=a[1]; | |||
a2=a[2]; | |||
a3=a[3]; | |||
a4=a[4]; | |||
a5=a[5]; | |||
a6=a[6]; | |||
a7=a[7]; | |||
b0=b[0]; | |||
b1=b[1]; | |||
b2=b[2]; | |||
b3=b[3]; | |||
b4=b[4]; | |||
b5=b[5]; | |||
b6=b[6]; | |||
b7=b[7]; | |||
// New Unrolled first triangle | |||
c_avx[0] = _mm256_mullo_epi16 (a0, b0); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
c_avx[1]=mul_add(a1, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b2); | |||
temp = mul_add(a1, b1, temp); | |||
c_avx[2]= mul_add(a2, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b3); | |||
temp = mul_add(a1, b2, temp); | |||
temp = mul_add(a2, b1, temp); | |||
c_avx[3]= mul_add(a3, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b4); | |||
temp = mul_add(a1, b3, temp); | |||
temp = mul_add(a3, b1, temp); | |||
temp = mul_add(a4, b0, temp); | |||
c_avx[4]= mul_add(a2, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b5); | |||
temp = mul_add(a1, b4 , temp); | |||
temp = mul_add(a2, b3, temp); | |||
temp = mul_add(a3, b2, temp); | |||
temp = mul_add( a4, b1, temp); | |||
c_avx[5] = mul_add(a5, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b6); | |||
temp = mul_add(a1, b5, temp); | |||
temp = mul_add(a5, b1, temp); | |||
temp = mul_add(a6, b0, temp); | |||
temp = mul_add(a2, b4, temp); | |||
temp = mul_add(a3, b3, temp); | |||
c_avx[6] = mul_add(a4, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b7); | |||
temp = mul_add(a1, b6, temp); | |||
temp = mul_add (a6, b1, temp); | |||
temp = mul_add (a7, b0, temp); | |||
temp = mul_add(a2, b5, temp); | |||
temp = mul_add (a3, b4, temp); | |||
temp = mul_add (a4, b3, temp); | |||
c_avx[7] = mul_add (a5, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[8]); | |||
temp = mul_add (a1, b7, temp); | |||
temp = mul_add (a7, b1, temp); | |||
temp = mul_add (a[8], b0, temp); | |||
temp = mul_add (a2, b6,temp); | |||
temp = mul_add(a3, b5, temp); | |||
temp = mul_add (a4, b4,temp); | |||
temp = mul_add (a5, b3, temp); | |||
c_avx[8] = mul_add (a6, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[9]); | |||
temp = mul_add (a1, b[8], temp); | |||
temp = mul_add (a[8], b1, temp); | |||
temp = mul_add (a[9], b0, temp); | |||
temp = mul_add (a2, b7, temp); | |||
temp = mul_add (a3, b6, temp); | |||
temp = mul_add (a4, b5, temp); | |||
temp = mul_add (a5, b4, temp); | |||
temp = mul_add (a6, b3, temp); | |||
c_avx[9] = mul_add (a7, b2, temp); | |||
temp= _mm256_mullo_epi16 (a0, b[10]); | |||
temp = mul_add (a1, b[9], temp); | |||
temp = mul_add (a[9], b1, temp); | |||
temp = mul_add (a[10], b0, temp); | |||
temp = mul_add (a2, b[8], temp); | |||
temp = mul_add (a3, b7, temp); | |||
temp = mul_add (a4, b6, temp); | |||
temp = mul_add (a5, b5, temp); | |||
temp = mul_add (a6, b4, temp); | |||
temp = mul_add (a7, b3, temp); | |||
c_avx[10] = mul_add (a[8], b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[11]); | |||
temp = mul_add (a1, b[10], temp ); | |||
temp = mul_add (a[10], b1, temp ); | |||
temp = mul_add (a[11], b0, temp ); | |||
temp = mul_add (a2, b[9], temp ); | |||
temp = mul_add (a3, b[8], temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a[8], b3, temp ); | |||
c_avx[11] = mul_add (a[9], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[12]); | |||
temp = mul_add (a1, b[11], temp); | |||
temp = mul_add (a[11], b1, temp); | |||
temp = mul_add (a[12], b0, temp); | |||
temp = mul_add (a2, b[10], temp); | |||
temp = mul_add (a3, b[9], temp); | |||
temp = mul_add (a4, b[8], temp); | |||
temp = mul_add (a5, b7, temp); | |||
temp = mul_add (a6, b6, temp); | |||
temp = mul_add (a7, b5, temp); | |||
temp = mul_add (a[8], b4, temp); | |||
temp = mul_add (a[9], b3, temp); | |||
c_avx[12] = mul_add (a[10], b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[13]); | |||
temp = mul_add (a1, b[12], temp ); | |||
temp = mul_add (a[12], b1, temp ); | |||
temp = mul_add (a[13], b0, temp ); | |||
temp = mul_add (a2, b[11], temp ); | |||
temp = mul_add (a3, b[10], temp ); | |||
temp = mul_add (a4, b[9], temp ); | |||
temp = mul_add (a5, b[8], temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a[8], b5, temp ); | |||
temp = mul_add (a[9], b4, temp ); | |||
temp = mul_add (a[10], b3, temp ); | |||
c_avx[13] = mul_add (a[11], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[14]); | |||
temp = mul_add (a1, b[13], temp ); | |||
temp = mul_add (a[13], b1, temp ); | |||
temp = mul_add (a[14], b0, temp ); | |||
temp = mul_add (a2, b[12], temp ); | |||
temp = mul_add (a3, b[11], temp ); | |||
temp = mul_add (a4, b[10], temp ); | |||
temp = mul_add (a5, b[9], temp ); | |||
temp = mul_add (a6, b[8], temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a[8], b6, temp ); | |||
temp = mul_add (a[9], b5, temp ); | |||
temp = mul_add (a[10], b4, temp ); | |||
temp = mul_add (a[11], b3, temp ); | |||
c_avx[14] = mul_add (a[12], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[15]); | |||
temp = mul_add (a1, b[14], temp ); | |||
temp = mul_add (a[14], b1, temp ); | |||
temp = mul_add (a[15], b0, temp ); | |||
temp = mul_add (a2, b[13], temp ); | |||
temp = mul_add (a3, b[12], temp ); | |||
temp = mul_add (a4, b[11], temp ); | |||
temp = mul_add (a5, b[10], temp ); | |||
temp = mul_add (a6, b[9], temp ); | |||
temp = mul_add (a7, b[8], temp ); | |||
temp = mul_add (a[8], b7, temp ); | |||
temp = mul_add (a[9], b6, temp ); | |||
temp = mul_add (a[10], b5, temp ); | |||
temp = mul_add (a[11], b4, temp ); | |||
temp = mul_add (a[12], b3, temp ); | |||
c_avx[15] = mul_add (a[13], b2, temp ); | |||
// unrolled second triangle | |||
a0=a[14]; | |||
a1=a[15]; | |||
a2=a[13]; | |||
a3=a[12]; | |||
a4=a[11]; | |||
a5=a[10]; | |||
a6=a[9]; | |||
a7=a[8]; | |||
b0=b[14]; | |||
b1=b[15]; | |||
b2=b[13]; | |||
b3=b[12]; | |||
b4=b[11]; | |||
b5=b[10]; | |||
b6=b[9]; | |||
b7=b[8]; | |||
temp = _mm256_mullo_epi16 (a[1], b1); | |||
temp = mul_add (a[2], b0, temp ); | |||
temp = mul_add (a[3], b2, temp ); | |||
temp = mul_add (a[4], b3, temp ); | |||
temp = mul_add (a[5], b4, temp ); | |||
temp = mul_add (a[6], b5, temp ); | |||
temp = mul_add (a[7], b6, temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a6, b[7], temp ); | |||
temp = mul_add (a5, b[6], temp ); | |||
temp = mul_add (a4, b[5], temp ); | |||
temp = mul_add (a3, b[4], temp ); | |||
temp = mul_add (a2, b[3], temp ); | |||
temp = mul_add (a0, b[2], temp ); | |||
c_avx[16] = mul_add (a1, b[1], temp ); | |||
temp = _mm256_mullo_epi16 (a[2], b1); | |||
temp = mul_add (a[3], b0, temp ); | |||
temp = mul_add (a[4], b2, temp ); | |||
temp = mul_add (a[5], b3, temp ); | |||
temp = mul_add (a[6], b4, temp ); | |||
temp = mul_add (a[7], b5, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a5, b[7], temp ); | |||
temp = mul_add (a4, b[6], temp ); | |||
temp = mul_add (a3, b[5], temp ); | |||
temp = mul_add (a2, b[4], temp ); | |||
temp = mul_add (a0, b[3], temp ); | |||
c_avx[17] = mul_add (a1, b[2], temp ); | |||
temp = _mm256_mullo_epi16 (a[3], b1); | |||
temp = mul_add (a[4], b0, temp ); | |||
temp = mul_add (a[5], b2, temp ); | |||
temp = mul_add (a[6], b3, temp ); | |||
temp = mul_add (a[7], b4, temp ); | |||
temp = mul_add (a7, b5, temp ); | |||
temp = mul_add (a6, b6, temp ); | |||
temp = mul_add (a5, b7, temp ); | |||
temp = mul_add (a4, b[7], temp ); | |||
temp = mul_add (a3, b[6], temp ); | |||
temp = mul_add (a2, b[5], temp ); | |||
temp = mul_add (a0, b[4], temp ); | |||
c_avx[18] = mul_add (a1, b[3], temp ); | |||
temp = _mm256_mullo_epi16 (a[4], b1); | |||
temp = mul_add (a[5], b0, temp ); | |||
temp = mul_add (a[6], b2, temp ); | |||
temp = mul_add (a[7], b3, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a3, b[7], temp ); | |||
temp = mul_add (a2, b[6], temp ); | |||
temp = mul_add (a0, b[5], temp ); | |||
c_avx[19] = mul_add (a1, b[4], temp ); | |||
temp = _mm256_mullo_epi16 (a[5], b1); | |||
temp = mul_add (a[6], b0, temp ); | |||
temp = mul_add (a[7], b2, temp ); | |||
temp = mul_add (a7, b3, temp ); | |||
temp = mul_add (a6, b4, temp ); | |||
temp = mul_add (a5, b5, temp ); | |||
temp = mul_add (a4, b6, temp ); | |||
temp = mul_add (a3, b7, temp ); | |||
temp = mul_add (a2, b[7], temp ); | |||
temp = mul_add (a0, b[6], temp ); | |||
c_avx[20] = mul_add (a1, b[5], temp ); | |||
temp = _mm256_mullo_epi16 (a[6], b1); | |||
temp = mul_add (a[7], b0, temp ); | |||
temp = mul_add (a7, b2, temp ); | |||
temp = mul_add (a6, b3, temp ); | |||
temp = mul_add (a5, b4, temp ); | |||
temp = mul_add (a4, b5, temp ); | |||
temp = mul_add (a3, b6, temp ); | |||
temp = mul_add (a2, b7, temp ); | |||
temp = mul_add (a0, b[7], temp ); | |||
c_avx[21] = mul_add (a1, b[6], temp ); | |||
temp = _mm256_mullo_epi16 (a[7], b1); | |||
temp = mul_add (a7, b0, temp ); | |||
temp = mul_add (a6, b2, temp ); | |||
temp = mul_add (a5, b3, temp ); | |||
temp = mul_add (a4, b4, temp ); | |||
temp = mul_add (a3, b5, temp ); | |||
temp = mul_add (a2, b6, temp ); | |||
temp = mul_add (a0, b7, temp ); | |||
c_avx[22] = mul_add (a1, b[7], temp ); | |||
temp = _mm256_mullo_epi16 (a7, b1); | |||
temp = mul_add (a6, b0, temp ); | |||
temp = mul_add (a5, b2, temp ); | |||
temp = mul_add (a4, b3, temp ); | |||
temp = mul_add (a3, b4, temp ); | |||
temp = mul_add (a2, b5, temp ); | |||
temp = mul_add (a0, b6, temp ); | |||
c_avx[23] = mul_add (a1, b7, temp ); | |||
temp = _mm256_mullo_epi16 (a6, b1); | |||
temp = mul_add (a5, b0, temp ); | |||
temp = mul_add (a4, b2, temp ); | |||
temp = mul_add (a3, b3, temp ); | |||
temp = mul_add (a2, b4, temp ); | |||
temp = mul_add (a0, b5, temp ); | |||
c_avx[24] = mul_add (a1, b6, temp ); | |||
temp = _mm256_mullo_epi16 (a5, b1); | |||
temp = mul_add (a4, b0, temp ); | |||
temp = mul_add (a3, b2, temp ); | |||
temp = mul_add (a2, b3, temp ); | |||
temp = mul_add (a0, b4, temp ); | |||
c_avx[25] = mul_add (a1, b5, temp ); | |||
temp = _mm256_mullo_epi16 (a4, b1); | |||
temp = mul_add (a3, b0, temp ); | |||
temp = mul_add (a2, b2, temp ); | |||
temp = mul_add (a0, b3, temp ); | |||
c_avx[26] = mul_add (a1, b4, temp ); | |||
temp = _mm256_mullo_epi16 (a3, b1); | |||
temp = mul_add (a2, b0, temp ); | |||
temp = mul_add (a0, b2, temp ); | |||
c_avx[27] = mul_add (a1, b3, temp ); | |||
temp = _mm256_mullo_epi16 (a2, b1); | |||
temp = mul_add (a0, b0, temp ); | |||
c_avx[28] = mul_add (a1, b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
c_avx[29] = mul_add (a1, b0, temp); | |||
c_avx[30] = _mm256_mullo_epi16 (a1, b1); | |||
    c_avx[2 * SCM_SIZE - 1] = _mm256_setzero_si256();
} |
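/*
 * Scalar reference (illustrative) for the unrolled routines above: a plain
 * 16 x 16 schoolbook product over Z_{2^16}[x], i.e. c[k] = sum_{i+j=k} a[i]*b[j]
 * (mod 2^16) for k = 0..30, computed lane-wise by the AVX2 code; entry 31 is
 * zeroed only so that the full 32-entry output buffer is defined.
 */
static void schoolbook_16x16_reference(uint16_t c[32], const uint16_t a[16], const uint16_t b[16]) {
    int i, j;
    for (i = 0; i < 32; i++) {
        c[i] = 0;
    }
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++) {
            c[i + j] = (uint16_t)(c[i + j] + (uint32_t)a[i] * b[j]);
        }
    }
}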
@@ -0,0 +1,35 @@ | |||
#include "verify.h" | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
"CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------*/ | |||
/* returns 0 for equal byte arrays, 1 for unequal byte arrays */
uint8_t PQCLEAN_LIGHTSABER_AVX2_verify(const uint8_t *a, const uint8_t *b, size_t len) { | |||
uint64_t r; | |||
size_t i; | |||
r = 0; | |||
for (i = 0; i < len; i++) { | |||
r |= a[i] ^ b[i]; | |||
} | |||
    r = (~r + 1); // two's complement negation: any nonzero r now has its top bit set
r >>= 63; | |||
return (uint8_t) r; | |||
} | |||
/* b = 1 means copy x into r, b = 0 means leave r unchanged */
void PQCLEAN_LIGHTSABER_AVX2_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b) { | |||
size_t i; | |||
b = -b; | |||
for (i = 0; i < len; i++) { | |||
r[i] ^= b & (x[i] ^ r[i]); | |||
} | |||
} |
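/*
 * Typical usage (illustrative sketch with hypothetical names, not the actual
 * kem.c code): in the CCA-secure decapsulation, verify() yields a 0/1 flag in
 * constant time and cmov() then replaces the pre-key with a fallback secret
 * only when re-encryption failed, without branching on secret data.
 */
static void select_prekey_sketch(uint8_t *prekey, const uint8_t *fallback, size_t keylen,
                                 const uint8_t *ct, const uint8_t *ct_cmp, size_t ctlen) {
    uint8_t fail = PQCLEAN_LIGHTSABER_AVX2_verify(ct, ct_cmp, ctlen); /* 0 if equal, 1 otherwise */
    PQCLEAN_LIGHTSABER_AVX2_cmov(prekey, fallback, keylen, fail);     /* overwrite only on failure */
}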
@@ -0,0 +1,22 @@ | |||
#ifndef VERIFY_H | |||
#define VERIFY_H | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
"CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------*/ | |||
#include <stddef.h> | |||
#include <stdint.h> | |||
/* returns 0 for equal byte arrays, 1 for unequal byte arrays */
uint8_t PQCLEAN_LIGHTSABER_AVX2_verify(const uint8_t *a, const uint8_t *b, size_t len); | |||
/* b = 1 means copy x into r, b = 0 means leave r unchanged */
void PQCLEAN_LIGHTSABER_AVX2_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b); | |||
#endif |
@@ -1,8 +1 @@ | |||
SABER_v1.1 | |||
Public domain | |||
Authors: Jan-Pieter D'Anvers, Angshuman Karmakar, Sujoy Sinha Roy, | |||
Frederik Vercauteren | |||
Public Domain |
@@ -1,10 +1,10 @@ | |||
# This Makefile can be used with GNU Make or BSD Make | |||
LIB=liblightsaber_clean.a | |||
HEADERS=api.h cbd.h poly.h poly_mul.h SABER_indcpa.h SABER_params.h verify.h pack_unpack.h | |||
HEADERS=api.h cbd.h pack_unpack.h poly.h poly_mul.h SABER_indcpa.h SABER_params.h verify.h | |||
OBJECTS=cbd.o kem.o pack_unpack.o poly.o poly_mul.o SABER_indcpa.o verify.o | |||
CFLAGS=-O3 -Wall -Wextra -Wpedantic -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) | |||
CFLAGS=-O3 -Wall -Wextra -Wpedantic -Wvla -Werror -Wredundant-decls -Wmissing-prototypes -std=c99 -I../../../common $(EXTRAFLAGS) | |||
all: $(LIB) | |||
@@ -3,296 +3,90 @@ | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "poly.h" | |||
#include "poly_mul.h" | |||
#include "randombytes.h" | |||
#include <stdint.h> | |||
#include <string.h> | |||
#define h1 (1 << (SABER_EQ - SABER_EP - 1)) | |||
#define h2 ((1 << (SABER_EP - 2)) - (1 << (SABER_EP - SABER_ET - 1)) + (1 << (SABER_EQ - SABER_EP - 1))) | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]) { | |||
uint16_t A[SABER_L][SABER_L][SABER_N]; | |||
uint16_t s[SABER_L][SABER_N]; | |||
uint16_t b[SABER_L][SABER_N] = {0}; | |||
/*----------------------------------------------------------------------------------- | |||
This routine generates a=[Matrix K x K] of 256-coefficient polynomials | |||
uint8_t seed_A[SABER_SEEDBYTES]; | |||
uint8_t seed_s[SABER_NOISE_SEEDBYTES]; | |||
int i, j; | |||
#define h1 4 //2^(EQ-EP-1) | |||
randombytes(seed_A, SABER_SEEDBYTES); | |||
    shake128(seed_A, SABER_SEEDBYTES, seed_A, SABER_SEEDBYTES); // hash the seed so that the system RNG state is not revealed
randombytes(seed_s, SABER_NOISE_SEEDBYTES); | |||
#define h2 ( (1<<(SABER_EP-2)) - (1<<(SABER_EP-SABER_ET-1)) + (1<<(SABER_EQ-SABER_EP-1)) ) | |||
PQCLEAN_LIGHTSABER_CLEAN_GenMatrix(A, seed_A); | |||
PQCLEAN_LIGHTSABER_CLEAN_GenSecret(s, seed_s); | |||
PQCLEAN_LIGHTSABER_CLEAN_MatrixVectorMul(b, (const uint16_t (*)[SABER_L][SABER_N])A, (const uint16_t (*)[SABER_N])s, 1); | |||
static void InnerProd(uint16_t pkcl[SABER_K][SABER_N], uint16_t skpv[SABER_K][SABER_N], uint16_t mod, uint16_t res[SABER_N]); | |||
static void MatrixVectorMul(polyvec *a, uint16_t skpv[SABER_K][SABER_N], uint16_t res[SABER_K][SABER_N], uint16_t mod, int16_t transpose); | |||
static void POL2MSG(const uint16_t *message_dec_unpacked, unsigned char *message_dec); | |||
static void GenMatrix(polyvec *a, const unsigned char *seed) { | |||
unsigned char buf[SABER_K * SABER_K * (13 * SABER_N / 8)]; | |||
uint16_t temp_ar[SABER_N]; | |||
int i, j, k; | |||
uint16_t mod = (SABER_Q - 1); | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POL(buf + (i * SABER_K + j) * (13 * SABER_N / 8), temp_ar); | |||
for (k = 0; k < SABER_N; k++) { | |||
a[i].vec[j].coeffs[k] = (temp_ar[k])& mod ; | |||
} | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_keypair(unsigned char *pk, unsigned char *sk) { | |||
polyvec a[SABER_K]; | |||
uint16_t skpv[SABER_K][SABER_N]; | |||
unsigned char seed[SABER_SEEDBYTES]; | |||
unsigned char noiseseed[SABER_COINBYTES]; | |||
int32_t i, j; | |||
uint16_t mod_q = SABER_Q - 1; | |||
uint16_t res[SABER_K][SABER_N]; | |||
randombytes(seed, SABER_SEEDBYTES); | |||
// for not revealing system RNG state | |||
shake128(seed, SABER_SEEDBYTES, seed, SABER_SEEDBYTES); | |||
randombytes(noiseseed, SABER_COINBYTES); | |||
GenMatrix(a, seed); //sample matrix A | |||
// generate secret from constant-time binomial distribution | |||
PQCLEAN_LIGHTSABER_CLEAN_GenSecret(skpv, noiseseed); | |||
// do the matrix vector multiplication and rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = 0; | |||
b[i][j] = (b[i][j] + h1) >> (SABER_EQ - SABER_EP); | |||
} | |||
} | |||
MatrixVectorMul(a, skpv, res, SABER_Q - 1, 1); | |||
// now rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
// shift right 3 bits | |||
res[i][j] = (res[i][j] + h1) & (mod_q); | |||
res[i][j] = (res[i][j] >> (SABER_EQ - SABER_EP)); | |||
} | |||
} | |||
// unload and pack sk=3 x (256 coefficients of 14 bits) | |||
PQCLEAN_LIGHTSABER_CLEAN_POLVEC2BS(sk, skpv, SABER_Q); | |||
// unload and pack pk=256 bits seed and 3 x (256 coefficients of 11 bits) | |||
// load the public-key coefficients | |||
PQCLEAN_LIGHTSABER_CLEAN_POLVEC2BS(pk, res, SABER_P); | |||
// now load the seedbytes in PK. Easy since seed bytes are kept in byte format. | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { | |||
pk[SABER_POLYVECCOMPRESSEDBYTES + i] = seed[i]; | |||
} | |||
PQCLEAN_LIGHTSABER_CLEAN_POLVECq2BS(sk, (const uint16_t (*)[SABER_N])s); | |||
PQCLEAN_LIGHTSABER_CLEAN_POLVECp2BS(pk, (const uint16_t (*)[SABER_N])b); | |||
memcpy(pk + SABER_POLYVECCOMPRESSEDBYTES, seed_A, sizeof(seed_A)); | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(const unsigned char *message_received, unsigned char *noiseseed, const unsigned char *pk, unsigned char *ciphertext) { | |||
uint32_t i, j, k; | |||
polyvec a[SABER_K]; | |||
unsigned char seed[SABER_SEEDBYTES]; | |||
// public key received by the client
uint16_t pkcl[SABER_K][SABER_N]; | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint16_t message[SABER_KEYBYTES * 8]; | |||
uint16_t res[SABER_K][SABER_N]; | |||
uint16_t mod_p = SABER_P - 1; | |||
uint16_t mod_q = SABER_Q - 1; | |||
uint16_t vprime[SABER_N]; | |||
unsigned char msk_c[SABER_SCALEBYTES_KEM]; | |||
// extract the seedbytes from Public Key. | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { | |||
seed[i] = pk[ SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
GenMatrix(a, seed); | |||
// generate secret from constant-time binomial distribution | |||
PQCLEAN_LIGHTSABER_CLEAN_GenSecret(skpv1, noiseseed); | |||
// matrix-vector multiplication and rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = 0; | |||
} | |||
} | |||
MatrixVectorMul(a, skpv1, res, SABER_Q - 1, 0); | |||
// now rounding | |||
//shift right 3 bits | |||
for (i = 0; i < SABER_K; i++) { | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t seed_sp[SABER_NOISE_SEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]) { | |||
uint16_t A[SABER_L][SABER_L][SABER_N]; | |||
uint16_t sp[SABER_L][SABER_N]; | |||
uint16_t bp[SABER_L][SABER_N] = {0}; | |||
uint16_t vp[SABER_N] = {0}; | |||
uint16_t mp[SABER_N]; | |||
uint16_t b[SABER_L][SABER_N]; | |||
int i, j; | |||
const uint8_t *seed_A = pk + SABER_POLYVECCOMPRESSEDBYTES; | |||
PQCLEAN_LIGHTSABER_CLEAN_GenMatrix(A, seed_A); | |||
PQCLEAN_LIGHTSABER_CLEAN_GenSecret(sp, seed_sp); | |||
PQCLEAN_LIGHTSABER_CLEAN_MatrixVectorMul(bp, (const uint16_t (*)[SABER_L][SABER_N])A, (const uint16_t (*)[SABER_N])sp, 0); | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = ( res[i][j] + h1 ) & mod_q; | |||
res[i][j] = (res[i][j] >> (SABER_EQ - SABER_EP) ); | |||
bp[i][j] = (bp[i][j] + h1) >> (SABER_EQ - SABER_EP); | |||
} | |||
} | |||
PQCLEAN_LIGHTSABER_CLEAN_POLVEC2BS(ciphertext, res, SABER_P); | |||
PQCLEAN_LIGHTSABER_CLEAN_POLVECp2BS(ciphertext, (const uint16_t (*)[SABER_N])bp); | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECp(b, pk); | |||
PQCLEAN_LIGHTSABER_CLEAN_InnerProd(vp, (const uint16_t (*)[SABER_N])b, (const uint16_t (*)[SABER_N])sp); | |||
// ************client matrix-vector multiplication ends************ | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLmsg(mp, m); | |||
// now calculate the v' | |||
// unpack the public_key | |||
// pkcl is the b in the protocol | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVEC(pk, pkcl, SABER_P); | |||
for (i = 0; i < SABER_N; i++) { | |||
vprime[i] = 0; | |||
} | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
skpv1[i][j] = skpv1[i][j] & (mod_p); | |||
} | |||
for (j = 0; j < SABER_N; j++) { | |||
vp[j] = (vp[j] - (mp[j] << (SABER_EP - 1)) + h1) >> (SABER_EP - SABER_ET); | |||
} | |||
// vector-vector scalar multiplication with mod p | |||
InnerProd(pkcl, skpv1, mod_p, vprime); | |||
// addition of h1 to vprime | |||
for (i = 0; i < SABER_N; i++) { | |||
vprime[i] = vprime[i] + h1; | |||
} | |||
// unpack message_received; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
message[8 * j + i] = ((message_received[j] >> i) & 0x01); | |||
} | |||
} | |||
// message encoding | |||
for (i = 0; i < SABER_N; i++) { | |||
message[i] = (message[i] << (SABER_EP - 1)); | |||
} | |||
for (k = 0; k < SABER_N; k++) { | |||
vprime[k] = ( (vprime[k] - message[k]) & (mod_p) ) >> (SABER_EP - SABER_ET); | |||
} | |||
PQCLEAN_LIGHTSABER_CLEAN_pack_3bit(msk_c, vprime); | |||
for (j = 0; j < SABER_SCALEBYTES_KEM; j++) { | |||
ciphertext[SABER_POLYVECCOMPRESSEDBYTES + j] = msk_c[j]; | |||
} | |||
PQCLEAN_LIGHTSABER_CLEAN_POLT2BS(ciphertext + SABER_POLYVECCOMPRESSEDBYTES, vp); | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]) { | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_dec(const unsigned char *sk, const unsigned char *ciphertext, unsigned char message_dec[]) { | |||
uint32_t i, j; | |||
// secret key of the server | |||
uint16_t sksv[SABER_K][SABER_N]; | |||
uint16_t pksv[SABER_K][SABER_N]; | |||
uint8_t scale_ar[SABER_SCALEBYTES_KEM]; | |||
uint16_t mod_p = SABER_P - 1; | |||
uint16_t v[SABER_N]; | |||
uint16_t op[SABER_N]; | |||
// sksv is the secret-key | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVEC(sk, sksv, SABER_Q); | |||
// pksv is the ciphertext | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVEC(ciphertext, pksv, SABER_P); | |||
// vector-vector scalar multiplication with mod p | |||
for (i = 0; i < SABER_N; i++) { | |||
v[i] = 0; | |||
} | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
sksv[i][j] = sksv[i][j] & (mod_p); | |||
} | |||
} | |||
InnerProd(pksv, sksv, mod_p, v); | |||
//Extraction | |||
for (i = 0; i < SABER_SCALEBYTES_KEM; i++) { | |||
scale_ar[i] = ciphertext[SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
uint16_t s[SABER_L][SABER_N]; | |||
uint16_t b[SABER_L][SABER_N]; | |||
uint16_t v[SABER_N] = {0}; | |||
uint16_t cm[SABER_N]; | |||
int i; | |||
PQCLEAN_LIGHTSABER_CLEAN_un_pack3bit(scale_ar, op); | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECq(s, sk); | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECp(b, ciphertext); | |||
PQCLEAN_LIGHTSABER_CLEAN_InnerProd(v, (const uint16_t (*)[SABER_N])b, (const uint16_t (*)[SABER_N])s); | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLT(cm, ciphertext + SABER_POLYVECCOMPRESSEDBYTES); | |||
//addition of h1 | |||
for (i = 0; i < SABER_N; i++) { | |||
v[i] = ( ( v[i] + h2 - (op[i] << (SABER_EP - SABER_ET)) ) & (mod_p) ) >> (SABER_EP - 1); | |||
v[i] = (v[i] + h2 - (cm[i] << (SABER_EP - SABER_ET))) >> (SABER_EP - 1); | |||
} | |||
// pack decrypted message | |||
POL2MSG(v, message_dec); | |||
} | |||
static void MatrixVectorMul(polyvec *a, uint16_t skpv[SABER_K][SABER_N], uint16_t res[SABER_K][SABER_N], uint16_t mod, int16_t transpose) { | |||
uint16_t acc[SABER_N]; | |||
int32_t i, j, k; | |||
if (transpose == 1) { | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_pol_mul((uint16_t *)&a[j].vec[i], skpv[j], acc, SABER_Q, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[i][k] = res[i][k] + acc[k]; | |||
//reduction mod p | |||
res[i][k] = (res[i][k] & mod); | |||
//clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
} | |||
} else { | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_pol_mul((uint16_t *)&a[i].vec[j], skpv[j], acc, SABER_Q, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[i][k] = res[i][k] + acc[k]; | |||
// reduction | |||
res[i][k] = res[i][k] & mod; | |||
// clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
} | |||
} | |||
} | |||
static void POL2MSG(const uint16_t *message_dec_unpacked, unsigned char *message_dec) { | |||
int32_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
message_dec[j] = 0; | |||
for (i = 0; i < 8; i++) { | |||
message_dec[j] = message_dec[j] | (uint8_t) (message_dec_unpacked[j * 8 + i] << i); | |||
} | |||
} | |||
} | |||
static void InnerProd(uint16_t pkcl[SABER_K][SABER_N], uint16_t skpv[SABER_K][SABER_N], uint16_t mod, uint16_t res[SABER_N]) { | |||
uint32_t j, k; | |||
uint16_t acc[SABER_N]; | |||
// vector-vector scalar multiplication with mod p | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_pol_mul(pkcl[j], skpv[j], acc, SABER_P, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[k] = res[k] + acc[k]; | |||
// reduction | |||
res[k] = res[k] & mod; | |||
// clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
PQCLEAN_LIGHTSABER_CLEAN_POLmsg2BS(m, v); | |||
} |
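For the LightSaber parameters above (SABER_EQ = 13, SABER_EP = 10, SABER_ET = 3) the two rounding constants evaluate to h1 = 2^2 = 4 and h2 = 256 - 64 + 4 = 196. A minimal standalone check of those values and of the mod-q to mod-p rounding used in keypair/enc (illustrative only, not part of the patch; the constants are re-derived locally):

#include <assert.h>
#include <stdint.h>

#define EQ 13
#define EP 10
#define ET 3

int main(void) {
    uint16_t h1 = 1 << (EQ - EP - 1);
    uint16_t h2 = (1 << (EP - 2)) - (1 << (EP - ET - 1)) + (1 << (EQ - EP - 1));
    uint16_t x = 5000;                       /* any 13-bit coefficient */
    assert(h1 == 4);
    assert(h2 == 196);
    assert(((x + h1) >> (EQ - EP)) == 625);  /* (x + h1) >> 3 rounds x/8 to the nearest integer */
    return 0;
}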
@@ -1,9 +1,13 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(const unsigned char *message, unsigned char *noiseseed, const unsigned char *pk, unsigned char *ciphertext); | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_dec(const unsigned char *sk, const unsigned char *ciphertext, unsigned char *message_dec); | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]); | |||
#endif | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t seed_sp[SABER_NOISE_SEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]); | |||
#endif |
@@ -1,50 +1,39 @@ | |||
#ifndef PARAMS_H | |||
#define PARAMS_H | |||
#include "api.h" | |||
#define SABER_K 2 | |||
/* Change this for different security strengths */ | |||
/* Don't change anything below this line */ | |||
#define SABER_L 2 | |||
#define SABER_MU 10 | |||
#define SABER_ET 3 | |||
#define SABER_EQ 13 | |||
#define SABER_EP 10 | |||
#define SABER_N 256 | |||
#define SABER_Q 8192 | |||
#define SABER_P 1024 | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISESEEDBYTES 32 | |||
#define SABER_COINBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYBYTES 416 //13*256/8 | |||
#define SABER_POLYVECBYTES (SABER_K * SABER_POLYBYTES) | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISE_SEEDBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_K * 320) //10*256/8 NOTE : changed till here due to parameter adaptation | |||
#define SABER_POLYCOINBYTES (SABER_MU * SABER_N / 8) | |||
#define SABER_CIPHERTEXTBYTES (SABER_POLYVECCOMPRESSEDBYTES) | |||
#define SABER_POLYBYTES (SABER_EQ * SABER_N / 8) | |||
#define SABER_POLYVECBYTES (SABER_L * SABER_POLYBYTES) | |||
#define SABER_SCALEBYTES (SABER_DELTA*SABER_N/8) | |||
#define SABER_POLYCOMPRESSEDBYTES (SABER_EP * SABER_N / 8) | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_L * SABER_POLYCOMPRESSEDBYTES) | |||
#define SABER_SCALEBYTES_KEM ((SABER_ET)*SABER_N/8) | |||
#define SABER_SCALEBYTES_KEM (SABER_ET * SABER_N / 8) | |||
#define SABER_INDCPA_PUBLICKEYBYTES (SABER_POLYVECCOMPRESSEDBYTES + SABER_SEEDBYTES) | |||
#define SABER_INDCPA_SECRETKEYBYTES (SABER_POLYVECBYTES) | |||
#define SABER_PUBLICKEYBYTES (SABER_INDCPA_PUBLICKEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) /* Second part is for Targhi-Unruh */ | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) | |||
#endif | |||
@@ -1,14 +1,18 @@ | |||
#ifndef PQCLEAN_LIGHTSABER_CLEAN_API_H | |||
#define PQCLEAN_LIGHTSABER_CLEAN_API_H | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_ALGNAME "LightSaber" | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_SECRETKEYBYTES 1568 | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_PUBLICKEYBYTES (2*320+32) | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_BYTES 32 | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_CIPHERTEXTBYTES 736 | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_PUBLICKEYBYTES 672 | |||
#define PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_SECRETKEYBYTES 1568 | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk); | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk); | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_dec(unsigned char *k, const unsigned char *ct, const unsigned char *sk); | |||
#endif /* api_h */ |
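These api.h constants follow directly from the SABER_params.h values in the previous hunk: with SABER_L = 2, SABER_N = 256, SABER_EQ = 13, SABER_EP = 10 and SABER_ET = 3, the public key is 2*320 + 32 = 672 bytes, the ciphertext 2*320 + 96 = 736 bytes, and the secret key 832 + 672 + 32 + 32 = 1568 bytes. A quick standalone arithmetic check (illustrative only, not part of the patch):

#include <assert.h>

int main(void) {
    const int L = 2, N = 256, EQ = 13, EP = 10, ET = 3;
    const int polyvecbytes = L * (EQ * N / 8);              /* 2 * 416 = 832     */
    const int polyveccompressedbytes = L * (EP * N / 8);    /* 2 * 320 = 640     */
    const int scalebytes_kem = ET * N / 8;                  /* 96                */
    const int indcpa_pk = polyveccompressedbytes + 32;      /* + SABER_SEEDBYTES */

    assert(indcpa_pk == 672);                               /* CRYPTO_PUBLICKEYBYTES  */
    assert(polyveccompressedbytes + scalebytes_kem == 736); /* CRYPTO_CIPHERTEXTBYTES */
    assert(polyvecbytes + indcpa_pk + 32 + 32 == 1568);     /* CRYPTO_SECRETKEYBYTES  */
    return 0;
}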
@@ -1,3 +1,7 @@ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
@@ -6,12 +10,8 @@ by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
static uint64_t load_littleendian(const uint8_t *x, int bytes) { | |||
int i; | |||
uint64_t r = x[0]; | |||
for (i = 1; i < bytes; i++) { | |||
@@ -20,10 +20,7 @@ static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
return r; | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_cbd(uint16_t *r, const unsigned char *buf) { | |||
uint16_t Qmod_minus1 = SABER_Q - 1; | |||
void PQCLEAN_LIGHTSABER_CLEAN_cbd(uint16_t s[SABER_N], const uint8_t buf[SABER_POLYCOINBYTES]) { | |||
uint64_t t, d, a[4], b[4]; | |||
int i, j; | |||
@@ -34,8 +31,8 @@ void PQCLEAN_LIGHTSABER_CLEAN_cbd(uint16_t *r, const unsigned char *buf) { | |||
d += (t >> j) & 0x0842108421UL; | |||
} | |||
a[0] = d & 0x1f; | |||
b[0] = (d >> 5) & 0x1f; | |||
a[0] = d & 0x1f; | |||
b[0] = (d >> 5) & 0x1f; | |||
a[1] = (d >> 10) & 0x1f; | |||
b[1] = (d >> 15) & 0x1f; | |||
a[2] = (d >> 20) & 0x1f; | |||
@@ -43,9 +40,9 @@ void PQCLEAN_LIGHTSABER_CLEAN_cbd(uint16_t *r, const unsigned char *buf) { | |||
a[3] = (d >> 30) & 0x1f; | |||
b[3] = (d >> 35); | |||
r[4 * i + 0] = (uint16_t)(a[0] - b[0]) & Qmod_minus1; | |||
r[4 * i + 1] = (uint16_t)(a[1] - b[1]) & Qmod_minus1; | |||
r[4 * i + 2] = (uint16_t)(a[2] - b[2]) & Qmod_minus1; | |||
r[4 * i + 3] = (uint16_t)(a[3] - b[3]) & Qmod_minus1; | |||
s[4 * i + 0] = (uint16_t)(a[0] - b[0]); | |||
s[4 * i + 1] = (uint16_t)(a[1] - b[1]); | |||
s[4 * i + 2] = (uint16_t)(a[2] - b[2]); | |||
s[4 * i + 3] = (uint16_t)(a[3] - b[3]); | |||
} | |||
} |
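For reference, the bit-sliced sampler above is the mu = 10 centered binomial distribution: each block of 5 bytes yields 4 coefficients, and each coefficient is the Hamming weight of 5 bits minus the Hamming weight of the next 5 bits. An equivalent straightforward version (illustrative only; cbd_reference and bit are not functions from this patch):

#include <stdint.h>

static int bit(const uint8_t *buf, int k) {
    return (buf[k >> 3] >> (k & 7)) & 1;     /* k-th bit of the byte stream, little-endian within each byte */
}

void cbd_reference(uint16_t s[256], const uint8_t buf[320]) {   /* 320 = SABER_POLYCOINBYTES */
    int n, j;
    for (n = 0; n < 256; n++) {
        int a = 0, b = 0;
        for (j = 0; j < 5; j++) {
            a += bit(buf, 10 * n + j);       /* weight of the first 5 bits  */
            b += bit(buf, 10 * n + 5 + j);   /* weight of the second 5 bits */
        }
        s[n] = (uint16_t)(a - b);            /* value in [-5, 5], stored mod 2^16 */
    }
}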
@@ -1,6 +1,5 @@ | |||
#ifndef CBD_H | |||
#define CBD_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
@@ -8,10 +7,10 @@ of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "poly.h" | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_LIGHTSABER_CLEAN_cbd(uint16_t *r, const unsigned char *buf); | |||
void PQCLEAN_LIGHTSABER_CLEAN_cbd(uint16_t s[SABER_N], const uint8_t buf[SABER_POLYCOINBYTES]); | |||
#endif |
@@ -1,5 +1,6 @@ | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "fips202.h" | |||
#include "randombytes.h" | |||
#include "verify.h" | |||
@@ -7,90 +8,71 @@ | |||
#include <stdio.h> | |||
#include <string.h> | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_keypair(unsigned char *pk, unsigned char *sk) { | |||
int i; | |||
// sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_keypair(pk, sk); | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
int i; | |||
// sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_SECRETKEYBYTES-1] <-- pk | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_keypair(pk, sk); // sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
for (i = 0; i < SABER_INDCPA_PUBLICKEYBYTES; i++) { | |||
sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; | |||
sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; // sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_PUBLICKEYBYTES-1] <-- pk
} | |||
// Then hash(pk) is appended. | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); // Then hash(pk) is appended. | |||
// Remaining part of sk contains a pseudo-random number. | |||
// This is output when check in crypto_kem_dec() fails. | |||
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES ); | |||
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES); // Remaining part of sk contains a pseudo-random number. | |||
// This is output when check in PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_dec() fails. | |||
return (0); | |||
} | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk) { | |||
// Will contain key, coins | |||
unsigned char kr[64]; | |||
unsigned char buf[64]; | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_enc(uint8_t *c, uint8_t *k, const uint8_t *pk) { | |||
randombytes(buf, 32); | |||
uint8_t kr[64]; // Will contain key, coins | |||
uint8_t buf[64]; | |||
// BUF[0:31] <-- random message (will be used as the key for the client) Note: hash does not release system RNG output
sha3_256(buf, buf, 32); | |||
randombytes(buf, 32); | |||
// BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); | |||
sha3_256(buf, buf, 32); // BUF[0:31] <-- random message (will be used as the key for the client) Note: hash does not release system RNG output
// kr[0:63] <-- Hash(buf[0:63]); | |||
sha3_512(kr, buf, 64); | |||
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); // BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_512(kr, buf, 64); // kr[0:63] <-- Hash(buf[0:63]); | |||
// K^ <-- kr[0:31] | |||
// noiseseed (r) <-- kr[32:63]; | |||
// buf[0:31] contains message; kr[32:63] contains randomness r; | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(buf, kr + 32, pk, ct); | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(c, buf, kr + 32, pk); // buf[0:31] contains message; kr[32:63] contains randomness r; | |||
sha3_256(kr + 32, ct, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); | |||
// hash concatenation of pre-k and h(c) to k | |||
sha3_256(ss, kr, 64); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk) { | |||
int PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_dec(uint8_t *k, const uint8_t *c, const uint8_t *sk) { | |||
int i; | |||
unsigned char fail; | |||
unsigned char cmp[SABER_BYTES_CCA_DEC]; | |||
unsigned char buf[64]; | |||
// Will contain key, coins | |||
unsigned char kr[64]; | |||
const unsigned char *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
// buf[0:31] <-- message | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_dec(sk, ct, buf); | |||
uint8_t fail; | |||
uint8_t cmp[SABER_BYTES_CCA_DEC]; | |||
uint8_t buf[64]; | |||
uint8_t kr[64]; // Will contain key, coins | |||
const uint8_t *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_dec(buf, sk, c); // buf[0:31] <-- message | |||
// Multitarget countermeasure for coins + contributory KEM | |||
// Save hash by storing h(pk) in sk | |||
for (i = 0; i < 32; i++) { | |||
for (i = 0; i < 32; i++) { // Save hash by storing h(pk) in sk | |||
buf[32 + i] = sk[SABER_SECRETKEYBYTES - 64 + i]; | |||
} | |||
sha3_512(kr, buf, 64); | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(buf, kr + 32, pk, cmp); | |||
PQCLEAN_LIGHTSABER_CLEAN_indcpa_kem_enc(cmp, buf, kr + 32, pk); | |||
fail = PQCLEAN_LIGHTSABER_CLEAN_verify(ct, cmp, SABER_BYTES_CCA_DEC); | |||
fail = PQCLEAN_LIGHTSABER_CLEAN_verify(c, cmp, SABER_BYTES_CCA_DEC); | |||
// overwrite coins in kr with h(c) | |||
sha3_256(kr + 32, ct, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); // overwrite coins in kr with h(c) | |||
PQCLEAN_LIGHTSABER_CLEAN_cmov(kr, sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES, fail); | |||
// hash concatenation of pre-k and h(c) to k | |||
sha3_256(ss, kr, 64); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} |
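Taken together, the three functions above give the usual CCA-secure KEM flow: the server generates (pk, sk), the client encapsulates against pk, and the server decapsulates the ciphertext; on success both sides hold the same 32-byte key, while a tampered ciphertext makes decapsulation silently return the pseudo-random key stored at the end of sk. A minimal usage sketch (illustrative only; kem_roundtrip is not part of the patch):

#include <assert.h>
#include <string.h>
#include "api.h"

void kem_roundtrip(void) {
    unsigned char pk[PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_PUBLICKEYBYTES];
    unsigned char sk[PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_SECRETKEYBYTES];
    unsigned char ct[PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_CIPHERTEXTBYTES];
    unsigned char k1[PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_BYTES];
    unsigned char k2[PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_BYTES];

    PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_keypair(pk, sk);   /* server: generate key pair       */
    PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_enc(ct, k1, pk);   /* client: encapsulate against pk  */
    PQCLEAN_LIGHTSABER_CLEAN_crypto_kem_dec(k2, ct, sk);   /* server: decapsulate ciphertext  */

    assert(memcmp(k1, k2, PQCLEAN_LIGHTSABER_CLEAN_CRYPTO_BYTES) == 0);
}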
@@ -1,254 +1,140 @@ | |||
#include "api.h" | |||
#include "pack_unpack.h" | |||
#include <string.h> | |||
void PQCLEAN_LIGHTSABER_CLEAN_pack_3bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLT2BS(uint8_t bytes[SABER_SCALEBYTES_KEM], const uint16_t data[SABER_N]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x7) | | |||
((data[offset_data + 1] & 0x7) << 3) | | |||
((data[offset_data + 2] & 0x3) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 2] >> 2 ) & 0x01) | | |||
((data[offset_data + 3] & 0x7) << 1) | | |||
((data[offset_data + 4] & 0x7) << 4) | | |||
(((data[offset_data + 5]) & 0x01) << 7); | |||
bytes[offset_byte + 2] = ((data[offset_data + 5] >> 1 ) & 0x03) | | |||
((data[offset_data + 6] & 0x7) << 2) | | |||
((data[offset_data + 7] & 0x7) << 5); | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x7) | ((data[offset_data + 1] & 0x7) << 3) | ((data[offset_data + 2] & 0x3) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 2] >> 2) & 0x01) | ((data[offset_data + 3] & 0x7) << 1) | ((data[offset_data + 4] & 0x7) << 4) | (((data[offset_data + 5]) & 0x01) << 7); | |||
bytes[offset_byte + 2] = ((data[offset_data + 5] >> 1) & 0x03) | ((data[offset_data + 6] & 0x7) << 2) | ((data[offset_data + 7] & 0x7) << 5); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_un_pack3bit(const uint8_t *bytes, uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLT(uint16_t data[SABER_N], const uint8_t bytes[SABER_SCALEBYTES_KEM]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0]) & 0x07; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0]) >> 3 ) & 0x07; | |||
data[offset_data + 2] = (((bytes[offset_byte + 0]) >> 6 ) & 0x03) | | |||
(((bytes[offset_byte + 1]) & 0x01) << 2); | |||
data[offset_data + 3] = ((bytes[offset_byte + 1]) >> 1 ) & 0x07; | |||
data[offset_data + 4] = ((bytes[offset_byte + 1]) >> 4 ) & 0x07; | |||
data[offset_data + 5] = (((bytes[offset_byte + 1]) >> 7 ) & 0x01) | | |||
(((bytes[offset_byte + 2]) & 0x03) << 1); | |||
data[offset_data + 1] = ((bytes[offset_byte + 0]) >> 3) & 0x07; | |||
data[offset_data + 2] = (((bytes[offset_byte + 0]) >> 6) & 0x03) | (((bytes[offset_byte + 1]) & 0x01) << 2); | |||
data[offset_data + 3] = ((bytes[offset_byte + 1]) >> 1) & 0x07; | |||
data[offset_data + 4] = ((bytes[offset_byte + 1]) >> 4) & 0x07; | |||
data[offset_data + 5] = (((bytes[offset_byte + 1]) >> 7) & 0x01) | (((bytes[offset_byte + 2]) & 0x03) << 1); | |||
data[offset_data + 6] = ((bytes[offset_byte + 2] >> 2) & 0x07); | |||
data[offset_data + 7] = ((bytes[offset_byte + 2] >> 5) & 0x07); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_pack_4bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
bytes[j] = (data[offset_data] & 0x0f) | | |||
((data[offset_data + 1] & 0x0f) << 4); | |||
static void POLq2BS(uint8_t bytes[SABER_POLYBYTES], const uint16_t data[SABER_N]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[offset_data + 0] >> 8) & 0x1f) | ((data[offset_data + 1] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ((data[offset_data + 1] >> 3) & 0xff); | |||
bytes[offset_byte + 3] = ((data[offset_data + 1] >> 11) & 0x03) | ((data[offset_data + 2] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ((data[offset_data + 2] >> 6) & 0x7f) | ((data[offset_data + 3] & 0x01) << 7); | |||
bytes[offset_byte + 5] = ((data[offset_data + 3] >> 1) & 0xff); | |||
bytes[offset_byte + 6] = ((data[offset_data + 3] >> 9) & 0x0f) | ((data[offset_data + 4] & 0x0f) << 4); | |||
bytes[offset_byte + 7] = ((data[offset_data + 4] >> 4) & 0xff); | |||
bytes[offset_byte + 8] = ((data[offset_data + 4] >> 12) & 0x01) | ((data[offset_data + 5] & 0x7f) << 1); | |||
bytes[offset_byte + 9] = ((data[offset_data + 5] >> 7) & 0x3f) | ((data[offset_data + 6] & 0x03) << 6); | |||
bytes[offset_byte + 10] = ((data[offset_data + 6] >> 2) & 0xff); | |||
bytes[offset_byte + 11] = ((data[offset_data + 6] >> 10) & 0x07) | ((data[offset_data + 7] & 0x1f) << 3); | |||
bytes[offset_byte + 12] = ((data[offset_data + 7] >> 5) & 0xff); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_un_pack4bit(const unsigned char *bytes, uint16_t *ar) { | |||
uint32_t j; | |||
uint32_t offset_data; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
ar[offset_data] = bytes[j] & 0x0f; | |||
ar[offset_data + 1] = (bytes[j] >> 4) & 0x0f; | |||
static void BS2POLq(uint16_t data[SABER_N], const uint8_t bytes[SABER_POLYBYTES]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = (bytes[offset_byte + 1] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = (bytes[offset_byte + 3] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = (bytes[offset_byte + 4] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = (bytes[offset_byte + 6] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = (bytes[offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = (bytes[offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = (bytes[offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_pack_6bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
static void POLp2BS(uint8_t bytes[SABER_POLYCOMPRESSEDBYTES], const uint16_t data[SABER_N]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_byte = 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x3f) | | |||
((data[offset_data + 1] & 0x03) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 1] >> 2) & 0x0f) | | |||
((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 2] = ((data[offset_data + 2] >> 4) & 0x03) | | |||
((data[offset_data + 3] & 0x3f) << 2); | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[offset_data + 0] >> 8) & 0x03) | ((data[offset_data + 1] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ((data[offset_data + 1] >> 6) & 0x0f) | ((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ((data[offset_data + 2] >> 4) & 0x3f) | ((data[offset_data + 3] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ((data[offset_data + 3] >> 2) & 0xff); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_un_pack6bit(const unsigned char *bytes, uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
static void BS2POLp(uint16_t data[SABER_N], const uint8_t bytes[SABER_POLYCOMPRESSEDBYTES]) { | |||
size_t j, offset_byte, offset_data; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_byte = 5 * j; | |||
offset_data = 4 * j; | |||
data[offset_data + 0] = bytes[offset_byte + 0] & 0x3f; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0] >> 6) & 0x03) | | |||
((bytes[offset_byte + 1] & 0x0f) << 2); | |||
data[offset_data + 2] = ((bytes[offset_byte + 1] & 0xff) >> 4) | | |||
((bytes[offset_byte + 2] & 0x03) << 4); | |||
data[offset_data + 3] = ((bytes[offset_byte + 2] & 0xff) >> 2); | |||
data[offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | ((bytes[offset_byte + 1] & 0x03) << 8); | |||
data[offset_data + 1] = ((bytes[offset_byte + 1] >> 2) & (0x3f)) | ((bytes[offset_byte + 2] & 0x0f) << 6); | |||
data[offset_data + 2] = ((bytes[offset_byte + 2] >> 4) & (0x0f)) | ((bytes[offset_byte + 3] & 0x3f) << 4); | |||
data[offset_data + 3] = ((bytes[offset_byte + 3] >> 6) & (0x03)) | ((bytes[offset_byte + 4] & 0xff) << 2); | |||
} | |||
} | |||
static void POLVECp2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[i][offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[i][offset_data + 0] >> 8) & 0x03) | | |||
((data[i][offset_data + 1] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ((data[i][offset_data + 1] >> 6) & 0x0f) | | |||
((data[i][offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ((data[i][offset_data + 2] >> 4) & 0x3f) | | |||
((data[i][offset_data + 3] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ((data[i][offset_data + 3] >> 2) & 0xff); | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLVECq2BS(uint8_t bytes[SABER_POLYVECBYTES], const uint16_t data[SABER_L][SABER_N]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
POLq2BS(bytes + i * SABER_POLYBYTES, data[i]); | |||
} | |||
} | |||
static void BS2POLVECp(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | | |||
((bytes[offset_byte + 1] & 0x03) << 8); | |||
data[i][offset_data + 1] = ((bytes[offset_byte + 1] >> 2) & (0x3f)) | | |||
((bytes[offset_byte + 2] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ((bytes[offset_byte + 2] >> 4) & (0x0f)) | | |||
((bytes[offset_byte + 3] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ((bytes[offset_byte + 3] >> 6) & (0x03)) | | |||
((bytes[offset_byte + 4] & 0xff) << 2); | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECq(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECBYTES]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
BS2POLq(data[i], bytes + i * SABER_POLYBYTES); | |||
} | |||
} | |||
static void POLVECq2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[i][offset_data + 0] & (0xff)); | |||
bytes[offset_byte + 1] = ((data[i][offset_data + 0] >> 8) & 0x1f) | | |||
((data[i][offset_data + 1] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ((data[i][offset_data + 1] >> 3) & 0xff); | |||
bytes[offset_byte + 3] = ((data[i][offset_data + 1] >> 11) & 0x03) | | |||
((data[i][offset_data + 2] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ((data[i][offset_data + 2] >> 6) & 0x7f) | | |||
((data[i][offset_data + 3] & 0x01) << 7); | |||
bytes[offset_byte + 5] = ((data[i][offset_data + 3] >> 1) & 0xff); | |||
bytes[offset_byte + 6] = ((data[i][offset_data + 3] >> 9) & 0x0f) | | |||
((data[i][offset_data + 4] & 0x0f) << 4); | |||
bytes[offset_byte + 7] = ((data[i][offset_data + 4] >> 4) & 0xff); | |||
bytes[offset_byte + 8] = ((data[i][offset_data + 4] >> 12) & 0x01) | | |||
((data[i][offset_data + 5] & 0x7f) << 1); | |||
bytes[offset_byte + 9] = ((data[i][offset_data + 5] >> 7) & 0x3f) | | |||
((data[i][offset_data + 6] & 0x03) << 6); | |||
bytes[offset_byte + 10] = ((data[i][offset_data + 6] >> 2) & 0xff); | |||
bytes[offset_byte + 11] = ((data[i][offset_data + 6] >> 10) & 0x07) | | |||
((data[i][offset_data + 7] & 0x1f) << 3); | |||
bytes[offset_byte + 12] = ((data[i][offset_data + 7] >> 5) & 0xff); | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLVECp2BS(uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES], const uint16_t data[SABER_L][SABER_N]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
POLp2BS(bytes + i * (SABER_EP * SABER_N / 8), data[i]); | |||
} | |||
} | |||
static void BS2POLVECq(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data, offset_byte, offset_byte1; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | | |||
((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = (bytes[offset_byte + 1] >> 5 & (0x07)) | | |||
((bytes[offset_byte + 2] & 0xff) << 3) | | |||
((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = (bytes[offset_byte + 3] >> 2 & (0x3f)) | | |||
((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = (bytes[offset_byte + 4] >> 7 & (0x01)) | | |||
((bytes[offset_byte + 5] & 0xff) << 1) | | |||
((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = (bytes[offset_byte + 6] >> 4 & (0x0f)) | | |||
((bytes[offset_byte + 7] & 0xff) << 4) | | |||
((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = (bytes[offset_byte + 8] >> 1 & (0x7f)) | | |||
((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = (bytes[offset_byte + 9] >> 6 & (0x03)) | | |||
((bytes[offset_byte + 10] & 0xff) << 2) | | |||
((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = (bytes[offset_byte + 11] >> 3 & (0x1f)) | | |||
((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECp(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES]) { | |||
size_t i; | |||
for (i = 0; i < SABER_L; i++) { | |||
BS2POLp(data[i], bytes + i * (SABER_EP * SABER_N / 8)); | |||
} | |||
} | |||
//only BS2POLq no BS2POLp | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POL(const unsigned char *bytes, uint16_t data[SABER_N]) { | |||
uint32_t j; | |||
uint32_t offset_data, offset_byte; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0] & (0xff)) | | |||
((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = (bytes[offset_byte + 1] >> 5 & (0x07)) | | |||
((bytes[offset_byte + 2] & 0xff) << 3) | | |||
((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = (bytes[offset_byte + 3] >> 2 & (0x3f)) | | |||
((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = (bytes[offset_byte + 4] >> 7 & (0x01)) | | |||
((bytes[offset_byte + 5] & 0xff) << 1) | | |||
((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = (bytes[offset_byte + 6] >> 4 & (0x0f)) | | |||
((bytes[offset_byte + 7] & 0xff) << 4) | | |||
((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = (bytes[offset_byte + 8] >> 1 & (0x7f)) | | |||
((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = (bytes[offset_byte + 9] >> 6 & (0x03)) | | |||
((bytes[offset_byte + 10] & 0xff) << 2) | | |||
((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = (bytes[offset_byte + 11] >> 3 & (0x1f)) | | |||
((bytes[offset_byte + 12] & 0xff) << 5); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLmsg(uint16_t data[SABER_N], const uint8_t bytes[SABER_KEYBYTES]) { | |||
size_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
data[j * 8 + i] = ((bytes[j] >> i) & 0x01); | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLVEC2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
POLVECp2BS(bytes, data); | |||
} else if (modulus == 8192) { | |||
POLVECq2BS(bytes, data); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLmsg2BS(uint8_t bytes[SABER_KEYBYTES], const uint16_t data[SABER_N]) { | |||
size_t i, j; | |||
memset(bytes, 0, SABER_KEYBYTES); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLVEC(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
BS2POLVECp(bytes, data); | |||
} else if (modulus == 8192) { | |||
BS2POLVECq(bytes, data); | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
bytes[j] = bytes[j] | ((data[j * 8 + i] & 0x01) << i); | |||
} | |||
} | |||
} |
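Each packing routine above is the exact inverse of its unpacking counterpart as long as the input coefficients fit in the stated bit width. A small round-trip check for the 3-bit (SABER_ET) packing (illustrative only; polt_roundtrip is not part of the patch):

#include <assert.h>
#include <stdint.h>
#include "pack_unpack.h"

void polt_roundtrip(void) {
    uint16_t in[SABER_N], out[SABER_N];
    uint8_t bytes[SABER_SCALEBYTES_KEM];
    int i;
    for (i = 0; i < SABER_N; i++) {
        in[i] = (uint16_t)(i & 0x7);                 /* arbitrary 3-bit values */
    }
    PQCLEAN_LIGHTSABER_CLEAN_POLT2BS(bytes, in);
    PQCLEAN_LIGHTSABER_CLEAN_BS2POLT(out, bytes);
    for (i = 0; i < SABER_N; i++) {
        assert(out[i] == in[i]);
    }
}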
@@ -1,28 +1,27 @@ | |||
#ifndef PACK_UNPACK_H | |||
#define PACK_UNPACK_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLT2BS(uint8_t bytes[SABER_SCALEBYTES_KEM], const uint16_t data[SABER_N]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLT(uint16_t data[SABER_N], const uint8_t bytes[SABER_SCALEBYTES_KEM]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_pack_3bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_CLEAN_un_pack3bit(const uint8_t *bytes, uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLVECq2BS(uint8_t bytes[SABER_POLYVECBYTES], const uint16_t data[SABER_L][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_pack_4bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLVECp2BS(uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES], const uint16_t data[SABER_L][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_un_pack4bit(const unsigned char *bytes, uint16_t *ar); | |||
void PQCLEAN_LIGHTSABER_CLEAN_pack_6bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECq(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECBYTES]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_un_pack6bit(const unsigned char *bytes, uint16_t *data); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECp(uint16_t data[SABER_L][SABER_N], const uint8_t bytes[SABER_POLYVECCOMPRESSEDBYTES]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POL(const unsigned char *bytes, uint16_t data[SABER_N]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLmsg(uint16_t data[SABER_N], const uint8_t bytes[SABER_KEYBYTES]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLVEC2BS(uint8_t *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
void PQCLEAN_LIGHTSABER_CLEAN_POLmsg2BS(uint8_t bytes[SABER_KEYBYTES], const uint16_t data[SABER_N]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_BS2POLVEC(const unsigned char *bytes, uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
#endif |
@@ -1,21 +1,49 @@ | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "poly.h" | |||
#include "poly_mul.h" | |||
#include <stdio.h> | |||
void PQCLEAN_LIGHTSABER_CLEAN_GenSecret(uint16_t r[SABER_K][SABER_N], const unsigned char *seed) { | |||
uint8_t buf[SABER_MU * SABER_N * SABER_K / 8]; | |||
void PQCLEAN_LIGHTSABER_CLEAN_MatrixVectorMul(uint16_t res[SABER_L][SABER_N], const uint16_t A[SABER_L][SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N], int16_t transpose) { | |||
int i, j; | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_L; j++) { | |||
if (transpose == 1) { | |||
PQCLEAN_LIGHTSABER_CLEAN_poly_mul_acc(A[j][i], s[j], res[i]); | |||
} else { | |||
PQCLEAN_LIGHTSABER_CLEAN_poly_mul_acc(A[i][j], s[j], res[i]); | |||
} | |||
} | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_InnerProd(uint16_t res[SABER_N], const uint16_t b[SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N]) { | |||
int j; | |||
for (j = 0; j < SABER_L; j++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_poly_mul_acc(b[j], s[j], res); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_GenMatrix(uint16_t A[SABER_L][SABER_L][SABER_N], const uint8_t seed[SABER_SEEDBYTES]) { | |||
uint8_t buf[SABER_L * SABER_POLYVECBYTES]; | |||
int i; | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_L; i++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_BS2POLVECq(A[i], buf + i * SABER_POLYVECBYTES); | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_GenSecret(uint16_t s[SABER_L][SABER_N], const uint8_t seed[SABER_NOISE_SEEDBYTES]) { | |||
uint8_t buf[SABER_L * SABER_POLYCOINBYTES]; | |||
size_t i; | |||
shake128(buf, sizeof(buf), seed, SABER_NOISESEEDBYTES); | |||
shake128(buf, sizeof(buf), seed, SABER_NOISE_SEEDBYTES); | |||
for (size_t i = 0; i < SABER_K; i++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_cbd(r[i], buf + i * SABER_MU * SABER_N / 8); | |||
for (i = 0; i < SABER_L; i++) { | |||
PQCLEAN_LIGHTSABER_CLEAN_cbd(s[i], buf + i * SABER_POLYCOINBYTES); | |||
} | |||
} |
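Because GenMatrix expands A deterministically from the 32-byte seed_A with SHAKE-128, the public key only has to carry the seed instead of the full 2x2 matrix of 13-bit polynomials. A quick check of that property (illustrative only; genmatrix_is_deterministic is not part of the patch):

#include <assert.h>
#include <string.h>
#include "poly.h"

void genmatrix_is_deterministic(const uint8_t seed[SABER_SEEDBYTES]) {
    uint16_t A1[SABER_L][SABER_L][SABER_N];
    uint16_t A2[SABER_L][SABER_L][SABER_N];
    PQCLEAN_LIGHTSABER_CLEAN_GenMatrix(A1, seed);
    PQCLEAN_LIGHTSABER_CLEAN_GenMatrix(A2, seed);    /* same 32-byte seed, same matrix */
    assert(memcmp(A1, A2, sizeof A1) == 0);
}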
@@ -1,26 +1,15 @@ | |||
#ifndef POLY_H | |||
#define POLY_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
typedef struct { | |||
uint16_t coeffs[SABER_N]; | |||
} poly; | |||
void PQCLEAN_LIGHTSABER_CLEAN_MatrixVectorMul(uint16_t res[SABER_L][SABER_N], const uint16_t a[SABER_L][SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N], int16_t transpose); | |||
void PQCLEAN_LIGHTSABER_CLEAN_InnerProd(uint16_t res[SABER_N], const uint16_t b[SABER_L][SABER_N], const uint16_t s[SABER_L][SABER_N]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_GenMatrix(uint16_t a[SABER_L][SABER_L][SABER_N], const uint8_t seed[SABER_SEEDBYTES]); | |||
typedef struct { | |||
poly vec[SABER_K]; | |||
} polyvec; | |||
void PQCLEAN_LIGHTSABER_CLEAN_GenSecret(uint16_t s[SABER_L][SABER_N], const uint8_t seed[SABER_NOISE_SEEDBYTES]); | |||
void PQCLEAN_LIGHTSABER_CLEAN_GenSecret(uint16_t r[SABER_K][SABER_N], const unsigned char *seed); | |||
#endif |
@@ -228,19 +228,15 @@ static void toom_cook_4way (const uint16_t *a1, const uint16_t *b1, uint16_t *re | |||
} | |||
} | |||
void PQCLEAN_LIGHTSABER_CLEAN_pol_mul(uint16_t *a, uint16_t *b, uint16_t *res, uint16_t p, uint32_t n) { | |||
uint32_t i; | |||
// normal multiplication | |||
uint16_t c[512]; | |||
for (i = 0; i < 512; i++) { | |||
c[i] = 0; | |||
} | |||
/* res += a*b */ | |||
void PQCLEAN_LIGHTSABER_CLEAN_poly_mul_acc(const uint16_t a[SABER_N], const uint16_t b[SABER_N], uint16_t res[SABER_N]) { | |||
uint16_t c[2 * SABER_N] = {0}; | |||
int i; | |||
toom_cook_4way(a, b, c); | |||
// reduction | |||
for (i = n; i < 2 * n; i++) { | |||
res[i - n] = (c[i - n] - c[i]) & (p - 1); | |||
/* reduction */ | |||
for (i = SABER_N; i < 2 * SABER_N; i++) { | |||
res[i - SABER_N] += (c[i - SABER_N] - c[i]); | |||
} | |||
} |
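The reduction step above works because the product is taken modulo X^N + 1 with coefficients in uint16_t (callers later mask or shift down to mod q or mod p): X^N = -1, so the upper half of the 2N-coefficient product folds back in with a minus sign, giving res[i] += c[i] - c[i + N]. A schoolbook version of the same multiply-accumulate (illustrative only; poly_mul_acc_schoolbook is not part of the patch):

#include <stdint.h>
#include "SABER_params.h"

void poly_mul_acc_schoolbook(const uint16_t a[SABER_N], const uint16_t b[SABER_N], uint16_t res[SABER_N]) {
    int i, j;
    for (i = 0; i < SABER_N; i++) {
        for (j = 0; j < SABER_N; j++) {
            uint16_t prod = (uint16_t)((uint32_t)a[i] * b[j]);    /* coefficients live mod 2^16 */
            if (i + j < SABER_N) {
                res[i + j] = (uint16_t)(res[i + j] + prod);
            } else {
                res[i + j - SABER_N] = (uint16_t)(res[i + j - SABER_N] - prod);   /* X^N = -1 */
            }
        }
    }
}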
@@ -1,9 +1,9 @@ | |||
#ifndef POLYMUL_H | |||
#define POLYMUL_H | |||
#ifndef POLY_MUL_H | |||
#define POLY_MUL_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_LIGHTSABER_CLEAN_pol_mul(uint16_t *a, uint16_t *b, uint16_t *res, uint16_t p, uint32_t n); | |||
void PQCLEAN_LIGHTSABER_CLEAN_poly_mul_acc(const uint16_t a[SABER_N], const uint16_t b[SABER_N], uint16_t res[SABER_N]); | |||
#endif |
@@ -1,3 +1,5 @@ | |||
#include "verify.h" | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
@@ -5,26 +7,25 @@ This file has been adapted from the implementation | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------*/ | |||
#include "verify.h" | |||
#include <stdint.h> | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
unsigned char PQCLEAN_LIGHTSABER_CLEAN_verify(const unsigned char *a, const unsigned char *b, size_t len) { | |||
uint8_t PQCLEAN_LIGHTSABER_CLEAN_verify(const uint8_t *a, const uint8_t *b, size_t len) { | |||
uint64_t r; | |||
size_t i; | |||
r = 0; | |||
for (i = 0; i < len; i++) { | |||
r |= a[i] ^ b[i]; | |||
} | |||
r = (~r + 1); // Two's complement | |||
r >>= 63; | |||
return (unsigned char)r; | |||
return (uint8_t) r; | |||
} | |||
/* b = 1 means mov, b = 0 means don't mov */
void PQCLEAN_LIGHTSABER_CLEAN_cmov(unsigned char *r, const unsigned char *x, size_t len, unsigned char b) { | |||
void PQCLEAN_LIGHTSABER_CLEAN_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b) { | |||
size_t i; | |||
b = -b; | |||
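verify returns 0 or 1 in constant time: every differing byte ORs into r, and since r is at most 0xFF, ~r + 1 has its top bit set exactly when r is nonzero, which the shift by 63 extracts. cmov then widens that bit into a full byte mask with b = -b; the masking loop itself lies outside this hunk, but a minimal equivalent of the selection it performs looks like this (illustrative only):

#include <stddef.h>
#include <stdint.h>

static void cmov_sketch(uint8_t *r, const uint8_t *x, size_t len, uint8_t b) {
    size_t i;
    b = (uint8_t)(-b);                          /* 0x00 if b == 0, 0xFF if b == 1 */
    for (i = 0; i < len; i++) {
        r[i] ^= (uint8_t)(b & (x[i] ^ r[i]));   /* r[i] = b ? x[i] : r[i] */
    }
}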
@@ -1,6 +1,5 @@ | |||
#ifndef VERIFY_H | |||
#define VERIFY_H | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
@@ -13,9 +12,11 @@ Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
#include <stdint.h> | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
unsigned char PQCLEAN_LIGHTSABER_CLEAN_verify(const unsigned char *a, const unsigned char *b, size_t len); | |||
uint8_t PQCLEAN_LIGHTSABER_CLEAN_verify(const uint8_t *a, const uint8_t *b, size_t len); | |||
/* b = 1 means mov, b = 0 means don't mov */
void PQCLEAN_LIGHTSABER_CLEAN_cmov(unsigned char *r, const unsigned char *x, size_t len, unsigned char b); | |||
void PQCLEAN_LIGHTSABER_CLEAN_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b); | |||
#endif |
@@ -14,4 +14,13 @@ principal-submitters: | |||
- Frederik Vercauteren | |||
implementations: | |||
- name: clean | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/14ede83f1ff3bcc41f0464543542366c68b55871 | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/509cc5ec3a7e12a751ccdd2ef5bd6e54e00bd350 | |||
- name: avx2 | |||
version: https://github.com/KULeuven-COSIC/SABER/commit/509cc5ec3a7e12a751ccdd2ef5bd6e54e00bd350 | |||
supported_platforms: | |||
- architecture: x86_64 | |||
operating_systems: | |||
- Linux | |||
- Darwin | |||
required_flags: | |||
- avx2 |
@@ -0,0 +1 @@ | |||
Public Domain |
@@ -0,0 +1,22 @@ | |||
# This Makefile can be used with GNU Make or BSD Make | |||
LIB=libsaber_avx2.a | |||
HEADERS=api.h cbd.h kem.h pack_unpack.h poly.h SABER_indcpa.h SABER_params.h verify.h | |||
OBJECTS=cbd.o kem.o pack_unpack.o SABER_indcpa.o verify.o | |||
CFLAGS=-O3 -mavx2 -Wall -Wextra -Wpedantic -Wvla -Werror -Wredundant-decls -Wmissing-prototypes -std=c99 -I../../../common $(EXTRAFLAGS) | |||
all: $(LIB) | |||
%.o: %.s $(HEADERS) | |||
$(AS) -o $@ $< | |||
%.o: %.c $(HEADERS) | |||
$(CC) $(CFLAGS) -c -o $@ $< | |||
$(LIB): $(OBJECTS) | |||
$(AR) -r $@ $(OBJECTS) | |||
clean: | |||
$(RM) $(OBJECTS) | |||
$(RM) $(LIB) |
@@ -0,0 +1,416 @@ | |||
#include "./polymul/toom-cook_4way.c" | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "randombytes.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
#include <string.h> | |||
//#include "randombytes.h" | |||
//#include "./polymul/toom_cook_4/toom-cook_4way.c" | |||
#define h1 4 //2^(EQ-EP-1) | |||
#define h2 ( (1<<(SABER_EP-2)) - (1<<(SABER_EP-SABER_ET-1)) + (1<<(SABER_EQ-SABER_EP-1)) ) | |||
static void POL2MSG(uint8_t *message_dec, const uint16_t *message_dec_unpacked) { | |||
int32_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
message_dec[j] = 0; | |||
for (i = 0; i < 8; i++) { | |||
message_dec[j] = message_dec[j] | (message_dec_unpacked[j * 8 + i] << i); | |||
} | |||
} | |||
} | |||
/*----------------------------------------------------------------------------------- | |||
This routine generates a=[Matrix K x K] of 256-coefficient polynomials | |||
-------------------------------------------------------------------------------------*/ | |||
static void GenMatrix(polyvec *a, const uint8_t *seed) { | |||
uint8_t buf[SABER_K * SABER_K * 13 * SABER_N / 8]; | |||
uint16_t temp_ar[SABER_N]; | |||
int i, j, k; | |||
uint16_t mod = (SABER_Q - 1); | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_SABER_AVX2_BS2POLq(temp_ar, buf + (i * SABER_K + j) * 13 * SABER_N / 8); | |||
for (k = 0; k < SABER_N; k++) { | |||
a[i].vec[j].coeffs[k] = (temp_ar[k])& mod ; | |||
} | |||
} | |||
} | |||
} | |||
static void GenSecret(uint16_t r[SABER_K][SABER_N], const uint8_t *seed) { | |||
uint32_t i; | |||
uint8_t buf[SABER_MU * SABER_N * SABER_K / 8]; | |||
shake128(buf, sizeof(buf), seed, SABER_NOISESEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
PQCLEAN_SABER_AVX2_cbd(r[i], buf + i * SABER_MU * SABER_N / 8); | |||
} | |||
} | |||
//********************************matrix-vector mul routines***************************************************** | |||
static void matrix_vector_mul(__m256i a1_avx_combined[NUM_POLY][NUM_POLY][AVX_N1], __m256i b_bucket[NUM_POLY][SCHB_N * 4], __m256i res_avx[NUM_POLY][AVX_N1], int isTranspose) { | |||
int64_t i, j; | |||
__m256i c_bucket[2 * SCM_SIZE * 4]; //Holds results for 9 Karatsuba at a time | |||
for (i = 0; i < NUM_POLY; i++) { | |||
for (j = 0; j < NUM_POLY; j++) { | |||
if (isTranspose == 0) { | |||
toom_cook_4way_avx_n1(a1_avx_combined[i][j], b_bucket[j], c_bucket, j); | |||
} else { | |||
toom_cook_4way_avx_n1(a1_avx_combined[j][i], b_bucket[j], c_bucket, j); | |||
} | |||
} | |||
TC_interpol(c_bucket, res_avx[i]); | |||
} | |||
} | |||
static void vector_vector_mul(__m256i a_avx[NUM_POLY][AVX_N1], __m256i b_bucket[NUM_POLY][SCHB_N * 4], __m256i res_avx[AVX_N1]) { | |||
int64_t i; | |||
__m256i c_bucket[2 * SCM_SIZE * 4]; //Holds results for 9 Karatsuba at a time | |||
for (i = 0; i < NUM_POLY; i++) { | |||
toom_cook_4way_avx_n1(a_avx[i], b_bucket[i], c_bucket, i); | |||
} | |||
TC_interpol(c_bucket, res_avx); | |||
} | |||
//********************************matrix-vector mul routines***************************************************** | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
polyvec a[SABER_K]; | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint8_t seed[SABER_SEEDBYTES]; | |||
uint8_t noiseseed[SABER_COINBYTES]; | |||
int32_t i, j, k; | |||
//--------------AVX declaration------------------ | |||
__m256i sk_avx[SABER_K][SABER_N / 16]; | |||
__m256i mod; | |||
__m256i res_avx[SABER_K][SABER_N / 16]; | |||
__m256i a_avx[SABER_K][SABER_K][SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
mod = _mm256_set1_epi16(SABER_Q - 1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
randombytes(seed, SABER_SEEDBYTES); | |||
shake128(seed, SABER_SEEDBYTES, seed, SABER_SEEDBYTES); // for not revealing system RNG state | |||
randombytes(noiseseed, SABER_COINBYTES); | |||
GenMatrix(a, seed); //sample matrix A | |||
GenSecret(skpv1, noiseseed); | |||
// Load sk into avx vectors | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sk_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&skpv1[i][j * 16])); | |||
} | |||
} | |||
// Load a into avx vectors | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
a_avx[i][j][k] = _mm256_loadu_si256 ((__m256i const *) (&a[i].vec[j].coeffs[k * 16])); | |||
} | |||
} | |||
} | |||
//------------------------do the matrix vector multiplication and rounding------------ | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sk_avx[j], b_bucket[j]); | |||
} | |||
matrix_vector_mul(a_avx, b_bucket, res_avx, 1);// Matrix-vector multiplication; Matrix in transposed order | |||
// Now truncation | |||
for (i = 0; i < SABER_K; i++) { //shift right EQ-EP bits | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
res_avx[i][j] = _mm256_add_epi16 (res_avx[i][j], _mm256_set1_epi16(h1)); | |||
res_avx[i][j] = _mm256_srli_epi16 (res_avx[i][j], (SABER_EQ - SABER_EP) ); | |||
res_avx[i][j] = _mm256_and_si256 (res_avx[i][j], mod); | |||
} | |||
} | |||
//------------------Pack sk into byte string------- | |||
PQCLEAN_SABER_AVX2_POLVEC2BS(sk, (const uint16_t (*)[SABER_N])skpv1, SABER_Q); | |||
//------------------Pack pk into byte string------- | |||
for (i = 0; i < SABER_K; i++) { // reuses skpv1[] for unpacking avx of public-key | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *) (skpv1[i] + j * 16), _mm256_set1_epi32(-1), res_avx[i][j]); | |||
} | |||
} | |||
PQCLEAN_SABER_AVX2_POLVEC2BS(pk, (const uint16_t (*)[SABER_N])skpv1, SABER_P); // load the public-key into pk byte string | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { // now load the seed bytes into pk; easy since the seed is already in byte format.
pk[SABER_POLYVECCOMPRESSEDBYTES + i] = seed[i]; | |||
} | |||
} | |||
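/*
 * The truncation block above is the vectorized form of the per-coefficient rounding
 * b = ((b + h1) >> (SABER_EQ - SABER_EP)) & (SABER_Q - 1), i.e. each mod-q coefficient is
 * rounded down to mod p before packing. Scalar sketch for reference only; round_q_to_p is
 * illustrative and not part of the implementation.
 */
static inline uint16_t round_q_to_p(uint16_t x) {
    return (uint16_t)(((x + h1) >> (SABER_EQ - SABER_EP)) & (SABER_Q - 1));
}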
void PQCLEAN_SABER_AVX2_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t noiseseed[SABER_NOISESEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]) { | |||
uint32_t i, j, k; | |||
polyvec a[SABER_K]; // skpv; | |||
uint8_t seed[SABER_SEEDBYTES]; | |||
uint16_t pkcl[SABER_K][SABER_N]; // public key received by the client
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint16_t temp[SABER_K][SABER_N]; | |||
uint16_t message[SABER_KEYBYTES * 8]; | |||
uint8_t msk_c[SABER_SCALEBYTES_KEM]; | |||
//--------------AVX declaration------------------ | |||
__m256i sk_avx[SABER_K][SABER_N / 16]; | |||
__m256i mod, mod_p; | |||
__m256i res_avx[SABER_K][SABER_N / 16]; | |||
__m256i vprime_avx[SABER_N / 16]; | |||
__m256i a_avx[SABER_K][SABER_K][SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
__m256i pkcl_avx[SABER_K][SABER_N / 16]; | |||
__m256i message_avx[SABER_N / 16]; | |||
mod = _mm256_set1_epi16(SABER_Q - 1); | |||
mod_p = _mm256_set1_epi16(SABER_P - 1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { // Load the seedbytes in the client seed from PK. | |||
seed[i] = pk[ SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
GenMatrix(a, seed); | |||
GenSecret(skpv1, noiseseed); | |||
// ----------- Load skpv1 into avx vectors ---------- | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sk_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&skpv1[i][j * 16])); | |||
} | |||
} | |||
    // ----------- Load a into avx vectors ----------
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
for (k = 0; k < SABER_N / 16; k++) { | |||
a_avx[i][j][k] = _mm256_loadu_si256 ((__m256i const *) (&a[i].vec[j].coeffs[k * 16])); | |||
} | |||
} | |||
} | |||
//-----------------matrix-vector multiplication and rounding | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sk_avx[j], b_bucket[j]); | |||
} | |||
matrix_vector_mul(a_avx, b_bucket, res_avx, 0);// Matrix-vector multiplication; Matrix in normal order | |||
// Now truncation | |||
for (i = 0; i < SABER_K; i++) { //shift right EQ-EP bits | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
res_avx[i][j] = _mm256_add_epi16 (res_avx[i][j], _mm256_set1_epi16(h1)); | |||
res_avx[i][j] = _mm256_srli_epi16 (res_avx[i][j], (SABER_EQ - SABER_EP) ); | |||
res_avx[i][j] = _mm256_and_si256 (res_avx[i][j], mod); | |||
} | |||
} | |||
    //-----this result is b', which is packed into the ciphertext for later use by the server
for (i = 0; i < SABER_K; i++) { // first store in 16 bit arrays | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *)(temp[i] + j * 16), _mm256_set1_epi32(-1), res_avx[i][j]); | |||
} | |||
} | |||
PQCLEAN_SABER_AVX2_POLVEC2BS(ciphertext, (const uint16_t (*)[SABER_N])temp, SABER_P); // Pack b_prime into ciphertext byte string | |||
//**************client matrix-vector multiplication ends******************// | |||
//------now calculate the v' | |||
//-------unpack the public_key | |||
PQCLEAN_SABER_AVX2_BS2POLVEC(pkcl, pk, SABER_P); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
pkcl_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&pkcl[i][j * 16])); | |||
} | |||
} | |||
// InnerProduct | |||
//for(k=0;k<SABER_N/16;k++){ | |||
// vprime_avx[k]=_mm256_xor_si256(vprime_avx[k],vprime_avx[k]); | |||
//} | |||
// vector-vector scalar multiplication with mod p | |||
vector_vector_mul(pkcl_avx, b_bucket, vprime_avx); | |||
// Computation of v'+h1 | |||
for (i = 0; i < SABER_N / 16; i++) { //adding h1 | |||
vprime_avx[i] = _mm256_add_epi16(vprime_avx[i], _mm256_set1_epi16(h1)); | |||
} | |||
// unpack m; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
message[8 * j + i] = ((m[j] >> i) & 0x01); | |||
} | |||
} | |||
// message encoding | |||
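    // each message bit is scaled by 2^(EP-1) = p/2 so it lands in the most significant bit of a mod-p coefficient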
for (i = 0; i < SABER_N / 16; i++) { | |||
message_avx[i] = _mm256_loadu_si256 ((__m256i const *) (&message[i * 16])); | |||
message_avx[i] = _mm256_slli_epi16 (message_avx[i], (SABER_EP - 1) ); | |||
} | |||
// SHIFTRIGHT(v'+h1-m mod p, EP-ET) | |||
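    // only the top SABER_ET bits of each coefficient survive; they form the message-dependent part of the ciphertext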
for (k = 0; k < SABER_N / 16; k++) { | |||
vprime_avx[k] = _mm256_sub_epi16(vprime_avx[k], message_avx[k]); | |||
vprime_avx[k] = _mm256_and_si256(vprime_avx[k], mod_p); | |||
vprime_avx[k] = _mm256_srli_epi16 (vprime_avx[k], (SABER_EP - SABER_ET) ); | |||
} | |||
// Unpack avx | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
_mm256_maskstore_epi32 ((int *) (temp[0] + j * 16), _mm256_set1_epi32(-1), vprime_avx[j]); | |||
} | |||
PQCLEAN_SABER_AVX2_SABER_pack_4bit(msk_c, temp[0]); | |||
for (j = 0; j < SABER_SCALEBYTES_KEM; j++) { | |||
ciphertext[SABER_CIPHERTEXTBYTES + j] = msk_c[j]; | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]) { | |||
uint32_t i, j; | |||
uint16_t sksv[SABER_K][SABER_N]; //secret key of the server | |||
uint16_t pksv[SABER_K][SABER_N]; | |||
    uint16_t message_dec_unpacked[SABER_KEYBYTES * 8]; // one element contains one decrypted bit;
uint8_t scale_ar[SABER_SCALEBYTES_KEM]; | |||
uint16_t op[SABER_N]; | |||
//--------------AVX declaration------------------ | |||
//__m256i mod_p; | |||
__m256i v_avx[SABER_N / 16]; | |||
//__m256i acc[2*SABER_N/16]; | |||
__m256i sksv_avx[SABER_K][SABER_N / 16]; | |||
__m256i pksv_avx[SABER_K][SABER_N / 16]; | |||
//mod_p=_mm256_set1_epi16(SABER_P-1); | |||
__m256i b_bucket[NUM_POLY][SCHB_N * 4]; | |||
//--------------AVX declaration ends------------------ | |||
    //-------unpack the secret key and the ciphertext
PQCLEAN_SABER_AVX2_BS2POLVEC(sksv, sk, SABER_Q); //sksv is the secret-key | |||
PQCLEAN_SABER_AVX2_BS2POLVEC(pksv, ciphertext, SABER_P); //pksv is the ciphertext | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N / 16; j++) { | |||
sksv_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&sksv[i][j * 16])); | |||
pksv_avx[i][j] = _mm256_loadu_si256 ((__m256i const *) (&pksv[i][j * 16])); | |||
} | |||
} | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
        v_avx[i] = _mm256_setzero_si256(); // zero the accumulator without reading an uninitialized value
} | |||
// InnerProduct(b', s, mod p) | |||
for (j = 0; j < NUM_POLY; j++) { | |||
TC_eval(sksv_avx[j], b_bucket[j]); | |||
} | |||
vector_vector_mul(pksv_avx, b_bucket, v_avx); | |||
for (i = 0; i < SABER_N / 16; i++) { | |||
_mm256_maskstore_epi32 ((int *)(message_dec_unpacked + i * 16), _mm256_set1_epi32(-1), v_avx[i]); | |||
} | |||
for (i = 0; i < SABER_SCALEBYTES_KEM; i++) { | |||
scale_ar[i] = ciphertext[SABER_CIPHERTEXTBYTES + i]; | |||
} | |||
PQCLEAN_SABER_AVX2_SABER_un_pack4bit(op, scale_ar); | |||
//addition of h2 | |||
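    // recover each message bit: compute (v + h2 - c_m * 2^(EP-ET)) mod p and take its most significant bit (bit EP-1)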
for (i = 0; i < SABER_N; i++) { | |||
message_dec_unpacked[i] = ( ( message_dec_unpacked[i] + h2 - (op[i] << (SABER_EP - SABER_ET)) ) & (SABER_P - 1) ) >> (SABER_EP - 1); | |||
} | |||
POL2MSG(m, message_dec_unpacked); | |||
} |
@@ -0,0 +1,13 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]); | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t noiseseed[SABER_NOISESEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]); | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]); | |||
#endif |
@@ -0,0 +1,46 @@ | |||
#ifndef PARAMS_H | |||
#define PARAMS_H | |||
#include "api.h" | |||
#define SABER_K 3 | |||
#define SABER_MU 8 | |||
#define SABER_ET 4 | |||
#define SABER_EQ 13 | |||
#define SABER_EP 10 | |||
#define SABER_N 256 | |||
#define SABER_Q 8192 //2^13 | |||
#define SABER_P 1024 | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISESEEDBYTES 32 | |||
#define SABER_COINBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYBYTES 416 //13*256/8 | |||
#define SABER_POLYVECBYTES (SABER_K * SABER_POLYBYTES) | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_K * 320) //10*256/8 NOTE : changed till here due to parameter adaptation | |||
#define SABER_CIPHERTEXTBYTES (SABER_POLYVECCOMPRESSEDBYTES) | |||
#define SABER_SCALEBYTES_KEM ((SABER_ET)*SABER_N/8) | |||
#define SABER_INDCPA_PUBLICKEYBYTES (SABER_POLYVECCOMPRESSEDBYTES + SABER_SEEDBYTES) | |||
#define SABER_INDCPA_SECRETKEYBYTES (SABER_POLYVECBYTES) | |||
#define SABER_PUBLICKEYBYTES (SABER_INDCPA_PUBLICKEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) /* Second part is for Targhi-Unruh */ | |||
#endif |
@@ -0,0 +1,18 @@ | |||
#ifndef PQCLEAN_SABER_AVX2_API_H | |||
#define PQCLEAN_SABER_AVX2_API_H | |||
#define PQCLEAN_SABER_AVX2_CRYPTO_ALGNAME "Saber" | |||
#define PQCLEAN_SABER_AVX2_CRYPTO_BYTES 32 | |||
#define PQCLEAN_SABER_AVX2_CRYPTO_CIPHERTEXTBYTES 1088 | |||
#define PQCLEAN_SABER_AVX2_CRYPTO_PUBLICKEYBYTES 992 | |||
#define PQCLEAN_SABER_AVX2_CRYPTO_SECRETKEYBYTES 2304 | |||
int PQCLEAN_SABER_AVX2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_SABER_AVX2_crypto_kem_enc(unsigned char *ct, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_SABER_AVX2_crypto_kem_dec(unsigned char *k, const unsigned char *ct, const unsigned char *sk); | |||
#endif /* PQCLEAN_SABER_AVX2_API_H */ |
@@ -0,0 +1,51 @@ | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "cbd.h" | |||
#include <stdint.h> | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
static uint64_t load_littleendian(const unsigned char *x, int bytes) { | |||
int i; | |||
uint64_t r = x[0]; | |||
for (i = 1; i < bytes; i++) { | |||
r |= (uint64_t)x[i] << (8 * i); | |||
} | |||
return r; | |||
} | |||
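/* Centered binomial sampling: each output coefficient is the difference of the
   Hamming weights of two independent 4-bit strings, reduced mod q. */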
void PQCLEAN_SABER_AVX2_cbd(uint16_t *r, const unsigned char *buf) { | |||
uint16_t Qmod_minus1 = SABER_Q - 1; | |||
uint32_t t, d, a[4], b[4]; | |||
int i, j; | |||
for (i = 0; i < SABER_N / 4; i++) { | |||
t = load_littleendian(buf + 4 * i, 4); | |||
d = 0; | |||
for (j = 0; j < 4; j++) { | |||
d += (t >> j) & 0x11111111; | |||
} | |||
a[0] = d & 0xf; | |||
b[0] = (d >> 4) & 0xf; | |||
a[1] = (d >> 8) & 0xf; | |||
b[1] = (d >> 12) & 0xf; | |||
a[2] = (d >> 16) & 0xf; | |||
b[2] = (d >> 20) & 0xf; | |||
a[3] = (d >> 24) & 0xf; | |||
b[3] = (d >> 28); | |||
r[4 * i + 0] = (uint16_t)(a[0] - b[0]) & Qmod_minus1; | |||
r[4 * i + 1] = (uint16_t)(a[1] - b[1]) & Qmod_minus1; | |||
r[4 * i + 2] = (uint16_t)(a[2] - b[2]) & Qmod_minus1; | |||
r[4 * i + 3] = (uint16_t)(a[3] - b[3]) & Qmod_minus1; | |||
} | |||
} |
@@ -0,0 +1,16 @@ | |||
#ifndef CBD_H | |||
#define CBD_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "poly.h" | |||
#include <stdint.h> | |||
void PQCLEAN_SABER_AVX2_cbd(uint16_t *r, const unsigned char *buf); | |||
#endif |
@@ -0,0 +1,79 @@ | |||
#include "SABER_indcpa.h" | |||
#include "SABER_params.h" | |||
#include "api.h" | |||
#include "fips202.h" | |||
#include "randombytes.h" | |||
#include "verify.h" | |||
#include <immintrin.h> | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
#include <string.h> | |||
int PQCLEAN_SABER_AVX2_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) { | |||
int i; | |||
PQCLEAN_SABER_AVX2_indcpa_kem_keypair(pk, sk); // sk[0:SABER_INDCPA_SECRETKEYBYTES-1] <-- sk | |||
for (i = 0; i < SABER_INDCPA_PUBLICKEYBYTES; i++) { | |||
        sk[i + SABER_INDCPA_SECRETKEYBYTES] = pk[i]; // sk[SABER_INDCPA_SECRETKEYBYTES:SABER_INDCPA_SECRETKEYBYTES+SABER_INDCPA_PUBLICKEYBYTES-1] <-- pk
} | |||
sha3_256(sk + SABER_SECRETKEYBYTES - 64, pk, SABER_INDCPA_PUBLICKEYBYTES); // Then hash(pk) is appended. | |||
randombytes(sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES); // Remaining part of sk contains a pseudo-random number. | |||
    // It is output when the re-encryption check in PQCLEAN_SABER_AVX2_crypto_kem_dec() fails.
return (0); | |||
} | |||
int PQCLEAN_SABER_AVX2_crypto_kem_enc(uint8_t *c, uint8_t *k, const uint8_t *pk) { | |||
uint8_t kr[64]; // Will contain key, coins | |||
uint8_t buf[64]; | |||
randombytes(buf, 32); | |||
    sha3_256(buf, buf, 32); // BUF[0:31] <-- random message (will be used as the key for client) Note: hashing ensures the system RNG output is not revealed
sha3_256(buf + 32, pk, SABER_INDCPA_PUBLICKEYBYTES); // BUF[32:63] <-- Hash(public key); Multitarget countermeasure for coins + contributory KEM | |||
sha3_512(kr, buf, 64); // kr[0:63] <-- Hash(buf[0:63]); | |||
// K^ <-- kr[0:31] | |||
// noiseseed (r) <-- kr[32:63]; | |||
PQCLEAN_SABER_AVX2_indcpa_kem_enc(c, buf, (const uint8_t *) (kr + 32), pk); // buf[0:31] contains message; kr[32:63] contains randomness r; | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} | |||
int PQCLEAN_SABER_AVX2_crypto_kem_dec(uint8_t *k, const uint8_t *c, const uint8_t *sk) { | |||
int i; | |||
uint8_t fail; | |||
uint8_t cmp[SABER_BYTES_CCA_DEC]; | |||
uint8_t buf[64]; | |||
uint8_t kr[64]; // Will contain key, coins | |||
const uint8_t *pk = sk + SABER_INDCPA_SECRETKEYBYTES; | |||
PQCLEAN_SABER_AVX2_indcpa_kem_dec(buf, sk, c); // buf[0:31] <-- message | |||
// Multitarget countermeasure for coins + contributory KEM | |||
for (i = 0; i < 32; i++) { // Save hash by storing h(pk) in sk | |||
buf[32 + i] = sk[SABER_SECRETKEYBYTES - 64 + i]; | |||
} | |||
sha3_512(kr, buf, 64); | |||
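    // FO transform: re-encrypt the decrypted message with the derived coins and compare against the received
    // ciphertext in constant time; on mismatch, the cmov below substitutes the stored secret value so that a
    // pseudo-random key is returned (implicit rejection).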
PQCLEAN_SABER_AVX2_indcpa_kem_enc(cmp, buf, (const uint8_t *) (kr + 32), pk); | |||
fail = PQCLEAN_SABER_AVX2_verify(c, cmp, SABER_BYTES_CCA_DEC); | |||
sha3_256(kr + 32, c, SABER_BYTES_CCA_DEC); // overwrite coins in kr with h(c) | |||
PQCLEAN_SABER_AVX2_cmov(kr, sk + SABER_SECRETKEYBYTES - SABER_KEYBYTES, SABER_KEYBYTES, fail); | |||
sha3_256(k, kr, 64); // hash concatenation of pre-k and h(c) to k | |||
return (0); | |||
} |
@@ -0,0 +1,35 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include <stdint.h> | |||
void PQCLEAN_SABER_AVX2_indcpa_keypair(uint8_t *pk, uint8_t *sk); | |||
void PQCLEAN_SABER_AVX2_indcpa_client(uint8_t *pk, uint8_t *b_prime, uint8_t *c, uint8_t *key); | |||
void PQCLEAN_SABER_AVX2_indcpa_server(uint8_t *pk, uint8_t *b_prime, uint8_t *c, uint8_t *key); | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_keypair(uint8_t *pk, uint8_t *sk); | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_enc(uint8_t *message, uint8_t *noiseseed, uint8_t *pk, uint8_t *ciphertext); | |||
void PQCLEAN_SABER_AVX2_indcpa_kem_dec(uint8_t *sk, uint8_t *ciphertext, uint8_t message_dec[]); | |||
int PQCLEAN_SABER_AVX2_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_SABER_AVX2_crypto_kem_enc(unsigned char *c, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_SABER_AVX2_crypto_kem_dec(unsigned char *k, const unsigned char *c, const unsigned char *sk); | |||
//uint64_t clock1,clock2; | |||
//uint64_t clock_kp_kex, clock_enc_kex, clock_dec_kex; | |||
#endif |
@@ -0,0 +1,502 @@ | |||
#include "pack_unpack.h" | |||
void PQCLEAN_SABER_AVX2_SABER_pack_3bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x7) | ( (data[offset_data + 1] & 0x7) << 3 ) | ((data[offset_data + 2] & 0x3) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 2] >> 2 ) & 0x01) | ( (data[offset_data + 3] & 0x7) << 1 ) | ( (data[offset_data + 4] & 0x7) << 4 ) | (((data[offset_data + 5]) & 0x01) << 7); | |||
bytes[offset_byte + 2] = ((data[offset_data + 5] >> 1 ) & 0x03) | ( (data[offset_data + 6] & 0x7) << 2 ) | ( (data[offset_data + 7] & 0x7) << 5 ); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack3bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = (bytes[offset_byte + 0]) & 0x07; | |||
data[offset_data + 1] = ( (bytes[offset_byte + 0]) >> 3 ) & 0x07; | |||
data[offset_data + 2] = ( ( (bytes[offset_byte + 0]) >> 6 ) & 0x03) | ( ( (bytes[offset_byte + 1]) & 0x01) << 2 ); | |||
data[offset_data + 3] = ( (bytes[offset_byte + 1]) >> 1 ) & 0x07; | |||
data[offset_data + 4] = ( (bytes[offset_byte + 1]) >> 4 ) & 0x07; | |||
data[offset_data + 5] = ( ( (bytes[offset_byte + 1]) >> 7 ) & 0x01) | ( ( (bytes[offset_byte + 2]) & 0x03) << 1 ); | |||
data[offset_data + 6] = ( (bytes[offset_byte + 2] >> 2) & 0x07 ); | |||
data[offset_data + 7] = ( (bytes[offset_byte + 2] >> 5) & 0x07 ); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_pack_4bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
bytes[j] = (data[offset_data] & 0x0f) | ( (data[offset_data + 1] & 0x0f) << 4 ); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack4bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0; | |||
for (j = 0; j < SABER_N / 2; j++) { | |||
offset_data = 2 * j; | |||
data[offset_data] = bytes[j] & 0x0f; | |||
data[offset_data + 1] = (bytes[j] >> 4) & 0x0f; | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_pack_6bit(uint8_t *bytes, const uint16_t *data) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = (data[offset_data + 0] & 0x3f) | ((data[offset_data + 1] & 0x03) << 6); | |||
bytes[offset_byte + 1] = ((data[offset_data + 1] >> 2) & 0x0f) | ((data[offset_data + 2] & 0x0f) << 4); | |||
bytes[offset_byte + 2] = ((data[offset_data + 2] >> 4) & 0x03) | ((data[offset_data + 3] & 0x3f) << 2); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack6bit(uint16_t *data, const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = 3 * j; | |||
offset_data = 4 * j; | |||
data[offset_data + 0] = bytes[offset_byte + 0] & 0x3f; | |||
data[offset_data + 1] = ((bytes[offset_byte + 0] >> 6) & 0x03) | ((bytes[offset_byte + 1] & 0x0f) << 2) ; | |||
data[offset_data + 2] = ((bytes[offset_byte + 1] & 0xff) >> 4) | ((bytes[offset_byte + 2] & 0x03) << 4) ; | |||
data[offset_data + 3] = ((bytes[offset_byte + 2] & 0xff) >> 2); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_pack10bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x03 ) | ((data[i][ offset_data + 1 ] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 6) & 0x0f ) | ( (data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 4) & 0x3f ) | ((data[i][ offset_data + 3 ] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 3 ] >> 2) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_POLVECp2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x03 ) | ((data[i][ offset_data + 1 ] & 0x3f) << 2); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 6) & 0x0f ) | ( (data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 4) & 0x3f ) | ((data[i][ offset_data + 3 ] & 0x03) << 6); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 3 ] >> 2) & 0xff ); | |||
} | |||
} | |||
} | |||
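/* Packs SABER_K polynomials of 13-bit (mod-q) coefficients: every 8 coefficients occupy 13 bytes. */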
void PQCLEAN_SABER_AVX2_POLVECq2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x1f ) | ((data[i][ offset_data + 1 ] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 3) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 11) & 0x03 ) | ((data[i][ offset_data + 2 ] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 6) & 0x7f ) | ( (data[i][ offset_data + 3 ] & 0x01) << 7 ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 9) & 0x0f ) | ( (data[i][ offset_data + 4 ] & 0x0f) << 4 ); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 4] >> 4) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 4 ] >> 12) & 0x01 ) | ( (data[i][ offset_data + 5 ] & 0x7f) << 1 ); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 5 ] >> 7) & 0x3f ) | ( (data[i][ offset_data + 6 ] & 0x03) << 6 ); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 6 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 11] = ( (data[i][ offset_data + 6 ] >> 10) & 0x07 ) | ( (data[i][ offset_data + 7 ] & 0x1f) << 3 ); | |||
bytes[offset_byte + 12] = ( (data[i][ offset_data + 7 ] >> 5) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_BS2POLq(uint16_t data[SABER_N], const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_BS2POLVECp(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[ offset_byte + 1 ] & 0x03) << 8); | |||
data[i][offset_data + 1] = ( (bytes[ offset_byte + 1 ] >> 2) & (0x3f)) | ((bytes[ offset_byte + 2 ] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ( (bytes[ offset_byte + 2 ] >> 4) & (0x0f)) | ((bytes[ offset_byte + 3 ] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ( (bytes[ offset_byte + 3 ] >> 6) & (0x03)) | ((bytes[ offset_byte + 4 ] & 0xff) << 2); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_BS2POLVECq(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack10bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 10) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 5 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[ offset_byte + 1 ] & 0x03) << 8); | |||
data[i][offset_data + 1] = ( (bytes[ offset_byte + 1 ] >> 2) & (0x3f)) | ((bytes[ offset_byte + 2 ] & 0x0f) << 6); | |||
data[i][offset_data + 2] = ( (bytes[ offset_byte + 2 ] >> 4) & (0x0f)) | ((bytes[ offset_byte + 3 ] & 0x3f) << 4); | |||
data[i][offset_data + 3] = ( (bytes[ offset_byte + 3 ] >> 6) & (0x03)) | ((bytes[ offset_byte + 4 ] & 0xff) << 2); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_pack13bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x1f ) | ((data[i][ offset_data + 1 ] & 0x07) << 5); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 3) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 11) & 0x03 ) | ((data[i][ offset_data + 2 ] & 0x3f) << 2); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 6) & 0x7f ) | ( (data[i][ offset_data + 3 ] & 0x01) << 7 ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 9) & 0x0f ) | ( (data[i][ offset_data + 4 ] & 0x0f) << 4 ); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 4] >> 4) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 4 ] >> 12) & 0x01 ) | ( (data[i][ offset_data + 5 ] & 0x7f) << 1 ); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 5 ] >> 7) & 0x3f ) | ( (data[i][ offset_data + 6 ] & 0x03) << 6 ); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 6 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 11] = ( (data[i][ offset_data + 6 ] >> 10) & 0x07 ) | ( (data[i][ offset_data + 7 ] & 0x1f) << 3 ); | |||
bytes[offset_byte + 12] = ( (data[i][ offset_data + 7 ] >> 5) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack13bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 13) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 13 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[i][offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[i][offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[i][offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[i][offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[i][offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[i][offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[i][offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_poly_un_pack13bit(uint16_t data[SABER_N], const uint8_t *bytes) { | |||
uint32_t j; | |||
uint32_t offset_data = 0, offset_byte = 0; | |||
//for(i=0;i<SABER_K;i++){ | |||
//i=0; | |||
//offset_byte1=i*(SABER_N*13)/8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
//offset_byte=offset_byte1+13*j; | |||
offset_byte = 13 * j; | |||
offset_data = 8 * j; | |||
data[offset_data + 0] = ( bytes[ offset_byte + 0 ] & (0xff)) | ((bytes[offset_byte + 1] & 0x1f) << 8); | |||
data[offset_data + 1] = ( bytes[ offset_byte + 1 ] >> 5 & (0x07)) | ((bytes[offset_byte + 2] & 0xff) << 3) | ((bytes[offset_byte + 3] & 0x03) << 11); | |||
data[offset_data + 2] = ( bytes[ offset_byte + 3 ] >> 2 & (0x3f)) | ((bytes[offset_byte + 4] & 0x7f) << 6); | |||
data[offset_data + 3] = ( bytes[ offset_byte + 4 ] >> 7 & (0x01)) | ((bytes[offset_byte + 5] & 0xff) << 1) | ((bytes[offset_byte + 6] & 0x0f) << 9); | |||
data[offset_data + 4] = ( bytes[ offset_byte + 6 ] >> 4 & (0x0f)) | ((bytes[offset_byte + 7] & 0xff) << 4) | ((bytes[offset_byte + 8] & 0x01) << 12); | |||
data[offset_data + 5] = ( bytes[ offset_byte + 8] >> 1 & (0x7f)) | ((bytes[offset_byte + 9] & 0x3f) << 7); | |||
data[offset_data + 6] = ( bytes[ offset_byte + 9] >> 6 & (0x03)) | ((bytes[offset_byte + 10] & 0xff) << 2) | ((bytes[offset_byte + 11] & 0x07) << 10); | |||
data[offset_data + 7] = ( bytes[ offset_byte + 11] >> 3 & (0x1f)) | ((bytes[offset_byte + 12] & 0xff) << 5); | |||
} | |||
//} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_pack11bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
    /* This function packs an 11-bit data stream into a byte (8-bit) stream. */
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 11) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 11 * j; | |||
offset_data = 8 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x07 ) | ((data[i][ offset_data + 1 ] & 0x1f) << 3); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 5) & 0x3f ) | ((data[i][ offset_data + 2 ] & 0x03) << 6); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 2 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 10) & 0x01 ) | ((data[i][ offset_data + 3 ] & 0x7f) << 1); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 3 ] >> 7) & 0x0f ) | ((data[i][ offset_data + 4 ] & 0x0f) << 4); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 4 ] >> 4) & 0x7f ) | ((data[i][ offset_data + 5 ] & 0x01) << 7); | |||
bytes[offset_byte + 7] = ( (data[i][ offset_data + 5 ] >> 1) & 0xff ); | |||
bytes[offset_byte + 8] = ( (data[i][ offset_data + 5 ] >> 9) & 0x03 ) | ((data[i][ offset_data + 6 ] & 0x3f) << 2); | |||
bytes[offset_byte + 9] = ( (data[i][ offset_data + 6 ] >> 6) & 0x1f ) | ((data[i][ offset_data + 7 ] & 0x07) << 5); | |||
bytes[offset_byte + 10] = ( (data[i][ offset_data + 7 ] >> 3) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack11bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 11) / 8; | |||
for (j = 0; j < SABER_N / 8; j++) { | |||
offset_byte = offset_byte1 + 11 * j; | |||
offset_data = 8 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0]) | ( (bytes[offset_byte + 1] & 0x07) << 8 ); | |||
data[i][offset_data + 1] = ( (bytes[offset_byte + 1] >> 3) & 0x1f) | ( (bytes[offset_byte + 2] & 0x3f) << 5 ); | |||
data[i][offset_data + 2] = ( (bytes[offset_byte + 2] >> 6) & 0x03) | ( (bytes[offset_byte + 3] & 0xff) << 2 ) | ( (bytes[offset_byte + 4] & 0x01) << 10 ); | |||
data[i][offset_data + 3] = ( (bytes[offset_byte + 4] >> 1) & 0x7f) | ( (bytes[offset_byte + 5] & 0x0f) << 7 ); | |||
data[i][offset_data + 4] = ( (bytes[offset_byte + 5] >> 4) & 0x0f) | ( (bytes[offset_byte + 6] & 0x7f) << 4 ); | |||
data[i][offset_data + 5] = ( (bytes[offset_byte + 6] >> 7) & 0x01) | ( (bytes[offset_byte + 7] & 0xff) << 1 ) | ( (bytes[offset_byte + 8] & 0x03) << 9 ); | |||
data[i][offset_data + 6] = ( (bytes[offset_byte + 8] >> 2) & 0x3f) | ( (bytes[offset_byte + 9] & 0x1f) << 6 ); | |||
data[i][offset_data + 7] = ( (bytes[offset_byte + 9] >> 5) & 0x07) | ( (bytes[offset_byte + 10] & 0xff) << 3 ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_pack14bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 14) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 7 * j; | |||
offset_data = 4 * j; | |||
bytes[offset_byte + 0] = ( data[i][ offset_data + 0 ] & (0xff)); | |||
bytes[offset_byte + 1] = ( (data[i][ offset_data + 0 ] >> 8) & 0x3f ) | ((data[i][ offset_data + 1 ] & 0x03) << 6); | |||
bytes[offset_byte + 2] = ( (data[i][ offset_data + 1 ] >> 2) & 0xff ); | |||
bytes[offset_byte + 3] = ( (data[i][ offset_data + 1 ] >> 10) & 0x0f ) | ((data[i][ offset_data + 2 ] & 0x0f) << 4); | |||
bytes[offset_byte + 4] = ( (data[i][ offset_data + 2 ] >> 4) & 0xff ); | |||
bytes[offset_byte + 5] = ( (data[i][ offset_data + 2 ] >> 12) & 0x03 ) | ((data[i][ offset_data + 3 ] & 0x3f) << 2); | |||
bytes[offset_byte + 6] = ( (data[i][ offset_data + 3 ] >> 6) & 0xff ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack14bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes) { | |||
uint32_t i, j; | |||
uint32_t offset_data = 0, offset_byte = 0, offset_byte1 = 0; | |||
for (i = 0; i < SABER_K; i++) { | |||
offset_byte1 = i * (SABER_N * 14) / 8; | |||
for (j = 0; j < SABER_N / 4; j++) { | |||
offset_byte = offset_byte1 + 7 * j; | |||
offset_data = 4 * j; | |||
data[i][offset_data + 0] = (bytes[offset_byte + 0] & 0xff) | ( (bytes[offset_byte + 1] & 0x3f) << 8 ); | |||
data[i][offset_data + 1] = ( (bytes[offset_byte + 1] >> 6) & 0x03) | ((bytes[offset_byte + 2] & 0xff) << 2 ) | ( (bytes[offset_byte + 3] & 0x0f) << 10 ); | |||
data[i][offset_data + 2] = ( (bytes[offset_byte + 3] >> 4) & 0x0f) | ( (bytes[offset_byte + 4] ) << 4 ) | ( (bytes[offset_byte + 5] & 0x03) << 12 ); | |||
data[i][offset_data + 3] = ( (bytes[offset_byte + 5] >> 2) & 0x3f) | ( (bytes[offset_byte + 6] ) << 6 ); | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_POLVEC2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N], uint16_t modulus) { | |||
if (modulus == 1024) { | |||
PQCLEAN_SABER_AVX2_POLVECp2BS(bytes, data); | |||
} else if (modulus == 8192) { | |||
PQCLEAN_SABER_AVX2_POLVECq2BS(bytes, data); | |||
} | |||
} | |||
void PQCLEAN_SABER_AVX2_BS2POLVEC(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes, uint16_t modulus) { | |||
if (modulus == 1024) { | |||
PQCLEAN_SABER_AVX2_BS2POLVECp(data, bytes); | |||
} else if (modulus == 8192) { | |||
PQCLEAN_SABER_AVX2_BS2POLVECq(data, bytes); | |||
} | |||
} |
@@ -0,0 +1,56 @@ | |||
#ifndef PACK_UNPACK_H | |||
#define PACK_UNPACK_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
#include <stdio.h> | |||
void PQCLEAN_SABER_AVX2_BS2POLq(uint16_t data[SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_BS2POLVEC(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes, uint16_t modulus); | |||
void PQCLEAN_SABER_AVX2_BS2POLVECq(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_BS2POLVECp(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_POLVEC2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N], uint16_t modulus); | |||
void PQCLEAN_SABER_AVX2_POLVECq2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_SABER_AVX2_POLVECp2BS(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_SABER_AVX2_SABER_pack_3bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_SABER_AVX2_SABER_pack_4bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_SABER_AVX2_SABER_pack_6bit(uint8_t *bytes, const uint16_t *data); | |||
void PQCLEAN_SABER_AVX2_SABER_pack10bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_SABER_AVX2_SABER_pack11bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_SABER_AVX2_SABER_pack13bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_SABER_AVX2_SABER_pack14bit(uint8_t *bytes, const uint16_t data[SABER_K][SABER_N]); | |||
void PQCLEAN_SABER_AVX2_SABER_poly_un_pack13bit(uint16_t data[SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack3bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack4bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack6bit(uint16_t *data, const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack10bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack11bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack13bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
void PQCLEAN_SABER_AVX2_SABER_un_pack14bit(uint16_t data[SABER_K][SABER_N], const uint8_t *bytes); | |||
#endif |
@@ -0,0 +1,27 @@ | |||
#ifndef POLY_H | |||
#define POLY_H | |||
/*--------------------------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at, Public Domain https://github.com/pq-crystals/kyber) | |||
of "CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by : Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint, | |||
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien stehle | |||
----------------------------------------------------------------------*/ | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
typedef struct { | |||
uint16_t coeffs[SABER_N]; | |||
} poly; | |||
typedef struct { | |||
poly vec[SABER_K]; | |||
} polyvec; | |||
void PQCLEAN_SABER_AVX2_poly_getnoise(uint16_t *r, const unsigned char *seed, unsigned char nonce); | |||
void PQCLEAN_SABER_AVX2_poly_getnoise4x(uint16_t *r0, uint16_t *r1, uint16_t *r2, const unsigned char *seed, unsigned char nonce0, unsigned char nonce1, unsigned char nonce2, unsigned char nonce3); | |||
#endif |
@@ -0,0 +1,20 @@ | |||
#include "../SABER_params.h" | |||
#define AVX_N (SABER_N >> 4) | |||
#define small_len_avx (AVX_N >> 2) | |||
#define SCHB_N 16 | |||
#define N_SB (SABER_N >> 2) | |||
#define N_SB_RES (2*N_SB-1) | |||
#define N_SB_16 (N_SB >> 2) | |||
#define N_SB_16_RES (2*N_SB_16-1) | |||
#define AVX_N1 16 /*N/16*/ | |||
#define SCM_SIZE 16 | |||
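// Split sizes for the polynomial multiplier (the included toom-cook_4way.c):
// N_SB = SABER_N/4 is the Toom-Cook-4 limb size, which is split further down to
// 16-coefficient blocks (N_SB_16) handled by the AVX2 schoolbook kernel.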
// The dimension of a vector, i.e. a vector has NUM_POLY elements and the matrix has NUM_POLY x NUM_POLY elements
#define NUM_POLY SABER_K | |||
//int NUM_POLY=2; |
@@ -0,0 +1,303 @@ | |||
#include <immintrin.h> | |||
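// transpose_n1: in-place transpose of a 16x16 matrix of 16-bit elements held in the 16 AVX2 registers M[0..15].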
static void transpose_n1(__m256i *M) | |||
{ | |||
//int i; | |||
register __m256i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; | |||
register __m256i temp, temp0, temp1, temp2; | |||
//for(i=0; i<8; i=i+1) | |||
//{ | |||
r0 = _mm256_unpacklo_epi16(M[0], M[1]); | |||
r1 = _mm256_unpacklo_epi16(M[2], M[3]); | |||
r2 = _mm256_unpacklo_epi16(M[4], M[5]); | |||
r3 = _mm256_unpacklo_epi16(M[6], M[7]); | |||
r4 = _mm256_unpacklo_epi16(M[8], M[9]); | |||
r5 = _mm256_unpacklo_epi16(M[10], M[11]); | |||
r6 = _mm256_unpacklo_epi16(M[12], M[13]); | |||
r7 = _mm256_unpacklo_epi16(M[14], M[15]); | |||
temp = _mm256_unpacklo_epi32(r0, r1); | |||
temp0 = _mm256_unpacklo_epi32(r2, r3); | |||
temp1 = _mm256_unpacklo_epi32(r4, r5); | |||
temp2 = _mm256_unpacklo_epi32(r6, r7); | |||
r8 = _mm256_unpackhi_epi32(r0, r1); | |||
r9 = _mm256_unpackhi_epi32(r2, r3); | |||
r10 = _mm256_unpackhi_epi32(r4, r5); | |||
r11 = _mm256_unpackhi_epi32(r6, r7); | |||
r0 = _mm256_unpacklo_epi64(temp, temp0); | |||
r2 = _mm256_unpackhi_epi64(temp, temp0); | |||
r1 = _mm256_unpacklo_epi64(temp1, temp2); | |||
r3 = _mm256_unpackhi_epi64(temp1, temp2); | |||
temp = _mm256_unpackhi_epi16(M[0], M[1]); | |||
temp0 = _mm256_unpackhi_epi16(M[2], M[3]); | |||
temp1 = _mm256_unpackhi_epi16(M[4], M[5]); | |||
temp2 = _mm256_unpackhi_epi16(M[6], M[7]); | |||
r4 = _mm256_unpackhi_epi16(M[8], M[9]); | |||
M[0] = _mm256_permute2f128_si256(r0, r1, 0x20); | |||
M[8] = _mm256_permute2f128_si256(r0, r1, 0x31); | |||
M[1] = _mm256_permute2f128_si256(r2, r3, 0x20); | |||
M[9] = _mm256_permute2f128_si256(r2, r3, 0x31); | |||
r5 = _mm256_unpackhi_epi16(M[10], M[11]); | |||
r6 = _mm256_unpackhi_epi16(M[12], M[13]); | |||
r7 = _mm256_unpackhi_epi16(M[14], M[15]); | |||
r0 = _mm256_unpacklo_epi64(r8, r9); | |||
r1 = _mm256_unpacklo_epi64(r10, r11); | |||
r2 = _mm256_unpackhi_epi64(r8, r9); | |||
r3 = _mm256_unpackhi_epi64(r10, r11); | |||
M[3] = _mm256_permute2f128_si256(r2, r3, 0x20); | |||
M[11] = _mm256_permute2f128_si256(r2, r3, 0x31); | |||
M[2] = _mm256_permute2f128_si256(r0, r1, 0x20); | |||
M[10] = _mm256_permute2f128_si256(r0, r1, 0x31); | |||
//for(i=0; i<4; i=i+1) | |||
//{ | |||
r0 = _mm256_unpacklo_epi32(temp, temp0); | |||
r1 = _mm256_unpacklo_epi32(temp1, temp2); | |||
r2 = _mm256_unpacklo_epi32(r4, r5); | |||
r3 = _mm256_unpacklo_epi32(r6, r7); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
r8 = _mm256_unpacklo_epi64(r0, r1); | |||
r10 = _mm256_unpackhi_epi64(r0, r1); | |||
r9 = _mm256_unpacklo_epi64(r2, r3); | |||
r11 = _mm256_unpackhi_epi64(r2, r3); | |||
M[4] = _mm256_permute2f128_si256(r8, r9, 0x20); | |||
M[12] = _mm256_permute2f128_si256(r8, r9, 0x31); | |||
M[5] = _mm256_permute2f128_si256(r10, r11, 0x20); | |||
M[13] = _mm256_permute2f128_si256(r10, r11, 0x31); | |||
r0 = _mm256_unpackhi_epi32(temp, temp0); | |||
r1 = _mm256_unpackhi_epi32(temp1, temp2); | |||
r2 = _mm256_unpackhi_epi32(r4, r5); | |||
r3 = _mm256_unpackhi_epi32(r6, r7); | |||
//} | |||
// for(i=0; i<2; i=i+1) | |||
// { | |||
r4 = _mm256_unpacklo_epi64(r0, r1); | |||
r6 = _mm256_unpackhi_epi64(r0, r1); | |||
r5 = _mm256_unpacklo_epi64(r2, r3); | |||
r7 = _mm256_unpackhi_epi64(r2, r3); | |||
// } | |||
//------------------------------------------------------- | |||
M[6] = _mm256_permute2f128_si256(r4, r5, 0x20); | |||
M[14] = _mm256_permute2f128_si256(r4, r5, 0x31); | |||
M[7] = _mm256_permute2f128_si256(r6, r7, 0x20); | |||
M[15] = _mm256_permute2f128_si256(r6, r7, 0x31); | |||
} | |||
/* | |||
void transpose_unrolled(__m256i *M) | |||
{ | |||
int i; | |||
__m256i tL[8], tH[8]; | |||
__m256i bL[4], bH[4], cL[4], cH[4]; | |||
__m256i dL[2], dH[2], eL[2], eH[2], fL[2], fH[2], gL[2], gH[2]; | |||
__m256i r0, r1, r2, r3, r4, r5, r6, r7; | |||
//for(i=0; i<8; i=i+1) | |||
//{ | |||
tL[0] = _mm256_unpacklo_epi16(M[0], M[1]); | |||
tH[0] = _mm256_unpackhi_epi16(M[0], M[1]); | |||
tL[1] = _mm256_unpacklo_epi16(M[2], M[3]); | |||
tH[1] = _mm256_unpackhi_epi16(M[2], M[3]); | |||
tL[2] = _mm256_unpacklo_epi16(M[4], M[5]); | |||
tH[2] = _mm256_unpackhi_epi16(M[4], M[5]); | |||
tL[3] = _mm256_unpacklo_epi16(M[6], M[7]); | |||
tH[3] = _mm256_unpackhi_epi16(M[6], M[7]); | |||
tL[4] = _mm256_unpacklo_epi16(M[8], M[9]); | |||
tH[4] = _mm256_unpackhi_epi16(M[8], M[9]); | |||
tL[5] = _mm256_unpacklo_epi16(M[10], M[11]); | |||
tH[5] = _mm256_unpackhi_epi16(M[10], M[11]); | |||
tL[6] = _mm256_unpacklo_epi16(M[12], M[13]); | |||
tH[6] = _mm256_unpackhi_epi16(M[12], M[13]); | |||
tL[7] = _mm256_unpacklo_epi16(M[14], M[15]); | |||
tH[7] = _mm256_unpackhi_epi16(M[14], M[15]); | |||
//} | |||
//------------------------------------------------------- | |||
//for(i=0; i<4; i=i+1) | |||
//{ | |||
bL[0] = _mm256_unpacklo_epi32(tL[0], tL[1]); | |||
bH[0] = _mm256_unpackhi_epi32(tL[0], tL[1]); | |||
bL[1] = _mm256_unpacklo_epi32(tL[2], tL[3]); | |||
bH[1] = _mm256_unpackhi_epi32(tL[2], tL[3]); | |||
bL[2] = _mm256_unpacklo_epi32(tL[4], tL[5]); | |||
bH[2] = _mm256_unpackhi_epi32(tL[4], tL[5]); | |||
bL[3] = _mm256_unpacklo_epi32(tL[6], tL[7]); | |||
bH[3] = _mm256_unpackhi_epi32(tL[6], tL[7]); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
dL[0] = _mm256_unpacklo_epi64(bL[0], bL[1]); | |||
dH[0] = _mm256_unpackhi_epi64(bL[0], bL[1]); | |||
dL[1] = _mm256_unpacklo_epi64(bL[2], bL[3]); | |||
dH[1] = _mm256_unpackhi_epi64(bL[2], bL[3]); | |||
M[0] = _mm256_permute2f128_si256(dL[0], dL[1], 0x20); | |||
M[8] = _mm256_permute2f128_si256(dL[0], dL[1], 0x31); | |||
M[1] = _mm256_permute2f128_si256(dH[0], dH[1], 0x20); | |||
M[9] = _mm256_permute2f128_si256(dH[0], dH[1], 0x31); | |||
//} | |||
//for(i=0; i<2; i=i+1) | |||
//{ | |||
eL[0] = _mm256_unpacklo_epi64(bH[0], bH[1]); | |||
eH[0] = _mm256_unpackhi_epi64(bH[0], bH[1]); | |||
eL[1] = _mm256_unpacklo_epi64(bH[2], bH[3]); | |||
eH[1] = _mm256_unpackhi_epi64(bH[2], bH[3]); | |||
//} | |||
//------------------------------------------------------- | |||
//------------------------------------------------------- | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
cL[i] = _mm256_unpacklo_epi32(tH[2*i], tH[2*i+1]); | |||
cH[i] = _mm256_unpackhi_epi32(tH[2*i], tH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
fL[i] = _mm256_unpacklo_epi64(cL[2*i], cL[2*i+1]); | |||
fH[i] = _mm256_unpackhi_epi64(cL[2*i], cL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
gL[i] = _mm256_unpacklo_epi64(cH[2*i], cH[2*i+1]); | |||
gH[i] = _mm256_unpackhi_epi64(cH[2*i], cH[2*i+1]); | |||
} | |||
//------------------------------------------------------- | |||
M[2] = _mm256_permute2f128_si256(eL[0], eL[1], 0x20); | |||
M[10] = _mm256_permute2f128_si256(eL[0], eL[1], 0x31); | |||
M[3] = _mm256_permute2f128_si256(eH[0], eH[1], 0x20); | |||
M[11] = _mm256_permute2f128_si256(eH[0], eH[1], 0x31); | |||
M[4] = _mm256_permute2f128_si256(fL[0], fL[1], 0x20); | |||
M[12] = _mm256_permute2f128_si256(fL[0], fL[1], 0x31); | |||
M[5] = _mm256_permute2f128_si256(fH[0], fH[1], 0x20); | |||
M[13] = _mm256_permute2f128_si256(fH[0], fH[1], 0x31); | |||
M[6] = _mm256_permute2f128_si256(gL[0], gL[1], 0x20); | |||
M[14] = _mm256_permute2f128_si256(gL[0], gL[1], 0x31); | |||
M[7] = _mm256_permute2f128_si256(gH[0], gH[1], 0x20); | |||
M[15] = _mm256_permute2f128_si256(gH[0], gH[1], 0x31); | |||
} | |||
void transpose1(__m256i *M) | |||
{ | |||
int i; | |||
__m256i tL[8], tH[8]; | |||
__m256i bL[4], bH[4], cL[4], cH[4]; | |||
__m256i dL[2], dH[2], eL[2], eH[2], fL[2], fH[2], gL[2], gH[2]; | |||
for(i=0; i<8; i=i+1) | |||
{ | |||
tL[i] = _mm256_unpacklo_epi16(M[2*i], M[2*i+1]); | |||
tH[i] = _mm256_unpackhi_epi16(M[2*i], M[2*i+1]); | |||
} | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
bL[i] = _mm256_unpacklo_epi32(tL[2*i], tL[2*i+1]); | |||
bH[i] = _mm256_unpackhi_epi32(tL[2*i], tL[2*i+1]); | |||
} | |||
for(i=0; i<4; i=i+1) | |||
{ | |||
cL[i] = _mm256_unpacklo_epi32(tH[2*i], tH[2*i+1]); | |||
cH[i] = _mm256_unpackhi_epi32(tH[2*i], tH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
dL[i] = _mm256_unpacklo_epi64(bL[2*i], bL[2*i+1]); | |||
dH[i] = _mm256_unpackhi_epi64(bL[2*i], bL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
eL[i] = _mm256_unpacklo_epi64(bH[2*i], bH[2*i+1]); | |||
eH[i] = _mm256_unpackhi_epi64(bH[2*i], bH[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
fL[i] = _mm256_unpacklo_epi64(cL[2*i], cL[2*i+1]); | |||
fH[i] = _mm256_unpackhi_epi64(cL[2*i], cL[2*i+1]); | |||
} | |||
for(i=0; i<2; i=i+1) | |||
{ | |||
gL[i] = _mm256_unpacklo_epi64(cH[2*i], cH[2*i+1]); | |||
gH[i] = _mm256_unpackhi_epi64(cH[2*i], cH[2*i+1]); | |||
} | |||
M[0] = _mm256_permute2f128_si256(dL[0], dL[1], 0x20); | |||
M[8] = _mm256_permute2f128_si256(dL[0], dL[1], 0x31); | |||
M[1] = _mm256_permute2f128_si256(dH[0], dH[1], 0x20); | |||
M[9] = _mm256_permute2f128_si256(dH[0], dH[1], 0x31); | |||
M[2] = _mm256_permute2f128_si256(eL[0], eL[1], 0x20); | |||
M[10] = _mm256_permute2f128_si256(eL[0], eL[1], 0x31); | |||
M[3] = _mm256_permute2f128_si256(eH[0], eH[1], 0x20); | |||
M[11] = _mm256_permute2f128_si256(eH[0], eH[1], 0x31); | |||
M[4] = _mm256_permute2f128_si256(fL[0], fL[1], 0x20); | |||
M[12] = _mm256_permute2f128_si256(fL[0], fL[1], 0x31); | |||
M[5] = _mm256_permute2f128_si256(fH[0], fH[1], 0x20); | |||
M[13] = _mm256_permute2f128_si256(fH[0], fH[1], 0x31); | |||
M[6] = _mm256_permute2f128_si256(gL[0], gL[1], 0x20); | |||
M[14] = _mm256_permute2f128_si256(gL[0], gL[1], 0x31); | |||
M[7] = _mm256_permute2f128_si256(gH[0], gH[1], 0x20); | |||
M[15] = _mm256_permute2f128_si256(gH[0], gH[1], 0x31); | |||
} | |||
*/ |
@@ -0,0 +1,753 @@ | |||
//#define SCM_SIZE 16 | |||
//#pragma STDC FP_CONTRACT ON | |||
#include <immintrin.h> | |||
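// mul_add: lane-wise 16-bit a*b + c; results wrap mod 2^16, which is harmless
// since all Saber moduli are powers of two dividing 2^16.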
static inline __m256i mul_add(__m256i a, __m256i b, __m256i c) {
return _mm256_add_epi16(_mm256_mullo_epi16(a, b), c); | |||
} | |||
static void schoolbook_avx_new3_acc(__m256i *a, __m256i *b, __m256i *c_avx) // the first 8 coefficients of a and b are prefetched into registers
// the products are accumulated into c_avx
{ | |||
register __m256i a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7; | |||
register __m256i temp; | |||
a0=a[0]; | |||
a1=a[1]; | |||
a2=a[2]; | |||
a3=a[3]; | |||
a4=a[4]; | |||
a5=a[5]; | |||
a6=a[6]; | |||
a7=a[7]; | |||
b0=b[0]; | |||
b1=b[1]; | |||
b2=b[2]; | |||
b3=b[3]; | |||
b4=b[4]; | |||
b5=b[5]; | |||
b6=b[6]; | |||
b7=b[7]; | |||
// New Unrolled first triangle | |||
//otherwise accumulate | |||
c_avx[0] = mul_add(a0, b0, c_avx[0]); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
temp=mul_add(a1, b0, temp); | |||
c_avx[1] = _mm256_add_epi16(temp, c_avx[1]); | |||
temp = _mm256_mullo_epi16 (a0, b2); | |||
temp = mul_add(a1, b1, temp); | |||
temp=mul_add(a2, b0, temp); | |||
c_avx[2] = _mm256_add_epi16(temp, c_avx[2]); | |||
temp = _mm256_mullo_epi16 (a0, b3); | |||
temp = mul_add(a1, b2, temp); | |||
temp = mul_add(a2, b1, temp); | |||
temp=mul_add(a3, b0, temp); | |||
c_avx[3] = _mm256_add_epi16(temp, c_avx[3]); | |||
temp = _mm256_mullo_epi16 (a0, b4); | |||
temp = mul_add(a1, b3, temp); | |||
temp = mul_add(a3, b1, temp); | |||
temp = mul_add(a4, b0, temp); | |||
temp=mul_add(a2, b2, temp); | |||
c_avx[4] = _mm256_add_epi16(temp, c_avx[4]); | |||
temp = _mm256_mullo_epi16 (a0, b5); | |||
temp = mul_add(a1, b4 , temp); | |||
temp = mul_add(a2, b3, temp); | |||
temp = mul_add(a3, b2, temp); | |||
temp = mul_add( a4, b1, temp); | |||
temp=mul_add(a5, b0, temp); | |||
c_avx[5] = _mm256_add_epi16(temp, c_avx[5]); | |||
temp = _mm256_mullo_epi16 (a0, b6); | |||
temp = mul_add(a1, b5, temp); | |||
temp = mul_add(a5, b1, temp); | |||
temp = mul_add(a6, b0, temp); | |||
temp = mul_add(a2, b4, temp); | |||
temp = mul_add(a3, b3, temp); | |||
temp=mul_add(a4, b2, temp); | |||
c_avx[6] = _mm256_add_epi16(temp, c_avx[6]); | |||
temp = _mm256_mullo_epi16 (a0, b7); | |||
temp = mul_add(a1, b6, temp); | |||
temp = mul_add (a6, b1, temp); | |||
temp = mul_add (a7, b0, temp); | |||
temp = mul_add(a2, b5, temp); | |||
temp = mul_add (a3, b4, temp); | |||
temp = mul_add (a4, b3, temp); | |||
temp=mul_add(a5, b2, temp); | |||
c_avx[7] = _mm256_add_epi16(temp, c_avx[7]); | |||
temp = _mm256_mullo_epi16 (a0, b[8]); | |||
temp = mul_add (a1, b7, temp); | |||
temp = mul_add (a7, b1, temp); | |||
temp = mul_add (a[8], b0, temp); | |||
temp = mul_add (a2, b6,temp); | |||
temp = mul_add(a3, b5, temp); | |||
temp = mul_add (a4, b4,temp); | |||
temp = mul_add (a5, b3, temp); | |||
temp=mul_add(a6, b2, temp); | |||
c_avx[8] = _mm256_add_epi16(temp, c_avx[8]); | |||
temp = _mm256_mullo_epi16 (a0, b[9]); | |||
temp = mul_add (a1, b[8], temp); | |||
temp = mul_add (a[8], b1, temp); | |||
temp = mul_add (a[9], b0, temp); | |||
temp = mul_add (a2, b7, temp); | |||
temp = mul_add (a3, b6, temp); | |||
temp = mul_add (a4, b5, temp); | |||
temp = mul_add (a5, b4, temp); | |||
temp = mul_add (a6, b3, temp); | |||
temp=mul_add(a7, b2, temp); | |||
c_avx[9] = _mm256_add_epi16(temp, c_avx[9]); | |||
temp= _mm256_mullo_epi16 (a0, b[10]); | |||
temp = mul_add (a1, b[9], temp); | |||
temp = mul_add (a[9], b1, temp); | |||
temp = mul_add (a[10], b0, temp); | |||
temp = mul_add (a2, b[8], temp); | |||
temp = mul_add (a3, b7, temp); | |||
temp = mul_add (a4, b6, temp); | |||
temp = mul_add (a5, b5, temp); | |||
temp = mul_add (a6, b4, temp); | |||
temp = mul_add (a7, b3, temp); | |||
temp=mul_add(a[8], b2, temp); | |||
c_avx[10] = _mm256_add_epi16(temp, c_avx[10]); | |||
temp = _mm256_mullo_epi16 (a0, b[11]); | |||
temp = mul_add (a1, b[10], temp ); | |||
temp = mul_add (a[10], b1, temp ); | |||
temp = mul_add (a[11], b0, temp ); | |||
temp = mul_add (a2, b[9], temp ); | |||
temp = mul_add (a3, b[8], temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a[8], b3, temp ); | |||
temp=mul_add(a[9], b2, temp); | |||
c_avx[11] = _mm256_add_epi16(temp, c_avx[11]); | |||
temp = _mm256_mullo_epi16 (a0, b[12]); | |||
temp = mul_add (a1, b[11], temp); | |||
temp = mul_add (a[11], b1, temp); | |||
temp = mul_add (a[12], b0, temp); | |||
temp = mul_add (a2, b[10], temp); | |||
temp = mul_add (a3, b[9], temp); | |||
temp = mul_add (a4, b[8], temp); | |||
temp = mul_add (a5, b7, temp); | |||
temp = mul_add (a6, b6, temp); | |||
temp = mul_add (a7, b5, temp); | |||
temp = mul_add (a[8], b4, temp); | |||
temp = mul_add (a[9], b3, temp); | |||
temp=mul_add(a[10], b2, temp); | |||
c_avx[12] = _mm256_add_epi16(temp, c_avx[12]); | |||
temp = _mm256_mullo_epi16 (a0, b[13]); | |||
temp = mul_add (a1, b[12], temp ); | |||
temp = mul_add (a[12], b1, temp ); | |||
temp = mul_add (a[13], b0, temp ); | |||
temp = mul_add (a2, b[11], temp ); | |||
temp = mul_add (a3, b[10], temp ); | |||
temp = mul_add (a4, b[9], temp ); | |||
temp = mul_add (a5, b[8], temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a[8], b5, temp ); | |||
temp = mul_add (a[9], b4, temp ); | |||
temp = mul_add (a[10], b3, temp ); | |||
temp=mul_add(a[11], b2, temp); | |||
c_avx[13] = _mm256_add_epi16(temp, c_avx[13]); | |||
temp = _mm256_mullo_epi16 (a0, b[14]); | |||
temp = mul_add (a1, b[13], temp ); | |||
temp = mul_add (a[13], b1, temp ); | |||
temp = mul_add (a[14], b0, temp ); | |||
temp = mul_add (a2, b[12], temp ); | |||
temp = mul_add (a3, b[11], temp ); | |||
temp = mul_add (a4, b[10], temp ); | |||
temp = mul_add (a5, b[9], temp ); | |||
temp = mul_add (a6, b[8], temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a[8], b6, temp ); | |||
temp = mul_add (a[9], b5, temp ); | |||
temp = mul_add (a[10], b4, temp ); | |||
temp = mul_add (a[11], b3, temp ); | |||
temp = mul_add (a[12], b2, temp);
c_avx[14] = _mm256_add_epi16(temp, c_avx[14]); | |||
temp = _mm256_mullo_epi16 (a0, b[15]); | |||
temp = mul_add (a1, b[14], temp ); | |||
temp = mul_add (a[14], b1, temp ); | |||
temp = mul_add (a[15], b0, temp ); | |||
temp = mul_add (a2, b[13], temp ); | |||
temp = mul_add (a3, b[12], temp ); | |||
temp = mul_add (a4, b[11], temp ); | |||
temp = mul_add (a5, b[10], temp ); | |||
temp = mul_add (a6, b[9], temp ); | |||
temp = mul_add (a7, b[8], temp ); | |||
temp = mul_add (a[8], b7, temp ); | |||
temp = mul_add (a[9], b6, temp ); | |||
temp = mul_add (a[10], b5, temp ); | |||
temp = mul_add (a[11], b4, temp ); | |||
temp = mul_add (a[12], b3, temp ); | |||
temp = mul_add (a[13], b2, temp);
c_avx[15] = _mm256_add_epi16(temp, c_avx[15]); | |||
// unrolled second triangle | |||
a0 = a[14];
a1 = a[15];
a2 = a[13];
a3 = a[12];
a4 = a[11];
a5 = a[10];
a6 = a[9];
a7 = a[8];
b0 = b[14];
b1 = b[15];
b2 = b[13];
b3 = b[12];
b4 = b[11];
b5 = b[10];
b6 = b[9];
b7 = b[8];
temp = _mm256_mullo_epi16 (a[1], b1); | |||
temp = mul_add (a[2], b0, temp ); | |||
temp = mul_add (a[3], b2, temp ); | |||
temp = mul_add (a[4], b3, temp ); | |||
temp = mul_add (a[5], b4, temp ); | |||
temp = mul_add (a[6], b5, temp ); | |||
temp = mul_add (a[7], b6, temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a6, b[7], temp ); | |||
temp = mul_add (a5, b[6], temp ); | |||
temp = mul_add (a4, b[5], temp ); | |||
temp = mul_add (a3, b[4], temp ); | |||
temp = mul_add (a2, b[3], temp ); | |||
temp = mul_add (a0, b[2], temp ); | |||
temp = mul_add (a1, b[1], temp);
c_avx[16] = _mm256_add_epi16(temp, c_avx[16]); | |||
temp = _mm256_mullo_epi16 (a[2], b1); | |||
temp = mul_add (a[3], b0, temp ); | |||
temp = mul_add (a[4], b2, temp ); | |||
temp = mul_add (a[5], b3, temp ); | |||
temp = mul_add (a[6], b4, temp ); | |||
temp = mul_add (a[7], b5, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a5, b[7], temp ); | |||
temp = mul_add (a4, b[6], temp ); | |||
temp = mul_add (a3, b[5], temp ); | |||
temp = mul_add (a2, b[4], temp ); | |||
temp = mul_add (a0, b[3], temp ); | |||
temp = mul_add (a1, b[2], temp);
c_avx[17] = _mm256_add_epi16(temp, c_avx[17]); | |||
temp = _mm256_mullo_epi16 (a[3], b1); | |||
temp = mul_add (a[4], b0, temp ); | |||
temp = mul_add (a[5], b2, temp ); | |||
temp = mul_add (a[6], b3, temp ); | |||
temp = mul_add (a[7], b4, temp ); | |||
temp = mul_add (a7, b5, temp ); | |||
temp = mul_add (a6, b6, temp ); | |||
temp = mul_add (a5, b7, temp ); | |||
temp = mul_add (a4, b[7], temp ); | |||
temp = mul_add (a3, b[6], temp ); | |||
temp = mul_add (a2, b[5], temp ); | |||
temp = mul_add (a0, b[4], temp ); | |||
temp = mul_add (a1, b[3], temp);
c_avx[18] = _mm256_add_epi16(temp, c_avx[18]); | |||
temp = _mm256_mullo_epi16 (a[4], b1); | |||
temp = mul_add (a[5], b0, temp ); | |||
temp = mul_add (a[6], b2, temp ); | |||
temp = mul_add (a[7], b3, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a3, b[7], temp ); | |||
temp = mul_add (a2, b[6], temp ); | |||
temp = mul_add (a0, b[5], temp ); | |||
temp = mul_add (a1, b[4], temp);
c_avx[19] = _mm256_add_epi16(temp, c_avx[19]); | |||
temp = _mm256_mullo_epi16 (a[5], b1); | |||
temp = mul_add (a[6], b0, temp ); | |||
temp = mul_add (a[7], b2, temp ); | |||
temp = mul_add (a7, b3, temp ); | |||
temp = mul_add (a6, b4, temp ); | |||
temp = mul_add (a5, b5, temp ); | |||
temp = mul_add (a4, b6, temp ); | |||
temp = mul_add (a3, b7, temp ); | |||
temp = mul_add (a2, b[7], temp ); | |||
temp = mul_add (a0, b[6], temp ); | |||
temp = mul_add (a1, b[5], temp);
c_avx[20] = _mm256_add_epi16(temp, c_avx[20]); | |||
temp = _mm256_mullo_epi16 (a[6], b1); | |||
temp = mul_add (a[7], b0, temp ); | |||
temp = mul_add (a7, b2, temp ); | |||
temp = mul_add (a6, b3, temp ); | |||
temp = mul_add (a5, b4, temp ); | |||
temp = mul_add (a4, b5, temp ); | |||
temp = mul_add (a3, b6, temp ); | |||
temp = mul_add (a2, b7, temp ); | |||
temp = mul_add (a0, b[7], temp ); | |||
temp = mul_add (a1, b[6], temp);
c_avx[21] = _mm256_add_epi16(temp, c_avx[21]); | |||
temp = _mm256_mullo_epi16 (a[7], b1); | |||
temp = mul_add (a7, b0, temp ); | |||
temp = mul_add (a6, b2, temp ); | |||
temp = mul_add (a5, b3, temp ); | |||
temp = mul_add (a4, b4, temp ); | |||
temp = mul_add (a3, b5, temp ); | |||
temp = mul_add (a2, b6, temp ); | |||
temp = mul_add (a0, b7, temp ); | |||
temp = mul_add (a1, b[7], temp);
c_avx[22] = _mm256_add_epi16(temp, c_avx[22]); | |||
temp = _mm256_mullo_epi16 (a7, b1); | |||
temp = mul_add (a6, b0, temp ); | |||
temp = mul_add (a5, b2, temp ); | |||
temp = mul_add (a4, b3, temp ); | |||
temp = mul_add (a3, b4, temp ); | |||
temp = mul_add (a2, b5, temp ); | |||
temp = mul_add (a0, b6, temp ); | |||
temp = mul_add (a1, b7, temp);
c_avx[23] = _mm256_add_epi16(temp, c_avx[23]); | |||
temp = _mm256_mullo_epi16 (a6, b1); | |||
temp = mul_add (a5, b0, temp ); | |||
temp = mul_add (a4, b2, temp ); | |||
temp = mul_add (a3, b3, temp ); | |||
temp = mul_add (a2, b4, temp ); | |||
temp = mul_add (a0, b5, temp ); | |||
temp = mul_add (a1, b6, temp);
c_avx[24] = _mm256_add_epi16(temp, c_avx[24]); | |||
temp = _mm256_mullo_epi16 (a5, b1); | |||
temp = mul_add (a4, b0, temp ); | |||
temp = mul_add (a3, b2, temp ); | |||
temp = mul_add (a2, b3, temp ); | |||
temp = mul_add (a0, b4, temp ); | |||
temp = mul_add (a1, b5, temp);
c_avx[25] = _mm256_add_epi16(temp, c_avx[25]); | |||
temp = _mm256_mullo_epi16 (a4, b1); | |||
temp = mul_add (a3, b0, temp ); | |||
temp = mul_add (a2, b2, temp ); | |||
temp = mul_add (a0, b3, temp ); | |||
temp = mul_add (a1, b4, temp);
c_avx[26] = _mm256_add_epi16(temp, c_avx[26]); | |||
temp = _mm256_mullo_epi16 (a3, b1); | |||
temp = mul_add (a2, b0, temp ); | |||
temp = mul_add (a0, b2, temp ); | |||
temp = mul_add (a1, b3, temp);
c_avx[27] = _mm256_add_epi16(temp, c_avx[27]); | |||
temp = _mm256_mullo_epi16 (a2, b1); | |||
temp = mul_add (a0, b0, temp ); | |||
temp = mul_add (a1, b2, temp);
c_avx[28] = _mm256_add_epi16(temp, c_avx[28]); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
temp = mul_add (a1, b0, temp);
c_avx[29] = _mm256_add_epi16(temp, c_avx[29]); | |||
c_avx[30] = mul_add(a1, b1, c_avx[30]); | |||
c_avx[2*SCM_SIZE-1] = _mm256_set_epi64x(0, 0, 0, 0); | |||
} | |||
static void schoolbook_avx_new2(__m256i *a, __m256i *b, __m256i *c_avx) // 8 coefficients of a and b have been prefetched
// unlike the routine above, the results in c_avx are written directly rather than accumulated
{
__m256i a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7; | |||
__m256i temp; | |||
a0 = a[0];
a1 = a[1];
a2 = a[2];
a3 = a[3];
a4 = a[4];
a5 = a[5];
a6 = a[6];
a7 = a[7];
b0 = b[0];
b1 = b[1];
b2 = b[2];
b3 = b[3];
b4 = b[4];
b5 = b[5];
b6 = b[6];
b7 = b[7];
// New Unrolled first triangle | |||
c_avx[0] = _mm256_mullo_epi16 (a0, b0); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
c_avx[1] = mul_add(a1, b0, temp);
temp = _mm256_mullo_epi16 (a0, b2); | |||
temp = mul_add(a1, b1, temp); | |||
c_avx[2] = mul_add(a2, b0, temp);
temp = _mm256_mullo_epi16 (a0, b3); | |||
temp = mul_add(a1, b2, temp); | |||
temp = mul_add(a2, b1, temp); | |||
c_avx[3] = mul_add(a3, b0, temp);
temp = _mm256_mullo_epi16 (a0, b4); | |||
temp = mul_add(a1, b3, temp); | |||
temp = mul_add(a3, b1, temp); | |||
temp = mul_add(a4, b0, temp); | |||
c_avx[4] = mul_add(a2, b2, temp);
temp = _mm256_mullo_epi16 (a0, b5); | |||
temp = mul_add(a1, b4, temp);
temp = mul_add(a2, b3, temp); | |||
temp = mul_add(a3, b2, temp); | |||
temp = mul_add(a4, b1, temp);
c_avx[5] = mul_add(a5, b0, temp); | |||
temp = _mm256_mullo_epi16 (a0, b6); | |||
temp = mul_add(a1, b5, temp); | |||
temp = mul_add(a5, b1, temp); | |||
temp = mul_add(a6, b0, temp); | |||
temp = mul_add(a2, b4, temp); | |||
temp = mul_add(a3, b3, temp); | |||
c_avx[6] = mul_add(a4, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b7); | |||
temp = mul_add(a1, b6, temp); | |||
temp = mul_add (a6, b1, temp); | |||
temp = mul_add (a7, b0, temp); | |||
temp = mul_add(a2, b5, temp); | |||
temp = mul_add (a3, b4, temp); | |||
temp = mul_add (a4, b3, temp); | |||
c_avx[7] = mul_add (a5, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[8]); | |||
temp = mul_add (a1, b7, temp); | |||
temp = mul_add (a7, b1, temp); | |||
temp = mul_add (a[8], b0, temp); | |||
temp = mul_add (a2, b6, temp);
temp = mul_add (a3, b5, temp);
temp = mul_add (a4, b4, temp);
temp = mul_add (a5, b3, temp); | |||
c_avx[8] = mul_add (a6, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[9]); | |||
temp = mul_add (a1, b[8], temp); | |||
temp = mul_add (a[8], b1, temp); | |||
temp = mul_add (a[9], b0, temp); | |||
temp = mul_add (a2, b7, temp); | |||
temp = mul_add (a3, b6, temp); | |||
temp = mul_add (a4, b5, temp); | |||
temp = mul_add (a5, b4, temp); | |||
temp = mul_add (a6, b3, temp); | |||
c_avx[9] = mul_add (a7, b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[10]);
temp = mul_add (a1, b[9], temp); | |||
temp = mul_add (a[9], b1, temp); | |||
temp = mul_add (a[10], b0, temp); | |||
temp = mul_add (a2, b[8], temp); | |||
temp = mul_add (a3, b7, temp); | |||
temp = mul_add (a4, b6, temp); | |||
temp = mul_add (a5, b5, temp); | |||
temp = mul_add (a6, b4, temp); | |||
temp = mul_add (a7, b3, temp); | |||
c_avx[10] = mul_add (a[8], b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[11]); | |||
temp = mul_add (a1, b[10], temp ); | |||
temp = mul_add (a[10], b1, temp ); | |||
temp = mul_add (a[11], b0, temp ); | |||
temp = mul_add (a2, b[9], temp ); | |||
temp = mul_add (a3, b[8], temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a[8], b3, temp ); | |||
c_avx[11] = mul_add (a[9], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[12]); | |||
temp = mul_add (a1, b[11], temp); | |||
temp = mul_add (a[11], b1, temp); | |||
temp = mul_add (a[12], b0, temp); | |||
temp = mul_add (a2, b[10], temp); | |||
temp = mul_add (a3, b[9], temp); | |||
temp = mul_add (a4, b[8], temp); | |||
temp = mul_add (a5, b7, temp); | |||
temp = mul_add (a6, b6, temp); | |||
temp = mul_add (a7, b5, temp); | |||
temp = mul_add (a[8], b4, temp); | |||
temp = mul_add (a[9], b3, temp); | |||
c_avx[12] = mul_add (a[10], b2, temp); | |||
temp = _mm256_mullo_epi16 (a0, b[13]); | |||
temp = mul_add (a1, b[12], temp ); | |||
temp = mul_add (a[12], b1, temp ); | |||
temp = mul_add (a[13], b0, temp ); | |||
temp = mul_add (a2, b[11], temp ); | |||
temp = mul_add (a3, b[10], temp ); | |||
temp = mul_add (a4, b[9], temp ); | |||
temp = mul_add (a5, b[8], temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a[8], b5, temp ); | |||
temp = mul_add (a[9], b4, temp ); | |||
temp = mul_add (a[10], b3, temp ); | |||
c_avx[13] = mul_add (a[11], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[14]); | |||
temp = mul_add (a1, b[13], temp ); | |||
temp = mul_add (a[13], b1, temp ); | |||
temp = mul_add (a[14], b0, temp ); | |||
temp = mul_add (a2, b[12], temp ); | |||
temp = mul_add (a3, b[11], temp ); | |||
temp = mul_add (a4, b[10], temp ); | |||
temp = mul_add (a5, b[9], temp ); | |||
temp = mul_add (a6, b[8], temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a[8], b6, temp ); | |||
temp = mul_add (a[9], b5, temp ); | |||
temp = mul_add (a[10], b4, temp ); | |||
temp = mul_add (a[11], b3, temp ); | |||
c_avx[14] = mul_add (a[12], b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b[15]); | |||
temp = mul_add (a1, b[14], temp ); | |||
temp = mul_add (a[14], b1, temp ); | |||
temp = mul_add (a[15], b0, temp ); | |||
temp = mul_add (a2, b[13], temp ); | |||
temp = mul_add (a3, b[12], temp ); | |||
temp = mul_add (a4, b[11], temp ); | |||
temp = mul_add (a5, b[10], temp ); | |||
temp = mul_add (a6, b[9], temp ); | |||
temp = mul_add (a7, b[8], temp ); | |||
temp = mul_add (a[8], b7, temp ); | |||
temp = mul_add (a[9], b6, temp ); | |||
temp = mul_add (a[10], b5, temp ); | |||
temp = mul_add (a[11], b4, temp ); | |||
temp = mul_add (a[12], b3, temp ); | |||
c_avx[15] = mul_add (a[13], b2, temp ); | |||
// unrolled second triangle | |||
a0=a[14]; | |||
a1=a[15]; | |||
a2=a[13]; | |||
a3=a[12]; | |||
a4=a[11]; | |||
a5=a[10]; | |||
a6=a[9]; | |||
a7=a[8]; | |||
b0=b[14]; | |||
b1=b[15]; | |||
b2=b[13]; | |||
b3=b[12]; | |||
b4=b[11]; | |||
b5=b[10]; | |||
b6=b[9]; | |||
b7=b[8]; | |||
temp = _mm256_mullo_epi16 (a[1], b1); | |||
temp = mul_add (a[2], b0, temp ); | |||
temp = mul_add (a[3], b2, temp ); | |||
temp = mul_add (a[4], b3, temp ); | |||
temp = mul_add (a[5], b4, temp ); | |||
temp = mul_add (a[6], b5, temp ); | |||
temp = mul_add (a[7], b6, temp ); | |||
temp = mul_add (a7, b7, temp ); | |||
temp = mul_add (a6, b[7], temp ); | |||
temp = mul_add (a5, b[6], temp ); | |||
temp = mul_add (a4, b[5], temp ); | |||
temp = mul_add (a3, b[4], temp ); | |||
temp = mul_add (a2, b[3], temp ); | |||
temp = mul_add (a0, b[2], temp ); | |||
c_avx[16] = mul_add (a1, b[1], temp ); | |||
temp = _mm256_mullo_epi16 (a[2], b1); | |||
temp = mul_add (a[3], b0, temp ); | |||
temp = mul_add (a[4], b2, temp ); | |||
temp = mul_add (a[5], b3, temp ); | |||
temp = mul_add (a[6], b4, temp ); | |||
temp = mul_add (a[7], b5, temp ); | |||
temp = mul_add (a7, b6, temp ); | |||
temp = mul_add (a6, b7, temp ); | |||
temp = mul_add (a5, b[7], temp ); | |||
temp = mul_add (a4, b[6], temp ); | |||
temp = mul_add (a3, b[5], temp ); | |||
temp = mul_add (a2, b[4], temp ); | |||
temp = mul_add (a0, b[3], temp ); | |||
c_avx[17] = mul_add (a1, b[2], temp ); | |||
temp = _mm256_mullo_epi16 (a[3], b1); | |||
temp = mul_add (a[4], b0, temp ); | |||
temp = mul_add (a[5], b2, temp ); | |||
temp = mul_add (a[6], b3, temp ); | |||
temp = mul_add (a[7], b4, temp ); | |||
temp = mul_add (a7, b5, temp ); | |||
temp = mul_add (a6, b6, temp ); | |||
temp = mul_add (a5, b7, temp ); | |||
temp = mul_add (a4, b[7], temp ); | |||
temp = mul_add (a3, b[6], temp ); | |||
temp = mul_add (a2, b[5], temp ); | |||
temp = mul_add (a0, b[4], temp ); | |||
c_avx[18] = mul_add (a1, b[3], temp ); | |||
temp = _mm256_mullo_epi16 (a[4], b1); | |||
temp = mul_add (a[5], b0, temp ); | |||
temp = mul_add (a[6], b2, temp ); | |||
temp = mul_add (a[7], b3, temp ); | |||
temp = mul_add (a7, b4, temp ); | |||
temp = mul_add (a6, b5, temp ); | |||
temp = mul_add (a5, b6, temp ); | |||
temp = mul_add (a4, b7, temp ); | |||
temp = mul_add (a3, b[7], temp ); | |||
temp = mul_add (a2, b[6], temp ); | |||
temp = mul_add (a0, b[5], temp ); | |||
c_avx[19] = mul_add (a1, b[4], temp ); | |||
temp = _mm256_mullo_epi16 (a[5], b1); | |||
temp = mul_add (a[6], b0, temp ); | |||
temp = mul_add (a[7], b2, temp ); | |||
temp = mul_add (a7, b3, temp ); | |||
temp = mul_add (a6, b4, temp ); | |||
temp = mul_add (a5, b5, temp ); | |||
temp = mul_add (a4, b6, temp ); | |||
temp = mul_add (a3, b7, temp ); | |||
temp = mul_add (a2, b[7], temp ); | |||
temp = mul_add (a0, b[6], temp ); | |||
c_avx[20] = mul_add (a1, b[5], temp ); | |||
temp = _mm256_mullo_epi16 (a[6], b1); | |||
temp = mul_add (a[7], b0, temp ); | |||
temp = mul_add (a7, b2, temp ); | |||
temp = mul_add (a6, b3, temp ); | |||
temp = mul_add (a5, b4, temp ); | |||
temp = mul_add (a4, b5, temp ); | |||
temp = mul_add (a3, b6, temp ); | |||
temp = mul_add (a2, b7, temp ); | |||
temp = mul_add (a0, b[7], temp ); | |||
c_avx[21] = mul_add (a1, b[6], temp ); | |||
temp = _mm256_mullo_epi16 (a[7], b1); | |||
temp = mul_add (a7, b0, temp ); | |||
temp = mul_add (a6, b2, temp ); | |||
temp = mul_add (a5, b3, temp ); | |||
temp = mul_add (a4, b4, temp ); | |||
temp = mul_add (a3, b5, temp ); | |||
temp = mul_add (a2, b6, temp ); | |||
temp = mul_add (a0, b7, temp ); | |||
c_avx[22] = mul_add (a1, b[7], temp ); | |||
temp = _mm256_mullo_epi16 (a7, b1); | |||
temp = mul_add (a6, b0, temp ); | |||
temp = mul_add (a5, b2, temp ); | |||
temp = mul_add (a4, b3, temp ); | |||
temp = mul_add (a3, b4, temp ); | |||
temp = mul_add (a2, b5, temp ); | |||
temp = mul_add (a0, b6, temp ); | |||
c_avx[23] = mul_add (a1, b7, temp ); | |||
temp = _mm256_mullo_epi16 (a6, b1); | |||
temp = mul_add (a5, b0, temp ); | |||
temp = mul_add (a4, b2, temp ); | |||
temp = mul_add (a3, b3, temp ); | |||
temp = mul_add (a2, b4, temp ); | |||
temp = mul_add (a0, b5, temp ); | |||
c_avx[24] = mul_add (a1, b6, temp ); | |||
temp = _mm256_mullo_epi16 (a5, b1); | |||
temp = mul_add (a4, b0, temp ); | |||
temp = mul_add (a3, b2, temp ); | |||
temp = mul_add (a2, b3, temp ); | |||
temp = mul_add (a0, b4, temp ); | |||
c_avx[25] = mul_add (a1, b5, temp ); | |||
temp = _mm256_mullo_epi16 (a4, b1); | |||
temp = mul_add (a3, b0, temp ); | |||
temp = mul_add (a2, b2, temp ); | |||
temp = mul_add (a0, b3, temp ); | |||
c_avx[26] = mul_add (a1, b4, temp ); | |||
temp = _mm256_mullo_epi16 (a3, b1); | |||
temp = mul_add (a2, b0, temp ); | |||
temp = mul_add (a0, b2, temp ); | |||
c_avx[27] = mul_add (a1, b3, temp ); | |||
temp = _mm256_mullo_epi16 (a2, b1); | |||
temp = mul_add (a0, b0, temp ); | |||
c_avx[28] = mul_add (a1, b2, temp ); | |||
temp = _mm256_mullo_epi16 (a0, b1); | |||
c_avx[29] = mul_add (a1, b0, temp); | |||
c_avx[30] = _mm256_mullo_epi16 (a1, b1); | |||
c_avx[2*SCM_SIZE-1] = _mm256_set_epi64x(0, 0, 0, 0); | |||
} |
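Editor's note: the two unrolled routines above compute, lane by lane, a plain 16-coefficient schoolbook product (SCM_SIZE = 16), writing outputs c_avx[0..30] and zeroing c_avx[31]; the first routine accumulates into c_avx, while schoolbook_avx_new2 overwrites it. The scalar model below only illustrates what a single 16-bit lane of the overwriting variant computes; the name schoolbook_ref is hypothetical and not part of this patch.

#include <stdint.h>

#define SCM_SIZE_REF 16 /* matches SCM_SIZE = 16 implied by c_avx[2*SCM_SIZE-1] above */

/* Scalar reference for one 16-bit lane of schoolbook_avx_new2. */
void schoolbook_ref(const uint16_t a[SCM_SIZE_REF], const uint16_t b[SCM_SIZE_REF], uint16_t c[2 * SCM_SIZE_REF]) {
    int i, j;
    for (i = 0; i < 2 * SCM_SIZE_REF; i++) {
        c[i] = 0; /* c[31] stays zero, like the final _mm256_set_epi64x(0, 0, 0, 0) */
    }
    for (i = 0; i < SCM_SIZE_REF; i++) {
        for (j = 0; j < SCM_SIZE_REF; j++) {
            /* arithmetic wraps mod 2^16, as in the _mm256_mullo_epi16 / _mm256_add_epi16 lanes */
            c[i + j] = (uint16_t) (c[i + j] + (uint32_t) a[i] * b[j]);
        }
    }
}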
@@ -0,0 +1,35 @@ | |||
#include "verify.h" | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
"CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by: Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint,
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien Stehlé
----------------------------------------------------*/ | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
uint8_t PQCLEAN_SABER_AVX2_verify(const uint8_t *a, const uint8_t *b, size_t len) { | |||
uint64_t r; | |||
size_t i; | |||
r = 0; | |||
for (i = 0; i < len; i++) { | |||
r |= a[i] ^ b[i]; | |||
} | |||
r = (~r + 1); // two's complement negation: the top bit of r is set iff r != 0
r >>= 63;     // so r ends up as 1 for non-equal inputs and 0 for equal inputs
return (uint8_t) r; | |||
} | |||
/* b = 1 means mov, b = 0 means don't mov */
void PQCLEAN_SABER_AVX2_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b) { | |||
size_t i; | |||
b = -b; // b = 1 gives the mask 0xFF, b = 0 gives 0x00
for (i = 0; i < len; i++) { | |||
r[i] ^= b & (x[i] ^ r[i]); | |||
} | |||
} |
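Editor's note: verify and cmov are the constant-time primitives used by the CCA decapsulation in kem.c, which is not shown in this hunk. The sketch below shows the usual rejection pattern they support; the function name, buffer names and lengths are placeholders, not code from this patch.

#include <stddef.h>
#include <stdint.h>
#include "verify.h"

/* Hypothetical helper: overwrite the pre-key with the secret value z when the
 * re-encrypted ciphertext ct_cmp does not match the received ciphertext ct. */
void reject_on_mismatch(uint8_t *prekey, const uint8_t *z, size_t key_len,
                        const uint8_t *ct, const uint8_t *ct_cmp, size_t ct_len) {
    uint8_t fail = PQCLEAN_SABER_AVX2_verify(ct, ct_cmp, ct_len); /* 0 = equal, 1 = different */
    PQCLEAN_SABER_AVX2_cmov(prekey, z, key_len, fail);            /* copies only if fail == 1 */
}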
@@ -0,0 +1,22 @@ | |||
#ifndef VERIFY_H | |||
#define VERIFY_H | |||
/*------------------------------------------------- | |||
This file has been adapted from the implementation | |||
(available at https://github.com/pq-crystals/kyber) of | |||
"CRYSTALS – Kyber: a CCA-secure module-lattice-based KEM" | |||
by: Joppe Bos, Leo Ducas, Eike Kiltz, Tancrede Lepoint,
Vadim Lyubashevsky, John M. Schanck, Peter Schwabe & Damien Stehlé
----------------------------------------------------*/ | |||
#include <stddef.h> | |||
#include <stdint.h> | |||
/* returns 0 for equal strings, 1 for non-equal strings */ | |||
uint8_t PQCLEAN_SABER_AVX2_verify(const uint8_t *a, const uint8_t *b, size_t len); | |||
/* b = 1 means mov, b = 0 means don't mov */
void PQCLEAN_SABER_AVX2_cmov(uint8_t *r, const uint8_t *x, size_t len, uint8_t b); | |||
#endif |
@@ -1,8 +1 @@ | |||
SABER_v1.1 | |||
Public domain | |||
Authors: Jan-Pieter D'Anvers, Angshuman Karmakar, Sujoy Sinha Roy, | |||
Frederik Vercauteren | |||
Public Domain |
@@ -1,10 +1,10 @@ | |||
# This Makefile can be used with GNU Make or BSD Make | |||
LIB=libsaber_clean.a | |||
HEADERS=api.h cbd.h poly.h poly_mul.h SABER_indcpa.h SABER_params.h verify.h pack_unpack.h | |||
HEADERS=api.h cbd.h pack_unpack.h poly.h poly_mul.h SABER_indcpa.h SABER_params.h verify.h | |||
OBJECTS=cbd.o kem.o pack_unpack.o poly.o poly_mul.o SABER_indcpa.o verify.o | |||
CFLAGS=-O3 -Wall -Wextra -Wpedantic -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) | |||
CFLAGS=-O3 -Wall -Wextra -Wpedantic -Wvla -Werror -Wredundant-decls -Wmissing-prototypes -std=c99 -I../../../common $(EXTRAFLAGS) | |||
all: $(LIB) | |||
@@ -3,296 +3,90 @@ | |||
#include "fips202.h" | |||
#include "pack_unpack.h" | |||
#include "poly.h" | |||
#include "poly_mul.h" | |||
#include "randombytes.h" | |||
#include <stdint.h> | |||
#include <string.h> | |||
#define h1 (1 << (SABER_EQ - SABER_EP - 1)) | |||
#define h2 ((1 << (SABER_EP - 2)) - (1 << (SABER_EP - SABER_ET - 1)) + (1 << (SABER_EQ - SABER_EP - 1))) | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]) { | |||
uint16_t A[SABER_L][SABER_L][SABER_N]; | |||
uint16_t s[SABER_L][SABER_N]; | |||
uint16_t b[SABER_L][SABER_N] = {0}; | |||
/*----------------------------------------------------------------------------------- | |||
This routine generates a=[Matrix K x K] of 256-coefficient polynomials | |||
uint8_t seed_A[SABER_SEEDBYTES]; | |||
uint8_t seed_s[SABER_NOISE_SEEDBYTES]; | |||
int i, j; | |||
#define h1 4 //2^(EQ-EP-1) | |||
randombytes(seed_A, SABER_SEEDBYTES); | |||
shake128(seed_A, SABER_SEEDBYTES, seed_A, SABER_SEEDBYTES); // hash the seed so that the system RNG state is not revealed
randombytes(seed_s, SABER_NOISE_SEEDBYTES); | |||
#define h2 ( (1<<(SABER_EP-2)) - (1<<(SABER_EP-SABER_ET-1)) + (1<<(SABER_EQ-SABER_EP-1)) ) | |||
PQCLEAN_SABER_CLEAN_GenMatrix(A, seed_A); | |||
PQCLEAN_SABER_CLEAN_GenSecret(s, seed_s); | |||
PQCLEAN_SABER_CLEAN_MatrixVectorMul(b, (const uint16_t (*)[SABER_L][SABER_N])A, (const uint16_t (*)[SABER_N])s, 1); | |||
static void InnerProd(uint16_t pkcl[SABER_K][SABER_N], uint16_t skpv[SABER_K][SABER_N], uint16_t mod, uint16_t res[SABER_N]); | |||
static void MatrixVectorMul(polyvec *a, uint16_t skpv[SABER_K][SABER_N], uint16_t res[SABER_K][SABER_N], uint16_t mod, int16_t transpose); | |||
static void POL2MSG(const uint16_t *message_dec_unpacked, unsigned char *message_dec); | |||
static void GenMatrix(polyvec *a, const unsigned char *seed) { | |||
unsigned char buf[SABER_K * SABER_K * (13 * SABER_N / 8)]; | |||
uint16_t temp_ar[SABER_N]; | |||
int i, j, k; | |||
uint16_t mod = (SABER_Q - 1); | |||
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES); | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_SABER_CLEAN_BS2POL(buf + (i * SABER_K + j) * (13 * SABER_N / 8), temp_ar); | |||
for (k = 0; k < SABER_N; k++) { | |||
a[i].vec[j].coeffs[k] = temp_ar[k] & mod;
} | |||
} | |||
} | |||
} | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_keypair(unsigned char *pk, unsigned char *sk) { | |||
polyvec a[SABER_K]; | |||
uint16_t skpv[SABER_K][SABER_N]; | |||
unsigned char seed[SABER_SEEDBYTES]; | |||
unsigned char noiseseed[SABER_COINBYTES]; | |||
int32_t i, j; | |||
uint16_t mod_q = SABER_Q - 1; | |||
uint16_t res[SABER_K][SABER_N]; | |||
randombytes(seed, SABER_SEEDBYTES); | |||
// hash the seed so that the system RNG state is not revealed
shake128(seed, SABER_SEEDBYTES, seed, SABER_SEEDBYTES); | |||
randombytes(noiseseed, SABER_COINBYTES); | |||
GenMatrix(a, seed); //sample matrix A | |||
// generate secret from constant-time binomial distribution | |||
PQCLEAN_SABER_CLEAN_GenSecret(skpv, noiseseed); | |||
// do the matrix vector multiplication and rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = 0; | |||
b[i][j] = (b[i][j] + h1) >> (SABER_EQ - SABER_EP); | |||
} | |||
} | |||
MatrixVectorMul(a, skpv, res, SABER_Q - 1, 1); | |||
// now rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
// shift right 3 bits | |||
res[i][j] = (res[i][j] + h1) & (mod_q); | |||
res[i][j] = (res[i][j] >> (SABER_EQ - SABER_EP)); | |||
} | |||
} | |||
// unload and pack sk=3 x (256 coefficients of 14 bits) | |||
PQCLEAN_SABER_CLEAN_POLVEC2BS(sk, skpv, SABER_Q); | |||
// unload and pack pk=256 bits seed and 3 x (256 coefficients of 11 bits) | |||
// load the public-key coefficients | |||
PQCLEAN_SABER_CLEAN_POLVEC2BS(pk, res, SABER_P); | |||
// now load the seedbytes in PK. Easy since seed bytes are kept in byte format. | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { | |||
pk[SABER_POLYVECCOMPRESSEDBYTES + i] = seed[i]; | |||
} | |||
PQCLEAN_SABER_CLEAN_POLVECq2BS(sk, (const uint16_t (*)[SABER_N])s); | |||
PQCLEAN_SABER_CLEAN_POLVECp2BS(pk, (const uint16_t (*)[SABER_N])b); | |||
memcpy(pk + SABER_POLYVECCOMPRESSEDBYTES, seed_A, sizeof(seed_A)); | |||
} | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_enc(const unsigned char *message_received, unsigned char *noiseseed, const unsigned char *pk, unsigned char *ciphertext) { | |||
uint32_t i, j, k; | |||
polyvec a[SABER_K]; | |||
unsigned char seed[SABER_SEEDBYTES]; | |||
// public key received by the client
uint16_t pkcl[SABER_K][SABER_N]; | |||
uint16_t skpv1[SABER_K][SABER_N]; | |||
uint16_t message[SABER_KEYBYTES * 8]; | |||
uint16_t res[SABER_K][SABER_N]; | |||
uint16_t mod_p = SABER_P - 1; | |||
uint16_t mod_q = SABER_Q - 1; | |||
uint16_t vprime[SABER_N]; | |||
unsigned char msk_c[SABER_SCALEBYTES_KEM]; | |||
// extract the seedbytes from Public Key. | |||
for (i = 0; i < SABER_SEEDBYTES; i++) { | |||
seed[i] = pk[ SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
GenMatrix(a, seed); | |||
// generate secret from constant-time binomial distribution | |||
PQCLEAN_SABER_CLEAN_GenSecret(skpv1, noiseseed); | |||
// matrix-vector multiplication and rounding | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = 0; | |||
} | |||
} | |||
MatrixVectorMul(a, skpv1, res, SABER_Q - 1, 0); | |||
// now rounding | |||
//shift right 3 bits | |||
for (i = 0; i < SABER_K; i++) { | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t seed_sp[SABER_NOISE_SEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]) { | |||
uint16_t A[SABER_L][SABER_L][SABER_N]; | |||
uint16_t sp[SABER_L][SABER_N]; | |||
uint16_t bp[SABER_L][SABER_N] = {0}; | |||
uint16_t vp[SABER_N] = {0}; | |||
uint16_t mp[SABER_N]; | |||
uint16_t b[SABER_L][SABER_N]; | |||
int i, j; | |||
const uint8_t *seed_A = pk + SABER_POLYVECCOMPRESSEDBYTES; | |||
PQCLEAN_SABER_CLEAN_GenMatrix(A, seed_A); | |||
PQCLEAN_SABER_CLEAN_GenSecret(sp, seed_sp); | |||
PQCLEAN_SABER_CLEAN_MatrixVectorMul(bp, (const uint16_t (*)[SABER_L][SABER_N])A, (const uint16_t (*)[SABER_N])sp, 0); | |||
for (i = 0; i < SABER_L; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
res[i][j] = ( res[i][j] + h1 ) & mod_q; | |||
res[i][j] = (res[i][j] >> (SABER_EQ - SABER_EP) ); | |||
bp[i][j] = (bp[i][j] + h1) >> (SABER_EQ - SABER_EP); | |||
} | |||
} | |||
PQCLEAN_SABER_CLEAN_POLVEC2BS(ciphertext, res, SABER_P); | |||
PQCLEAN_SABER_CLEAN_POLVECp2BS(ciphertext, (const uint16_t (*)[SABER_N])bp); | |||
PQCLEAN_SABER_CLEAN_BS2POLVECp(b, pk); | |||
PQCLEAN_SABER_CLEAN_InnerProd(vp, (const uint16_t (*)[SABER_N])b, (const uint16_t (*)[SABER_N])sp); | |||
// ************client matrix-vector multiplication ends************ | |||
PQCLEAN_SABER_CLEAN_BS2POLmsg(mp, m); | |||
// now calculate the v' | |||
// unpack the public_key | |||
// pkcl is the b in the protocol | |||
PQCLEAN_SABER_CLEAN_BS2POLVEC(pk, pkcl, SABER_P); | |||
for (i = 0; i < SABER_N; i++) { | |||
vprime[i] = 0; | |||
} | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
skpv1[i][j] = skpv1[i][j] & (mod_p); | |||
} | |||
for (j = 0; j < SABER_N; j++) { | |||
vp[j] = (vp[j] - (mp[j] << (SABER_EP - 1)) + h1) >> (SABER_EP - SABER_ET); | |||
} | |||
// vector-vector scalar multiplication with mod p | |||
InnerProd(pkcl, skpv1, mod_p, vprime); | |||
// addition of h1 to vprime | |||
for (i = 0; i < SABER_N; i++) { | |||
vprime[i] = vprime[i] + h1; | |||
} | |||
// unpack message_received; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
for (i = 0; i < 8; i++) { | |||
message[8 * j + i] = ((message_received[j] >> i) & 0x01); | |||
} | |||
} | |||
// message encoding | |||
for (i = 0; i < SABER_N; i++) { | |||
message[i] = (message[i] << (SABER_EP - 1)); | |||
} | |||
for (k = 0; k < SABER_N; k++) { | |||
vprime[k] = ( (vprime[k] - message[k]) & (mod_p) ) >> (SABER_EP - SABER_ET); | |||
} | |||
PQCLEAN_SABER_CLEAN_pack_4bit(msk_c, vprime); | |||
for (j = 0; j < SABER_SCALEBYTES_KEM; j++) { | |||
ciphertext[SABER_POLYVECCOMPRESSEDBYTES + j] = msk_c[j]; | |||
} | |||
PQCLEAN_SABER_CLEAN_POLT2BS(ciphertext + SABER_POLYVECCOMPRESSEDBYTES, vp); | |||
} | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]) { | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_dec(const unsigned char *sk, const unsigned char *ciphertext, unsigned char message_dec[]) { | |||
uint32_t i, j; | |||
// secret key of the server | |||
uint16_t sksv[SABER_K][SABER_N]; | |||
uint16_t pksv[SABER_K][SABER_N]; | |||
uint8_t scale_ar[SABER_SCALEBYTES_KEM]; | |||
uint16_t mod_p = SABER_P - 1; | |||
uint16_t v[SABER_N]; | |||
uint16_t op[SABER_N]; | |||
// sksv is the secret-key | |||
PQCLEAN_SABER_CLEAN_BS2POLVEC(sk, sksv, SABER_Q); | |||
// pksv is the ciphertext | |||
PQCLEAN_SABER_CLEAN_BS2POLVEC(ciphertext, pksv, SABER_P); | |||
// vector-vector scalar multiplication with mod p | |||
for (i = 0; i < SABER_N; i++) { | |||
v[i] = 0; | |||
} | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_N; j++) { | |||
sksv[i][j] = sksv[i][j] & (mod_p); | |||
} | |||
} | |||
InnerProd(pksv, sksv, mod_p, v); | |||
//Extraction | |||
for (i = 0; i < SABER_SCALEBYTES_KEM; i++) { | |||
scale_ar[i] = ciphertext[SABER_POLYVECCOMPRESSEDBYTES + i]; | |||
} | |||
uint16_t s[SABER_L][SABER_N]; | |||
uint16_t b[SABER_L][SABER_N]; | |||
uint16_t v[SABER_N] = {0}; | |||
uint16_t cm[SABER_N]; | |||
int i; | |||
PQCLEAN_SABER_CLEAN_un_pack4bit(scale_ar, op); | |||
PQCLEAN_SABER_CLEAN_BS2POLVECq(s, sk); | |||
PQCLEAN_SABER_CLEAN_BS2POLVECp(b, ciphertext); | |||
PQCLEAN_SABER_CLEAN_InnerProd(v, (const uint16_t (*)[SABER_N])b, (const uint16_t (*)[SABER_N])s); | |||
PQCLEAN_SABER_CLEAN_BS2POLT(cm, ciphertext + SABER_POLYVECCOMPRESSEDBYTES); | |||
//addition of h1 | |||
for (i = 0; i < SABER_N; i++) { | |||
v[i] = ( ( v[i] + h2 - (op[i] << (SABER_EP - SABER_ET)) ) & (mod_p) ) >> (SABER_EP - 1); | |||
v[i] = (v[i] + h2 - (cm[i] << (SABER_EP - SABER_ET))) >> (SABER_EP - 1); | |||
} | |||
// pack decrypted message | |||
POL2MSG(v, message_dec); | |||
} | |||
static void MatrixVectorMul(polyvec *a, uint16_t skpv[SABER_K][SABER_N], uint16_t res[SABER_K][SABER_N], uint16_t mod, int16_t transpose) { | |||
uint16_t acc[SABER_N]; | |||
int32_t i, j, k; | |||
if (transpose == 1) { | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_SABER_CLEAN_pol_mul((uint16_t *)&a[j].vec[i], skpv[j], acc, SABER_Q, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[i][k] = res[i][k] + acc[k]; | |||
//reduction mod p | |||
res[i][k] = (res[i][k] & mod); | |||
//clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
} | |||
} else { | |||
for (i = 0; i < SABER_K; i++) { | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_SABER_CLEAN_pol_mul((uint16_t *)&a[i].vec[j], skpv[j], acc, SABER_Q, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[i][k] = res[i][k] + acc[k]; | |||
// reduction | |||
res[i][k] = res[i][k] & mod; | |||
// clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
} | |||
} | |||
} | |||
static void POL2MSG(const uint16_t *message_dec_unpacked, unsigned char *message_dec) { | |||
int32_t i, j; | |||
for (j = 0; j < SABER_KEYBYTES; j++) { | |||
message_dec[j] = 0; | |||
for (i = 0; i < 8; i++) { | |||
message_dec[j] = message_dec[j] | (uint8_t) (message_dec_unpacked[j * 8 + i] << i); | |||
} | |||
} | |||
} | |||
static void InnerProd(uint16_t pkcl[SABER_K][SABER_N], uint16_t skpv[SABER_K][SABER_N], uint16_t mod, uint16_t res[SABER_N]) { | |||
uint32_t j, k; | |||
uint16_t acc[SABER_N]; | |||
// vector-vector scalar multiplication with mod p | |||
for (j = 0; j < SABER_K; j++) { | |||
PQCLEAN_SABER_CLEAN_pol_mul(pkcl[j], skpv[j], acc, SABER_P, SABER_N); | |||
for (k = 0; k < SABER_N; k++) { | |||
res[k] = res[k] + acc[k]; | |||
// reduction | |||
res[k] = res[k] & mod; | |||
// clear the accumulator | |||
acc[k] = 0; | |||
} | |||
} | |||
PQCLEAN_SABER_CLEAN_POLmsg2BS(m, v); | |||
} |
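Editor's note: a quick check of the rounding constants used in the rewritten indcpa code above, with the parameter values from SABER_params.h below (SABER_EQ = 13, SABER_EP = 10, SABER_ET = 4). Purely illustrative; the function name is made up.

#include <assert.h>

void check_rounding_constants(void) {
    const int eq = 13, ep = 10, et = 4;
    const int h1 = 1 << (eq - ep - 1);                                            /* = 4   */
    const int h2 = (1 << (ep - 2)) - (1 << (ep - et - 1)) + (1 << (eq - ep - 1)); /* = 228 */
    assert(h1 == 4);
    assert(h2 == 228);
    /* Key generation rounds a mod-q coefficient x down to mod p as
     * (x + h1) >> (eq - ep), i.e. add 4 and drop the low 3 bits, exactly as in
     * the keypair loop above. */
}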
@@ -1,9 +1,13 @@ | |||
#ifndef INDCPA_H | |||
#define INDCPA_H | |||
#include "SABER_params.h" | |||
#include <stdint.h> | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_enc(const unsigned char *message, unsigned char *noiseseed, const unsigned char *pk, unsigned char *ciphertext); | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_dec(const unsigned char *sk, const unsigned char *ciphertext, unsigned char *message_dec); | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_keypair(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]); | |||
#endif | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_enc(uint8_t ciphertext[SABER_BYTES_CCA_DEC], const uint8_t m[SABER_KEYBYTES], const uint8_t seed_sp[SABER_NOISE_SEEDBYTES], const uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES]); | |||
void PQCLEAN_SABER_CLEAN_indcpa_kem_dec(uint8_t m[SABER_KEYBYTES], const uint8_t sk[SABER_INDCPA_SECRETKEYBYTES], const uint8_t ciphertext[SABER_BYTES_CCA_DEC]); | |||
#endif |
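Editor's note: a round-trip sketch for the IND-CPA interface declared above, with sizes taken from SABER_params.h. It feeds a random message and noise seed in directly; the CCA wrapper in kem.c (elsewhere in this patch) is expected to derive these inputs from hashes instead. The function name is hypothetical.

#include <stdint.h>
#include <string.h>
#include "SABER_indcpa.h"
#include "SABER_params.h"
#include "randombytes.h"

int indcpa_roundtrip(void) {
    uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES];
    uint8_t sk[SABER_INDCPA_SECRETKEYBYTES];
    uint8_t ct[SABER_BYTES_CCA_DEC];
    uint8_t m[SABER_KEYBYTES], m_dec[SABER_KEYBYTES];
    uint8_t seed_sp[SABER_NOISE_SEEDBYTES];

    randombytes(m, sizeof m);
    randombytes(seed_sp, sizeof seed_sp);

    PQCLEAN_SABER_CLEAN_indcpa_kem_keypair(pk, sk);
    PQCLEAN_SABER_CLEAN_indcpa_kem_enc(ct, m, seed_sp, pk);
    PQCLEAN_SABER_CLEAN_indcpa_kem_dec(m_dec, sk, ct);

    /* equal except with negligible decryption-failure probability */
    return memcmp(m, m_dec, SABER_KEYBYTES) == 0;
}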
@@ -1,50 +1,39 @@ | |||
#ifndef PARAMS_H | |||
#define PARAMS_H | |||
#include "api.h" | |||
#define SABER_K 3 | |||
/* Change this for different security strengths */ | |||
/* Don't change anything below this line */ | |||
#define SABER_L 3 | |||
#define SABER_MU 8 | |||
#define SABER_ET 4 | |||
#define SABER_EQ 13 | |||
#define SABER_EP 10 | |||
#define SABER_N 256 | |||
#define SABER_Q 8192 | |||
#define SABER_P 1024 | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISESEEDBYTES 32 | |||
#define SABER_COINBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYBYTES 416 //13*256/8 | |||
#define SABER_POLYVECBYTES (SABER_K * SABER_POLYBYTES) | |||
#define SABER_SEEDBYTES 32 | |||
#define SABER_NOISE_SEEDBYTES 32 | |||
#define SABER_KEYBYTES 32 | |||
#define SABER_HASHBYTES 32 | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_K * 320) //10*256/8 NOTE : changed till here due to parameter adaptation | |||
#define SABER_POLYCOINBYTES (SABER_MU * SABER_N / 8) | |||
#define SABER_CIPHERTEXTBYTES (SABER_POLYVECCOMPRESSEDBYTES) | |||
#define SABER_POLYBYTES (SABER_EQ * SABER_N / 8) | |||
#define SABER_POLYVECBYTES (SABER_L * SABER_POLYBYTES) | |||
#define SABER_SCALEBYTES (SABER_DELTA*SABER_N/8) | |||
#define SABER_POLYCOMPRESSEDBYTES (SABER_EP * SABER_N / 8) | |||
#define SABER_POLYVECCOMPRESSEDBYTES (SABER_L * SABER_POLYCOMPRESSEDBYTES) | |||
#define SABER_SCALEBYTES_KEM ((SABER_ET)*SABER_N/8) | |||
#define SABER_SCALEBYTES_KEM (SABER_ET * SABER_N / 8) | |||
#define SABER_INDCPA_PUBLICKEYBYTES (SABER_POLYVECCOMPRESSEDBYTES + SABER_SEEDBYTES) | |||
#define SABER_INDCPA_SECRETKEYBYTES (SABER_POLYVECBYTES) | |||
#define SABER_PUBLICKEYBYTES (SABER_INDCPA_PUBLICKEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_SECRETKEYBYTES (SABER_INDCPA_SECRETKEYBYTES + SABER_INDCPA_PUBLICKEYBYTES + SABER_HASHBYTES + SABER_KEYBYTES) | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) /* Second part is for Targhi-Unruh */ | |||
#define SABER_BYTES_CCA_DEC (SABER_POLYVECCOMPRESSEDBYTES + SABER_SCALEBYTES_KEM) | |||
#endif | |||
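Editor's note: with SABER_L = 3, SABER_N = 256, SABER_EQ = 13, SABER_EP = 10 and SABER_ET = 4, the macros above expand to the fixed sizes in api.h below; the small check function here (name made up, not part of the patch) just spells out that arithmetic.

#include <assert.h>

void check_saber_sizes(void) {
    const int l = 3, n = 256, eq = 13, ep = 10, et = 4;
    const int seedbytes = 32, hashbytes = 32, keybytes = 32;
    const int polyvecbytes = l * (eq * n / 8);           /* 3 * 416 = 1248 */
    const int polyveccompressedbytes = l * (ep * n / 8); /* 3 * 320 = 960  */
    const int scalebytes_kem = et * n / 8;               /* 128            */

    assert(polyveccompressedbytes + seedbytes == 992);         /* CRYPTO_PUBLICKEYBYTES  */
    assert(polyveccompressedbytes + scalebytes_kem == 1088);   /* CRYPTO_CIPHERTEXTBYTES */
    assert(polyvecbytes + 992 + hashbytes + keybytes == 2304); /* CRYPTO_SECRETKEYBYTES  */
}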
@@ -1,14 +1,18 @@ | |||
#ifndef PQCLEAN_SABER_CLEAN_API_H | |||
#define PQCLEAN_SABER_CLEAN_API_H | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_ALGNAME "Saber" | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_SECRETKEYBYTES 2304 | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_PUBLICKEYBYTES (3*320+32) | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_BYTES 32 | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_CIPHERTEXTBYTES 1088 | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_PUBLICKEYBYTES 992 | |||
#define PQCLEAN_SABER_CLEAN_CRYPTO_SECRETKEYBYTES 2304 | |||
int PQCLEAN_SABER_CLEAN_crypto_kem_keypair(unsigned char *pk, unsigned char *sk); | |||
int PQCLEAN_SABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *ss, const unsigned char *pk); | |||
int PQCLEAN_SABER_CLEAN_crypto_kem_dec(unsigned char *ss, const unsigned char *ct, const unsigned char *sk); | |||
int PQCLEAN_SABER_CLEAN_crypto_kem_enc(unsigned char *ct, unsigned char *k, const unsigned char *pk); | |||
int PQCLEAN_SABER_CLEAN_crypto_kem_dec(unsigned char *k, const unsigned char *ct, const unsigned char *sk); | |||
#endif /* PQCLEAN_SABER_CLEAN_API_H */
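Editor's note: a minimal KEM round-trip sketch against the API declared above, with error handling omitted; the function name is made up and this file is not part of the patch.

#include <string.h>
#include "api.h"

int kem_roundtrip(void) {
    unsigned char pk[PQCLEAN_SABER_CLEAN_CRYPTO_PUBLICKEYBYTES];
    unsigned char sk[PQCLEAN_SABER_CLEAN_CRYPTO_SECRETKEYBYTES];
    unsigned char ct[PQCLEAN_SABER_CLEAN_CRYPTO_CIPHERTEXTBYTES];
    unsigned char k_enc[PQCLEAN_SABER_CLEAN_CRYPTO_BYTES];
    unsigned char k_dec[PQCLEAN_SABER_CLEAN_CRYPTO_BYTES];

    PQCLEAN_SABER_CLEAN_crypto_kem_keypair(pk, sk);
    PQCLEAN_SABER_CLEAN_crypto_kem_enc(ct, k_enc, pk);
    PQCLEAN_SABER_CLEAN_crypto_kem_dec(k_dec, ct, sk);

    /* both sides now hold the same 32-byte shared secret */
    return memcmp(k_enc, k_dec, PQCLEAN_SABER_CLEAN_CRYPTO_BYTES) == 0;
}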