From 31190562b723b66d81a62f61e0d0ff56d51e6205 Mon Sep 17 00:00:00 2001 From: "John M. Schanck" Date: Wed, 21 Oct 2020 16:37:33 -0400 Subject: [PATCH 1/3] Add AVX2 Falcon --- crypto_sign/falcon-1024/META.yml | 11 +- crypto_sign/falcon-1024/avx2/LICENSE | 24 + crypto_sign/falcon-1024/avx2/Makefile | 24 + crypto_sign/falcon-1024/avx2/api.h | 80 + crypto_sign/falcon-1024/avx2/codec.c | 555 +++ crypto_sign/falcon-1024/avx2/common.c | 294 ++ crypto_sign/falcon-1024/avx2/fft.c | 1109 +++++ crypto_sign/falcon-1024/avx2/fpr.c | 1078 +++++ crypto_sign/falcon-1024/avx2/fpr.h | 349 ++ crypto_sign/falcon-1024/avx2/inner.h | 826 ++++ crypto_sign/falcon-1024/avx2/keygen.c | 4231 +++++++++++++++++ crypto_sign/falcon-1024/avx2/pqclean.c | 386 ++ crypto_sign/falcon-1024/avx2/rng.c | 195 + crypto_sign/falcon-1024/avx2/sign.c | 1312 +++++ crypto_sign/falcon-1024/avx2/vrfy.c | 853 ++++ crypto_sign/falcon-1024/clean/LICENSE | 2 + crypto_sign/falcon-1024/clean/Makefile | 8 +- .../clean/Makefile.Microsoft_nmake | 6 +- crypto_sign/falcon-1024/clean/codec.c | 3 +- crypto_sign/falcon-1024/clean/common.c | 3 +- crypto_sign/falcon-1024/clean/fft.c | 3 +- crypto_sign/falcon-1024/clean/fpr.c | 4 +- crypto_sign/falcon-1024/clean/fpr.h | 5 +- crypto_sign/falcon-1024/clean/inner.h | 11 +- crypto_sign/falcon-1024/clean/keygen.c | 4 +- crypto_sign/falcon-1024/clean/pqclean.c | 35 +- crypto_sign/falcon-1024/clean/rng.c | 43 +- crypto_sign/falcon-1024/clean/sign.c | 10 +- crypto_sign/falcon-1024/clean/vrfy.c | 3 +- crypto_sign/falcon-512/META.yml | 11 +- crypto_sign/falcon-512/avx2/LICENSE | 24 + crypto_sign/falcon-512/avx2/Makefile | 24 + crypto_sign/falcon-512/avx2/api.h | 80 + crypto_sign/falcon-512/avx2/codec.c | 555 +++ crypto_sign/falcon-512/avx2/common.c | 294 ++ crypto_sign/falcon-512/avx2/fft.c | 1109 +++++ crypto_sign/falcon-512/avx2/fpr.c | 1078 +++++ crypto_sign/falcon-512/avx2/fpr.h | 349 ++ crypto_sign/falcon-512/avx2/inner.h | 826 ++++ crypto_sign/falcon-512/avx2/keygen.c | 4231 
+++++++++++++++++ crypto_sign/falcon-512/avx2/pqclean.c | 384 ++ crypto_sign/falcon-512/avx2/rng.c | 195 + crypto_sign/falcon-512/avx2/sign.c | 1312 +++++ crypto_sign/falcon-512/avx2/vrfy.c | 853 ++++ crypto_sign/falcon-512/clean/LICENSE | 2 + crypto_sign/falcon-512/clean/Makefile | 8 +- .../falcon-512/clean/Makefile.Microsoft_nmake | 6 +- crypto_sign/falcon-512/clean/codec.c | 3 +- crypto_sign/falcon-512/clean/common.c | 3 +- crypto_sign/falcon-512/clean/fft.c | 3 +- crypto_sign/falcon-512/clean/fpr.c | 4 +- crypto_sign/falcon-512/clean/fpr.h | 5 +- crypto_sign/falcon-512/clean/inner.h | 15 +- crypto_sign/falcon-512/clean/keygen.c | 4 +- crypto_sign/falcon-512/clean/pqclean.c | 25 +- crypto_sign/falcon-512/clean/rng.c | 43 +- crypto_sign/falcon-512/clean/sign.c | 10 +- crypto_sign/falcon-512/clean/vrfy.c | 3 +- .../falcon-1024_avx2.yml | 33 + .../falcon-1024_clean.yml | 32 + .../duplicate_consistency/falcon-512_avx2.yml | 33 + .../falcon-512_clean.yml | 32 + .../duplicate_consistency/falcon1024_avx2.yml | 11 + .../falcon1024_clean.yml | 11 + test/duplicate_consistency/falcon512_avx2.yml | 33 + .../duplicate_consistency/falcon512_clean.yml | 32 + 66 files changed, 23019 insertions(+), 124 deletions(-) create mode 100644 crypto_sign/falcon-1024/avx2/LICENSE create mode 100644 crypto_sign/falcon-1024/avx2/Makefile create mode 100644 crypto_sign/falcon-1024/avx2/api.h create mode 100644 crypto_sign/falcon-1024/avx2/codec.c create mode 100644 crypto_sign/falcon-1024/avx2/common.c create mode 100644 crypto_sign/falcon-1024/avx2/fft.c create mode 100644 crypto_sign/falcon-1024/avx2/fpr.c create mode 100644 crypto_sign/falcon-1024/avx2/fpr.h create mode 100644 crypto_sign/falcon-1024/avx2/inner.h create mode 100644 crypto_sign/falcon-1024/avx2/keygen.c create mode 100644 crypto_sign/falcon-1024/avx2/pqclean.c create mode 100644 crypto_sign/falcon-1024/avx2/rng.c create mode 100644 crypto_sign/falcon-1024/avx2/sign.c create mode 100644 crypto_sign/falcon-1024/avx2/vrfy.c 
create mode 100644 crypto_sign/falcon-512/avx2/LICENSE create mode 100644 crypto_sign/falcon-512/avx2/Makefile create mode 100644 crypto_sign/falcon-512/avx2/api.h create mode 100644 crypto_sign/falcon-512/avx2/codec.c create mode 100644 crypto_sign/falcon-512/avx2/common.c create mode 100644 crypto_sign/falcon-512/avx2/fft.c create mode 100644 crypto_sign/falcon-512/avx2/fpr.c create mode 100644 crypto_sign/falcon-512/avx2/fpr.h create mode 100644 crypto_sign/falcon-512/avx2/inner.h create mode 100644 crypto_sign/falcon-512/avx2/keygen.c create mode 100644 crypto_sign/falcon-512/avx2/pqclean.c create mode 100644 crypto_sign/falcon-512/avx2/rng.c create mode 100644 crypto_sign/falcon-512/avx2/sign.c create mode 100644 crypto_sign/falcon-512/avx2/vrfy.c create mode 100644 test/duplicate_consistency/falcon-1024_avx2.yml create mode 100644 test/duplicate_consistency/falcon-1024_clean.yml create mode 100644 test/duplicate_consistency/falcon-512_avx2.yml create mode 100644 test/duplicate_consistency/falcon-512_clean.yml create mode 100644 test/duplicate_consistency/falcon1024_avx2.yml create mode 100644 test/duplicate_consistency/falcon1024_clean.yml create mode 100644 test/duplicate_consistency/falcon512_avx2.yml create mode 100644 test/duplicate_consistency/falcon512_clean.yml diff --git a/crypto_sign/falcon-1024/META.yml b/crypto_sign/falcon-1024/META.yml index 26488107..3cfb0b8c 100644 --- a/crypto_sign/falcon-1024/META.yml +++ b/crypto_sign/falcon-1024/META.yml @@ -20,4 +20,13 @@ auxiliary-submitters: - Zhenfei Zhang implementations: - name: clean - version: 20190920 + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + - name: avx2 + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + supported_platforms: + - architecture: x86_64 + operating_systems: + - Linux + - Darwin + required_flags: + - avx2 diff --git a/crypto_sign/falcon-1024/avx2/LICENSE 
b/crypto_sign/falcon-1024/avx2/LICENSE new file mode 100644 index 00000000..12c7b56c --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/LICENSE @@ -0,0 +1,24 @@ +\ +MIT License + +Copyright (c) 2017-2019 Falcon Project + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/crypto_sign/falcon-1024/avx2/Makefile b/crypto_sign/falcon-1024/avx2/Makefile new file mode 100644 index 00000000..3ea67daa --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/Makefile @@ -0,0 +1,24 @@ +# This Makefile can be used with GNU Make or BSD Make + +LIB=libfalcon1024_avx2.a + +SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c +OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o +HEADERS = api.h fpr.h inner.h + +CFLAGS=-O3 -Wconversion -mavx2 -Wall -Wextra -Wpedantic -Wvla -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) + +all: $(LIB) + +%.o: %.s $(HEADERS) + $(AS) -o $@ $< + +%.o: %.c $(HEADERS) + $(CC) $(CFLAGS) -c -o $@ $< + +$(LIB): $(OBJECTS) + $(AR) -r $@ $(OBJECTS) + +clean: + $(RM) $(OBJECTS) + $(RM) $(LIB) diff --git a/crypto_sign/falcon-1024/avx2/api.h b/crypto_sign/falcon-1024/avx2/api.h new file mode 100644 index 00000000..b92c7c2c --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/api.h @@ -0,0 +1,80 @@ +#ifndef PQCLEAN_FALCON1024_AVX2_API_H +#define PQCLEAN_FALCON1024_AVX2_API_H + +#include +#include + +#define PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES 2305 +#define PQCLEAN_FALCON1024_AVX2_CRYPTO_PUBLICKEYBYTES 1793 +#define PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES 1330 + +#define PQCLEAN_FALCON1024_AVX2_CRYPTO_ALGNAME "Falcon-1024" + +/* + * Generate a new key pair. Public key goes into pk[], private key in sk[]. + * Key sizes are exact (in bytes): + * public (pk): PQCLEAN_FALCON1024_AVX2_CRYPTO_PUBLICKEYBYTES + * private (sk): PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON1024_AVX2_crypto_sign_keypair( + uint8_t *pk, uint8_t *sk); + +/* + * Compute a signature on a provided message (m, mlen), with a given + * private key (sk). Signature is written in sig[], with length written + * into *siglen. 
Signature length is variable; maximum signature length + * (in bytes) is PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES. + * + * sig[], m[] and sk[] may overlap each other arbitrarily. + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON1024_AVX2_crypto_sign_signature( + uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk); + +/* + * Verify a signature (sig, siglen) on a message (m, mlen) with a given + * public key (pk). + * + * sig[], m[] and pk[] may overlap each other arbitrarily. + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON1024_AVX2_crypto_sign_verify( + const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk); + +/* + * Compute a signature on a message and pack the signature and message + * into a single object, written into sm[]. The length of that output is + * written in *smlen; that length may be larger than the message length + * (mlen) by up to PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES. + * + * sm[] and m[] may overlap each other arbitrarily; however, sm[] shall + * not overlap with sk[]. + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON1024_AVX2_crypto_sign( + uint8_t *sm, size_t *smlen, + const uint8_t *m, size_t mlen, const uint8_t *sk); + +/* + * Open a signed message object (sm, smlen) and verify the signature; + * on success, the message itself is written into m[] and its length + * into *mlen. The message is shorter than the signed message object, + * but the size difference depends on the signature value; the difference + * may range up to PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES. + * + * m[], sm[] and pk[] may overlap each other arbitrarily. + * + * Return value: 0 on success, -1 on error. 
+ */ +int PQCLEAN_FALCON1024_AVX2_crypto_sign_open( + uint8_t *m, size_t *mlen, + const uint8_t *sm, size_t smlen, const uint8_t *pk); + +#endif diff --git a/crypto_sign/falcon-1024/avx2/codec.c b/crypto_sign/falcon-1024/avx2/codec.c new file mode 100644 index 00000000..3f47ed37 --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/codec.c @@ -0,0 +1,555 @@ +#include "inner.h" + +/* + * Encoding/decoding of keys and signatures. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_modq_encode( + void *out, size_t max_out_len, + const uint16_t *x, unsigned logn) { + size_t n, out_len, u; + uint8_t *buf; + uint32_t acc; + int acc_len; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + if (x[u] >= 12289) { + return 0; + } + } + out_len = ((n * 14) + 7) >> 3; + if (out == NULL) { + return out_len; + } + if (out_len > max_out_len) { + return 0; + } + buf = out; + acc = 0; + acc_len = 0; + for (u = 0; u < n; u ++) { + acc = (acc << 14) | x[u]; + acc_len += 14; + while (acc_len >= 8) { + acc_len -= 8; + *buf ++ = (uint8_t)(acc >> acc_len); + } + } + if (acc_len > 0) { + *buf = (uint8_t)(acc << (8 - acc_len)); + } + return out_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_modq_decode( + uint16_t *x, unsigned logn, + const void *in, size_t max_in_len) { + size_t n, in_len, u; + const uint8_t *buf; + uint32_t acc; + int acc_len; + + n = (size_t)1 << logn; + in_len = ((n * 14) + 7) >> 3; + if (in_len > max_in_len) { + return 0; + } + buf = in; + acc = 0; + acc_len = 0; + u = 0; + while (u < n) { + acc = (acc << 8) | (*buf ++); + acc_len += 8; + if (acc_len >= 14) { + unsigned w; + + acc_len -= 14; + w = (acc >> acc_len) & 0x3FFF; + if (w >= 12289) { + return 0; + } + x[u ++] = (uint16_t)w; + } + } + if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) { + return 0; + } + return in_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_trim_i16_encode( + void *out, size_t max_out_len, + const int16_t *x, unsigned logn, unsigned bits) { + size_t n, u, out_len; + int minv, maxv; + uint8_t *buf; + uint32_t acc, mask; + unsigned acc_len; + + n = (size_t)1 << logn; + maxv = (1 << (bits - 1)) - 1; + minv = -maxv; + for (u = 0; u < n; u ++) { + if (x[u] < minv || x[u] > maxv) { + return 0; + } + } + out_len = ((n * bits) + 7) >> 3; + if (out == NULL) { + return 
out_len; + } + if (out_len > max_out_len) { + return 0; + } + buf = out; + acc = 0; + acc_len = 0; + mask = ((uint32_t)1 << bits) - 1; + for (u = 0; u < n; u ++) { + acc = (acc << bits) | ((uint16_t)x[u] & mask); + acc_len += bits; + while (acc_len >= 8) { + acc_len -= 8; + *buf ++ = (uint8_t)(acc >> acc_len); + } + } + if (acc_len > 0) { + *buf ++ = (uint8_t)(acc << (8 - acc_len)); + } + return out_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_trim_i16_decode( + int16_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len) { + size_t n, in_len; + const uint8_t *buf; + size_t u; + uint32_t acc, mask1, mask2; + unsigned acc_len; + + n = (size_t)1 << logn; + in_len = ((n * bits) + 7) >> 3; + if (in_len > max_in_len) { + return 0; + } + buf = in; + u = 0; + acc = 0; + acc_len = 0; + mask1 = ((uint32_t)1 << bits) - 1; + mask2 = (uint32_t)1 << (bits - 1); + while (u < n) { + acc = (acc << 8) | *buf ++; + acc_len += 8; + while (acc_len >= bits && u < n) { + uint32_t w; + + acc_len -= bits; + w = (acc >> acc_len) & mask1; + w |= -(w & mask2); + if (w == -mask2) { + /* + * The -2^(bits-1) value is forbidden. + */ + return 0; + } + w |= -(w & mask2); + x[u ++] = (int16_t) * (int32_t *)&w; + } + } + if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) { + /* + * Extra bits in the last byte must be zero. 
+ */ + return 0; + } + return in_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_trim_i8_encode( + void *out, size_t max_out_len, + const int8_t *x, unsigned logn, unsigned bits) { + size_t n, u, out_len; + int minv, maxv; + uint8_t *buf; + uint32_t acc, mask; + unsigned acc_len; + + n = (size_t)1 << logn; + maxv = (1 << (bits - 1)) - 1; + minv = -maxv; + for (u = 0; u < n; u ++) { + if (x[u] < minv || x[u] > maxv) { + return 0; + } + } + out_len = ((n * bits) + 7) >> 3; + if (out == NULL) { + return out_len; + } + if (out_len > max_out_len) { + return 0; + } + buf = out; + acc = 0; + acc_len = 0; + mask = ((uint32_t)1 << bits) - 1; + for (u = 0; u < n; u ++) { + acc = (acc << bits) | ((uint8_t)x[u] & mask); + acc_len += bits; + while (acc_len >= 8) { + acc_len -= 8; + *buf ++ = (uint8_t)(acc >> acc_len); + } + } + if (acc_len > 0) { + *buf ++ = (uint8_t)(acc << (8 - acc_len)); + } + return out_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_trim_i8_decode( + int8_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len) { + size_t n, in_len; + const uint8_t *buf; + size_t u; + uint32_t acc, mask1, mask2; + unsigned acc_len; + + n = (size_t)1 << logn; + in_len = ((n * bits) + 7) >> 3; + if (in_len > max_in_len) { + return 0; + } + buf = in; + u = 0; + acc = 0; + acc_len = 0; + mask1 = ((uint32_t)1 << bits) - 1; + mask2 = (uint32_t)1 << (bits - 1); + while (u < n) { + acc = (acc << 8) | *buf ++; + acc_len += 8; + while (acc_len >= bits && u < n) { + uint32_t w; + + acc_len -= bits; + w = (acc >> acc_len) & mask1; + w |= -(w & mask2); + if (w == -mask2) { + /* + * The -2^(bits-1) value is forbidden. + */ + return 0; + } + x[u ++] = (int8_t) * (int32_t *)&w; + } + } + if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) { + /* + * Extra bits in the last byte must be zero. 
+ */ + return 0; + } + return in_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_comp_encode( + void *out, size_t max_out_len, + const int16_t *x, unsigned logn) { + uint8_t *buf; + size_t n, u, v; + uint32_t acc; + unsigned acc_len; + + n = (size_t)1 << logn; + buf = out; + + /* + * Make sure that all values are within the -2047..+2047 range. + */ + for (u = 0; u < n; u ++) { + if (x[u] < -2047 || x[u] > +2047) { + return 0; + } + } + + acc = 0; + acc_len = 0; + v = 0; + for (u = 0; u < n; u ++) { + int t; + unsigned w; + + /* + * Get sign and absolute value of next integer; push the + * sign bit. + */ + acc <<= 1; + t = x[u]; + if (t < 0) { + t = -t; + acc |= 1; + } + w = (unsigned)t; + + /* + * Push the low 7 bits of the absolute value. + */ + acc <<= 7; + acc |= w & 127u; + w >>= 7; + + /* + * We pushed exactly 8 bits. + */ + acc_len += 8; + + /* + * Push as many zeros as necessary, then a one. Since the + * absolute value is at most 2047, w can only range up to + * 15 at this point, thus we will add at most 16 bits + * here. With the 8 bits above and possibly up to 7 bits + * from previous iterations, we may go up to 31 bits, which + * will fit in the accumulator, which is an uint32_t. + */ + acc <<= (w + 1); + acc |= 1; + acc_len += w + 1; + + /* + * Produce all full bytes. + */ + while (acc_len >= 8) { + acc_len -= 8; + if (buf != NULL) { + if (v >= max_out_len) { + return 0; + } + buf[v] = (uint8_t)(acc >> acc_len); + } + v ++; + } + } + + /* + * Flush remaining bits (if any). 
+ */ + if (acc_len > 0) { + if (buf != NULL) { + if (v >= max_out_len) { + return 0; + } + buf[v] = (uint8_t)(acc << (8 - acc_len)); + } + v ++; + } + + return v; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON1024_AVX2_comp_decode( + int16_t *x, unsigned logn, + const void *in, size_t max_in_len) { + const uint8_t *buf; + size_t n, u, v; + uint32_t acc; + unsigned acc_len; + + n = (size_t)1 << logn; + buf = in; + acc = 0; + acc_len = 0; + v = 0; + for (u = 0; u < n; u ++) { + unsigned b, s, m; + + /* + * Get next eight bits: sign and low seven bits of the + * absolute value. + */ + if (v >= max_in_len) { + return 0; + } + acc = (acc << 8) | (uint32_t)buf[v ++]; + b = acc >> acc_len; + s = b & 128; + m = b & 127; + + /* + * Get next bits until a 1 is reached. + */ + for (;;) { + if (acc_len == 0) { + if (v >= max_in_len) { + return 0; + } + acc = (acc << 8) | (uint32_t)buf[v ++]; + acc_len = 8; + } + acc_len --; + if (((acc >> acc_len) & 1) != 0) { + break; + } + m += 128; + if (m > 2047) { + return 0; + } + } + x[u] = (int16_t) m; + if (s) { + x[u] = (int16_t) - x[u]; + } + } + return v; +} + +/* + * Key elements and signatures are polynomials with small integer + * coefficients. Here are some statistics gathered over many + * generated key pairs (10000 or more for each degree): + * + * log(n) n max(f,g) std(f,g) max(F,G) std(F,G) + * 1 2 129 56.31 143 60.02 + * 2 4 123 40.93 160 46.52 + * 3 8 97 28.97 159 38.01 + * 4 16 100 21.48 154 32.50 + * 5 32 71 15.41 151 29.36 + * 6 64 59 11.07 138 27.77 + * 7 128 39 7.91 144 27.00 + * 8 256 32 5.63 148 26.61 + * 9 512 22 4.00 137 26.46 + * 10 1024 15 2.84 146 26.41 + * + * We want a compact storage format for private key, and, as part of + * key generation, we are allowed to reject some keys which would + * otherwise be fine (this does not induce any noticeable vulnerability + * as long as we reject only a small proportion of possible keys). 
+ * Hence, we enforce at key generation time maximum values for the + * elements of f, g, F and G, so that their encoding can be expressed + * in fixed-width values. Limits have been chosen so that generated + * keys are almost always within bounds, thus not impacting neither + * security or performance. + * + * IMPORTANT: the code assumes that all coefficients of f, g, F and G + * ultimately fit in the -127..+127 range. Thus, none of the elements + * of max_fg_bits[] and max_FG_bits[] shall be greater than 8. + */ + +const uint8_t PQCLEAN_FALCON1024_AVX2_max_fg_bits[] = { + 0, /* unused */ + 8, + 8, + 8, + 8, + 8, + 7, + 7, + 6, + 6, + 5 +}; + +const uint8_t PQCLEAN_FALCON1024_AVX2_max_FG_bits[] = { + 0, /* unused */ + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8 +}; + +/* + * When generating a new key pair, we can always reject keys which + * feature an abnormally large coefficient. This can also be done for + * signatures, albeit with some care: in case the signature process is + * used in a derandomized setup (explicitly seeded with the message and + * private key), we have to follow the specification faithfully, and the + * specification only enforces a limit on the L2 norm of the signature + * vector. The limit on the L2 norm implies that the absolute value of + * a coefficient of the signature cannot be more than the following: + * + * log(n) n max sig coeff (theoretical) + * 1 2 412 + * 2 4 583 + * 3 8 824 + * 4 16 1166 + * 5 32 1649 + * 6 64 2332 + * 7 128 3299 + * 8 256 4665 + * 9 512 6598 + * 10 1024 9331 + * + * However, the largest observed signature coefficients during our + * experiments was 1077 (in absolute value), hence we can assume that, + * with overwhelming probability, signature coefficients will fit + * in -2047..2047, i.e. 12 bits. 
+ */ + +const uint8_t PQCLEAN_FALCON1024_AVX2_max_sig_bits[] = { + 0, /* unused */ + 10, + 11, + 11, + 12, + 12, + 12, + 12, + 12, + 12, + 12 +}; diff --git a/crypto_sign/falcon-1024/avx2/common.c b/crypto_sign/falcon-1024/avx2/common.c new file mode 100644 index 00000000..c8a9066f --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/common.c @@ -0,0 +1,294 @@ +#include "inner.h" + +/* + * Support functions for signatures (hash-to-point, norm). + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_hash_to_point_vartime( + inner_shake256_context *sc, + uint16_t *x, unsigned logn) { + /* + * This is the straightforward per-the-spec implementation. 
It + * is not constant-time, thus it might reveal information on the + * plaintext (at least, enough to check the plaintext against a + * list of potential plaintexts) in a scenario where the + * attacker does not have access to the signature value or to + * the public key, but knows the nonce (without knowledge of the + * nonce, the hashed output cannot be matched against potential + * plaintexts). + */ + size_t n; + + n = (size_t)1 << logn; + while (n > 0) { + uint8_t buf[2]; + uint32_t w; + + inner_shake256_extract(sc, (void *)buf, sizeof buf); + w = ((unsigned)buf[0] << 8) | (unsigned)buf[1]; + if (w < 61445) { + while (w >= 12289) { + w -= 12289; + } + *x ++ = (uint16_t)w; + n --; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_hash_to_point_ct( + inner_shake256_context *sc, + uint16_t *x, unsigned logn, uint8_t *tmp) { + /* + * Each 16-bit sample is a value in 0..65535. The value is + * kept if it falls in 0..61444 (because 61445 = 5*12289) + * and rejected otherwise; thus, each sample has probability + * about 0.93758 of being selected. + * + * We want to oversample enough to be sure that we will + * have enough values with probability at least 1 - 2^(-256). + * Depending on degree N, this leads to the following + * required oversampling: + * + * logn n oversampling + * 1 2 65 + * 2 4 67 + * 3 8 71 + * 4 16 77 + * 5 32 86 + * 6 64 100 + * 7 128 122 + * 8 256 154 + * 9 512 205 + * 10 1024 287 + * + * If logn >= 7, then the provided temporary buffer is large + * enough. Otherwise, we use a stack buffer of 63 entries + * (i.e. 126 bytes) for the values that do not fit in tmp[]. + */ + + static const uint16_t overtab[] = { + 0, /* unused */ + 65, + 67, + 71, + 77, + 86, + 100, + 122, + 154, + 205, + 287 + }; + + unsigned n, n2, u, m, p, over; + uint16_t *tt1, tt2[63]; + + /* + * We first generate m 16-bit value. Values 0..n-1 go to x[]. + * Values n..2*n-1 go to tt1[]. Values 2*n and later go to tt2[]. 
+ * We also reduce modulo q the values; rejected values are set + * to 0xFFFF. + */ + n = 1U << logn; + n2 = n << 1; + over = overtab[logn]; + m = n + over; + tt1 = (uint16_t *)tmp; + for (u = 0; u < m; u ++) { + uint8_t buf[2]; + uint32_t w, wr; + + inner_shake256_extract(sc, buf, sizeof buf); + w = ((uint32_t)buf[0] << 8) | (uint32_t)buf[1]; + wr = w - ((uint32_t)24578 & (((w - 24578) >> 31) - 1)); + wr = wr - ((uint32_t)24578 & (((wr - 24578) >> 31) - 1)); + wr = wr - ((uint32_t)12289 & (((wr - 12289) >> 31) - 1)); + wr |= ((w - 61445) >> 31) - 1; + if (u < n) { + x[u] = (uint16_t)wr; + } else if (u < n2) { + tt1[u - n] = (uint16_t)wr; + } else { + tt2[u - n2] = (uint16_t)wr; + } + } + + /* + * Now we must "squeeze out" the invalid values. We do this in + * a logarithmic sequence of passes; each pass computes where a + * value should go, and moves it down by 'p' slots if necessary, + * where 'p' uses an increasing powers-of-two scale. It can be + * shown that in all cases where the loop decides that a value + * has to be moved down by p slots, the destination slot is + * "free" (i.e. contains an invalid value). + */ + for (p = 1; p <= over; p <<= 1) { + unsigned v; + + /* + * In the loop below: + * + * - v contains the index of the final destination of + * the value; it is recomputed dynamically based on + * whether values are valid or not. + * + * - u is the index of the value we consider ("source"); + * its address is s. + * + * - The loop may swap the value with the one at index + * u-p. The address of the swap destination is d. + */ + v = 0; + for (u = 0; u < m; u ++) { + uint16_t *s, *d; + unsigned j, sv, dv, mk; + + if (u < n) { + s = &x[u]; + } else if (u < n2) { + s = &tt1[u - n]; + } else { + s = &tt2[u - n2]; + } + sv = *s; + + /* + * The value in sv should ultimately go to + * address v, i.e. jump back by u-v slots. + */ + j = u - v; + + /* + * We increment v for the next iteration, but + * only if the source value is valid. 
The mask + * 'mk' is -1 if the value is valid, 0 otherwise, + * so we _subtract_ mk. + */ + mk = (sv >> 15) - 1U; + v -= mk; + + /* + * In this loop we consider jumps by p slots; if + * u < p then there is nothing more to do. + */ + if (u < p) { + continue; + } + + /* + * Destination for the swap: value at address u-p. + */ + if ((u - p) < n) { + d = &x[u - p]; + } else if ((u - p) < n2) { + d = &tt1[(u - p) - n]; + } else { + d = &tt2[(u - p) - n2]; + } + dv = *d; + + /* + * The swap should be performed only if the source + * is valid AND the jump j has its 'p' bit set. + */ + mk &= -(((j & p) + 0x1FF) >> 9); + + *s = (uint16_t)(sv ^ (mk & (sv ^ dv))); + *d = (uint16_t)(dv ^ (mk & (sv ^ dv))); + } + } +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_is_short( + const int16_t *s1, const int16_t *s2, unsigned logn) { + /* + * We use the l2-norm. Code below uses only 32-bit operations to + * compute the square of the norm with saturation to 2^32-1 if + * the value exceeds 2^31-1. + */ + size_t n, u; + uint32_t s, ng; + + n = (size_t)1 << logn; + s = 0; + ng = 0; + for (u = 0; u < n; u ++) { + int32_t z; + + z = s1[u]; + s += (uint32_t)(z * z); + ng |= s; + z = s2[u]; + s += (uint32_t)(z * z); + ng |= s; + } + s |= -(ng >> 31); + + /* + * Acceptance bound on the l2-norm is: + * 1.2*1.55*sqrt(q)*sqrt(2*N) + * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024). + */ + return s < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn)); +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_is_short_half( + uint32_t sqn, const int16_t *s2, unsigned logn) { + size_t n, u; + uint32_t ng; + + n = (size_t)1 << logn; + ng = -(sqn >> 31); + for (u = 0; u < n; u ++) { + int32_t z; + + z = s2[u]; + sqn += (uint32_t)(z * z); + ng |= sqn; + } + sqn |= -(ng >> 31); + + /* + * Acceptance bound on the l2-norm is: + * 1.2*1.55*sqrt(q)*sqrt(2*N) + * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024). 
+ */ + return sqn < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn)); +} diff --git a/crypto_sign/falcon-1024/avx2/fft.c b/crypto_sign/falcon-1024/avx2/fft.c new file mode 100644 index 00000000..1398f99b --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/fft.c @@ -0,0 +1,1109 @@ +#include "inner.h" + +/* + * FFT code. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* + * Rules for complex number macros: + * -------------------------------- + * + * Operand order is: destination, source1, source2... + * + * Each operand is a real and an imaginary part. + * + * All overlaps are allowed. + */ + +/* + * Addition of two complex numbers (d = a + b). 
+ */ +#define FPC_ADD(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_re, fpct_im; \ + fpct_re = fpr_add(a_re, b_re); \ + fpct_im = fpr_add(a_im, b_im); \ + (d_re) = fpct_re; \ + (d_im) = fpct_im; \ + } while (0) + +/* + * Subtraction of two complex numbers (d = a - b). + */ +#define FPC_SUB(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_re, fpct_im; \ + fpct_re = fpr_sub(a_re, b_re); \ + fpct_im = fpr_sub(a_im, b_im); \ + (d_re) = fpct_re; \ + (d_im) = fpct_im; \ + } while (0) + +/* + * Multiplication of two complex numbers (d = a * b). + */ +#define FPC_MUL(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_b_re, fpct_b_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_b_re = (b_re); \ + fpct_b_im = (b_im); \ + fpct_d_re = fpr_sub( \ + fpr_mul(fpct_a_re, fpct_b_re), \ + fpr_mul(fpct_a_im, fpct_b_im)); \ + fpct_d_im = fpr_add( \ + fpr_mul(fpct_a_re, fpct_b_im), \ + fpr_mul(fpct_a_im, fpct_b_re)); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Squaring of a complex number (d = a * a). + */ +#define FPC_SQR(d_re, d_im, a_re, a_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_d_re = fpr_sub(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \ + fpct_d_im = fpr_double(fpr_mul(fpct_a_re, fpct_a_im)); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Inversion of a complex number (d = 1 / a). + */ +#define FPC_INV(d_re, d_im, a_re, a_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpr fpct_m; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_m = fpr_add(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \ + fpct_m = fpr_inv(fpct_m); \ + fpct_d_re = fpr_mul(fpct_a_re, fpct_m); \ + fpct_d_im = fpr_mul(fpr_neg(fpct_a_im), fpct_m); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Division of complex numbers (d = a / b). 
+ */ +#define FPC_DIV(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_b_re, fpct_b_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpr fpct_m; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_b_re = (b_re); \ + fpct_b_im = (b_im); \ + fpct_m = fpr_add(fpr_sqr(fpct_b_re), fpr_sqr(fpct_b_im)); \ + fpct_m = fpr_inv(fpct_m); \ + fpct_b_re = fpr_mul(fpct_b_re, fpct_m); \ + fpct_b_im = fpr_mul(fpr_neg(fpct_b_im), fpct_m); \ + fpct_d_re = fpr_sub( \ + fpr_mul(fpct_a_re, fpct_b_re), \ + fpr_mul(fpct_a_im, fpct_b_im)); \ + fpct_d_im = fpr_add( \ + fpr_mul(fpct_a_re, fpct_b_im), \ + fpr_mul(fpct_a_im, fpct_b_re)); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Let w = exp(i*pi/N); w is a primitive 2N-th root of 1. We define the + * values w_j = w^(2j+1) for all j from 0 to N-1: these are the roots + * of X^N+1 in the field of complex numbers. A crucial property is that + * w_{N-1-j} = conj(w_j) = 1/w_j for all j. + * + * FFT representation of a polynomial f (taken modulo X^N+1) is the + * set of values f(w_j). Since f is real, conj(f(w_j)) = f(conj(w_j)), + * thus f(w_{N-1-j}) = conj(f(w_j)). We thus store only half the values, + * for j = 0 to N/2-1; the other half can be recomputed easily when (if) + * needed. A consequence is that FFT representation has the same size + * as normal representation: N/2 complex numbers use N real numbers (each + * complex number is the combination of a real and an imaginary part). + * + * We use a specific ordering which makes computations easier. Let rev() + * be the bit-reversal function over log(N) bits. For j in 0..N/2-1, we + * store the real and imaginary parts of f(w_j) in slots: + * + * Re(f(w_j)) -> slot rev(j)/2 + * Im(f(w_j)) -> slot rev(j)/2+N/2 + * + * (Note that rev(j) is even for j < N/2.) 
+ */ + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_FFT(fpr *f, unsigned logn) { + /* + * FFT algorithm in bit-reversal order uses the following + * iterative algorithm: + * + * t = N + * for m = 1; m < N; m *= 2: + * ht = t/2 + * for i1 = 0; i1 < m; i1 ++: + * j1 = i1 * t + * s = GM[m + i1] + * for j = j1; j < (j1 + ht); j ++: + * x = f[j] + * y = s * f[j + ht] + * f[j] = x + y + * f[j + ht] = x - y + * t = ht + * + * GM[k] contains w^rev(k) for primitive root w = exp(i*pi/N). + * + * In the description above, f[] is supposed to contain complex + * numbers. In our in-memory representation, the real and + * imaginary parts of f[k] are in array slots k and k+N/2. + * + * We only keep the first half of the complex numbers. We can + * see that after the first iteration, the first and second halves + * of the array of complex numbers have separate lives, so we + * simply ignore the second part. + */ + + unsigned u; + size_t t, n, hn, m; + + /* + * First iteration: compute f[j] + i * f[j+N/2] for all j < N/2 + * (because GM[1] = w^rev(1) = w^(N/2) = i). + * In our chosen representation, this is a no-op: everything is + * already where it should be. + */ + + /* + * Subsequent iterations are truncated to use only the first + * half of values. 
+ */ + n = (size_t)1 << logn; + hn = n >> 1; + t = hn; + for (u = 1, m = 2; u < logn; u ++, m <<= 1) { + size_t ht, hm, i1, j1; + + ht = t >> 1; + hm = m >> 1; + for (i1 = 0, j1 = 0; i1 < hm; i1 ++, j1 += t) { + size_t j, j2; + + j2 = j1 + ht; + if (ht >= 4) { + __m256d s_re, s_im; + + s_re = _mm256_set1_pd( + fpr_gm_tab[((m + i1) << 1) + 0].v); + s_im = _mm256_set1_pd( + fpr_gm_tab[((m + i1) << 1) + 1].v); + for (j = j1; j < j2; j += 4) { + __m256d x_re, x_im, y_re, y_im; + __m256d z_re, z_im; + + x_re = _mm256_loadu_pd(&f[j].v); + x_im = _mm256_loadu_pd(&f[j + hn].v); + z_re = _mm256_loadu_pd(&f[j + ht].v); + z_im = _mm256_loadu_pd(&f[j + ht + hn].v); + y_re = FMSUB(z_re, s_re, + _mm256_mul_pd(z_im, s_im)); + y_im = FMADD(z_re, s_im, + _mm256_mul_pd(z_im, s_re)); + _mm256_storeu_pd(&f[j].v, + _mm256_add_pd(x_re, y_re)); + _mm256_storeu_pd(&f[j + hn].v, + _mm256_add_pd(x_im, y_im)); + _mm256_storeu_pd(&f[j + ht].v, + _mm256_sub_pd(x_re, y_re)); + _mm256_storeu_pd(&f[j + ht + hn].v, + _mm256_sub_pd(x_im, y_im)); + } + } else { + fpr s_re, s_im; + + s_re = fpr_gm_tab[((m + i1) << 1) + 0]; + s_im = fpr_gm_tab[((m + i1) << 1) + 1]; + for (j = j1; j < j2; j ++) { + fpr x_re, x_im, y_re, y_im; + + x_re = f[j]; + x_im = f[j + hn]; + y_re = f[j + ht]; + y_im = f[j + ht + hn]; + FPC_MUL(y_re, y_im, + y_re, y_im, s_re, s_im); + FPC_ADD(f[j], f[j + hn], + x_re, x_im, y_re, y_im); + FPC_SUB(f[j + ht], f[j + ht + hn], + x_re, x_im, y_re, y_im); + } + } + } + t = ht; + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_iFFT(fpr *f, unsigned logn) { + /* + * Inverse FFT algorithm in bit-reversal order uses the following + * iterative algorithm: + * + * t = 1 + * for m = N; m > 1; m /= 2: + * hm = m/2 + * dt = t*2 + * for i1 = 0; i1 < hm; i1 ++: + * j1 = i1 * dt + * s = iGM[hm + i1] + * for j = j1; j < (j1 + t); j ++: + * x = f[j] + * y = f[j + t] + * f[j] = x + y + * f[j + t] = s * (x - y) + * t = dt + * for i1 = 0; i1 < N; i1 ++: + * f[i1] = f[i1] / N + * + * iGM[k] 
contains (1/w)^rev(k) for primitive root w = exp(i*pi/N) + * (actually, iGM[k] = 1/GM[k] = conj(GM[k])). + * + * In the main loop (not counting the final division loop), in + * all iterations except the last, the first and second half of f[] + * (as an array of complex numbers) are separate. In our chosen + * representation, we do not keep the second half. + * + * The last iteration recombines the recomputed half with the + * implicit half, and should yield only real numbers since the + * target polynomial is real; moreover, s = i at that step. + * Thus, when considering x and y: + * y = conj(x) since the final f[j] must be real + * Therefore, f[j] is filled with 2*Re(x), and f[j + t] is + * filled with 2*Im(x). + * But we already have Re(x) and Im(x) in array slots j and j+t + * in our chosen representation. That last iteration is thus a + * simple doubling of the values in all the array. + * + * We make the last iteration a no-op by tweaking the final + * division into a division by N/2, not N. 
+ */ + size_t u, n, hn, t, m; + + n = (size_t)1 << logn; + t = 1; + m = n; + hn = n >> 1; + for (u = logn; u > 1; u --) { + size_t hm, dt, i1, j1; + + hm = m >> 1; + dt = t << 1; + for (i1 = 0, j1 = 0; j1 < hn; i1 ++, j1 += dt) { + size_t j, j2; + + j2 = j1 + t; + if (t >= 4) { + __m256d s_re, s_im; + + s_re = _mm256_set1_pd( + fpr_gm_tab[((hm + i1) << 1) + 0].v); + s_im = _mm256_set1_pd( + fpr_gm_tab[((hm + i1) << 1) + 1].v); + for (j = j1; j < j2; j += 4) { + __m256d x_re, x_im, y_re, y_im; + __m256d z_re, z_im; + + x_re = _mm256_loadu_pd(&f[j].v); + x_im = _mm256_loadu_pd(&f[j + hn].v); + y_re = _mm256_loadu_pd(&f[j + t].v); + y_im = _mm256_loadu_pd(&f[j + t + hn].v); + _mm256_storeu_pd(&f[j].v, + _mm256_add_pd(x_re, y_re)); + _mm256_storeu_pd(&f[j + hn].v, + _mm256_add_pd(x_im, y_im)); + x_re = _mm256_sub_pd(y_re, x_re); + x_im = _mm256_sub_pd(x_im, y_im); + z_re = FMSUB(x_im, s_im, + _mm256_mul_pd(x_re, s_re)); + z_im = FMADD(x_re, s_im, + _mm256_mul_pd(x_im, s_re)); + _mm256_storeu_pd(&f[j + t].v, z_re); + _mm256_storeu_pd(&f[j + t + hn].v, z_im); + } + } else { + fpr s_re, s_im; + + s_re = fpr_gm_tab[((hm + i1) << 1) + 0]; + s_im = fpr_neg(fpr_gm_tab[((hm + i1) << 1) + 1]); + for (j = j1; j < j2; j ++) { + fpr x_re, x_im, y_re, y_im; + + x_re = f[j]; + x_im = f[j + hn]; + y_re = f[j + t]; + y_im = f[j + t + hn]; + FPC_ADD(f[j], f[j + hn], + x_re, x_im, y_re, y_im); + FPC_SUB(x_re, x_im, + x_re, x_im, y_re, y_im); + FPC_MUL(f[j + t], f[j + t + hn], + x_re, x_im, s_re, s_im); + } + } + } + t = dt; + m = hm; + } + + /* + * Last iteration is a no-op, provided that we divide by N/2 + * instead of N. We need to make a special case for logn = 0. 
+ */ + if (logn > 0) { + fpr ni; + + ni = fpr_p2_tab[logn]; + for (u = 0; u < n; u ++) { + f[u] = fpr_mul(f[u], ni); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_add( + fpr *a, const fpr *b, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_add_pd( + _mm256_loadu_pd(&a[u].v), + _mm256_loadu_pd(&b[u].v))); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_add(a[u], b[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_sub( + fpr *a, const fpr *b, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_sub_pd( + _mm256_loadu_pd(&a[u].v), + _mm256_loadu_pd(&b[u].v))); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_sub(a[u], b[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_neg(fpr *a, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + __m256d s; + + s = _mm256_set1_pd(-0.0); + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_xor_pd(_mm256_loadu_pd(&a[u].v), s)); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_neg(a[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_adj_fft(fpr *a, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 8) { + __m256d s; + + s = _mm256_set1_pd(-0.0); + for (u = (n >> 1); u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_xor_pd(_mm256_loadu_pd(&a[u].v), s)); + } + } else { + for (u = (n >> 1); u < n; u ++) { + a[u] = fpr_neg(a[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_mul_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = 
_mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + c_re = FMSUB( + a_re, b_re, _mm256_mul_pd(a_im, b_im)); + c_im = FMADD( + a_re, b_im, _mm256_mul_pd(a_im, b_re)); + _mm256_storeu_pd(&a[u].v, c_re); + _mm256_storeu_pd(&a[u + hn].v, c_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im, b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = b[u + hn]; + FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_muladj_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = _mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + c_re = FMADD( + a_re, b_re, _mm256_mul_pd(a_im, b_im)); + c_im = FMSUB( + a_im, b_re, _mm256_mul_pd(a_re, b_im)); + _mm256_storeu_pd(&a[u].v, c_re); + _mm256_storeu_pd(&a[u + hn].v, c_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im, b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = fpr_neg(b[u + hn]); + FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(fpr *a, unsigned logn) { + /* + * Since each coefficient is multiplied with its own conjugate, + * the result contains only real values. 
+ */ + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d zero; + + zero = _mm256_setzero_pd(); + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + _mm256_storeu_pd(&a[u].v, + FMADD(a_re, a_re, + _mm256_mul_pd(a_im, a_im))); + _mm256_storeu_pd(&a[u + hn].v, zero); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im; + + a_re = a[u]; + a_im = a[u + hn]; + a[u] = fpr_add(fpr_sqr(a_re), fpr_sqr(a_im)); + a[u + hn] = fpr_zero; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_mulconst(fpr *a, fpr x, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + __m256d x4; + + x4 = _mm256_set1_pd(x.v); + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_mul_pd(x4, _mm256_loadu_pd(&a[u].v))); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_mul(a[u], x); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_div_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im, t; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = _mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + t = _mm256_div_pd(one, + FMADD(b_re, b_re, + _mm256_mul_pd(b_im, b_im))); + b_re = _mm256_mul_pd(b_re, t); + b_im = _mm256_mul_pd(b_im, t); + c_re = FMADD( + a_re, b_re, _mm256_mul_pd(a_im, b_im)); + c_im = FMSUB( + a_im, b_re, _mm256_mul_pd(a_re, b_im)); + _mm256_storeu_pd(&a[u].v, c_re); + _mm256_storeu_pd(&a[u + hn].v, c_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im, b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = b[u + hn]; + FPC_DIV(a[u], a[u + hn], a_re, a_im, b_re, b_im); + } + } +} + +/* see inner.h */ +void 
+PQCLEAN_FALCON1024_AVX2_poly_invnorm2_fft(fpr *d, + const fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, dv; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = _mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + dv = _mm256_div_pd(one, + _mm256_add_pd( + FMADD(a_re, a_re, + _mm256_mul_pd(a_im, a_im)), + FMADD(b_re, b_re, + _mm256_mul_pd(b_im, b_im)))); + _mm256_storeu_pd(&d[u].v, dv); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im; + fpr b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = b[u + hn]; + d[u] = fpr_inv(fpr_add( + fpr_add(fpr_sqr(a_re), fpr_sqr(a_im)), + fpr_add(fpr_sqr(b_re), fpr_sqr(b_im)))); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_add_muladj_fft(fpr *d, + const fpr *F, const fpr *G, + const fpr *f, const fpr *g, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d F_re, F_im, G_re, G_im; + __m256d f_re, f_im, g_re, g_im; + __m256d a_re, a_im, b_re, b_im; + + F_re = _mm256_loadu_pd(&F[u].v); + F_im = _mm256_loadu_pd(&F[u + hn].v); + G_re = _mm256_loadu_pd(&G[u].v); + G_im = _mm256_loadu_pd(&G[u + hn].v); + f_re = _mm256_loadu_pd(&f[u].v); + f_im = _mm256_loadu_pd(&f[u + hn].v); + g_re = _mm256_loadu_pd(&g[u].v); + g_im = _mm256_loadu_pd(&g[u + hn].v); + + a_re = FMADD(F_re, f_re, + _mm256_mul_pd(F_im, f_im)); + a_im = FMSUB(F_im, f_re, + _mm256_mul_pd(F_re, f_im)); + b_re = FMADD(G_re, g_re, + _mm256_mul_pd(G_im, g_im)); + b_im = FMSUB(G_im, g_re, + _mm256_mul_pd(G_re, g_im)); + _mm256_storeu_pd(&d[u].v, + _mm256_add_pd(a_re, b_re)); + _mm256_storeu_pd(&d[u + hn].v, + _mm256_add_pd(a_im, b_im)); + } + } else { + for (u = 0; u < hn; u ++) { + fpr F_re, F_im, G_re, G_im; + fpr 
f_re, f_im, g_re, g_im; + fpr a_re, a_im, b_re, b_im; + + F_re = F[u]; + F_im = F[u + hn]; + G_re = G[u]; + G_im = G[u + hn]; + f_re = f[u]; + f_im = f[u + hn]; + g_re = g[u]; + g_im = g[u + hn]; + + FPC_MUL(a_re, a_im, F_re, F_im, f_re, fpr_neg(f_im)); + FPC_MUL(b_re, b_im, G_re, G_im, g_re, fpr_neg(g_im)); + d[u] = fpr_add(a_re, b_re); + d[u + hn] = fpr_add(a_im, b_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_mul_autoadj_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, bv; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + bv = _mm256_loadu_pd(&b[u].v); + _mm256_storeu_pd(&a[u].v, + _mm256_mul_pd(a_re, bv)); + _mm256_storeu_pd(&a[u + hn].v, + _mm256_mul_pd(a_im, bv)); + } + } else { + for (u = 0; u < hn; u ++) { + a[u] = fpr_mul(a[u], b[u]); + a[u + hn] = fpr_mul(a[u + hn], b[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_div_autoadj_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d ib, a_re, a_im; + + ib = _mm256_div_pd(one, _mm256_loadu_pd(&b[u].v)); + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + _mm256_storeu_pd(&a[u].v, _mm256_mul_pd(a_re, ib)); + _mm256_storeu_pd(&a[u + hn].v, _mm256_mul_pd(a_im, ib)); + } + } else { + for (u = 0; u < hn; u ++) { + fpr ib; + + ib = fpr_inv(b[u]); + a[u] = fpr_mul(a[u], ib); + a[u + hn] = fpr_mul(a[u + hn], ib); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_LDL_fft( + const fpr *g00, + fpr *g01, fpr *g11, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d g00_re, g00_im, g01_re, 
g01_im, g11_re, g11_im; + __m256d t, mu_re, mu_im, xi_re, xi_im; + + g00_re = _mm256_loadu_pd(&g00[u].v); + g00_im = _mm256_loadu_pd(&g00[u + hn].v); + g01_re = _mm256_loadu_pd(&g01[u].v); + g01_im = _mm256_loadu_pd(&g01[u + hn].v); + g11_re = _mm256_loadu_pd(&g11[u].v); + g11_im = _mm256_loadu_pd(&g11[u + hn].v); + + t = _mm256_div_pd(one, + FMADD(g00_re, g00_re, + _mm256_mul_pd(g00_im, g00_im))); + g00_re = _mm256_mul_pd(g00_re, t); + g00_im = _mm256_mul_pd(g00_im, t); + mu_re = FMADD(g01_re, g00_re, + _mm256_mul_pd(g01_im, g00_im)); + mu_im = FMSUB(g01_re, g00_im, + _mm256_mul_pd(g01_im, g00_re)); + xi_re = FMSUB(mu_re, g01_re, + _mm256_mul_pd(mu_im, g01_im)); + xi_im = FMADD(mu_im, g01_re, + _mm256_mul_pd(mu_re, g01_im)); + _mm256_storeu_pd(&g11[u].v, + _mm256_sub_pd(g11_re, xi_re)); + _mm256_storeu_pd(&g11[u + hn].v, + _mm256_add_pd(g11_im, xi_im)); + _mm256_storeu_pd(&g01[u].v, mu_re); + _mm256_storeu_pd(&g01[u + hn].v, mu_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im; + fpr mu_re, mu_im; + + g00_re = g00[u]; + g00_im = g00[u + hn]; + g01_re = g01[u]; + g01_im = g01[u + hn]; + g11_re = g11[u]; + g11_im = g11[u + hn]; + FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im); + FPC_MUL(g01_re, g01_im, + mu_re, mu_im, g01_re, fpr_neg(g01_im)); + FPC_SUB(g11[u], g11[u + hn], + g11_re, g11_im, g01_re, g01_im); + g01[u] = mu_re; + g01[u + hn] = fpr_neg(mu_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_LDLmv_fft( + fpr *d11, fpr *l10, + const fpr *g00, const fpr *g01, + const fpr *g11, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d g00_re, g00_im, g01_re, g01_im, g11_re, g11_im; + __m256d t, mu_re, mu_im, xi_re, xi_im; + + g00_re = _mm256_loadu_pd(&g00[u].v); + g00_im = _mm256_loadu_pd(&g00[u + hn].v); + g01_re = _mm256_loadu_pd(&g01[u].v); + g01_im = 
_mm256_loadu_pd(&g01[u + hn].v); + g11_re = _mm256_loadu_pd(&g11[u].v); + g11_im = _mm256_loadu_pd(&g11[u + hn].v); + + t = _mm256_div_pd(one, + FMADD(g00_re, g00_re, + _mm256_mul_pd(g00_im, g00_im))); + g00_re = _mm256_mul_pd(g00_re, t); + g00_im = _mm256_mul_pd(g00_im, t); + mu_re = FMADD(g01_re, g00_re, + _mm256_mul_pd(g01_im, g00_im)); + mu_im = FMSUB(g01_re, g00_im, + _mm256_mul_pd(g01_im, g00_re)); + xi_re = FMSUB(mu_re, g01_re, + _mm256_mul_pd(mu_im, g01_im)); + xi_im = FMADD(mu_im, g01_re, + _mm256_mul_pd(mu_re, g01_im)); + _mm256_storeu_pd(&d11[u].v, + _mm256_sub_pd(g11_re, xi_re)); + _mm256_storeu_pd(&d11[u + hn].v, + _mm256_add_pd(g11_im, xi_im)); + _mm256_storeu_pd(&l10[u].v, mu_re); + _mm256_storeu_pd(&l10[u + hn].v, mu_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im; + fpr mu_re, mu_im; + + g00_re = g00[u]; + g00_im = g00[u + hn]; + g01_re = g01[u]; + g01_im = g01[u + hn]; + g11_re = g11[u]; + g11_im = g11[u + hn]; + FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im); + FPC_MUL(g01_re, g01_im, + mu_re, mu_im, g01_re, fpr_neg(g01_im)); + FPC_SUB(d11[u], d11[u + hn], + g11_re, g11_im, g01_re, g01_im); + l10[u] = mu_re; + l10[u + hn] = fpr_neg(mu_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_split_fft( + fpr *f0, fpr *f1, + const fpr *f, unsigned logn) { + /* + * The FFT representation we use is in bit-reversed order + * (element i contains f(w^(rev(i))), where rev() is the + * bit-reversal function over the ring degree). This changes + * indexes with regard to the Falcon specification. 
+ */ + size_t n, hn, qn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + qn = hn >> 1; + + if (n >= 8) { + __m256d half, sv; + + half = _mm256_set1_pd(0.5); + sv = _mm256_set_pd(-0.0, 0.0, -0.0, 0.0); + for (u = 0; u < qn; u += 2) { + __m256d ab_re, ab_im, ff0, ff1, ff2, ff3, gmt; + + ab_re = _mm256_loadu_pd(&f[(u << 1)].v); + ab_im = _mm256_loadu_pd(&f[(u << 1) + hn].v); + ff0 = _mm256_mul_pd(_mm256_hadd_pd(ab_re, ab_im), half); + ff0 = _mm256_permute4x64_pd(ff0, 0xD8); + _mm_storeu_pd(&f0[u].v, + _mm256_extractf128_pd(ff0, 0)); + _mm_storeu_pd(&f0[u + qn].v, + _mm256_extractf128_pd(ff0, 1)); + + ff1 = _mm256_mul_pd(_mm256_hsub_pd(ab_re, ab_im), half); + gmt = _mm256_loadu_pd(&fpr_gm_tab[(u + hn) << 1].v); + ff2 = _mm256_shuffle_pd(ff1, ff1, 0x5); + ff3 = _mm256_hadd_pd( + _mm256_mul_pd(ff1, gmt), + _mm256_xor_pd(_mm256_mul_pd(ff2, gmt), sv)); + ff3 = _mm256_permute4x64_pd(ff3, 0xD8); + _mm_storeu_pd(&f1[u].v, + _mm256_extractf128_pd(ff3, 0)); + _mm_storeu_pd(&f1[u + qn].v, + _mm256_extractf128_pd(ff3, 1)); + } + } else { + f0[0] = f[0]; + f1[0] = f[hn]; + + for (u = 0; u < qn; u ++) { + fpr a_re, a_im, b_re, b_im; + fpr t_re, t_im; + + a_re = f[(u << 1) + 0]; + a_im = f[(u << 1) + 0 + hn]; + b_re = f[(u << 1) + 1]; + b_im = f[(u << 1) + 1 + hn]; + + FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im); + f0[u] = fpr_half(t_re); + f0[u + qn] = fpr_half(t_im); + + FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im); + FPC_MUL(t_re, t_im, t_re, t_im, + fpr_gm_tab[((u + hn) << 1) + 0], + fpr_neg(fpr_gm_tab[((u + hn) << 1) + 1])); + f1[u] = fpr_half(t_re); + f1[u + qn] = fpr_half(t_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_poly_merge_fft( + fpr *f, + const fpr *f0, const fpr *f1, unsigned logn) { + size_t n, hn, qn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + qn = hn >> 1; + + if (n >= 16) { + for (u = 0; u < qn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im; + __m256d gm1, gm2, g_re, g_im; + __m256d t_re, t_im, u_re, u_im; + __m256d tu1_re, 
tu2_re, tu1_im, tu2_im; + + a_re = _mm256_loadu_pd(&f0[u].v); + a_im = _mm256_loadu_pd(&f0[u + qn].v); + c_re = _mm256_loadu_pd(&f1[u].v); + c_im = _mm256_loadu_pd(&f1[u + qn].v); + + gm1 = _mm256_loadu_pd(&fpr_gm_tab[(u + hn) << 1].v); + gm2 = _mm256_loadu_pd(&fpr_gm_tab[(u + 2 + hn) << 1].v); + g_re = _mm256_unpacklo_pd(gm1, gm2); + g_im = _mm256_unpackhi_pd(gm1, gm2); + g_re = _mm256_permute4x64_pd(g_re, 0xD8); + g_im = _mm256_permute4x64_pd(g_im, 0xD8); + + b_re = FMSUB( + c_re, g_re, _mm256_mul_pd(c_im, g_im)); + b_im = FMADD( + c_re, g_im, _mm256_mul_pd(c_im, g_re)); + + t_re = _mm256_add_pd(a_re, b_re); + t_im = _mm256_add_pd(a_im, b_im); + u_re = _mm256_sub_pd(a_re, b_re); + u_im = _mm256_sub_pd(a_im, b_im); + + tu1_re = _mm256_unpacklo_pd(t_re, u_re); + tu2_re = _mm256_unpackhi_pd(t_re, u_re); + tu1_im = _mm256_unpacklo_pd(t_im, u_im); + tu2_im = _mm256_unpackhi_pd(t_im, u_im); + _mm256_storeu_pd(&f[(u << 1)].v, + _mm256_permute2f128_pd(tu1_re, tu2_re, 0x20)); + _mm256_storeu_pd(&f[(u << 1) + 4].v, + _mm256_permute2f128_pd(tu1_re, tu2_re, 0x31)); + _mm256_storeu_pd(&f[(u << 1) + hn].v, + _mm256_permute2f128_pd(tu1_im, tu2_im, 0x20)); + _mm256_storeu_pd(&f[(u << 1) + 4 + hn].v, + _mm256_permute2f128_pd(tu1_im, tu2_im, 0x31)); + } + } else { + f[0] = f0[0]; + f[hn] = f1[0]; + + for (u = 0; u < qn; u ++) { + fpr a_re, a_im, b_re, b_im; + fpr t_re, t_im; + + a_re = f0[u]; + a_im = f0[u + qn]; + FPC_MUL(b_re, b_im, f1[u], f1[u + qn], + fpr_gm_tab[((u + hn) << 1) + 0], + fpr_gm_tab[((u + hn) << 1) + 1]); + FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im); + f[(u << 1) + 0] = t_re; + f[(u << 1) + 0 + hn] = t_im; + FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im); + f[(u << 1) + 1] = t_re; + f[(u << 1) + 1 + hn] = t_im; + } + } +} diff --git a/crypto_sign/falcon-1024/avx2/fpr.c b/crypto_sign/falcon-1024/avx2/fpr.c new file mode 100644 index 00000000..2f04a35d --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/fpr.c @@ -0,0 +1,1078 @@ +#include "inner.h" + +/* + * 
Floating-point operations. + * + * This file implements the non-inline functions declared in + * fpr.h, as well as the constants for FFT / iFFT. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + + +const fpr fpr_gm_tab[] = { + {0}, {0}, /* unused */ + {-0.000000000000000000000000000}, { 1.000000000000000000000000000}, + { 0.707106781186547524400844362}, { 0.707106781186547524400844362}, + {-0.707106781186547524400844362}, { 0.707106781186547524400844362}, + { 0.923879532511286756128183189}, { 0.382683432365089771728459984}, + {-0.382683432365089771728459984}, { 0.923879532511286756128183189}, + { 0.382683432365089771728459984}, { 0.923879532511286756128183189}, + {-0.923879532511286756128183189}, { 0.382683432365089771728459984}, + { 0.980785280403230449126182236}, { 0.195090322016128267848284868}, + {-0.195090322016128267848284868}, { 0.980785280403230449126182236}, + { 0.555570233019602224742830814}, { 0.831469612302545237078788378}, + {-0.831469612302545237078788378}, { 0.555570233019602224742830814}, + { 0.831469612302545237078788378}, { 0.555570233019602224742830814}, + {-0.555570233019602224742830814}, { 0.831469612302545237078788378}, + { 0.195090322016128267848284868}, { 0.980785280403230449126182236}, + {-0.980785280403230449126182236}, { 0.195090322016128267848284868}, + { 0.995184726672196886244836953}, { 0.098017140329560601994195564}, + {-0.098017140329560601994195564}, { 0.995184726672196886244836953}, + { 0.634393284163645498215171613}, { 0.773010453362736960810906610}, + {-0.773010453362736960810906610}, { 0.634393284163645498215171613}, + { 0.881921264348355029712756864}, { 0.471396736825997648556387626}, + {-0.471396736825997648556387626}, { 0.881921264348355029712756864}, + { 0.290284677254462367636192376}, { 0.956940335732208864935797887}, + {-0.956940335732208864935797887}, { 0.290284677254462367636192376}, + { 0.956940335732208864935797887}, { 0.290284677254462367636192376}, + {-0.290284677254462367636192376}, { 0.956940335732208864935797887}, + { 0.471396736825997648556387626}, { 0.881921264348355029712756864}, + 
{-0.881921264348355029712756864}, { 0.471396736825997648556387626}, + { 0.773010453362736960810906610}, { 0.634393284163645498215171613}, + {-0.634393284163645498215171613}, { 0.773010453362736960810906610}, + { 0.098017140329560601994195564}, { 0.995184726672196886244836953}, + {-0.995184726672196886244836953}, { 0.098017140329560601994195564}, + { 0.998795456205172392714771605}, { 0.049067674327418014254954977}, + {-0.049067674327418014254954977}, { 0.998795456205172392714771605}, + { 0.671558954847018400625376850}, { 0.740951125354959091175616897}, + {-0.740951125354959091175616897}, { 0.671558954847018400625376850}, + { 0.903989293123443331586200297}, { 0.427555093430282094320966857}, + {-0.427555093430282094320966857}, { 0.903989293123443331586200297}, + { 0.336889853392220050689253213}, { 0.941544065183020778412509403}, + {-0.941544065183020778412509403}, { 0.336889853392220050689253213}, + { 0.970031253194543992603984207}, { 0.242980179903263889948274162}, + {-0.242980179903263889948274162}, { 0.970031253194543992603984207}, + { 0.514102744193221726593693839}, { 0.857728610000272069902269984}, + {-0.857728610000272069902269984}, { 0.514102744193221726593693839}, + { 0.803207531480644909806676513}, { 0.595699304492433343467036529}, + {-0.595699304492433343467036529}, { 0.803207531480644909806676513}, + { 0.146730474455361751658850130}, { 0.989176509964780973451673738}, + {-0.989176509964780973451673738}, { 0.146730474455361751658850130}, + { 0.989176509964780973451673738}, { 0.146730474455361751658850130}, + {-0.146730474455361751658850130}, { 0.989176509964780973451673738}, + { 0.595699304492433343467036529}, { 0.803207531480644909806676513}, + {-0.803207531480644909806676513}, { 0.595699304492433343467036529}, + { 0.857728610000272069902269984}, { 0.514102744193221726593693839}, + {-0.514102744193221726593693839}, { 0.857728610000272069902269984}, + { 0.242980179903263889948274162}, { 0.970031253194543992603984207}, + {-0.970031253194543992603984207}, { 
0.242980179903263889948274162}, + { 0.941544065183020778412509403}, { 0.336889853392220050689253213}, + {-0.336889853392220050689253213}, { 0.941544065183020778412509403}, + { 0.427555093430282094320966857}, { 0.903989293123443331586200297}, + {-0.903989293123443331586200297}, { 0.427555093430282094320966857}, + { 0.740951125354959091175616897}, { 0.671558954847018400625376850}, + {-0.671558954847018400625376850}, { 0.740951125354959091175616897}, + { 0.049067674327418014254954977}, { 0.998795456205172392714771605}, + {-0.998795456205172392714771605}, { 0.049067674327418014254954977}, + { 0.999698818696204220115765650}, { 0.024541228522912288031734529}, + {-0.024541228522912288031734529}, { 0.999698818696204220115765650}, + { 0.689540544737066924616730630}, { 0.724247082951466920941069243}, + {-0.724247082951466920941069243}, { 0.689540544737066924616730630}, + { 0.914209755703530654635014829}, { 0.405241314004989870908481306}, + {-0.405241314004989870908481306}, { 0.914209755703530654635014829}, + { 0.359895036534988148775104572}, { 0.932992798834738887711660256}, + {-0.932992798834738887711660256}, { 0.359895036534988148775104572}, + { 0.975702130038528544460395766}, { 0.219101240156869797227737547}, + {-0.219101240156869797227737547}, { 0.975702130038528544460395766}, + { 0.534997619887097210663076905}, { 0.844853565249707073259571205}, + {-0.844853565249707073259571205}, { 0.534997619887097210663076905}, + { 0.817584813151583696504920884}, { 0.575808191417845300745972454}, + {-0.575808191417845300745972454}, { 0.817584813151583696504920884}, + { 0.170961888760301226363642357}, { 0.985277642388941244774018433}, + {-0.985277642388941244774018433}, { 0.170961888760301226363642357}, + { 0.992479534598709998156767252}, { 0.122410675199216198498704474}, + {-0.122410675199216198498704474}, { 0.992479534598709998156767252}, + { 0.615231590580626845484913563}, { 0.788346427626606262009164705}, + {-0.788346427626606262009164705}, { 0.615231590580626845484913563}, + { 
0.870086991108711418652292404}, { 0.492898192229784036873026689}, + {-0.492898192229784036873026689}, { 0.870086991108711418652292404}, + { 0.266712757474898386325286515}, { 0.963776065795439866686464356}, + {-0.963776065795439866686464356}, { 0.266712757474898386325286515}, + { 0.949528180593036667195936074}, { 0.313681740398891476656478846}, + {-0.313681740398891476656478846}, { 0.949528180593036667195936074}, + { 0.449611329654606600046294579}, { 0.893224301195515320342416447}, + {-0.893224301195515320342416447}, { 0.449611329654606600046294579}, + { 0.757208846506484547575464054}, { 0.653172842953776764084203014}, + {-0.653172842953776764084203014}, { 0.757208846506484547575464054}, + { 0.073564563599667423529465622}, { 0.997290456678690216135597140}, + {-0.997290456678690216135597140}, { 0.073564563599667423529465622}, + { 0.997290456678690216135597140}, { 0.073564563599667423529465622}, + {-0.073564563599667423529465622}, { 0.997290456678690216135597140}, + { 0.653172842953776764084203014}, { 0.757208846506484547575464054}, + {-0.757208846506484547575464054}, { 0.653172842953776764084203014}, + { 0.893224301195515320342416447}, { 0.449611329654606600046294579}, + {-0.449611329654606600046294579}, { 0.893224301195515320342416447}, + { 0.313681740398891476656478846}, { 0.949528180593036667195936074}, + {-0.949528180593036667195936074}, { 0.313681740398891476656478846}, + { 0.963776065795439866686464356}, { 0.266712757474898386325286515}, + {-0.266712757474898386325286515}, { 0.963776065795439866686464356}, + { 0.492898192229784036873026689}, { 0.870086991108711418652292404}, + {-0.870086991108711418652292404}, { 0.492898192229784036873026689}, + { 0.788346427626606262009164705}, { 0.615231590580626845484913563}, + {-0.615231590580626845484913563}, { 0.788346427626606262009164705}, + { 0.122410675199216198498704474}, { 0.992479534598709998156767252}, + {-0.992479534598709998156767252}, { 0.122410675199216198498704474}, + { 0.985277642388941244774018433}, { 
0.170961888760301226363642357}, + {-0.170961888760301226363642357}, { 0.985277642388941244774018433}, + { 0.575808191417845300745972454}, { 0.817584813151583696504920884}, + {-0.817584813151583696504920884}, { 0.575808191417845300745972454}, + { 0.844853565249707073259571205}, { 0.534997619887097210663076905}, + {-0.534997619887097210663076905}, { 0.844853565249707073259571205}, + { 0.219101240156869797227737547}, { 0.975702130038528544460395766}, + {-0.975702130038528544460395766}, { 0.219101240156869797227737547}, + { 0.932992798834738887711660256}, { 0.359895036534988148775104572}, + {-0.359895036534988148775104572}, { 0.932992798834738887711660256}, + { 0.405241314004989870908481306}, { 0.914209755703530654635014829}, + {-0.914209755703530654635014829}, { 0.405241314004989870908481306}, + { 0.724247082951466920941069243}, { 0.689540544737066924616730630}, + {-0.689540544737066924616730630}, { 0.724247082951466920941069243}, + { 0.024541228522912288031734529}, { 0.999698818696204220115765650}, + {-0.999698818696204220115765650}, { 0.024541228522912288031734529}, + { 0.999924701839144540921646491}, { 0.012271538285719926079408262}, + {-0.012271538285719926079408262}, { 0.999924701839144540921646491}, + { 0.698376249408972853554813503}, { 0.715730825283818654125532623}, + {-0.715730825283818654125532623}, { 0.698376249408972853554813503}, + { 0.919113851690057743908477789}, { 0.393992040061048108596188661}, + {-0.393992040061048108596188661}, { 0.919113851690057743908477789}, + { 0.371317193951837543411934967}, { 0.928506080473215565937167396}, + {-0.928506080473215565937167396}, { 0.371317193951837543411934967}, + { 0.978317370719627633106240097}, { 0.207111376192218549708116020}, + {-0.207111376192218549708116020}, { 0.978317370719627633106240097}, + { 0.545324988422046422313987347}, { 0.838224705554838043186996856}, + {-0.838224705554838043186996856}, { 0.545324988422046422313987347}, + { 0.824589302785025264474803737}, { 0.565731810783613197389765011}, + 
{-0.565731810783613197389765011}, { 0.824589302785025264474803737}, + { 0.183039887955140958516532578}, { 0.983105487431216327180301155}, + {-0.983105487431216327180301155}, { 0.183039887955140958516532578}, + { 0.993906970002356041546922813}, { 0.110222207293883058807899140}, + {-0.110222207293883058807899140}, { 0.993906970002356041546922813}, + { 0.624859488142386377084072816}, { 0.780737228572094478301588484}, + {-0.780737228572094478301588484}, { 0.624859488142386377084072816}, + { 0.876070094195406607095844268}, { 0.482183772079122748517344481}, + {-0.482183772079122748517344481}, { 0.876070094195406607095844268}, + { 0.278519689385053105207848526}, { 0.960430519415565811199035138}, + {-0.960430519415565811199035138}, { 0.278519689385053105207848526}, + { 0.953306040354193836916740383}, { 0.302005949319228067003463232}, + {-0.302005949319228067003463232}, { 0.953306040354193836916740383}, + { 0.460538710958240023633181487}, { 0.887639620402853947760181617}, + {-0.887639620402853947760181617}, { 0.460538710958240023633181487}, + { 0.765167265622458925888815999}, { 0.643831542889791465068086063}, + {-0.643831542889791465068086063}, { 0.765167265622458925888815999}, + { 0.085797312344439890461556332}, { 0.996312612182778012627226190}, + {-0.996312612182778012627226190}, { 0.085797312344439890461556332}, + { 0.998118112900149207125155861}, { 0.061320736302208577782614593}, + {-0.061320736302208577782614593}, { 0.998118112900149207125155861}, + { 0.662415777590171761113069817}, { 0.749136394523459325469203257}, + {-0.749136394523459325469203257}, { 0.662415777590171761113069817}, + { 0.898674465693953843041976744}, { 0.438616238538527637647025738}, + {-0.438616238538527637647025738}, { 0.898674465693953843041976744}, + { 0.325310292162262934135954708}, { 0.945607325380521325730945387}, + {-0.945607325380521325730945387}, { 0.325310292162262934135954708}, + { 0.966976471044852109087220226}, { 0.254865659604514571553980779}, + {-0.254865659604514571553980779}, { 
0.966976471044852109087220226}, + { 0.503538383725717558691867071}, { 0.863972856121586737918147054}, + {-0.863972856121586737918147054}, { 0.503538383725717558691867071}, + { 0.795836904608883536262791915}, { 0.605511041404325513920626941}, + {-0.605511041404325513920626941}, { 0.795836904608883536262791915}, + { 0.134580708507126186316358409}, { 0.990902635427780025108237011}, + {-0.990902635427780025108237011}, { 0.134580708507126186316358409}, + { 0.987301418157858382399815802}, { 0.158858143333861441684385360}, + {-0.158858143333861441684385360}, { 0.987301418157858382399815802}, + { 0.585797857456438860328080838}, { 0.810457198252594791726703434}, + {-0.810457198252594791726703434}, { 0.585797857456438860328080838}, + { 0.851355193105265142261290312}, { 0.524589682678468906215098464}, + {-0.524589682678468906215098464}, { 0.851355193105265142261290312}, + { 0.231058108280671119643236018}, { 0.972939952205560145467720114}, + {-0.972939952205560145467720114}, { 0.231058108280671119643236018}, + { 0.937339011912574923201899593}, { 0.348418680249434568419308588}, + {-0.348418680249434568419308588}, { 0.937339011912574923201899593}, + { 0.416429560097637182562598911}, { 0.909167983090522376563884788}, + {-0.909167983090522376563884788}, { 0.416429560097637182562598911}, + { 0.732654271672412834615546649}, { 0.680600997795453050594430464}, + {-0.680600997795453050594430464}, { 0.732654271672412834615546649}, + { 0.036807222941358832324332691}, { 0.999322384588349500896221011}, + {-0.999322384588349500896221011}, { 0.036807222941358832324332691}, + { 0.999322384588349500896221011}, { 0.036807222941358832324332691}, + {-0.036807222941358832324332691}, { 0.999322384588349500896221011}, + { 0.680600997795453050594430464}, { 0.732654271672412834615546649}, + {-0.732654271672412834615546649}, { 0.680600997795453050594430464}, + { 0.909167983090522376563884788}, { 0.416429560097637182562598911}, + {-0.416429560097637182562598911}, { 0.909167983090522376563884788}, + { 
0.348418680249434568419308588}, { 0.937339011912574923201899593}, + {-0.937339011912574923201899593}, { 0.348418680249434568419308588}, + { 0.972939952205560145467720114}, { 0.231058108280671119643236018}, + {-0.231058108280671119643236018}, { 0.972939952205560145467720114}, + { 0.524589682678468906215098464}, { 0.851355193105265142261290312}, + {-0.851355193105265142261290312}, { 0.524589682678468906215098464}, + { 0.810457198252594791726703434}, { 0.585797857456438860328080838}, + {-0.585797857456438860328080838}, { 0.810457198252594791726703434}, + { 0.158858143333861441684385360}, { 0.987301418157858382399815802}, + {-0.987301418157858382399815802}, { 0.158858143333861441684385360}, + { 0.990902635427780025108237011}, { 0.134580708507126186316358409}, + {-0.134580708507126186316358409}, { 0.990902635427780025108237011}, + { 0.605511041404325513920626941}, { 0.795836904608883536262791915}, + {-0.795836904608883536262791915}, { 0.605511041404325513920626941}, + { 0.863972856121586737918147054}, { 0.503538383725717558691867071}, + {-0.503538383725717558691867071}, { 0.863972856121586737918147054}, + { 0.254865659604514571553980779}, { 0.966976471044852109087220226}, + {-0.966976471044852109087220226}, { 0.254865659604514571553980779}, + { 0.945607325380521325730945387}, { 0.325310292162262934135954708}, + {-0.325310292162262934135954708}, { 0.945607325380521325730945387}, + { 0.438616238538527637647025738}, { 0.898674465693953843041976744}, + {-0.898674465693953843041976744}, { 0.438616238538527637647025738}, + { 0.749136394523459325469203257}, { 0.662415777590171761113069817}, + {-0.662415777590171761113069817}, { 0.749136394523459325469203257}, + { 0.061320736302208577782614593}, { 0.998118112900149207125155861}, + {-0.998118112900149207125155861}, { 0.061320736302208577782614593}, + { 0.996312612182778012627226190}, { 0.085797312344439890461556332}, + {-0.085797312344439890461556332}, { 0.996312612182778012627226190}, + { 0.643831542889791465068086063}, { 
0.765167265622458925888815999}, + {-0.765167265622458925888815999}, { 0.643831542889791465068086063}, + { 0.887639620402853947760181617}, { 0.460538710958240023633181487}, + {-0.460538710958240023633181487}, { 0.887639620402853947760181617}, + { 0.302005949319228067003463232}, { 0.953306040354193836916740383}, + {-0.953306040354193836916740383}, { 0.302005949319228067003463232}, + { 0.960430519415565811199035138}, { 0.278519689385053105207848526}, + {-0.278519689385053105207848526}, { 0.960430519415565811199035138}, + { 0.482183772079122748517344481}, { 0.876070094195406607095844268}, + {-0.876070094195406607095844268}, { 0.482183772079122748517344481}, + { 0.780737228572094478301588484}, { 0.624859488142386377084072816}, + {-0.624859488142386377084072816}, { 0.780737228572094478301588484}, + { 0.110222207293883058807899140}, { 0.993906970002356041546922813}, + {-0.993906970002356041546922813}, { 0.110222207293883058807899140}, + { 0.983105487431216327180301155}, { 0.183039887955140958516532578}, + {-0.183039887955140958516532578}, { 0.983105487431216327180301155}, + { 0.565731810783613197389765011}, { 0.824589302785025264474803737}, + {-0.824589302785025264474803737}, { 0.565731810783613197389765011}, + { 0.838224705554838043186996856}, { 0.545324988422046422313987347}, + {-0.545324988422046422313987347}, { 0.838224705554838043186996856}, + { 0.207111376192218549708116020}, { 0.978317370719627633106240097}, + {-0.978317370719627633106240097}, { 0.207111376192218549708116020}, + { 0.928506080473215565937167396}, { 0.371317193951837543411934967}, + {-0.371317193951837543411934967}, { 0.928506080473215565937167396}, + { 0.393992040061048108596188661}, { 0.919113851690057743908477789}, + {-0.919113851690057743908477789}, { 0.393992040061048108596188661}, + { 0.715730825283818654125532623}, { 0.698376249408972853554813503}, + {-0.698376249408972853554813503}, { 0.715730825283818654125532623}, + { 0.012271538285719926079408262}, { 0.999924701839144540921646491}, + 
{-0.999924701839144540921646491}, { 0.012271538285719926079408262}, + { 0.999981175282601142656990438}, { 0.006135884649154475359640235}, + {-0.006135884649154475359640235}, { 0.999981175282601142656990438}, + { 0.702754744457225302452914421}, { 0.711432195745216441522130290}, + {-0.711432195745216441522130290}, { 0.702754744457225302452914421}, + { 0.921514039342041943465396332}, { 0.388345046698826291624993541}, + {-0.388345046698826291624993541}, { 0.921514039342041943465396332}, + { 0.377007410216418256726567823}, { 0.926210242138311341974793388}, + {-0.926210242138311341974793388}, { 0.377007410216418256726567823}, + { 0.979569765685440534439326110}, { 0.201104634842091911558443546}, + {-0.201104634842091911558443546}, { 0.979569765685440534439326110}, + { 0.550457972936604802977289893}, { 0.834862874986380056304401383}, + {-0.834862874986380056304401383}, { 0.550457972936604802977289893}, + { 0.828045045257755752067527592}, { 0.560661576197336023839710223}, + {-0.560661576197336023839710223}, { 0.828045045257755752067527592}, + { 0.189068664149806212754997837}, { 0.981963869109555264072848154}, + {-0.981963869109555264072848154}, { 0.189068664149806212754997837}, + { 0.994564570734255452119106243}, { 0.104121633872054579120943880}, + {-0.104121633872054579120943880}, { 0.994564570734255452119106243}, + { 0.629638238914927025372981341}, { 0.776888465673232450040827983}, + {-0.776888465673232450040827983}, { 0.629638238914927025372981341}, + { 0.879012226428633477831323711}, { 0.476799230063322133342158117}, + {-0.476799230063322133342158117}, { 0.879012226428633477831323711}, + { 0.284407537211271843618310615}, { 0.958703474895871555374645792}, + {-0.958703474895871555374645792}, { 0.284407537211271843618310615}, + { 0.955141168305770721498157712}, { 0.296150888243623824121786128}, + {-0.296150888243623824121786128}, { 0.955141168305770721498157712}, + { 0.465976495767966177902756065}, { 0.884797098430937780104007041}, + {-0.884797098430937780104007041}, { 
0.465976495767966177902756065}, + { 0.769103337645579639346626069}, { 0.639124444863775743801488193}, + {-0.639124444863775743801488193}, { 0.769103337645579639346626069}, + { 0.091908956497132728624990979}, { 0.995767414467659793982495643}, + {-0.995767414467659793982495643}, { 0.091908956497132728624990979}, + { 0.998475580573294752208559038}, { 0.055195244349689939809447526}, + {-0.055195244349689939809447526}, { 0.998475580573294752208559038}, + { 0.666999922303637506650154222}, { 0.745057785441465962407907310}, + {-0.745057785441465962407907310}, { 0.666999922303637506650154222}, + { 0.901348847046022014570746093}, { 0.433093818853151968484222638}, + {-0.433093818853151968484222638}, { 0.901348847046022014570746093}, + { 0.331106305759876401737190737}, { 0.943593458161960361495301445}, + {-0.943593458161960361495301445}, { 0.331106305759876401737190737}, + { 0.968522094274417316221088329}, { 0.248927605745720168110682816}, + {-0.248927605745720168110682816}, { 0.968522094274417316221088329}, + { 0.508830142543107036931749324}, { 0.860866938637767279344583877}, + {-0.860866938637767279344583877}, { 0.508830142543107036931749324}, + { 0.799537269107905033500246232}, { 0.600616479383868926653875896}, + {-0.600616479383868926653875896}, { 0.799537269107905033500246232}, + { 0.140658239332849230714788846}, { 0.990058210262297105505906464}, + {-0.990058210262297105505906464}, { 0.140658239332849230714788846}, + { 0.988257567730749491404792538}, { 0.152797185258443427720336613}, + {-0.152797185258443427720336613}, { 0.988257567730749491404792538}, + { 0.590759701858874228423887908}, { 0.806847553543799272206514313}, + {-0.806847553543799272206514313}, { 0.590759701858874228423887908}, + { 0.854557988365400520767862276}, { 0.519355990165589587361829932}, + {-0.519355990165589587361829932}, { 0.854557988365400520767862276}, + { 0.237023605994367206867735915}, { 0.971503890986251775537099622}, + {-0.971503890986251775537099622}, { 0.237023605994367206867735915}, + { 
0.939459223602189911962669246}, { 0.342660717311994397592781983}, + {-0.342660717311994397592781983}, { 0.939459223602189911962669246}, + { 0.422000270799799685941287941}, { 0.906595704514915365332960588}, + {-0.906595704514915365332960588}, { 0.422000270799799685941287941}, + { 0.736816568877369875090132520}, { 0.676092703575315960360419228}, + {-0.676092703575315960360419228}, { 0.736816568877369875090132520}, + { 0.042938256934940823077124540}, { 0.999077727752645382888781997}, + {-0.999077727752645382888781997}, { 0.042938256934940823077124540}, + { 0.999529417501093163079703322}, { 0.030674803176636625934021028}, + {-0.030674803176636625934021028}, { 0.999529417501093163079703322}, + { 0.685083667772700381362052545}, { 0.728464390448225196492035438}, + {-0.728464390448225196492035438}, { 0.685083667772700381362052545}, + { 0.911706032005429851404397325}, { 0.410843171057903942183466675}, + {-0.410843171057903942183466675}, { 0.911706032005429851404397325}, + { 0.354163525420490382357395796}, { 0.935183509938947577642207480}, + {-0.935183509938947577642207480}, { 0.354163525420490382357395796}, + { 0.974339382785575860518721668}, { 0.225083911359792835991642120}, + {-0.225083911359792835991642120}, { 0.974339382785575860518721668}, + { 0.529803624686294668216054671}, { 0.848120344803297251279133563}, + {-0.848120344803297251279133563}, { 0.529803624686294668216054671}, + { 0.814036329705948361654516690}, { 0.580813958095764545075595272}, + {-0.580813958095764545075595272}, { 0.814036329705948361654516690}, + { 0.164913120489969921418189113}, { 0.986308097244598647863297524}, + {-0.986308097244598647863297524}, { 0.164913120489969921418189113}, + { 0.991709753669099522860049931}, { 0.128498110793793172624415589}, + {-0.128498110793793172624415589}, { 0.991709753669099522860049931}, + { 0.610382806276309452716352152}, { 0.792106577300212351782342879}, + {-0.792106577300212351782342879}, { 0.610382806276309452716352152}, + { 0.867046245515692651480195629}, { 
0.498227666972781852410983869}, + {-0.498227666972781852410983869}, { 0.867046245515692651480195629}, + { 0.260794117915275518280186509}, { 0.965394441697689374550843858}, + {-0.965394441697689374550843858}, { 0.260794117915275518280186509}, + { 0.947585591017741134653387321}, { 0.319502030816015677901518272}, + {-0.319502030816015677901518272}, { 0.947585591017741134653387321}, + { 0.444122144570429231642069418}, { 0.895966249756185155914560282}, + {-0.895966249756185155914560282}, { 0.444122144570429231642069418}, + { 0.753186799043612482483430486}, { 0.657806693297078656931182264}, + {-0.657806693297078656931182264}, { 0.753186799043612482483430486}, + { 0.067443919563664057897972422}, { 0.997723066644191609848546728}, + {-0.997723066644191609848546728}, { 0.067443919563664057897972422}, + { 0.996820299291165714972629398}, { 0.079682437971430121147120656}, + {-0.079682437971430121147120656}, { 0.996820299291165714972629398}, + { 0.648514401022112445084560551}, { 0.761202385484261814029709836}, + {-0.761202385484261814029709836}, { 0.648514401022112445084560551}, + { 0.890448723244757889952150560}, { 0.455083587126343823535869268}, + {-0.455083587126343823535869268}, { 0.890448723244757889952150560}, + { 0.307849640041534893682063646}, { 0.951435020969008369549175569}, + {-0.951435020969008369549175569}, { 0.307849640041534893682063646}, + { 0.962121404269041595429604316}, { 0.272621355449948984493347477}, + {-0.272621355449948984493347477}, { 0.962121404269041595429604316}, + { 0.487550160148435954641485027}, { 0.873094978418290098636085973}, + {-0.873094978418290098636085973}, { 0.487550160148435954641485027}, + { 0.784556597155575233023892575}, { 0.620057211763289178646268191}, + {-0.620057211763289178646268191}, { 0.784556597155575233023892575}, + { 0.116318630911904767252544319}, { 0.993211949234794533104601012}, + {-0.993211949234794533104601012}, { 0.116318630911904767252544319}, + { 0.984210092386929073193874387}, { 0.177004220412148756196839844}, + 
{-0.177004220412148756196839844}, { 0.984210092386929073193874387}, + { 0.570780745886967280232652864}, { 0.821102514991104679060430820}, + {-0.821102514991104679060430820}, { 0.570780745886967280232652864}, + { 0.841554977436898409603499520}, { 0.540171472729892881297845480}, + {-0.540171472729892881297845480}, { 0.841554977436898409603499520}, + { 0.213110319916091373967757518}, { 0.977028142657754351485866211}, + {-0.977028142657754351485866211}, { 0.213110319916091373967757518}, + { 0.930766961078983731944872340}, { 0.365612997804773870011745909}, + {-0.365612997804773870011745909}, { 0.930766961078983731944872340}, + { 0.399624199845646828544117031}, { 0.916679059921042663116457013}, + {-0.916679059921042663116457013}, { 0.399624199845646828544117031}, + { 0.720002507961381629076682999}, { 0.693971460889654009003734389}, + {-0.693971460889654009003734389}, { 0.720002507961381629076682999}, + { 0.018406729905804820927366313}, { 0.999830581795823422015722275}, + {-0.999830581795823422015722275}, { 0.018406729905804820927366313}, + { 0.999830581795823422015722275}, { 0.018406729905804820927366313}, + {-0.018406729905804820927366313}, { 0.999830581795823422015722275}, + { 0.693971460889654009003734389}, { 0.720002507961381629076682999}, + {-0.720002507961381629076682999}, { 0.693971460889654009003734389}, + { 0.916679059921042663116457013}, { 0.399624199845646828544117031}, + {-0.399624199845646828544117031}, { 0.916679059921042663116457013}, + { 0.365612997804773870011745909}, { 0.930766961078983731944872340}, + {-0.930766961078983731944872340}, { 0.365612997804773870011745909}, + { 0.977028142657754351485866211}, { 0.213110319916091373967757518}, + {-0.213110319916091373967757518}, { 0.977028142657754351485866211}, + { 0.540171472729892881297845480}, { 0.841554977436898409603499520}, + {-0.841554977436898409603499520}, { 0.540171472729892881297845480}, + { 0.821102514991104679060430820}, { 0.570780745886967280232652864}, + {-0.570780745886967280232652864}, { 
0.821102514991104679060430820}, + { 0.177004220412148756196839844}, { 0.984210092386929073193874387}, + {-0.984210092386929073193874387}, { 0.177004220412148756196839844}, + { 0.993211949234794533104601012}, { 0.116318630911904767252544319}, + {-0.116318630911904767252544319}, { 0.993211949234794533104601012}, + { 0.620057211763289178646268191}, { 0.784556597155575233023892575}, + {-0.784556597155575233023892575}, { 0.620057211763289178646268191}, + { 0.873094978418290098636085973}, { 0.487550160148435954641485027}, + {-0.487550160148435954641485027}, { 0.873094978418290098636085973}, + { 0.272621355449948984493347477}, { 0.962121404269041595429604316}, + {-0.962121404269041595429604316}, { 0.272621355449948984493347477}, + { 0.951435020969008369549175569}, { 0.307849640041534893682063646}, + {-0.307849640041534893682063646}, { 0.951435020969008369549175569}, + { 0.455083587126343823535869268}, { 0.890448723244757889952150560}, + {-0.890448723244757889952150560}, { 0.455083587126343823535869268}, + { 0.761202385484261814029709836}, { 0.648514401022112445084560551}, + {-0.648514401022112445084560551}, { 0.761202385484261814029709836}, + { 0.079682437971430121147120656}, { 0.996820299291165714972629398}, + {-0.996820299291165714972629398}, { 0.079682437971430121147120656}, + { 0.997723066644191609848546728}, { 0.067443919563664057897972422}, + {-0.067443919563664057897972422}, { 0.997723066644191609848546728}, + { 0.657806693297078656931182264}, { 0.753186799043612482483430486}, + {-0.753186799043612482483430486}, { 0.657806693297078656931182264}, + { 0.895966249756185155914560282}, { 0.444122144570429231642069418}, + {-0.444122144570429231642069418}, { 0.895966249756185155914560282}, + { 0.319502030816015677901518272}, { 0.947585591017741134653387321}, + {-0.947585591017741134653387321}, { 0.319502030816015677901518272}, + { 0.965394441697689374550843858}, { 0.260794117915275518280186509}, + {-0.260794117915275518280186509}, { 0.965394441697689374550843858}, + { 
0.498227666972781852410983869}, { 0.867046245515692651480195629}, + {-0.867046245515692651480195629}, { 0.498227666972781852410983869}, + { 0.792106577300212351782342879}, { 0.610382806276309452716352152}, + {-0.610382806276309452716352152}, { 0.792106577300212351782342879}, + { 0.128498110793793172624415589}, { 0.991709753669099522860049931}, + {-0.991709753669099522860049931}, { 0.128498110793793172624415589}, + { 0.986308097244598647863297524}, { 0.164913120489969921418189113}, + {-0.164913120489969921418189113}, { 0.986308097244598647863297524}, + { 0.580813958095764545075595272}, { 0.814036329705948361654516690}, + {-0.814036329705948361654516690}, { 0.580813958095764545075595272}, + { 0.848120344803297251279133563}, { 0.529803624686294668216054671}, + {-0.529803624686294668216054671}, { 0.848120344803297251279133563}, + { 0.225083911359792835991642120}, { 0.974339382785575860518721668}, + {-0.974339382785575860518721668}, { 0.225083911359792835991642120}, + { 0.935183509938947577642207480}, { 0.354163525420490382357395796}, + {-0.354163525420490382357395796}, { 0.935183509938947577642207480}, + { 0.410843171057903942183466675}, { 0.911706032005429851404397325}, + {-0.911706032005429851404397325}, { 0.410843171057903942183466675}, + { 0.728464390448225196492035438}, { 0.685083667772700381362052545}, + {-0.685083667772700381362052545}, { 0.728464390448225196492035438}, + { 0.030674803176636625934021028}, { 0.999529417501093163079703322}, + {-0.999529417501093163079703322}, { 0.030674803176636625934021028}, + { 0.999077727752645382888781997}, { 0.042938256934940823077124540}, + {-0.042938256934940823077124540}, { 0.999077727752645382888781997}, + { 0.676092703575315960360419228}, { 0.736816568877369875090132520}, + {-0.736816568877369875090132520}, { 0.676092703575315960360419228}, + { 0.906595704514915365332960588}, { 0.422000270799799685941287941}, + {-0.422000270799799685941287941}, { 0.906595704514915365332960588}, + { 0.342660717311994397592781983}, { 
0.939459223602189911962669246}, + {-0.939459223602189911962669246}, { 0.342660717311994397592781983}, + { 0.971503890986251775537099622}, { 0.237023605994367206867735915}, + {-0.237023605994367206867735915}, { 0.971503890986251775537099622}, + { 0.519355990165589587361829932}, { 0.854557988365400520767862276}, + {-0.854557988365400520767862276}, { 0.519355990165589587361829932}, + { 0.806847553543799272206514313}, { 0.590759701858874228423887908}, + {-0.590759701858874228423887908}, { 0.806847553543799272206514313}, + { 0.152797185258443427720336613}, { 0.988257567730749491404792538}, + {-0.988257567730749491404792538}, { 0.152797185258443427720336613}, + { 0.990058210262297105505906464}, { 0.140658239332849230714788846}, + {-0.140658239332849230714788846}, { 0.990058210262297105505906464}, + { 0.600616479383868926653875896}, { 0.799537269107905033500246232}, + {-0.799537269107905033500246232}, { 0.600616479383868926653875896}, + { 0.860866938637767279344583877}, { 0.508830142543107036931749324}, + {-0.508830142543107036931749324}, { 0.860866938637767279344583877}, + { 0.248927605745720168110682816}, { 0.968522094274417316221088329}, + {-0.968522094274417316221088329}, { 0.248927605745720168110682816}, + { 0.943593458161960361495301445}, { 0.331106305759876401737190737}, + {-0.331106305759876401737190737}, { 0.943593458161960361495301445}, + { 0.433093818853151968484222638}, { 0.901348847046022014570746093}, + {-0.901348847046022014570746093}, { 0.433093818853151968484222638}, + { 0.745057785441465962407907310}, { 0.666999922303637506650154222}, + {-0.666999922303637506650154222}, { 0.745057785441465962407907310}, + { 0.055195244349689939809447526}, { 0.998475580573294752208559038}, + {-0.998475580573294752208559038}, { 0.055195244349689939809447526}, + { 0.995767414467659793982495643}, { 0.091908956497132728624990979}, + {-0.091908956497132728624990979}, { 0.995767414467659793982495643}, + { 0.639124444863775743801488193}, { 0.769103337645579639346626069}, + 
{-0.769103337645579639346626069}, { 0.639124444863775743801488193}, + { 0.884797098430937780104007041}, { 0.465976495767966177902756065}, + {-0.465976495767966177902756065}, { 0.884797098430937780104007041}, + { 0.296150888243623824121786128}, { 0.955141168305770721498157712}, + {-0.955141168305770721498157712}, { 0.296150888243623824121786128}, + { 0.958703474895871555374645792}, { 0.284407537211271843618310615}, + {-0.284407537211271843618310615}, { 0.958703474895871555374645792}, + { 0.476799230063322133342158117}, { 0.879012226428633477831323711}, + {-0.879012226428633477831323711}, { 0.476799230063322133342158117}, + { 0.776888465673232450040827983}, { 0.629638238914927025372981341}, + {-0.629638238914927025372981341}, { 0.776888465673232450040827983}, + { 0.104121633872054579120943880}, { 0.994564570734255452119106243}, + {-0.994564570734255452119106243}, { 0.104121633872054579120943880}, + { 0.981963869109555264072848154}, { 0.189068664149806212754997837}, + {-0.189068664149806212754997837}, { 0.981963869109555264072848154}, + { 0.560661576197336023839710223}, { 0.828045045257755752067527592}, + {-0.828045045257755752067527592}, { 0.560661576197336023839710223}, + { 0.834862874986380056304401383}, { 0.550457972936604802977289893}, + {-0.550457972936604802977289893}, { 0.834862874986380056304401383}, + { 0.201104634842091911558443546}, { 0.979569765685440534439326110}, + {-0.979569765685440534439326110}, { 0.201104634842091911558443546}, + { 0.926210242138311341974793388}, { 0.377007410216418256726567823}, + {-0.377007410216418256726567823}, { 0.926210242138311341974793388}, + { 0.388345046698826291624993541}, { 0.921514039342041943465396332}, + {-0.921514039342041943465396332}, { 0.388345046698826291624993541}, + { 0.711432195745216441522130290}, { 0.702754744457225302452914421}, + {-0.702754744457225302452914421}, { 0.711432195745216441522130290}, + { 0.006135884649154475359640235}, { 0.999981175282601142656990438}, + {-0.999981175282601142656990438}, { 
0.006135884649154475359640235}, + { 0.999995293809576171511580126}, { 0.003067956762965976270145365}, + {-0.003067956762965976270145365}, { 0.999995293809576171511580126}, + { 0.704934080375904908852523758}, { 0.709272826438865651316533772}, + {-0.709272826438865651316533772}, { 0.704934080375904908852523758}, + { 0.922701128333878570437264227}, { 0.385516053843918864075607949}, + {-0.385516053843918864075607949}, { 0.922701128333878570437264227}, + { 0.379847208924051170576281147}, { 0.925049240782677590302371869}, + {-0.925049240782677590302371869}, { 0.379847208924051170576281147}, + { 0.980182135968117392690210009}, { 0.198098410717953586179324918}, + {-0.198098410717953586179324918}, { 0.980182135968117392690210009}, + { 0.553016705580027531764226988}, { 0.833170164701913186439915922}, + {-0.833170164701913186439915922}, { 0.553016705580027531764226988}, + { 0.829761233794523042469023765}, { 0.558118531220556115693702964}, + {-0.558118531220556115693702964}, { 0.829761233794523042469023765}, + { 0.192080397049892441679288205}, { 0.981379193313754574318224190}, + {-0.981379193313754574318224190}, { 0.192080397049892441679288205}, + { 0.994879330794805620591166107}, { 0.101069862754827824987887585}, + {-0.101069862754827824987887585}, { 0.994879330794805620591166107}, + { 0.632018735939809021909403706}, { 0.774953106594873878359129282}, + {-0.774953106594873878359129282}, { 0.632018735939809021909403706}, + { 0.880470889052160770806542929}, { 0.474100214650550014398580015}, + {-0.474100214650550014398580015}, { 0.880470889052160770806542929}, + { 0.287347459544729526477331841}, { 0.957826413027532890321037029}, + {-0.957826413027532890321037029}, { 0.287347459544729526477331841}, + { 0.956045251349996443270479823}, { 0.293219162694258650606608599}, + {-0.293219162694258650606608599}, { 0.956045251349996443270479823}, + { 0.468688822035827933697617870}, { 0.883363338665731594736308015}, + {-0.883363338665731594736308015}, { 0.468688822035827933697617870}, + { 
0.771060524261813773200605759}, { 0.636761861236284230413943435}, + {-0.636761861236284230413943435}, { 0.771060524261813773200605759}, + { 0.094963495329638998938034312}, { 0.995480755491926941769171600}, + {-0.995480755491926941769171600}, { 0.094963495329638998938034312}, + { 0.998640218180265222418199049}, { 0.052131704680283321236358216}, + {-0.052131704680283321236358216}, { 0.998640218180265222418199049}, + { 0.669282588346636065720696366}, { 0.743007952135121693517362293}, + {-0.743007952135121693517362293}, { 0.669282588346636065720696366}, + { 0.902673318237258806751502391}, { 0.430326481340082633908199031}, + {-0.430326481340082633908199031}, { 0.902673318237258806751502391}, + { 0.333999651442009404650865481}, { 0.942573197601446879280758735}, + {-0.942573197601446879280758735}, { 0.333999651442009404650865481}, + { 0.969281235356548486048290738}, { 0.245955050335794611599924709}, + {-0.245955050335794611599924709}, { 0.969281235356548486048290738}, + { 0.511468850437970399504391001}, { 0.859301818357008404783582139}, + {-0.859301818357008404783582139}, { 0.511468850437970399504391001}, + { 0.801376171723140219430247777}, { 0.598160706996342311724958652}, + {-0.598160706996342311724958652}, { 0.801376171723140219430247777}, + { 0.143695033150294454819773349}, { 0.989622017463200834623694454}, + {-0.989622017463200834623694454}, { 0.143695033150294454819773349}, + { 0.988721691960323767604516485}, { 0.149764534677321517229695737}, + {-0.149764534677321517229695737}, { 0.988721691960323767604516485}, + { 0.593232295039799808047809426}, { 0.805031331142963597922659282}, + {-0.805031331142963597922659282}, { 0.593232295039799808047809426}, + { 0.856147328375194481019630732}, { 0.516731799017649881508753876}, + {-0.516731799017649881508753876}, { 0.856147328375194481019630732}, + { 0.240003022448741486568922365}, { 0.970772140728950302138169611}, + {-0.970772140728950302138169611}, { 0.240003022448741486568922365}, + { 0.940506070593268323787291309}, { 
0.339776884406826857828825803}, + {-0.339776884406826857828825803}, { 0.940506070593268323787291309}, + { 0.424779681209108833357226189}, { 0.905296759318118774354048329}, + {-0.905296759318118774354048329}, { 0.424779681209108833357226189}, + { 0.738887324460615147933116508}, { 0.673829000378756060917568372}, + {-0.673829000378756060917568372}, { 0.738887324460615147933116508}, + { 0.046003182130914628814301788}, { 0.998941293186856850633930266}, + {-0.998941293186856850633930266}, { 0.046003182130914628814301788}, + { 0.999618822495178597116830637}, { 0.027608145778965741612354872}, + {-0.027608145778965741612354872}, { 0.999618822495178597116830637}, + { 0.687315340891759108199186948}, { 0.726359155084345976817494315}, + {-0.726359155084345976817494315}, { 0.687315340891759108199186948}, + { 0.912962190428398164628018233}, { 0.408044162864978680820747499}, + {-0.408044162864978680820747499}, { 0.912962190428398164628018233}, + { 0.357030961233430032614954036}, { 0.934092550404258914729877883}, + {-0.934092550404258914729877883}, { 0.357030961233430032614954036}, + { 0.975025345066994146844913468}, { 0.222093620973203534094094721}, + {-0.222093620973203534094094721}, { 0.975025345066994146844913468}, + { 0.532403127877197971442805218}, { 0.846490938774052078300544488}, + {-0.846490938774052078300544488}, { 0.532403127877197971442805218}, + { 0.815814410806733789010772660}, { 0.578313796411655563342245019}, + {-0.578313796411655563342245019}, { 0.815814410806733789010772660}, + { 0.167938294974731178054745536}, { 0.985797509167567424700995000}, + {-0.985797509167567424700995000}, { 0.167938294974731178054745536}, + { 0.992099313142191757112085445}, { 0.125454983411546238542336453}, + {-0.125454983411546238542336453}, { 0.992099313142191757112085445}, + { 0.612810082429409703935211936}, { 0.790230221437310055030217152}, + {-0.790230221437310055030217152}, { 0.612810082429409703935211936}, + { 0.868570705971340895340449876}, { 0.495565261825772531150266670}, + 
{-0.495565261825772531150266670}, { 0.868570705971340895340449876}, + { 0.263754678974831383611349322}, { 0.964589793289812723836432159}, + {-0.964589793289812723836432159}, { 0.263754678974831383611349322}, + { 0.948561349915730288158494826}, { 0.316593375556165867243047035}, + {-0.316593375556165867243047035}, { 0.948561349915730288158494826}, + { 0.446868840162374195353044389}, { 0.894599485631382678433072126}, + {-0.894599485631382678433072126}, { 0.446868840162374195353044389}, + { 0.755201376896536527598710756}, { 0.655492852999615385312679701}, + {-0.655492852999615385312679701}, { 0.755201376896536527598710756}, + { 0.070504573389613863027351471}, { 0.997511456140303459699448390}, + {-0.997511456140303459699448390}, { 0.070504573389613863027351471}, + { 0.997060070339482978987989949}, { 0.076623861392031492278332463}, + {-0.076623861392031492278332463}, { 0.997060070339482978987989949}, + { 0.650846684996380915068975573}, { 0.759209188978388033485525443}, + {-0.759209188978388033485525443}, { 0.650846684996380915068975573}, + { 0.891840709392342727796478697}, { 0.452349587233770874133026703}, + {-0.452349587233770874133026703}, { 0.891840709392342727796478697}, + { 0.310767152749611495835997250}, { 0.950486073949481721759926101}, + {-0.950486073949481721759926101}, { 0.310767152749611495835997250}, + { 0.962953266873683886347921481}, { 0.269668325572915106525464462}, + {-0.269668325572915106525464462}, { 0.962953266873683886347921481}, + { 0.490226483288291154229598449}, { 0.871595086655951034842481435}, + {-0.871595086655951034842481435}, { 0.490226483288291154229598449}, + { 0.786455213599085757522319464}, { 0.617647307937803932403979402}, + {-0.617647307937803932403979402}, { 0.786455213599085757522319464}, + { 0.119365214810991364593637790}, { 0.992850414459865090793563344}, + {-0.992850414459865090793563344}, { 0.119365214810991364593637790}, + { 0.984748501801904218556553176}, { 0.173983873387463827950700807}, + {-0.173983873387463827950700807}, { 
0.984748501801904218556553176}, + { 0.573297166698042212820171239}, { 0.819347520076796960824689637}, + {-0.819347520076796960824689637}, { 0.573297166698042212820171239}, + { 0.843208239641845437161743865}, { 0.537587076295645482502214932}, + {-0.537587076295645482502214932}, { 0.843208239641845437161743865}, + { 0.216106797076219509948385131}, { 0.976369731330021149312732194}, + {-0.976369731330021149312732194}, { 0.216106797076219509948385131}, + { 0.931884265581668106718557199}, { 0.362755724367397216204854462}, + {-0.362755724367397216204854462}, { 0.931884265581668106718557199}, + { 0.402434650859418441082533934}, { 0.915448716088267819566431292}, + {-0.915448716088267819566431292}, { 0.402434650859418441082533934}, + { 0.722128193929215321243607198}, { 0.691759258364157774906734132}, + {-0.691759258364157774906734132}, { 0.722128193929215321243607198}, + { 0.021474080275469507418374898}, { 0.999769405351215321657617036}, + {-0.999769405351215321657617036}, { 0.021474080275469507418374898}, + { 0.999882347454212525633049627}, { 0.015339206284988101044151868}, + {-0.015339206284988101044151868}, { 0.999882347454212525633049627}, + { 0.696177131491462944788582591}, { 0.717870045055731736211325329}, + {-0.717870045055731736211325329}, { 0.696177131491462944788582591}, + { 0.917900775621390457642276297}, { 0.396809987416710328595290911}, + {-0.396809987416710328595290911}, { 0.917900775621390457642276297}, + { 0.368466829953372331712746222}, { 0.929640895843181265457918066}, + {-0.929640895843181265457918066}, { 0.368466829953372331712746222}, + { 0.977677357824509979943404762}, { 0.210111836880469621717489972}, + {-0.210111836880469621717489972}, { 0.977677357824509979943404762}, + { 0.542750784864515906586768661}, { 0.839893794195999504583383987}, + {-0.839893794195999504583383987}, { 0.542750784864515906586768661}, + { 0.822849781375826332046780034}, { 0.568258952670131549790548489}, + {-0.568258952670131549790548489}, { 0.822849781375826332046780034}, + { 
0.180022901405699522679906590}, { 0.983662419211730274396237776}, + {-0.983662419211730274396237776}, { 0.180022901405699522679906590}, + { 0.993564135520595333782021697}, { 0.113270952177564349018228733}, + {-0.113270952177564349018228733}, { 0.993564135520595333782021697}, + { 0.622461279374149972519166721}, { 0.782650596166575738458949301}, + {-0.782650596166575738458949301}, { 0.622461279374149972519166721}, + { 0.874586652278176112634431897}, { 0.484869248000791101822951699}, + {-0.484869248000791101822951699}, { 0.874586652278176112634431897}, + { 0.275571819310958163076425168}, { 0.961280485811320641748659653}, + {-0.961280485811320641748659653}, { 0.275571819310958163076425168}, + { 0.952375012719765858529893608}, { 0.304929229735402406490728633}, + {-0.304929229735402406490728633}, { 0.952375012719765858529893608}, + { 0.457813303598877221904961155}, { 0.889048355854664562540777729}, + {-0.889048355854664562540777729}, { 0.457813303598877221904961155}, + { 0.763188417263381271704838297}, { 0.646176012983316364832802220}, + {-0.646176012983316364832802220}, { 0.763188417263381271704838297}, + { 0.082740264549375693111987083}, { 0.996571145790554847093566910}, + {-0.996571145790554847093566910}, { 0.082740264549375693111987083}, + { 0.997925286198596012623025462}, { 0.064382630929857460819324537}, + {-0.064382630929857460819324537}, { 0.997925286198596012623025462}, + { 0.660114342067420478559490747}, { 0.751165131909686411205819422}, + {-0.751165131909686411205819422}, { 0.660114342067420478559490747}, + { 0.897324580705418281231391836}, { 0.441371268731716692879988968}, + {-0.441371268731716692879988968}, { 0.897324580705418281231391836}, + { 0.322407678801069848384807478}, { 0.946600913083283570044599823}, + {-0.946600913083283570044599823}, { 0.322407678801069848384807478}, + { 0.966190003445412555433832961}, { 0.257831102162159005614471295}, + {-0.257831102162159005614471295}, { 0.966190003445412555433832961}, + { 0.500885382611240786241285004}, { 
0.865513624090569082825488358}, + {-0.865513624090569082825488358}, { 0.500885382611240786241285004}, + { 0.793975477554337164895083757}, { 0.607949784967773667243642671}, + {-0.607949784967773667243642671}, { 0.793975477554337164895083757}, + { 0.131540028702883111103387493}, { 0.991310859846115418957349799}, + {-0.991310859846115418957349799}, { 0.131540028702883111103387493}, + { 0.986809401814185476970235952}, { 0.161886393780111837641387995}, + {-0.161886393780111837641387995}, { 0.986809401814185476970235952}, + { 0.583308652937698294392830961}, { 0.812250586585203913049744181}, + {-0.812250586585203913049744181}, { 0.583308652937698294392830961}, + { 0.849741768000852489471268395}, { 0.527199134781901348464274575}, + {-0.527199134781901348464274575}, { 0.849741768000852489471268395}, + { 0.228072083170885739254457379}, { 0.973644249650811925318383912}, + {-0.973644249650811925318383912}, { 0.228072083170885739254457379}, + { 0.936265667170278246576310996}, { 0.351292756085567125601307623}, + {-0.351292756085567125601307623}, { 0.936265667170278246576310996}, + { 0.413638312238434547471944324}, { 0.910441292258067196934095369}, + {-0.910441292258067196934095369}, { 0.413638312238434547471944324}, + { 0.730562769227827561177758850}, { 0.682845546385248068164596123}, + {-0.682845546385248068164596123}, { 0.730562769227827561177758850}, + { 0.033741171851377584833716112}, { 0.999430604555461772019008327}, + {-0.999430604555461772019008327}, { 0.033741171851377584833716112}, + { 0.999204758618363895492950001}, { 0.039872927587739811128578738}, + {-0.039872927587739811128578738}, { 0.999204758618363895492950001}, + { 0.678350043129861486873655042}, { 0.734738878095963464563223604}, + {-0.734738878095963464563223604}, { 0.678350043129861486873655042}, + { 0.907886116487666212038681480}, { 0.419216888363223956433010020}, + {-0.419216888363223956433010020}, { 0.907886116487666212038681480}, + { 0.345541324963989065539191723}, { 0.938403534063108112192420774}, + 
{-0.938403534063108112192420774}, { 0.345541324963989065539191723}, + { 0.972226497078936305708321144}, { 0.234041958583543423191242045}, + {-0.234041958583543423191242045}, { 0.972226497078936305708321144}, + { 0.521975292937154342694258318}, { 0.852960604930363657746588082}, + {-0.852960604930363657746588082}, { 0.521975292937154342694258318}, + { 0.808656181588174991946968128}, { 0.588281548222645304786439813}, + {-0.588281548222645304786439813}, { 0.808656181588174991946968128}, + { 0.155828397654265235743101486}, { 0.987784141644572154230969032}, + {-0.987784141644572154230969032}, { 0.155828397654265235743101486}, + { 0.990485084256457037998682243}, { 0.137620121586486044948441663}, + {-0.137620121586486044948441663}, { 0.990485084256457037998682243}, + { 0.603066598540348201693430617}, { 0.797690840943391108362662755}, + {-0.797690840943391108362662755}, { 0.603066598540348201693430617}, + { 0.862423956111040538690933878}, { 0.506186645345155291048942344}, + {-0.506186645345155291048942344}, { 0.862423956111040538690933878}, + { 0.251897818154216950498106628}, { 0.967753837093475465243391912}, + {-0.967753837093475465243391912}, { 0.251897818154216950498106628}, + { 0.944604837261480265659265493}, { 0.328209843579092526107916817}, + {-0.328209843579092526107916817}, { 0.944604837261480265659265493}, + { 0.435857079922255491032544080}, { 0.900015892016160228714535267}, + {-0.900015892016160228714535267}, { 0.435857079922255491032544080}, + { 0.747100605980180144323078847}, { 0.664710978203344868130324985}, + {-0.664710978203344868130324985}, { 0.747100605980180144323078847}, + { 0.058258264500435759613979782}, { 0.998301544933892840738782163}, + {-0.998301544933892840738782163}, { 0.058258264500435759613979782}, + { 0.996044700901251989887944810}, { 0.088853552582524596561586535}, + {-0.088853552582524596561586535}, { 0.996044700901251989887944810}, + { 0.641481012808583151988739898}, { 0.767138911935820381181694573}, + {-0.767138911935820381181694573}, { 
0.641481012808583151988739898}, + { 0.886222530148880631647990821}, { 0.463259783551860197390719637}, + {-0.463259783551860197390719637}, { 0.886222530148880631647990821}, + { 0.299079826308040476750336973}, { 0.954228095109105629780430732}, + {-0.954228095109105629780430732}, { 0.299079826308040476750336973}, + { 0.959571513081984528335528181}, { 0.281464937925757984095231007}, + {-0.281464937925757984095231007}, { 0.959571513081984528335528181}, + { 0.479493757660153026679839798}, { 0.877545290207261291668470750}, + {-0.877545290207261291668470750}, { 0.479493757660153026679839798}, + { 0.778816512381475953374724325}, { 0.627251815495144113509622565}, + {-0.627251815495144113509622565}, { 0.778816512381475953374724325}, + { 0.107172424956808849175529148}, { 0.994240449453187946358413442}, + {-0.994240449453187946358413442}, { 0.107172424956808849175529148}, + { 0.982539302287441255907040396}, { 0.186055151663446648105438304}, + {-0.186055151663446648105438304}, { 0.982539302287441255907040396}, + { 0.563199344013834115007363772}, { 0.826321062845663480311195452}, + {-0.826321062845663480311195452}, { 0.563199344013834115007363772}, + { 0.836547727223511984524285790}, { 0.547894059173100165608820571}, + {-0.547894059173100165608820571}, { 0.836547727223511984524285790}, + { 0.204108966092816874181696950}, { 0.978948175319062194715480124}, + {-0.978948175319062194715480124}, { 0.204108966092816874181696950}, + { 0.927362525650401087274536959}, { 0.374164062971457997104393020}, + {-0.374164062971457997104393020}, { 0.927362525650401087274536959}, + { 0.391170384302253888687512949}, { 0.920318276709110566440076541}, + {-0.920318276709110566440076541}, { 0.391170384302253888687512949}, + { 0.713584868780793592903125099}, { 0.700568793943248366792866380}, + {-0.700568793943248366792866380}, { 0.713584868780793592903125099}, + { 0.009203754782059819315102378}, { 0.999957644551963866333120920}, + {-0.999957644551963866333120920}, { 0.009203754782059819315102378}, + { 
0.999957644551963866333120920}, { 0.009203754782059819315102378}, + {-0.009203754782059819315102378}, { 0.999957644551963866333120920}, + { 0.700568793943248366792866380}, { 0.713584868780793592903125099}, + {-0.713584868780793592903125099}, { 0.700568793943248366792866380}, + { 0.920318276709110566440076541}, { 0.391170384302253888687512949}, + {-0.391170384302253888687512949}, { 0.920318276709110566440076541}, + { 0.374164062971457997104393020}, { 0.927362525650401087274536959}, + {-0.927362525650401087274536959}, { 0.374164062971457997104393020}, + { 0.978948175319062194715480124}, { 0.204108966092816874181696950}, + {-0.204108966092816874181696950}, { 0.978948175319062194715480124}, + { 0.547894059173100165608820571}, { 0.836547727223511984524285790}, + {-0.836547727223511984524285790}, { 0.547894059173100165608820571}, + { 0.826321062845663480311195452}, { 0.563199344013834115007363772}, + {-0.563199344013834115007363772}, { 0.826321062845663480311195452}, + { 0.186055151663446648105438304}, { 0.982539302287441255907040396}, + {-0.982539302287441255907040396}, { 0.186055151663446648105438304}, + { 0.994240449453187946358413442}, { 0.107172424956808849175529148}, + {-0.107172424956808849175529148}, { 0.994240449453187946358413442}, + { 0.627251815495144113509622565}, { 0.778816512381475953374724325}, + {-0.778816512381475953374724325}, { 0.627251815495144113509622565}, + { 0.877545290207261291668470750}, { 0.479493757660153026679839798}, + {-0.479493757660153026679839798}, { 0.877545290207261291668470750}, + { 0.281464937925757984095231007}, { 0.959571513081984528335528181}, + {-0.959571513081984528335528181}, { 0.281464937925757984095231007}, + { 0.954228095109105629780430732}, { 0.299079826308040476750336973}, + {-0.299079826308040476750336973}, { 0.954228095109105629780430732}, + { 0.463259783551860197390719637}, { 0.886222530148880631647990821}, + {-0.886222530148880631647990821}, { 0.463259783551860197390719637}, + { 0.767138911935820381181694573}, { 
0.641481012808583151988739898}, + {-0.641481012808583151988739898}, { 0.767138911935820381181694573}, + { 0.088853552582524596561586535}, { 0.996044700901251989887944810}, + {-0.996044700901251989887944810}, { 0.088853552582524596561586535}, + { 0.998301544933892840738782163}, { 0.058258264500435759613979782}, + {-0.058258264500435759613979782}, { 0.998301544933892840738782163}, + { 0.664710978203344868130324985}, { 0.747100605980180144323078847}, + {-0.747100605980180144323078847}, { 0.664710978203344868130324985}, + { 0.900015892016160228714535267}, { 0.435857079922255491032544080}, + {-0.435857079922255491032544080}, { 0.900015892016160228714535267}, + { 0.328209843579092526107916817}, { 0.944604837261480265659265493}, + {-0.944604837261480265659265493}, { 0.328209843579092526107916817}, + { 0.967753837093475465243391912}, { 0.251897818154216950498106628}, + {-0.251897818154216950498106628}, { 0.967753837093475465243391912}, + { 0.506186645345155291048942344}, { 0.862423956111040538690933878}, + {-0.862423956111040538690933878}, { 0.506186645345155291048942344}, + { 0.797690840943391108362662755}, { 0.603066598540348201693430617}, + {-0.603066598540348201693430617}, { 0.797690840943391108362662755}, + { 0.137620121586486044948441663}, { 0.990485084256457037998682243}, + {-0.990485084256457037998682243}, { 0.137620121586486044948441663}, + { 0.987784141644572154230969032}, { 0.155828397654265235743101486}, + {-0.155828397654265235743101486}, { 0.987784141644572154230969032}, + { 0.588281548222645304786439813}, { 0.808656181588174991946968128}, + {-0.808656181588174991946968128}, { 0.588281548222645304786439813}, + { 0.852960604930363657746588082}, { 0.521975292937154342694258318}, + {-0.521975292937154342694258318}, { 0.852960604930363657746588082}, + { 0.234041958583543423191242045}, { 0.972226497078936305708321144}, + {-0.972226497078936305708321144}, { 0.234041958583543423191242045}, + { 0.938403534063108112192420774}, { 0.345541324963989065539191723}, + 
{-0.345541324963989065539191723}, { 0.938403534063108112192420774}, + { 0.419216888363223956433010020}, { 0.907886116487666212038681480}, + {-0.907886116487666212038681480}, { 0.419216888363223956433010020}, + { 0.734738878095963464563223604}, { 0.678350043129861486873655042}, + {-0.678350043129861486873655042}, { 0.734738878095963464563223604}, + { 0.039872927587739811128578738}, { 0.999204758618363895492950001}, + {-0.999204758618363895492950001}, { 0.039872927587739811128578738}, + { 0.999430604555461772019008327}, { 0.033741171851377584833716112}, + {-0.033741171851377584833716112}, { 0.999430604555461772019008327}, + { 0.682845546385248068164596123}, { 0.730562769227827561177758850}, + {-0.730562769227827561177758850}, { 0.682845546385248068164596123}, + { 0.910441292258067196934095369}, { 0.413638312238434547471944324}, + {-0.413638312238434547471944324}, { 0.910441292258067196934095369}, + { 0.351292756085567125601307623}, { 0.936265667170278246576310996}, + {-0.936265667170278246576310996}, { 0.351292756085567125601307623}, + { 0.973644249650811925318383912}, { 0.228072083170885739254457379}, + {-0.228072083170885739254457379}, { 0.973644249650811925318383912}, + { 0.527199134781901348464274575}, { 0.849741768000852489471268395}, + {-0.849741768000852489471268395}, { 0.527199134781901348464274575}, + { 0.812250586585203913049744181}, { 0.583308652937698294392830961}, + {-0.583308652937698294392830961}, { 0.812250586585203913049744181}, + { 0.161886393780111837641387995}, { 0.986809401814185476970235952}, + {-0.986809401814185476970235952}, { 0.161886393780111837641387995}, + { 0.991310859846115418957349799}, { 0.131540028702883111103387493}, + {-0.131540028702883111103387493}, { 0.991310859846115418957349799}, + { 0.607949784967773667243642671}, { 0.793975477554337164895083757}, + {-0.793975477554337164895083757}, { 0.607949784967773667243642671}, + { 0.865513624090569082825488358}, { 0.500885382611240786241285004}, + {-0.500885382611240786241285004}, { 
0.865513624090569082825488358}, + { 0.257831102162159005614471295}, { 0.966190003445412555433832961}, + {-0.966190003445412555433832961}, { 0.257831102162159005614471295}, + { 0.946600913083283570044599823}, { 0.322407678801069848384807478}, + {-0.322407678801069848384807478}, { 0.946600913083283570044599823}, + { 0.441371268731716692879988968}, { 0.897324580705418281231391836}, + {-0.897324580705418281231391836}, { 0.441371268731716692879988968}, + { 0.751165131909686411205819422}, { 0.660114342067420478559490747}, + {-0.660114342067420478559490747}, { 0.751165131909686411205819422}, + { 0.064382630929857460819324537}, { 0.997925286198596012623025462}, + {-0.997925286198596012623025462}, { 0.064382630929857460819324537}, + { 0.996571145790554847093566910}, { 0.082740264549375693111987083}, + {-0.082740264549375693111987083}, { 0.996571145790554847093566910}, + { 0.646176012983316364832802220}, { 0.763188417263381271704838297}, + {-0.763188417263381271704838297}, { 0.646176012983316364832802220}, + { 0.889048355854664562540777729}, { 0.457813303598877221904961155}, + {-0.457813303598877221904961155}, { 0.889048355854664562540777729}, + { 0.304929229735402406490728633}, { 0.952375012719765858529893608}, + {-0.952375012719765858529893608}, { 0.304929229735402406490728633}, + { 0.961280485811320641748659653}, { 0.275571819310958163076425168}, + {-0.275571819310958163076425168}, { 0.961280485811320641748659653}, + { 0.484869248000791101822951699}, { 0.874586652278176112634431897}, + {-0.874586652278176112634431897}, { 0.484869248000791101822951699}, + { 0.782650596166575738458949301}, { 0.622461279374149972519166721}, + {-0.622461279374149972519166721}, { 0.782650596166575738458949301}, + { 0.113270952177564349018228733}, { 0.993564135520595333782021697}, + {-0.993564135520595333782021697}, { 0.113270952177564349018228733}, + { 0.983662419211730274396237776}, { 0.180022901405699522679906590}, + {-0.180022901405699522679906590}, { 0.983662419211730274396237776}, + { 
0.568258952670131549790548489}, { 0.822849781375826332046780034}, + {-0.822849781375826332046780034}, { 0.568258952670131549790548489}, + { 0.839893794195999504583383987}, { 0.542750784864515906586768661}, + {-0.542750784864515906586768661}, { 0.839893794195999504583383987}, + { 0.210111836880469621717489972}, { 0.977677357824509979943404762}, + {-0.977677357824509979943404762}, { 0.210111836880469621717489972}, + { 0.929640895843181265457918066}, { 0.368466829953372331712746222}, + {-0.368466829953372331712746222}, { 0.929640895843181265457918066}, + { 0.396809987416710328595290911}, { 0.917900775621390457642276297}, + {-0.917900775621390457642276297}, { 0.396809987416710328595290911}, + { 0.717870045055731736211325329}, { 0.696177131491462944788582591}, + {-0.696177131491462944788582591}, { 0.717870045055731736211325329}, + { 0.015339206284988101044151868}, { 0.999882347454212525633049627}, + {-0.999882347454212525633049627}, { 0.015339206284988101044151868}, + { 0.999769405351215321657617036}, { 0.021474080275469507418374898}, + {-0.021474080275469507418374898}, { 0.999769405351215321657617036}, + { 0.691759258364157774906734132}, { 0.722128193929215321243607198}, + {-0.722128193929215321243607198}, { 0.691759258364157774906734132}, + { 0.915448716088267819566431292}, { 0.402434650859418441082533934}, + {-0.402434650859418441082533934}, { 0.915448716088267819566431292}, + { 0.362755724367397216204854462}, { 0.931884265581668106718557199}, + {-0.931884265581668106718557199}, { 0.362755724367397216204854462}, + { 0.976369731330021149312732194}, { 0.216106797076219509948385131}, + {-0.216106797076219509948385131}, { 0.976369731330021149312732194}, + { 0.537587076295645482502214932}, { 0.843208239641845437161743865}, + {-0.843208239641845437161743865}, { 0.537587076295645482502214932}, + { 0.819347520076796960824689637}, { 0.573297166698042212820171239}, + {-0.573297166698042212820171239}, { 0.819347520076796960824689637}, + { 0.173983873387463827950700807}, { 
0.984748501801904218556553176}, + {-0.984748501801904218556553176}, { 0.173983873387463827950700807}, + { 0.992850414459865090793563344}, { 0.119365214810991364593637790}, + {-0.119365214810991364593637790}, { 0.992850414459865090793563344}, + { 0.617647307937803932403979402}, { 0.786455213599085757522319464}, + {-0.786455213599085757522319464}, { 0.617647307937803932403979402}, + { 0.871595086655951034842481435}, { 0.490226483288291154229598449}, + {-0.490226483288291154229598449}, { 0.871595086655951034842481435}, + { 0.269668325572915106525464462}, { 0.962953266873683886347921481}, + {-0.962953266873683886347921481}, { 0.269668325572915106525464462}, + { 0.950486073949481721759926101}, { 0.310767152749611495835997250}, + {-0.310767152749611495835997250}, { 0.950486073949481721759926101}, + { 0.452349587233770874133026703}, { 0.891840709392342727796478697}, + {-0.891840709392342727796478697}, { 0.452349587233770874133026703}, + { 0.759209188978388033485525443}, { 0.650846684996380915068975573}, + {-0.650846684996380915068975573}, { 0.759209188978388033485525443}, + { 0.076623861392031492278332463}, { 0.997060070339482978987989949}, + {-0.997060070339482978987989949}, { 0.076623861392031492278332463}, + { 0.997511456140303459699448390}, { 0.070504573389613863027351471}, + {-0.070504573389613863027351471}, { 0.997511456140303459699448390}, + { 0.655492852999615385312679701}, { 0.755201376896536527598710756}, + {-0.755201376896536527598710756}, { 0.655492852999615385312679701}, + { 0.894599485631382678433072126}, { 0.446868840162374195353044389}, + {-0.446868840162374195353044389}, { 0.894599485631382678433072126}, + { 0.316593375556165867243047035}, { 0.948561349915730288158494826}, + {-0.948561349915730288158494826}, { 0.316593375556165867243047035}, + { 0.964589793289812723836432159}, { 0.263754678974831383611349322}, + {-0.263754678974831383611349322}, { 0.964589793289812723836432159}, + { 0.495565261825772531150266670}, { 0.868570705971340895340449876}, + 
{-0.868570705971340895340449876}, { 0.495565261825772531150266670}, + { 0.790230221437310055030217152}, { 0.612810082429409703935211936}, + {-0.612810082429409703935211936}, { 0.790230221437310055030217152}, + { 0.125454983411546238542336453}, { 0.992099313142191757112085445}, + {-0.992099313142191757112085445}, { 0.125454983411546238542336453}, + { 0.985797509167567424700995000}, { 0.167938294974731178054745536}, + {-0.167938294974731178054745536}, { 0.985797509167567424700995000}, + { 0.578313796411655563342245019}, { 0.815814410806733789010772660}, + {-0.815814410806733789010772660}, { 0.578313796411655563342245019}, + { 0.846490938774052078300544488}, { 0.532403127877197971442805218}, + {-0.532403127877197971442805218}, { 0.846490938774052078300544488}, + { 0.222093620973203534094094721}, { 0.975025345066994146844913468}, + {-0.975025345066994146844913468}, { 0.222093620973203534094094721}, + { 0.934092550404258914729877883}, { 0.357030961233430032614954036}, + {-0.357030961233430032614954036}, { 0.934092550404258914729877883}, + { 0.408044162864978680820747499}, { 0.912962190428398164628018233}, + {-0.912962190428398164628018233}, { 0.408044162864978680820747499}, + { 0.726359155084345976817494315}, { 0.687315340891759108199186948}, + {-0.687315340891759108199186948}, { 0.726359155084345976817494315}, + { 0.027608145778965741612354872}, { 0.999618822495178597116830637}, + {-0.999618822495178597116830637}, { 0.027608145778965741612354872}, + { 0.998941293186856850633930266}, { 0.046003182130914628814301788}, + {-0.046003182130914628814301788}, { 0.998941293186856850633930266}, + { 0.673829000378756060917568372}, { 0.738887324460615147933116508}, + {-0.738887324460615147933116508}, { 0.673829000378756060917568372}, + { 0.905296759318118774354048329}, { 0.424779681209108833357226189}, + {-0.424779681209108833357226189}, { 0.905296759318118774354048329}, + { 0.339776884406826857828825803}, { 0.940506070593268323787291309}, + {-0.940506070593268323787291309}, { 
0.339776884406826857828825803}, + { 0.970772140728950302138169611}, { 0.240003022448741486568922365}, + {-0.240003022448741486568922365}, { 0.970772140728950302138169611}, + { 0.516731799017649881508753876}, { 0.856147328375194481019630732}, + {-0.856147328375194481019630732}, { 0.516731799017649881508753876}, + { 0.805031331142963597922659282}, { 0.593232295039799808047809426}, + {-0.593232295039799808047809426}, { 0.805031331142963597922659282}, + { 0.149764534677321517229695737}, { 0.988721691960323767604516485}, + {-0.988721691960323767604516485}, { 0.149764534677321517229695737}, + { 0.989622017463200834623694454}, { 0.143695033150294454819773349}, + {-0.143695033150294454819773349}, { 0.989622017463200834623694454}, + { 0.598160706996342311724958652}, { 0.801376171723140219430247777}, + {-0.801376171723140219430247777}, { 0.598160706996342311724958652}, + { 0.859301818357008404783582139}, { 0.511468850437970399504391001}, + {-0.511468850437970399504391001}, { 0.859301818357008404783582139}, + { 0.245955050335794611599924709}, { 0.969281235356548486048290738}, + {-0.969281235356548486048290738}, { 0.245955050335794611599924709}, + { 0.942573197601446879280758735}, { 0.333999651442009404650865481}, + {-0.333999651442009404650865481}, { 0.942573197601446879280758735}, + { 0.430326481340082633908199031}, { 0.902673318237258806751502391}, + {-0.902673318237258806751502391}, { 0.430326481340082633908199031}, + { 0.743007952135121693517362293}, { 0.669282588346636065720696366}, + {-0.669282588346636065720696366}, { 0.743007952135121693517362293}, + { 0.052131704680283321236358216}, { 0.998640218180265222418199049}, + {-0.998640218180265222418199049}, { 0.052131704680283321236358216}, + { 0.995480755491926941769171600}, { 0.094963495329638998938034312}, + {-0.094963495329638998938034312}, { 0.995480755491926941769171600}, + { 0.636761861236284230413943435}, { 0.771060524261813773200605759}, + {-0.771060524261813773200605759}, { 0.636761861236284230413943435}, + { 
0.883363338665731594736308015}, { 0.468688822035827933697617870}, + {-0.468688822035827933697617870}, { 0.883363338665731594736308015}, + { 0.293219162694258650606608599}, { 0.956045251349996443270479823}, + {-0.956045251349996443270479823}, { 0.293219162694258650606608599}, + { 0.957826413027532890321037029}, { 0.287347459544729526477331841}, + {-0.287347459544729526477331841}, { 0.957826413027532890321037029}, + { 0.474100214650550014398580015}, { 0.880470889052160770806542929}, + {-0.880470889052160770806542929}, { 0.474100214650550014398580015}, + { 0.774953106594873878359129282}, { 0.632018735939809021909403706}, + {-0.632018735939809021909403706}, { 0.774953106594873878359129282}, + { 0.101069862754827824987887585}, { 0.994879330794805620591166107}, + {-0.994879330794805620591166107}, { 0.101069862754827824987887585}, + { 0.981379193313754574318224190}, { 0.192080397049892441679288205}, + {-0.192080397049892441679288205}, { 0.981379193313754574318224190}, + { 0.558118531220556115693702964}, { 0.829761233794523042469023765}, + {-0.829761233794523042469023765}, { 0.558118531220556115693702964}, + { 0.833170164701913186439915922}, { 0.553016705580027531764226988}, + {-0.553016705580027531764226988}, { 0.833170164701913186439915922}, + { 0.198098410717953586179324918}, { 0.980182135968117392690210009}, + {-0.980182135968117392690210009}, { 0.198098410717953586179324918}, + { 0.925049240782677590302371869}, { 0.379847208924051170576281147}, + {-0.379847208924051170576281147}, { 0.925049240782677590302371869}, + { 0.385516053843918864075607949}, { 0.922701128333878570437264227}, + {-0.922701128333878570437264227}, { 0.385516053843918864075607949}, + { 0.709272826438865651316533772}, { 0.704934080375904908852523758}, + {-0.704934080375904908852523758}, { 0.709272826438865651316533772}, + { 0.003067956762965976270145365}, { 0.999995293809576171511580126}, + {-0.999995293809576171511580126}, { 0.003067956762965976270145365} +}; + +const fpr fpr_p2_tab[] = { + { 
2.00000000000 }, + { 1.00000000000 }, + { 0.50000000000 }, + { 0.25000000000 }, + { 0.12500000000 }, + { 0.06250000000 }, + { 0.03125000000 }, + { 0.01562500000 }, + { 0.00781250000 }, + { 0.00390625000 }, + { 0.00195312500 } +}; diff --git a/crypto_sign/falcon-1024/avx2/fpr.h b/crypto_sign/falcon-1024/avx2/fpr.h new file mode 100644 index 00000000..618115a3 --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/fpr.h @@ -0,0 +1,349 @@ +#ifndef PQCLEAN_FALCON1024_AVX2_FPR_H +#define PQCLEAN_FALCON1024_AVX2_FPR_H + +/* + * Floating-point operations. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* ====================================================================== */ + +#include +#include + +#define FMADD(a, b, c) _mm256_add_pd(_mm256_mul_pd(a, b), c) +#define FMSUB(a, b, c) _mm256_sub_pd(_mm256_mul_pd(a, b), c) + +/* + * We wrap the native 'double' type into a structure so that the C compiler + * complains if we inadvertently use raw arithmetic operators on the 'fpr' + * type instead of using the inline functions below. This should have no + * extra runtime cost, since all the functions below are 'inline'. + */ +typedef struct { + double v; +} fpr; + +static inline fpr +FPR(double v) { + fpr x; + + x.v = v; + return x; +} + +static inline fpr +fpr_of(int64_t i) { + return FPR((double)i); +} + +static const fpr fpr_q = { 12289.0 }; +static const fpr fpr_inverse_of_q = { 1.0 / 12289.0 }; +static const fpr fpr_inv_2sqrsigma0 = { .150865048875372721532312163019 }; +static const fpr fpr_inv_sigma = { .005819826392951607426919370871 }; +static const fpr fpr_sigma_min_9 = { 1.291500756233514568549480827642 }; +static const fpr fpr_sigma_min_10 = { 1.311734375905083682667395805765 }; +static const fpr fpr_log2 = { 0.69314718055994530941723212146 }; +static const fpr fpr_inv_log2 = { 1.4426950408889634073599246810 }; +static const fpr fpr_bnorm_max = { 16822.4121 }; +static const fpr fpr_zero = { 0.0 }; +static const fpr fpr_one = { 1.0 }; +static const fpr fpr_two = { 2.0 }; +static const fpr fpr_onehalf = { 0.5 }; +static const fpr fpr_invsqrt2 = { 0.707106781186547524400844362105 }; +static const fpr fpr_invsqrt8 = { 0.353553390593273762200422181052 }; +static const fpr fpr_ptwo31 = { 2147483648.0 }; +static const fpr fpr_ptwo31m1 = { 2147483647.0 }; +static const fpr fpr_mtwo31m1 = { -2147483647.0 }; +static const fpr fpr_ptwo63m1 = { 9223372036854775807.0 }; +static const fpr fpr_mtwo63m1 = { -9223372036854775807.0 }; +static const fpr 
fpr_ptwo63 = { 9223372036854775808.0 }; + +static inline int64_t +fpr_rint(fpr x) { + /* + * We do not want to use llrint() since it might be not + * constant-time. + * + * Suppose that x >= 0. If x >= 2^52, then it is already an + * integer. Otherwise, if x < 2^52, then computing x+2^52 will + * yield a value that will be rounded to the nearest integer + * with exactly the right rules (round-to-nearest-even). + * + * In order to have constant-time processing, we must do the + * computation for both x >= 0 and x < 0 cases, and use a + * cast to an integer to access the sign and select the proper + * value. Such casts also allow us to find out if |x| < 2^52. + */ + int64_t sx, tx, rp, rn, m; + uint32_t ub; + + sx = (int64_t)(x.v - 1.0); + tx = (int64_t)x.v; + rp = (int64_t)(x.v + 4503599627370496.0) - 4503599627370496; + rn = (int64_t)(x.v - 4503599627370496.0) + 4503599627370496; + + /* + * If tx >= 2^52 or tx < -2^52, then result is tx. + * Otherwise, if sx >= 0, then result is rp. + * Otherwise, result is rn. We use the fact that when x is + * close to 0 (|x| <= 0.25) then both rp and rn are correct; + * and if x is not close to 0, then trunc(x-1.0) yields the + * appropriate sign. + */ + + /* + * Clamp rp to zero if tx < 0. + * Clamp rn to zero if tx >= 0. + */ + m = sx >> 63; + rn &= m; + rp &= ~m; + + /* + * Get the 12 upper bits of tx; if they are not all zeros or + * all ones, then tx >= 2^52 or tx < -2^52, and we clamp both + * rp and rn to zero. Otherwise, we clamp tx to zero. + */ + ub = (uint32_t)((uint64_t)tx >> 52); + m = -(int64_t)((((ub + 1) & 0xFFF) - 2) >> 31); + rp &= m; + rn &= m; + tx &= ~m; + + /* + * Only one of tx, rn or rp (at most) can be non-zero at this + * point. + */ + return tx | rn | rp; +} + +static inline int64_t +fpr_floor(fpr x) { + int64_t r; + + /* + * The cast performs a trunc() (rounding toward 0) and thus is + * wrong by 1 for most negative values. 
The correction below is + * constant-time as long as the compiler turns the + * floating-point conversion result into a 0/1 integer without a + * conditional branch or another non-constant-time construction. + * This should hold on all modern architectures with an FPU (and + * if it is false on a given arch, then chances are that the FPU + * itself is not constant-time, making the point moot). + */ + r = (int64_t)x.v; + return r - (x.v < (double)r); +} + +static inline int64_t +fpr_trunc(fpr x) { + return (int64_t)x.v; +} + +static inline fpr +fpr_add(fpr x, fpr y) { + return FPR(x.v + y.v); +} + +static inline fpr +fpr_sub(fpr x, fpr y) { + return FPR(x.v - y.v); +} + +static inline fpr +fpr_neg(fpr x) { + return FPR(-x.v); +} + +static inline fpr +fpr_half(fpr x) { + return FPR(x.v * 0.5); +} + +static inline fpr +fpr_double(fpr x) { + return FPR(x.v + x.v); +} + +static inline fpr +fpr_mul(fpr x, fpr y) { + return FPR(x.v * y.v); +} + +static inline fpr +fpr_sqr(fpr x) { + return FPR(x.v * x.v); +} + +static inline fpr +fpr_inv(fpr x) { + return FPR(1.0 / x.v); +} + +static inline fpr +fpr_div(fpr x, fpr y) { + return FPR(x.v / y.v); +} + +static inline void +fpr_sqrt_avx2(double *t) { + __m128d x; + + x = _mm_load1_pd(t); + x = _mm_sqrt_pd(x); + _mm_storel_pd(t, x); +} + +static inline fpr +fpr_sqrt(fpr x) { + /* + * We prefer not to have a dependency on libm when it can be + * avoided. On x86, calling the sqrt() libm function inlines + * the relevant opcode (fsqrt or sqrtsd, depending on whether + * the 387 FPU or SSE2 is used for floating-point operations) + * but then makes an optional call to the library function + * for proper error handling, in case the operand is negative. + * + * To avoid this dependency, we use intrinsics or inline assembly + * on recognized platforms: + * + * - If AVX2 is explicitly enabled, then we use SSE2 intrinsics. + * + * - On GCC/Clang with SSE maths, we use SSE2 intrinsics. 
+ * + * - On GCC/Clang on i386, or MSVC on i386, we use inline assembly + * to call the 387 FPU fsqrt opcode. + * + * - On GCC/Clang/XLC on PowerPC, we use inline assembly to call + * the fsqrt opcode (Clang needs a special hack). + * + * - On GCC/Clang on ARM with hardware floating-point, we use + * inline assembly to call the vsqrt.f64 opcode. Due to a + * complex ecosystem of compilers and assembly syntaxes, we + * have to call it "fsqrt" or "fsqrtd", depending on case. + * + * If the platform is not recognized, a call to the system + * library function sqrt() is performed. On some compilers, this + * may actually inline the relevant opcode, and call the library + * function only when the input is invalid (e.g. negative); + * Falcon never actually calls sqrt() on a negative value, but + * the dependency to libm will still be there. + */ + + fpr_sqrt_avx2(&x.v); + return x; +} + +static inline int +fpr_lt(fpr x, fpr y) { + return x.v < y.v; +} + +static inline uint64_t +fpr_expm_p63(fpr x, fpr ccs) { + /* + * Polynomial approximation of exp(-x) is taken from FACCT: + * https://eprint.iacr.org/2018/1234 + * Specifically, values are extracted from the implementation + * referenced from the FACCT article, and available at: + * https://github.com/raykzhao/gaussian + * Tests over more than 24 billion random inputs in the + * 0..log(2) range have never shown a deviation larger than + * 2^(-50) from the true mathematical value. + */ + + + /* + * AVX2 implementation uses more operations than Horner's method, + * but with a lower expression tree depth. This helps because + * additions and multiplications have a latency of 4 cycles on + * a Skylake, but the CPU can issue two of them per cycle. 
+ */ + + static const union { + double d[12]; + __m256d v[3]; + } c = { + { + 0.999999999999994892974086724280, + 0.500000000000019206858326015208, + 0.166666666666984014666397229121, + 0.041666666666110491190622155955, + 0.008333333327800835146903501993, + 0.001388888894063186997887560103, + 0.000198412739277311890541063977, + 0.000024801566833585381209939524, + 0.000002755586350219122514855659, + 0.000000275607356160477811864927, + 0.000000025299506379442070029551, + 0.000000002073772366009083061987 + } + }; + + double d1, d2, d4, d8, y; + __m256d d14, d58, d9c; + + d1 = -x.v; + d2 = d1 * d1; + d4 = d2 * d2; + d8 = d4 * d4; + d14 = _mm256_set_pd(d4, d2 * d1, d2, d1); + d58 = _mm256_mul_pd(d14, _mm256_set1_pd(d4)); + d9c = _mm256_mul_pd(d14, _mm256_set1_pd(d8)); + d14 = _mm256_mul_pd(d14, _mm256_loadu_pd(&c.d[0])); + d58 = FMADD(d58, _mm256_loadu_pd(&c.d[4]), d14); + d9c = FMADD(d9c, _mm256_loadu_pd(&c.d[8]), d58); + d9c = _mm256_hadd_pd(d9c, d9c); + y = 1.0 + _mm_cvtsd_f64(_mm256_castpd256_pd128(d9c)) // _mm256_cvtsd_f64(d9c) + + _mm_cvtsd_f64(_mm256_extractf128_pd(d9c, 1)); + y *= ccs.v; + + /* + * Final conversion goes through int64_t first, because that's what + * the underlying opcode (vcvttsd2si) will do, and we know that the + * result will fit, since x >= 0 and ccs < 1. If we did the + * conversion directly to uint64_t, then the compiler would add some + * extra code to cover the case of a source value of 2^63 or more, + * and though the alternate path would never be exercised, the + * extra comparison would cost us some cycles. 
+ */ + return (uint64_t)(int64_t)(y * fpr_ptwo63.v); + +} + +#define fpr_gm_tab PQCLEAN_FALCON1024_AVX2_fpr_gm_tab +extern const fpr fpr_gm_tab[]; + +#define fpr_p2_tab PQCLEAN_FALCON1024_AVX2_fpr_p2_tab +extern const fpr fpr_p2_tab[]; + +/* ====================================================================== */ +#endif diff --git a/crypto_sign/falcon-1024/avx2/inner.h b/crypto_sign/falcon-1024/avx2/inner.h new file mode 100644 index 00000000..d7239cf9 --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/inner.h @@ -0,0 +1,826 @@ +#ifndef PQCLEAN_FALCON1024_AVX2_INNER_H +#define PQCLEAN_FALCON1024_AVX2_INNER_H + + +/* + * Internal functions for Falcon. This is not the API intended to be + * used by applications; instead, this internal API provides all the + * primitives on which wrappers build to provide external APIs. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + +/* + * IMPORTANT API RULES + * ------------------- + * + * This API has some non-trivial usage rules: + * + * + * - All public functions (i.e. the non-static ones) must be referenced + * with the PQCLEAN_FALCON1024_AVX2_ macro (e.g. PQCLEAN_FALCON1024_AVX2_verify_raw for the verify_raw() + * function). That macro adds a prefix to the name, which is + * configurable with the FALCON_PREFIX macro. This allows compiling + * the code into a specific "namespace" and potentially including + * several versions of this code into a single application (e.g. to + * have an AVX2 and a non-AVX2 variants and select the one to use at + * runtime based on availability of AVX2 opcodes). + * + * - Functions that need temporary buffers expects them as a final + * tmp[] array of type uint8_t*, with a size which is documented for + * each function. However, most have some alignment requirements, + * because they will use the array to store 16-bit, 32-bit or 64-bit + * values (e.g. uint64_t or double). The caller must ensure proper + * alignment. What happens on unaligned access depends on the + * underlying architecture, ranging from a slight time penalty + * to immediate termination of the process. + * + * - Some functions rely on specific rounding rules and precision for + * floating-point numbers. On some systems (in particular 32-bit x86 + * with the 387 FPU), this requires setting an hardware control + * word. 
The caller MUST use set_fpu_cw() to ensure proper precision: + * + * oldcw = set_fpu_cw(2); + * PQCLEAN_FALCON1024_AVX2_sign_dyn(...); + * set_fpu_cw(oldcw); + * + * On systems where the native floating-point precision is already + * proper, or integer-based emulation is used, the set_fpu_cw() + * function does nothing, so it can be called systematically. + */ +#include "fips202.h" +#include "fpr.h" +#include +#include +#include + +/* + * Some computations with floating-point elements, in particular + * rounding to the nearest integer, rely on operations using _exactly_ + * the precision of IEEE-754 binary64 type (i.e. 52 bits). On 32-bit + * x86, the 387 FPU may be used (depending on the target OS) and, in + * that case, may use more precision bits (i.e. 64 bits, for an 80-bit + * total type length); to prevent miscomputations, we define an explicit + * function that modifies the precision in the FPU control word. + * + * set_fpu_cw() sets the precision to the provided value, and returns + * the previously set precision; callers are supposed to restore the + * previous precision on exit. The correct (52-bit) precision is + * configured with the value "2". On unsupported compilers, or on + * targets other than 32-bit x86, or when the native 'double' type is + * not used, the set_fpu_cw() function does nothing at all. + */ +static inline unsigned +set_fpu_cw(unsigned x) { + return x; +} + + + + +/* ==================================================================== */ +/* + * SHAKE256 implementation (shake.c). + * + * API is defined to be easily replaced with the fips202.h API defined + * as part of PQClean. 
+ */ + + + +#define inner_shake256_context shake256incctx +#define inner_shake256_init(sc) shake256_inc_init(sc) +#define inner_shake256_inject(sc, in, len) shake256_inc_absorb(sc, in, len) +#define inner_shake256_flip(sc) shake256_inc_finalize(sc) +#define inner_shake256_extract(sc, out, len) shake256_inc_squeeze(out, len, sc) +#define inner_shake256_ctx_release(sc) shake256_inc_ctx_release(sc) + + +/* ==================================================================== */ +/* + * Encoding/decoding functions (codec.c). + * + * Encoding functions take as parameters an output buffer (out) with + * a given maximum length (max_out_len); returned value is the actual + * number of bytes which have been written. If the output buffer is + * not large enough, then 0 is returned (some bytes may have been + * written to the buffer). If 'out' is NULL, then 'max_out_len' is + * ignored; instead, the function computes and returns the actual + * required output length (in bytes). + * + * Decoding functions take as parameters an input buffer (in) with + * its maximum length (max_in_len); returned value is the actual number + * of bytes that have been read from the buffer. If the provided length + * is too short, then 0 is returned. + * + * Values to encode or decode are vectors of integers, with N = 2^logn + * elements. + * + * Three encoding formats are defined: + * + * - modq: sequence of values modulo 12289, each encoded over exactly + * 14 bits. The encoder and decoder verify that integers are within + * the valid range (0..12288). Values are arrays of uint16. + * + * - trim: sequence of signed integers, a specified number of bits + * each. The number of bits is provided as parameter and includes + * the sign bit. Each integer x must be such that |x| < 2^(bits-1) + * (which means that the -2^(bits-1) value is forbidden); encode and + * decode functions check that property. 
Values are arrays of + * int16_t or int8_t, corresponding to names 'trim_i16' and + * 'trim_i8', respectively. + * + * - comp: variable-length encoding for signed integers; each integer + * uses a minimum of 9 bits, possibly more. This is normally used + * only for signatures. + * + */ + +size_t PQCLEAN_FALCON1024_AVX2_modq_encode(void *out, size_t max_out_len, + const uint16_t *x, unsigned logn); +size_t PQCLEAN_FALCON1024_AVX2_trim_i16_encode(void *out, size_t max_out_len, + const int16_t *x, unsigned logn, unsigned bits); +size_t PQCLEAN_FALCON1024_AVX2_trim_i8_encode(void *out, size_t max_out_len, + const int8_t *x, unsigned logn, unsigned bits); +size_t PQCLEAN_FALCON1024_AVX2_comp_encode(void *out, size_t max_out_len, + const int16_t *x, unsigned logn); + +size_t PQCLEAN_FALCON1024_AVX2_modq_decode(uint16_t *x, unsigned logn, + const void *in, size_t max_in_len); +size_t PQCLEAN_FALCON1024_AVX2_trim_i16_decode(int16_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len); +size_t PQCLEAN_FALCON1024_AVX2_trim_i8_decode(int8_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len); +size_t PQCLEAN_FALCON1024_AVX2_comp_decode(int16_t *x, unsigned logn, + const void *in, size_t max_in_len); + +/* + * Number of bits for key elements, indexed by logn (1 to 10). This + * is at most 8 bits for all degrees, but some degrees may have shorter + * elements. + */ +extern const uint8_t PQCLEAN_FALCON1024_AVX2_max_fg_bits[]; +extern const uint8_t PQCLEAN_FALCON1024_AVX2_max_FG_bits[]; + +/* + * Maximum size, in bits, of elements in a signature, indexed by logn + * (1 to 10). The size includes the sign bit. + */ +extern const uint8_t PQCLEAN_FALCON1024_AVX2_max_sig_bits[]; + +/* ==================================================================== */ +/* + * Support functions used for both signature generation and signature + * verification (common.c). + */ + +/* + * From a SHAKE256 context (must be already flipped), produce a new + * point. 
This is the non-constant-time version, which may leak enough + * information to serve as a stop condition on a brute force attack on + * the hashed message (provided that the nonce value is known). + */ +void PQCLEAN_FALCON1024_AVX2_hash_to_point_vartime(inner_shake256_context *sc, + uint16_t *x, unsigned logn); + +/* + * From a SHAKE256 context (must be already flipped), produce a new + * point. The temporary buffer (tmp) must have room for 2*2^logn bytes. + * This function is constant-time but is typically more expensive than + * PQCLEAN_FALCON1024_AVX2_hash_to_point_vartime(). + * + * tmp[] must have 16-bit alignment. + */ +void PQCLEAN_FALCON1024_AVX2_hash_to_point_ct(inner_shake256_context *sc, + uint16_t *x, unsigned logn, uint8_t *tmp); + +/* + * Tell whether a given vector (2N coordinates, in two halves) is + * acceptable as a signature. This compares the appropriate norm of the + * vector with the acceptance bound. Returned value is 1 on success + * (vector is short enough to be acceptable), 0 otherwise. + */ +int PQCLEAN_FALCON1024_AVX2_is_short(const int16_t *s1, const int16_t *s2, unsigned logn); + +/* + * Tell whether a given vector (2N coordinates, in two halves) is + * acceptable as a signature. Instead of the first half s1, this + * function receives the "saturated squared norm" of s1, i.e. the + * sum of the squares of the coordinates of s1 (saturated at 2^32-1 + * if the sum exceeds 2^31-1). + * + * Returned value is 1 on success (vector is short enough to be + * acceptable), 0 otherwise. + */ +int PQCLEAN_FALCON1024_AVX2_is_short_half(uint32_t sqn, const int16_t *s2, unsigned logn); + +/* ==================================================================== */ +/* + * Signature verification functions (vrfy.c). + */ + +/* + * Convert a public key to NTT + Montgomery format. Conversion is done + * in place. 
+ */ +void PQCLEAN_FALCON1024_AVX2_to_ntt_monty(uint16_t *h, unsigned logn); + +/* + * Internal signature verification code: + * c0[] contains the hashed nonce+message + * s2[] is the decoded signature + * h[] contains the public key, in NTT + Montgomery format + * logn is the degree log + * tmp[] temporary, must have at least 2*2^logn bytes + * Returned value is 1 on success, 0 on error. + * + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON1024_AVX2_verify_raw(const uint16_t *c0, const int16_t *s2, + const uint16_t *h, unsigned logn, uint8_t *tmp); + +/* + * Compute the public key h[], given the private key elements f[] and + * g[]. This computes h = g/f mod phi mod q, where phi is the polynomial + * modulus. This function returns 1 on success, 0 on error (an error is + * reported if f is not invertible mod phi mod q). + * + * The tmp[] array must have room for at least 2*2^logn elements. + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON1024_AVX2_compute_public(uint16_t *h, + const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp); + +/* + * Recompute the fourth private key element. Private key consists of + * four polynomials with small coefficients f, g, F and G, which are + * such that fG - gF = q mod phi; furthermore, f is invertible modulo + * phi and modulo q. This function recomputes G from f, g and F. + * + * The tmp[] array must have room for at least 4*2^logn bytes. + * + * Returned value is 1 on success, 0 on error (f not invertible). + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON1024_AVX2_complete_private(int8_t *G, + const int8_t *f, const int8_t *g, const int8_t *F, + unsigned logn, uint8_t *tmp); + +/* + * Test whether a given polynomial is invertible modulo phi and q. + * Polynomial coefficients are small integers. + * + * tmp[] must have 16-bit alignment. 
+ */ +int PQCLEAN_FALCON1024_AVX2_is_invertible( + const int16_t *s2, unsigned logn, uint8_t *tmp); + +/* + * Count the number of elements of value zero in the NTT representation + * of the given polynomial: this is the number of primitive 2n-th roots + * of unity (modulo q = 12289) that are roots of the provided polynomial + * (taken modulo q). + * + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON1024_AVX2_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp); + +/* + * Internal signature verification with public key recovery: + * h[] receives the public key (NOT in NTT/Montgomery format) + * c0[] contains the hashed nonce+message + * s1[] is the first signature half + * s2[] is the second signature half + * logn is the degree log + * tmp[] temporary, must have at least 2*2^logn bytes + * Returned value is 1 on success, 0 on error. Success is returned if + * the signature is a short enough vector; in that case, the public + * key has been written to h[]. However, the caller must still + * verify that h[] is the correct value (e.g. with regards to a known + * hash of the public key). + * + * h[] may not overlap with any of the other arrays. + * + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON1024_AVX2_verify_recover(uint16_t *h, + const uint16_t *c0, const int16_t *s1, const int16_t *s2, + unsigned logn, uint8_t *tmp); + +/* ==================================================================== */ +/* + * Implementation of floating-point real numbers (fpr.h, fpr.c). + */ + +/* + * Real numbers are implemented by an extra header file, included below. + * This is meant to support pluggable implementations. The default + * implementation relies on the C type 'double'. 
+ * + * The included file must define the following types, functions and + * constants: + * + * fpr + * type for a real number + * + * fpr fpr_of(int64_t i) + * cast an integer into a real number; source must be in the + * -(2^63-1)..+(2^63-1) range + * + * fpr fpr_scaled(int64_t i, int sc) + * compute i*2^sc as a real number; source 'i' must be in the + * -(2^63-1)..+(2^63-1) range + * + * fpr fpr_ldexp(fpr x, int e) + * compute x*2^e + * + * int64_t fpr_rint(fpr x) + * round x to the nearest integer; x must be in the -(2^63-1) + * to +(2^63-1) range + * + * int64_t fpr_trunc(fpr x) + * round to an integer; this rounds towards zero; value must + * be in the -(2^63-1) to +(2^63-1) range + * + * fpr fpr_add(fpr x, fpr y) + * compute x + y + * + * fpr fpr_sub(fpr x, fpr y) + * compute x - y + * + * fpr fpr_neg(fpr x) + * compute -x + * + * fpr fpr_half(fpr x) + * compute x/2 + * + * fpr fpr_double(fpr x) + * compute x*2 + * + * fpr fpr_mul(fpr x, fpr y) + * compute x * y + * + * fpr fpr_sqr(fpr x) + * compute x * x + * + * fpr fpr_inv(fpr x) + * compute 1/x + * + * fpr fpr_div(fpr x, fpr y) + * compute x/y + * + * fpr fpr_sqrt(fpr x) + * compute the square root of x + * + * int fpr_lt(fpr x, fpr y) + * return 1 if x < y, 0 otherwise + * + * uint64_t fpr_expm_p63(fpr x) + * return exp(x), assuming that 0 <= x < log(2). Returned value + * is scaled to 63 bits (i.e. it really returns 2^63*exp(-x), + * rounded to the nearest integer). Computation should have a + * precision of at least 45 bits. 
+ * + * const fpr fpr_gm_tab[] + * array of constants for FFT / iFFT + * + * const fpr fpr_p2_tab[] + * precomputed powers of 2 (by index, 0 to 10) + * + * Constants of type 'fpr': + * + * fpr fpr_q 12289 + * fpr fpr_inverse_of_q 1/12289 + * fpr fpr_inv_2sqrsigma0 1/(2*(1.8205^2)) + * fpr fpr_inv_sigma 1/(1.55*sqrt(12289)) + * fpr fpr_sigma_min_9 1.291500756233514568549480827642 + * fpr fpr_sigma_min_10 1.311734375905083682667395805765 + * fpr fpr_log2 log(2) + * fpr fpr_inv_log2 1/log(2) + * fpr fpr_bnorm_max 16822.4121 + * fpr fpr_zero 0 + * fpr fpr_one 1 + * fpr fpr_two 2 + * fpr fpr_onehalf 0.5 + * fpr fpr_ptwo31 2^31 + * fpr fpr_ptwo31m1 2^31-1 + * fpr fpr_mtwo31m1 -(2^31-1) + * fpr fpr_ptwo63m1 2^63-1 + * fpr fpr_mtwo63m1 -(2^63-1) + * fpr fpr_ptwo63 2^63 + */ + +/* ==================================================================== */ +/* + * RNG (rng.c). + * + * A PRNG based on ChaCha20 is implemented; it is seeded from a SHAKE256 + * context (flipped) and is used for bulk pseudorandom generation. + * A system-dependent seed generator is also provided. + */ + +/* + * Obtain a random seed from the system RNG. + * + * Returned value is 1 on success, 0 on error. + */ +int PQCLEAN_FALCON1024_AVX2_get_seed(void *seed, size_t seed_len); + +/* + * Structure for a PRNG. This includes a large buffer so that values + * get generated in advance. The 'state' is used to keep the current + * PRNG algorithm state (contents depend on the selected algorithm). + * + * The unions with 'dummy_u64' are there to ensure proper alignment for + * 64-bit direct access. + */ +typedef struct { + union { + uint8_t d[512]; /* MUST be 512, exactly */ + uint64_t dummy_u64; + } buf; + size_t ptr; + union { + uint8_t d[256]; + uint64_t dummy_u64; + } state; + int type; +} prng; + +/* + * Instantiate a PRNG. That PRNG will feed over the provided SHAKE256 + * context (in "flipped" state) to obtain its initial state. 
+ */ +void PQCLEAN_FALCON1024_AVX2_prng_init(prng *p, inner_shake256_context *src); + +/* + * Refill the PRNG buffer. This is normally invoked automatically, and + * is declared here only so that prng_get_u64() may be inlined. + */ +void PQCLEAN_FALCON1024_AVX2_prng_refill(prng *p); + +/* + * Get some bytes from a PRNG. + */ +void PQCLEAN_FALCON1024_AVX2_prng_get_bytes(prng *p, void *dst, size_t len); + +/* + * Get a 64-bit random value from a PRNG. + */ +static inline uint64_t +prng_get_u64(prng *p) { + size_t u; + + /* + * If there are less than 9 bytes in the buffer, we refill it. + * This means that we may drop the last few bytes, but this allows + * for faster extraction code. Also, it means that we never leave + * an empty buffer. + */ + u = p->ptr; + if (u >= (sizeof p->buf.d) - 9) { + PQCLEAN_FALCON1024_AVX2_prng_refill(p); + u = 0; + } + p->ptr = u + 8; + + return (uint64_t)p->buf.d[u + 0] + | ((uint64_t)p->buf.d[u + 1] << 8) + | ((uint64_t)p->buf.d[u + 2] << 16) + | ((uint64_t)p->buf.d[u + 3] << 24) + | ((uint64_t)p->buf.d[u + 4] << 32) + | ((uint64_t)p->buf.d[u + 5] << 40) + | ((uint64_t)p->buf.d[u + 6] << 48) + | ((uint64_t)p->buf.d[u + 7] << 56); +} + +/* + * Get an 8-bit random value from a PRNG. + */ +static inline unsigned +prng_get_u8(prng *p) { + unsigned v; + + v = p->buf.d[p->ptr ++]; + if (p->ptr == sizeof p->buf.d) { + PQCLEAN_FALCON1024_AVX2_prng_refill(p); + } + return v; +} + +/* ==================================================================== */ +/* + * FFT (falcon-fft.c). + * + * A real polynomial is represented as an array of N 'fpr' elements. + * The FFT representation of a real polynomial contains N/2 complex + * elements; each is stored as two real numbers, for the real and + * imaginary parts, respectively. See falcon-fft.c for details on the + * internal representation. 
+ */ + +/* + * Compute FFT in-place: the source array should contain a real + * polynomial (N coefficients); its storage area is reused to store + * the FFT representation of that polynomial (N/2 complex numbers). + * + * 'logn' MUST lie between 1 and 10 (inclusive). + */ +void PQCLEAN_FALCON1024_AVX2_FFT(fpr *f, unsigned logn); + +/* + * Compute the inverse FFT in-place: the source array should contain the + * FFT representation of a real polynomial (N/2 elements); the resulting + * real polynomial (N coefficients of type 'fpr') is written over the + * array. + * + * 'logn' MUST lie between 1 and 10 (inclusive). + */ +void PQCLEAN_FALCON1024_AVX2_iFFT(fpr *f, unsigned logn); + +/* + * Add polynomial b to polynomial a. a and b MUST NOT overlap. This + * function works in both normal and FFT representations. + */ +void PQCLEAN_FALCON1024_AVX2_poly_add(fpr *a, const fpr *b, unsigned logn); + +/* + * Subtract polynomial b from polynomial a. a and b MUST NOT overlap. This + * function works in both normal and FFT representations. + */ +void PQCLEAN_FALCON1024_AVX2_poly_sub(fpr *a, const fpr *b, unsigned logn); + +/* + * Negate polynomial a. This function works in both normal and FFT + * representations. + */ +void PQCLEAN_FALCON1024_AVX2_poly_neg(fpr *a, unsigned logn); + +/* + * Compute adjoint of polynomial a. This function works only in FFT + * representation. + */ +void PQCLEAN_FALCON1024_AVX2_poly_adj_fft(fpr *a, unsigned logn); + +/* + * Multiply polynomial a with polynomial b. a and b MUST NOT overlap. + * This function works only in FFT representation. + */ +void PQCLEAN_FALCON1024_AVX2_poly_mul_fft(fpr *a, const fpr *b, unsigned logn); + +/* + * Multiply polynomial a with the adjoint of polynomial b. a and b MUST NOT + * overlap. This function works only in FFT representation. + */ +void PQCLEAN_FALCON1024_AVX2_poly_muladj_fft(fpr *a, const fpr *b, unsigned logn); + +/* + * Multiply polynomial with its own adjoint. 
This function works only in FFT + * representation. + */ +void PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(fpr *a, unsigned logn); + +/* + * Multiply polynomial with a real constant. This function works in both + * normal and FFT representations. + */ +void PQCLEAN_FALCON1024_AVX2_poly_mulconst(fpr *a, fpr x, unsigned logn); + +/* + * Divide polynomial a by polynomial b, modulo X^N+1 (FFT representation). + * a and b MUST NOT overlap. + */ +void PQCLEAN_FALCON1024_AVX2_poly_div_fft(fpr *a, const fpr *b, unsigned logn); + +/* + * Given f and g (in FFT representation), compute 1/(f*adj(f)+g*adj(g)) + * (also in FFT representation). Since the result is auto-adjoint, all its + * coordinates in FFT representation are real; as such, only the first N/2 + * values of d[] are filled (the imaginary parts are skipped). + * + * Array d MUST NOT overlap with either a or b. + */ +void PQCLEAN_FALCON1024_AVX2_poly_invnorm2_fft(fpr *d, + const fpr *a, const fpr *b, unsigned logn); + +/* + * Given F, G, f and g (in FFT representation), compute F*adj(f)+G*adj(g) + * (also in FFT representation). Destination d MUST NOT overlap with + * any of the source arrays. + */ +void PQCLEAN_FALCON1024_AVX2_poly_add_muladj_fft(fpr *d, + const fpr *F, const fpr *G, + const fpr *f, const fpr *g, unsigned logn); + +/* + * Multiply polynomial a by polynomial b, where b is autoadjoint. Both + * a and b are in FFT representation. Since b is autoadjoint, all its + * FFT coefficients are real, and the array b contains only N/2 elements. + * a and b MUST NOT overlap. + */ +void PQCLEAN_FALCON1024_AVX2_poly_mul_autoadj_fft(fpr *a, + const fpr *b, unsigned logn); + +/* + * Divide polynomial a by polynomial b, where b is autoadjoint. Both + * a and b are in FFT representation. Since b is autoadjoint, all its + * FFT coefficients are real, and the array b contains only N/2 elements. + * a and b MUST NOT overlap. 
+ */ +void PQCLEAN_FALCON1024_AVX2_poly_div_autoadj_fft(fpr *a, + const fpr *b, unsigned logn); + +/* + * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT + * representation. On input, g00, g01 and g11 are provided (where the + * matrix G = [[g00, g01], [adj(g01), g11]]). On output, the d00, l10 + * and d11 values are written in g00, g01 and g11, respectively + * (with D = [[d00, 0], [0, d11]] and L = [[1, 0], [l10, 1]]). + * (In fact, d00 = g00, so the g00 operand is left unmodified.) + */ +void PQCLEAN_FALCON1024_AVX2_poly_LDL_fft(const fpr *g00, + fpr *g01, fpr *g11, unsigned logn); + +/* + * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT + * representation. This is identical to poly_LDL_fft() except that + * g00, g01 and g11 are unmodified; the outputs d11 and l10 are written + * in two other separate buffers provided as extra parameters. + */ +void PQCLEAN_FALCON1024_AVX2_poly_LDLmv_fft(fpr *d11, fpr *l10, + const fpr *g00, const fpr *g01, + const fpr *g11, unsigned logn); + +/* + * Apply "split" operation on a polynomial in FFT representation: + * f = f0(x^2) + x*f1(x^2), for half-size polynomials f0 and f1 + * (polynomials modulo X^(N/2)+1). f0, f1 and f MUST NOT overlap. + */ +void PQCLEAN_FALCON1024_AVX2_poly_split_fft(fpr *f0, fpr *f1, + const fpr *f, unsigned logn); + +/* + * Apply "merge" operation on two polynomials in FFT representation: + * given f0 and f1, polynomials modulo X^(N/2)+1, this function computes + * f = f0(x^2) + x*f1(x^2), in FFT representation modulo X^N+1. + * f MUST NOT overlap with either f0 or f1. + */ +void PQCLEAN_FALCON1024_AVX2_poly_merge_fft(fpr *f, + const fpr *f0, const fpr *f1, unsigned logn); + +/* ==================================================================== */ +/* + * Key pair generation. + */ + +/* + * Required sizes of the temporary buffer (in bytes). + * + * This size is 28*2^logn bytes, except for degrees 2 and 4 (logn = 1 + * or 2) where it is slightly greater.
+ */ +#define FALCON_KEYGEN_TEMP_1 136 +#define FALCON_KEYGEN_TEMP_2 272 +#define FALCON_KEYGEN_TEMP_3 224 +#define FALCON_KEYGEN_TEMP_4 448 +#define FALCON_KEYGEN_TEMP_5 896 +#define FALCON_KEYGEN_TEMP_6 1792 +#define FALCON_KEYGEN_TEMP_7 3584 +#define FALCON_KEYGEN_TEMP_8 7168 +#define FALCON_KEYGEN_TEMP_9 14336 +#define FALCON_KEYGEN_TEMP_10 28672 + +/* + * Generate a new key pair. Randomness is extracted from the provided + * SHAKE256 context, which must have already been seeded and flipped. + * The tmp[] array must have suitable size (see FALCON_KEYGEN_TEMP_* + * macros) and be aligned for the uint32_t, uint64_t and fpr types. + * + * The private key elements are written in f, g, F and G, and the + * public key is written in h. Either or both of G and h may be NULL, + * in which case the corresponding element is not returned (they can + * be recomputed from f, g and F). + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON1024_AVX2_keygen(inner_shake256_context *rng, + int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h, + unsigned logn, uint8_t *tmp); + +/* ==================================================================== */ +/* + * Signature generation. + */ + +/* + * Expand a private key into the B0 matrix in FFT representation and + * the LDL tree. All the values are written in 'expanded_key', for + * a total of (8*logn+40)*2^logn bytes. + * + * The tmp[] array must have room for at least 48*2^logn bytes. + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON1024_AVX2_expand_privkey(fpr *expanded_key, + const int8_t *f, const int8_t *g, const int8_t *F, const int8_t *G, + unsigned logn, uint8_t *tmp); + +/* + * Compute a signature over the provided hashed message (hm); the + * signature value is one short vector. 
This function uses an + * expanded key (as generated by PQCLEAN_FALCON1024_AVX2_expand_privkey()). + * + * The sig[] and hm[] buffers may overlap. + * + * On successful output, the start of the tmp[] buffer contains the s1 + * vector (as int16_t elements). + * + * The minimal size (in bytes) of tmp[] is 48*2^logn bytes. + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON1024_AVX2_sign_tree(int16_t *sig, inner_shake256_context *rng, + const fpr *expanded_key, + const uint16_t *hm, unsigned logn, uint8_t *tmp); + +/* + * Compute a signature over the provided hashed message (hm); the + * signature value is one short vector. This function uses a raw + * key and dynamically recomputes the B0 matrix and LDL tree; this + * saves RAM since there is no need for an expanded key, but + * increases the signature cost. + * + * The sig[] and hm[] buffers may overlap. + * + * On successful output, the start of the tmp[] buffer contains the s1 + * vector (as int16_t elements). + * + * The minimal size (in bytes) of tmp[] is 72*2^logn bytes. + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON1024_AVX2_sign_dyn(int16_t *sig, inner_shake256_context *rng, + const int8_t *f, const int8_t *g, + const int8_t *F, const int8_t *G, + const uint16_t *hm, unsigned logn, uint8_t *tmp); + +/* + * Internal sampler engine. Exported for tests. + * + * sampler_context wraps around a source of random numbers (PRNG) and + * the sigma_min value (nominally dependent on the degree). + * + * sampler() takes as parameters: + * ctx pointer to the sampler_context structure + * mu center for the distribution + * isigma inverse of the distribution standard deviation + * It returns an integer sampled along the Gaussian distribution centered + * on mu and of standard deviation sigma = 1/isigma.
+ * + * gaussian0_sampler() takes as parameter a pointer to a PRNG, and + * returns an integer sampled along a half-Gaussian with standard + * deviation sigma0 = 1.8205 (center is 0, returned value is + * nonnegative). + */ + +typedef struct { + prng p; + fpr sigma_min; +} sampler_context; + +int PQCLEAN_FALCON1024_AVX2_sampler(void *ctx, fpr mu, fpr isigma); + +int PQCLEAN_FALCON1024_AVX2_gaussian0_sampler(prng *p); + +/* ==================================================================== */ + +#endif diff --git a/crypto_sign/falcon-1024/avx2/keygen.c b/crypto_sign/falcon-1024/avx2/keygen.c new file mode 100644 index 00000000..53a3682d --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/keygen.c @@ -0,0 +1,4231 @@ +#include "inner.h" + +/* + * Falcon key pair generation. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +#define MKN(logn) ((size_t)1 << (logn)) + +/* ==================================================================== */ +/* + * Modular arithmetics. + * + * We implement a few functions for computing modulo a small integer p. + * + * All functions require that 2^30 < p < 2^31. Moreover, operands must + * be in the 0..p-1 range. + * + * Modular addition and subtraction work for all such p. + * + * Montgomery multiplication requires that p is odd, and must be provided + * with an additional value p0i = -1/p mod 2^31. See below for some basics + * on Montgomery multiplication. + * + * Division computes an inverse modulo p by an exponentiation (with + * exponent p-2): this works only if p is prime. Multiplication + * requirements also apply, i.e. p must be odd and p0i must be provided. + * + * The NTT and inverse NTT need all of the above, and also that + * p = 1 mod 2048. + * + * ----------------------------------------------------------------------- + * + * We use Montgomery representation with 31-bit values: + * + * Let R = 2^31 mod p. When 2^30 < p < 2^31, R = 2^31 - p. + * Montgomery representation of an integer x modulo p is x*R mod p. + * + * Montgomery multiplication computes (x*y)/R mod p for + * operands x and y. 
Therefore: + * + * - if operands are x*R and y*R (Montgomery representations of x and + * y), then Montgomery multiplication computes (x*R*y*R)/R = (x*y)*R + * mod p, which is the Montgomery representation of the product x*y; + * + * - if operands are x*R and y (or x and y*R), then Montgomery + * multiplication returns x*y mod p: mixed-representation + * multiplications yield results in normal representation. + * + * To convert to Montgomery representation, we multiply by R, which is done + * by Montgomery-multiplying by R^2. Stand-alone conversion back from + * Montgomery representation is Montgomery-multiplication by 1. + */ + +/* + * Precomputed small primes. Each element contains the following: + * + * p The prime itself. + * + * g A primitive root of phi = X^N+1 (in field Z_p). + * + * s The inverse of the product of all previous primes in the array, + * computed modulo p and in Montgomery representation. + * + * All primes are such that p = 1 mod 2048, and are lower than 2^31. They + * are listed in decreasing order. 
+ */ + +typedef struct { + uint32_t p; + uint32_t g; + uint32_t s; +} small_prime; + +static const small_prime PRIMES[] = { + { 2147473409, 383167813, 10239 }, + { 2147389441, 211808905, 471403745 }, + { 2147387393, 37672282, 1329335065 }, + { 2147377153, 1977035326, 968223422 }, + { 2147358721, 1067163706, 132460015 }, + { 2147352577, 1606082042, 598693809 }, + { 2147346433, 2033915641, 1056257184 }, + { 2147338241, 1653770625, 421286710 }, + { 2147309569, 631200819, 1111201074 }, + { 2147297281, 2038364663, 1042003613 }, + { 2147295233, 1962540515, 19440033 }, + { 2147239937, 2100082663, 353296760 }, + { 2147235841, 1991153006, 1703918027 }, + { 2147217409, 516405114, 1258919613 }, + { 2147205121, 409347988, 1089726929 }, + { 2147196929, 927788991, 1946238668 }, + { 2147178497, 1136922411, 1347028164 }, + { 2147100673, 868626236, 701164723 }, + { 2147082241, 1897279176, 617820870 }, + { 2147074049, 1888819123, 158382189 }, + { 2147051521, 25006327, 522758543 }, + { 2147043329, 327546255, 37227845 }, + { 2147039233, 766324424, 1133356428 }, + { 2146988033, 1862817362, 73861329 }, + { 2146963457, 404622040, 653019435 }, + { 2146959361, 1936581214, 995143093 }, + { 2146938881, 1559770096, 634921513 }, + { 2146908161, 422623708, 1985060172 }, + { 2146885633, 1751189170, 298238186 }, + { 2146871297, 578919515, 291810829 }, + { 2146846721, 1114060353, 915902322 }, + { 2146834433, 2069565474, 47859524 }, + { 2146818049, 1552824584, 646281055 }, + { 2146775041, 1906267847, 1597832891 }, + { 2146756609, 1847414714, 1228090888 }, + { 2146744321, 1818792070, 1176377637 }, + { 2146738177, 1118066398, 1054971214 }, + { 2146736129, 52057278, 933422153 }, + { 2146713601, 592259376, 1406621510 }, + { 2146695169, 263161877, 1514178701 }, + { 2146656257, 685363115, 384505091 }, + { 2146650113, 927727032, 537575289 }, + { 2146646017, 52575506, 1799464037 }, + { 2146643969, 1276803876, 1348954416 }, + { 2146603009, 814028633, 1521547704 }, + { 2146572289, 1846678872, 1310832121 }, + 
{ 2146547713, 919368090, 1019041349 }, + { 2146508801, 671847612, 38582496 }, + { 2146492417, 283911680, 532424562 }, + { 2146490369, 1780044827, 896447978 }, + { 2146459649, 327980850, 1327906900 }, + { 2146447361, 1310561493, 958645253 }, + { 2146441217, 412148926, 287271128 }, + { 2146437121, 293186449, 2009822534 }, + { 2146430977, 179034356, 1359155584 }, + { 2146418689, 1517345488, 1790248672 }, + { 2146406401, 1615820390, 1584833571 }, + { 2146404353, 826651445, 607120498 }, + { 2146379777, 3816988, 1897049071 }, + { 2146363393, 1221409784, 1986921567 }, + { 2146355201, 1388081168, 849968120 }, + { 2146336769, 1803473237, 1655544036 }, + { 2146312193, 1023484977, 273671831 }, + { 2146293761, 1074591448, 467406983 }, + { 2146283521, 831604668, 1523950494 }, + { 2146203649, 712865423, 1170834574 }, + { 2146154497, 1764991362, 1064856763 }, + { 2146142209, 627386213, 1406840151 }, + { 2146127873, 1638674429, 2088393537 }, + { 2146099201, 1516001018, 690673370 }, + { 2146093057, 1294931393, 315136610 }, + { 2146091009, 1942399533, 973539425 }, + { 2146078721, 1843461814, 2132275436 }, + { 2146060289, 1098740778, 360423481 }, + { 2146048001, 1617213232, 1951981294 }, + { 2146041857, 1805783169, 2075683489 }, + { 2146019329, 272027909, 1753219918 }, + { 2145986561, 1206530344, 2034028118 }, + { 2145976321, 1243769360, 1173377644 }, + { 2145964033, 887200839, 1281344586 }, + { 2145906689, 1651026455, 906178216 }, + { 2145875969, 1673238256, 1043521212 }, + { 2145871873, 1226591210, 1399796492 }, + { 2145841153, 1465353397, 1324527802 }, + { 2145832961, 1150638905, 554084759 }, + { 2145816577, 221601706, 427340863 }, + { 2145785857, 608896761, 316590738 }, + { 2145755137, 1712054942, 1684294304 }, + { 2145742849, 1302302867, 724873116 }, + { 2145728513, 516717693, 431671476 }, + { 2145699841, 524575579, 1619722537 }, + { 2145691649, 1925625239, 982974435 }, + { 2145687553, 463795662, 1293154300 }, + { 2145673217, 771716636, 881778029 }, + { 2145630209, 1509556977, 
837364988 }, + { 2145595393, 229091856, 851648427 }, + { 2145587201, 1796903241, 635342424 }, + { 2145525761, 715310882, 1677228081 }, + { 2145495041, 1040930522, 200685896 }, + { 2145466369, 949804237, 1809146322 }, + { 2145445889, 1673903706, 95316881 }, + { 2145390593, 806941852, 1428671135 }, + { 2145372161, 1402525292, 159350694 }, + { 2145361921, 2124760298, 1589134749 }, + { 2145359873, 1217503067, 1561543010 }, + { 2145355777, 338341402, 83865711 }, + { 2145343489, 1381532164, 641430002 }, + { 2145325057, 1883895478, 1528469895 }, + { 2145318913, 1335370424, 65809740 }, + { 2145312769, 2000008042, 1919775760 }, + { 2145300481, 961450962, 1229540578 }, + { 2145282049, 910466767, 1964062701 }, + { 2145232897, 816527501, 450152063 }, + { 2145218561, 1435128058, 1794509700 }, + { 2145187841, 33505311, 1272467582 }, + { 2145181697, 269767433, 1380363849 }, + { 2145175553, 56386299, 1316870546 }, + { 2145079297, 2106880293, 1391797340 }, + { 2145021953, 1347906152, 720510798 }, + { 2145015809, 206769262, 1651459955 }, + { 2145003521, 1885513236, 1393381284 }, + { 2144960513, 1810381315, 31937275 }, + { 2144944129, 1306487838, 2019419520 }, + { 2144935937, 37304730, 1841489054 }, + { 2144894977, 1601434616, 157985831 }, + { 2144888833, 98749330, 2128592228 }, + { 2144880641, 1772327002, 2076128344 }, + { 2144864257, 1404514762, 2029969964 }, + { 2144827393, 801236594, 406627220 }, + { 2144806913, 349217443, 1501080290 }, + { 2144796673, 1542656776, 2084736519 }, + { 2144778241, 1210734884, 1746416203 }, + { 2144759809, 1146598851, 716464489 }, + { 2144757761, 286328400, 1823728177 }, + { 2144729089, 1347555695, 1836644881 }, + { 2144727041, 1795703790, 520296412 }, + { 2144696321, 1302475157, 852964281 }, + { 2144667649, 1075877614, 504992927 }, + { 2144573441, 198765808, 1617144982 }, + { 2144555009, 321528767, 155821259 }, + { 2144550913, 814139516, 1819937644 }, + { 2144536577, 571143206, 962942255 }, + { 2144524289, 1746733766, 2471321 }, + { 2144512001, 
1821415077, 124190939 }, + { 2144468993, 917871546, 1260072806 }, + { 2144458753, 378417981, 1569240563 }, + { 2144421889, 175229668, 1825620763 }, + { 2144409601, 1699216963, 351648117 }, + { 2144370689, 1071885991, 958186029 }, + { 2144348161, 1763151227, 540353574 }, + { 2144335873, 1060214804, 919598847 }, + { 2144329729, 663515846, 1448552668 }, + { 2144327681, 1057776305, 590222840 }, + { 2144309249, 1705149168, 1459294624 }, + { 2144296961, 325823721, 1649016934 }, + { 2144290817, 738775789, 447427206 }, + { 2144243713, 962347618, 893050215 }, + { 2144237569, 1655257077, 900860862 }, + { 2144161793, 242206694, 1567868672 }, + { 2144155649, 769415308, 1247993134 }, + { 2144137217, 320492023, 515841070 }, + { 2144120833, 1639388522, 770877302 }, + { 2144071681, 1761785233, 964296120 }, + { 2144065537, 419817825, 204564472 }, + { 2144028673, 666050597, 2091019760 }, + { 2144010241, 1413657615, 1518702610 }, + { 2143952897, 1238327946, 475672271 }, + { 2143940609, 307063413, 1176750846 }, + { 2143918081, 2062905559, 786785803 }, + { 2143899649, 1338112849, 1562292083 }, + { 2143891457, 68149545, 87166451 }, + { 2143885313, 921750778, 394460854 }, + { 2143854593, 719766593, 133877196 }, + { 2143836161, 1149399850, 1861591875 }, + { 2143762433, 1848739366, 1335934145 }, + { 2143756289, 1326674710, 102999236 }, + { 2143713281, 808061791, 1156900308 }, + { 2143690753, 388399459, 1926468019 }, + { 2143670273, 1427891374, 1756689401 }, + { 2143666177, 1912173949, 986629565 }, + { 2143645697, 2041160111, 371842865 }, + { 2143641601, 1279906897, 2023974350 }, + { 2143635457, 720473174, 1389027526 }, + { 2143621121, 1298309455, 1732632006 }, + { 2143598593, 1548762216, 1825417506 }, + { 2143567873, 620475784, 1073787233 }, + { 2143561729, 1932954575, 949167309 }, + { 2143553537, 354315656, 1652037534 }, + { 2143541249, 577424288, 1097027618 }, + { 2143531009, 357862822, 478640055 }, + { 2143522817, 2017706025, 1550531668 }, + { 2143506433, 2078127419, 1824320165 }, + { 
2143488001, 613475285, 1604011510 }, + { 2143469569, 1466594987, 502095196 }, + { 2143426561, 1115430331, 1044637111 }, + { 2143383553, 9778045, 1902463734 }, + { 2143377409, 1557401276, 2056861771 }, + { 2143363073, 652036455, 1965915971 }, + { 2143260673, 1464581171, 1523257541 }, + { 2143246337, 1876119649, 764541916 }, + { 2143209473, 1614992673, 1920672844 }, + { 2143203329, 981052047, 2049774209 }, + { 2143160321, 1847355533, 728535665 }, + { 2143129601, 965558457, 603052992 }, + { 2143123457, 2140817191, 8348679 }, + { 2143100929, 1547263683, 694209023 }, + { 2143092737, 643459066, 1979934533 }, + { 2143082497, 188603778, 2026175670 }, + { 2143062017, 1657329695, 377451099 }, + { 2143051777, 114967950, 979255473 }, + { 2143025153, 1698431342, 1449196896 }, + { 2143006721, 1862741675, 1739650365 }, + { 2142996481, 756660457, 996160050 }, + { 2142976001, 927864010, 1166847574 }, + { 2142965761, 905070557, 661974566 }, + { 2142916609, 40932754, 1787161127 }, + { 2142892033, 1987985648, 675335382 }, + { 2142885889, 797497211, 1323096997 }, + { 2142871553, 2068025830, 1411877159 }, + { 2142861313, 1217177090, 1438410687 }, + { 2142830593, 409906375, 1767860634 }, + { 2142803969, 1197788993, 359782919 }, + { 2142785537, 643817365, 513932862 }, + { 2142779393, 1717046338, 218943121 }, + { 2142724097, 89336830, 416687049 }, + { 2142707713, 5944581, 1356813523 }, + { 2142658561, 887942135, 2074011722 }, + { 2142638081, 151851972, 1647339939 }, + { 2142564353, 1691505537, 1483107336 }, + { 2142533633, 1989920200, 1135938817 }, + { 2142529537, 959263126, 1531961857 }, + { 2142527489, 453251129, 1725566162 }, + { 2142502913, 1536028102, 182053257 }, + { 2142498817, 570138730, 701443447 }, + { 2142416897, 326965800, 411931819 }, + { 2142363649, 1675665410, 1517191733 }, + { 2142351361, 968529566, 1575712703 }, + { 2142330881, 1384953238, 1769087884 }, + { 2142314497, 1977173242, 1833745524 }, + { 2142289921, 95082313, 1714775493 }, + { 2142283777, 109377615, 1070584533 
}, + { 2142277633, 16960510, 702157145 }, + { 2142263297, 553850819, 431364395 }, + { 2142208001, 241466367, 2053967982 }, + { 2142164993, 1795661326, 1031836848 }, + { 2142097409, 1212530046, 712772031 }, + { 2142087169, 1763869720, 822276067 }, + { 2142078977, 644065713, 1765268066 }, + { 2142074881, 112671944, 643204925 }, + { 2142044161, 1387785471, 1297890174 }, + { 2142025729, 783885537, 1000425730 }, + { 2142011393, 905662232, 1679401033 }, + { 2141974529, 799788433, 468119557 }, + { 2141943809, 1932544124, 449305555 }, + { 2141933569, 1527403256, 841867925 }, + { 2141931521, 1247076451, 743823916 }, + { 2141902849, 1199660531, 401687910 }, + { 2141890561, 150132350, 1720336972 }, + { 2141857793, 1287438162, 663880489 }, + { 2141833217, 618017731, 1819208266 }, + { 2141820929, 999578638, 1403090096 }, + { 2141786113, 81834325, 1523542501 }, + { 2141771777, 120001928, 463556492 }, + { 2141759489, 122455485, 2124928282 }, + { 2141749249, 141986041, 940339153 }, + { 2141685761, 889088734, 477141499 }, + { 2141673473, 324212681, 1122558298 }, + { 2141669377, 1175806187, 1373818177 }, + { 2141655041, 1113654822, 296887082 }, + { 2141587457, 991103258, 1585913875 }, + { 2141583361, 1401451409, 1802457360 }, + { 2141575169, 1571977166, 712760980 }, + { 2141546497, 1107849376, 1250270109 }, + { 2141515777, 196544219, 356001130 }, + { 2141495297, 1733571506, 1060744866 }, + { 2141483009, 321552363, 1168297026 }, + { 2141458433, 505818251, 733225819 }, + { 2141360129, 1026840098, 948342276 }, + { 2141325313, 945133744, 2129965998 }, + { 2141317121, 1871100260, 1843844634 }, + { 2141286401, 1790639498, 1750465696 }, + { 2141267969, 1376858592, 186160720 }, + { 2141255681, 2129698296, 1876677959 }, + { 2141243393, 2138900688, 1340009628 }, + { 2141214721, 1933049835, 1087819477 }, + { 2141212673, 1898664939, 1786328049 }, + { 2141202433, 990234828, 940682169 }, + { 2141175809, 1406392421, 993089586 }, + { 2141165569, 1263518371, 289019479 }, + { 2141073409, 1485624211, 
507864514 }, + { 2141052929, 1885134788, 311252465 }, + { 2141040641, 1285021247, 280941862 }, + { 2141028353, 1527610374, 375035110 }, + { 2141011969, 1400626168, 164696620 }, + { 2140999681, 632959608, 966175067 }, + { 2140997633, 2045628978, 1290889438 }, + { 2140993537, 1412755491, 375366253 }, + { 2140942337, 719477232, 785367828 }, + { 2140925953, 45224252, 836552317 }, + { 2140917761, 1157376588, 1001839569 }, + { 2140887041, 278480752, 2098732796 }, + { 2140837889, 1663139953, 924094810 }, + { 2140788737, 802501511, 2045368990 }, + { 2140766209, 1820083885, 1800295504 }, + { 2140764161, 1169561905, 2106792035 }, + { 2140696577, 127781498, 1885987531 }, + { 2140684289, 16014477, 1098116827 }, + { 2140653569, 665960598, 1796728247 }, + { 2140594177, 1043085491, 377310938 }, + { 2140579841, 1732838211, 1504505945 }, + { 2140569601, 302071939, 358291016 }, + { 2140567553, 192393733, 1909137143 }, + { 2140557313, 406595731, 1175330270 }, + { 2140549121, 1748850918, 525007007 }, + { 2140477441, 499436566, 1031159814 }, + { 2140469249, 1886004401, 1029951320 }, + { 2140426241, 1483168100, 1676273461 }, + { 2140420097, 1779917297, 846024476 }, + { 2140413953, 522948893, 1816354149 }, + { 2140383233, 1931364473, 1296921241 }, + { 2140366849, 1917356555, 147196204 }, + { 2140354561, 16466177, 1349052107 }, + { 2140348417, 1875366972, 1860485634 }, + { 2140323841, 456498717, 1790256483 }, + { 2140321793, 1629493973, 150031888 }, + { 2140315649, 1904063898, 395510935 }, + { 2140280833, 1784104328, 831417909 }, + { 2140250113, 256087139, 697349101 }, + { 2140229633, 388553070, 243875754 }, + { 2140223489, 747459608, 1396270850 }, + { 2140200961, 507423743, 1895572209 }, + { 2140162049, 580106016, 2045297469 }, + { 2140149761, 712426444, 785217995 }, + { 2140137473, 1441607584, 536866543 }, + { 2140119041, 346538902, 1740434653 }, + { 2140090369, 282642885, 21051094 }, + { 2140076033, 1407456228, 319910029 }, + { 2140047361, 1619330500, 1488632070 }, + { 2140041217, 
2089408064, 2012026134 }, + { 2140008449, 1705524800, 1613440760 }, + { 2139924481, 1846208233, 1280649481 }, + { 2139906049, 989438755, 1185646076 }, + { 2139867137, 1522314850, 372783595 }, + { 2139842561, 1681587377, 216848235 }, + { 2139826177, 2066284988, 1784999464 }, + { 2139824129, 480888214, 1513323027 }, + { 2139789313, 847937200, 858192859 }, + { 2139783169, 1642000434, 1583261448 }, + { 2139770881, 940699589, 179702100 }, + { 2139768833, 315623242, 964612676 }, + { 2139666433, 331649203, 764666914 }, + { 2139641857, 2118730799, 1313764644 }, + { 2139635713, 519149027, 519212449 }, + { 2139598849, 1526413634, 1769667104 }, + { 2139574273, 551148610, 820739925 }, + { 2139568129, 1386800242, 472447405 }, + { 2139549697, 813760130, 1412328531 }, + { 2139537409, 1615286260, 1609362979 }, + { 2139475969, 1352559299, 1696720421 }, + { 2139455489, 1048691649, 1584935400 }, + { 2139432961, 836025845, 950121150 }, + { 2139424769, 1558281165, 1635486858 }, + { 2139406337, 1728402143, 1674423301 }, + { 2139396097, 1727715782, 1483470544 }, + { 2139383809, 1092853491, 1741699084 }, + { 2139369473, 690776899, 1242798709 }, + { 2139351041, 1768782380, 2120712049 }, + { 2139334657, 1739968247, 1427249225 }, + { 2139332609, 1547189119, 623011170 }, + { 2139310081, 1346827917, 1605466350 }, + { 2139303937, 369317948, 828392831 }, + { 2139301889, 1560417239, 1788073219 }, + { 2139283457, 1303121623, 595079358 }, + { 2139248641, 1354555286, 573424177 }, + { 2139240449, 60974056, 885781403 }, + { 2139222017, 355573421, 1221054839 }, + { 2139215873, 566477826, 1724006500 }, + { 2139150337, 871437673, 1609133294 }, + { 2139144193, 1478130914, 1137491905 }, + { 2139117569, 1854880922, 964728507 }, + { 2139076609, 202405335, 756508944 }, + { 2139062273, 1399715741, 884826059 }, + { 2139045889, 1051045798, 1202295476 }, + { 2139033601, 1707715206, 632234634 }, + { 2139006977, 2035853139, 231626690 }, + { 2138951681, 183867876, 838350879 }, + { 2138945537, 1403254661, 404460202 
}, + { 2138920961, 310865011, 1282911681 }, + { 2138910721, 1328496553, 103472415 }, + { 2138904577, 78831681, 993513549 }, + { 2138902529, 1319697451, 1055904361 }, + { 2138816513, 384338872, 1706202469 }, + { 2138810369, 1084868275, 405677177 }, + { 2138787841, 401181788, 1964773901 }, + { 2138775553, 1850532988, 1247087473 }, + { 2138767361, 874261901, 1576073565 }, + { 2138757121, 1187474742, 993541415 }, + { 2138748929, 1782458888, 1043206483 }, + { 2138744833, 1221500487, 800141243 }, + { 2138738689, 413465368, 1450660558 }, + { 2138695681, 739045140, 342611472 }, + { 2138658817, 1355845756, 672674190 }, + { 2138644481, 608379162, 1538874380 }, + { 2138632193, 1444914034, 686911254 }, + { 2138607617, 484707818, 1435142134 }, + { 2138591233, 539460669, 1290458549 }, + { 2138572801, 2093538990, 2011138646 }, + { 2138552321, 1149786988, 1076414907 }, + { 2138546177, 840688206, 2108985273 }, + { 2138533889, 209669619, 198172413 }, + { 2138523649, 1975879426, 1277003968 }, + { 2138490881, 1351891144, 1976858109 }, + { 2138460161, 1817321013, 1979278293 }, + { 2138429441, 1950077177, 203441928 }, + { 2138400769, 908970113, 628395069 }, + { 2138398721, 219890864, 758486760 }, + { 2138376193, 1306654379, 977554090 }, + { 2138351617, 298822498, 2004708503 }, + { 2138337281, 441457816, 1049002108 }, + { 2138320897, 1517731724, 1442269609 }, + { 2138290177, 1355911197, 1647139103 }, + { 2138234881, 531313247, 1746591962 }, + { 2138214401, 1899410930, 781416444 }, + { 2138202113, 1813477173, 1622508515 }, + { 2138191873, 1086458299, 1025408615 }, + { 2138183681, 1998800427, 827063290 }, + { 2138173441, 1921308898, 749670117 }, + { 2138103809, 1620902804, 2126787647 }, + { 2138099713, 828647069, 1892961817 }, + { 2138085377, 179405355, 1525506535 }, + { 2138060801, 615683235, 1259580138 }, + { 2138044417, 2030277840, 1731266562 }, + { 2138042369, 2087222316, 1627902259 }, + { 2138032129, 126388712, 1108640984 }, + { 2138011649, 715026550, 1017980050 }, + { 2137993217, 
1693714349, 1351778704 }, + { 2137888769, 1289762259, 1053090405 }, + { 2137853953, 199991890, 1254192789 }, + { 2137833473, 941421685, 896995556 }, + { 2137817089, 750416446, 1251031181 }, + { 2137792513, 798075119, 368077456 }, + { 2137786369, 878543495, 1035375025 }, + { 2137767937, 9351178, 1156563902 }, + { 2137755649, 1382297614, 1686559583 }, + { 2137724929, 1345472850, 1681096331 }, + { 2137704449, 834666929, 630551727 }, + { 2137673729, 1646165729, 1892091571 }, + { 2137620481, 778943821, 48456461 }, + { 2137618433, 1730837875, 1713336725 }, + { 2137581569, 805610339, 1378891359 }, + { 2137538561, 204342388, 1950165220 }, + { 2137526273, 1947629754, 1500789441 }, + { 2137516033, 719902645, 1499525372 }, + { 2137491457, 230451261, 556382829 }, + { 2137440257, 979573541, 412760291 }, + { 2137374721, 927841248, 1954137185 }, + { 2137362433, 1243778559, 861024672 }, + { 2137313281, 1341338501, 980638386 }, + { 2137311233, 937415182, 1793212117 }, + { 2137255937, 795331324, 1410253405 }, + { 2137243649, 150756339, 1966999887 }, + { 2137182209, 163346914, 1939301431 }, + { 2137171969, 1952552395, 758913141 }, + { 2137159681, 570788721, 218668666 }, + { 2137147393, 1896656810, 2045670345 }, + { 2137141249, 358493842, 518199643 }, + { 2137139201, 1505023029, 674695848 }, + { 2137133057, 27911103, 830956306 }, + { 2137122817, 439771337, 1555268614 }, + { 2137116673, 790988579, 1871449599 }, + { 2137110529, 432109234, 811805080 }, + { 2137102337, 1357900653, 1184997641 }, + { 2137098241, 515119035, 1715693095 }, + { 2137090049, 408575203, 2085660657 }, + { 2137085953, 2097793407, 1349626963 }, + { 2137055233, 1556739954, 1449960883 }, + { 2137030657, 1545758650, 1369303716 }, + { 2136987649, 332602570, 103875114 }, + { 2136969217, 1499989506, 1662964115 }, + { 2136924161, 857040753, 4738842 }, + { 2136895489, 1948872712, 570436091 }, + { 2136893441, 58969960, 1568349634 }, + { 2136887297, 2127193379, 273612548 }, + { 2136850433, 111208983, 1181257116 }, + { 
2136809473, 1627275942, 1680317971 }, + { 2136764417, 1574888217, 14011331 }, + { 2136741889, 14011055, 1129154251 }, + { 2136727553, 35862563, 1838555253 }, + { 2136721409, 310235666, 1363928244 }, + { 2136698881, 1612429202, 1560383828 }, + { 2136649729, 1138540131, 800014364 }, + { 2136606721, 602323503, 1433096652 }, + { 2136563713, 182209265, 1919611038 }, + { 2136555521, 324156477, 165591039 }, + { 2136549377, 195513113, 217165345 }, + { 2136526849, 1050768046, 939647887 }, + { 2136508417, 1886286237, 1619926572 }, + { 2136477697, 609647664, 35065157 }, + { 2136471553, 679352216, 1452259468 }, + { 2136457217, 128630031, 824816521 }, + { 2136422401, 19787464, 1526049830 }, + { 2136420353, 698316836, 1530623527 }, + { 2136371201, 1651862373, 1804812805 }, + { 2136334337, 326596005, 336977082 }, + { 2136322049, 63253370, 1904972151 }, + { 2136297473, 312176076, 172182411 }, + { 2136248321, 381261841, 369032670 }, + { 2136242177, 358688773, 1640007994 }, + { 2136229889, 512677188, 75585225 }, + { 2136219649, 2095003250, 1970086149 }, + { 2136207361, 1909650722, 537760675 }, + { 2136176641, 1334616195, 1533487619 }, + { 2136158209, 2096285632, 1793285210 }, + { 2136143873, 1897347517, 293843959 }, + { 2136133633, 923586222, 1022655978 }, + { 2136096769, 1464868191, 1515074410 }, + { 2136094721, 2020679520, 2061636104 }, + { 2136076289, 290798503, 1814726809 }, + { 2136041473, 156415894, 1250757633 }, + { 2135996417, 297459940, 1132158924 }, + { 2135955457, 538755304, 1688831340 }, + { 0, 0, 0 } +}; + +/* + * Reduce a small signed integer modulo a small prime. The source + * value x MUST be such that -p < x < p. + */ +static inline uint32_t +modp_set(int32_t x, uint32_t p) { + uint32_t w; + + w = (uint32_t)x; + w += p & -(w >> 31); + return w; +} + +/* + * Normalize a modular integer around 0. + */ +static inline int32_t +modp_norm(uint32_t x, uint32_t p) { + return (int32_t)(x - (p & (((x - ((p + 1) >> 1)) >> 31) - 1))); +} + +/* + * Compute -1/p mod 2^31. 
This works for all odd integers p that fit + * on 31 bits. + */ +static uint32_t +modp_ninv31(uint32_t p) { + uint32_t y; + + y = 2 - p; + y *= 2 - p * y; + y *= 2 - p * y; + y *= 2 - p * y; + y *= 2 - p * y; + return (uint32_t)0x7FFFFFFF & -y; +} + +/* + * Compute R = 2^31 mod p. + */ +static inline uint32_t +modp_R(uint32_t p) { + /* + * Since 2^30 < p < 2^31, we know that 2^31 mod p is simply + * 2^31 - p. + */ + return ((uint32_t)1 << 31) - p; +} + +/* + * Addition modulo p. + */ +static inline uint32_t +modp_add(uint32_t a, uint32_t b, uint32_t p) { + uint32_t d; + + d = a + b - p; + d += p & -(d >> 31); + return d; +} + +/* + * Subtraction modulo p. + */ +static inline uint32_t +modp_sub(uint32_t a, uint32_t b, uint32_t p) { + uint32_t d; + + d = a - b; + d += p & -(d >> 31); + return d; +} + +/* + * Halving modulo p. + */ +/* unused +static inline uint32_t +modp_half(uint32_t a, uint32_t p) +{ + a += p & -(a & 1); + return a >> 1; +} +*/ + +/* + * Montgomery multiplication modulo p. The 'p0i' value is -1/p mod 2^31. + * It is required that p is an odd integer. + */ +static inline uint32_t +modp_montymul(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i) { + uint64_t z, w; + uint32_t d; + + z = (uint64_t)a * (uint64_t)b; + w = ((z * p0i) & (uint64_t)0x7FFFFFFF) * p; + d = (uint32_t)((z + w) >> 31) - p; + d += p & -(d >> 31); + return d; +} + +/* + * Compute R2 = 2^62 mod p. + */ +static uint32_t +modp_R2(uint32_t p, uint32_t p0i) { + uint32_t z; + + /* + * Compute z = 2^31 mod p (this is the value 1 in Montgomery + * representation), then double it with an addition. + */ + z = modp_R(p); + z = modp_add(z, z, p); + + /* + * Square it five times to obtain 2^32 in Montgomery representation + * (i.e. 2^63 mod p). + */ + z = modp_montymul(z, z, p, p0i); + z = modp_montymul(z, z, p, p0i); + z = modp_montymul(z, z, p, p0i); + z = modp_montymul(z, z, p, p0i); + z = modp_montymul(z, z, p, p0i); + + /* + * Halve the value mod p to get 2^62. 
+ */ + z = (z + (p & -(z & 1))) >> 1; + return z; +} + +/* + * Compute 2^(31*x) modulo p. This works for integers x up to 2^11. + * p must be prime such that 2^30 < p < 2^31; p0i must be equal to + * -1/p mod 2^31; R2 must be equal to 2^62 mod p. + */ +static inline uint32_t +modp_Rx(unsigned x, uint32_t p, uint32_t p0i, uint32_t R2) { + int i; + uint32_t r, z; + + /* + * 2^(31*x) = (2^31)*(2^(31*(x-1))); i.e. we want the Montgomery + * representation of (2^31)^e mod p, where e = x-1. + * R2 is 2^31 in Montgomery representation. + */ + x --; + r = R2; + z = modp_R(p); + for (i = 0; (1U << i) <= x; i ++) { + if ((x & (1U << i)) != 0) { + z = modp_montymul(z, r, p, p0i); + } + r = modp_montymul(r, r, p, p0i); + } + return z; +} + +/* + * Division modulo p. If the divisor (b) is 0, then 0 is returned. + * This function computes proper results only when p is prime. + * Parameters: + * a dividend + * b divisor + * p odd prime modulus + * p0i -1/p mod 2^31 + * R 2^31 mod R + */ +static uint32_t +modp_div(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i, uint32_t R) { + uint32_t z, e; + int i; + + e = p - 2; + z = R; + for (i = 30; i >= 0; i --) { + uint32_t z2; + + z = modp_montymul(z, z, p, p0i); + z2 = modp_montymul(z, b, p, p0i); + z ^= (z ^ z2) & -(uint32_t)((e >> i) & 1); + } + + /* + * The loop above just assumed that b was in Montgomery + * representation, i.e. really contained b*R; under that + * assumption, it returns 1/b in Montgomery representation, + * which is R/b. But we gave it b in normal representation, + * so the loop really returned R/(b/R) = R^2/b. + * + * We want a/b, so we need one Montgomery multiplication with a, + * which also remove one of the R factors, and another such + * multiplication to remove the second R factor. + */ + z = modp_montymul(z, 1, p, p0i); + return modp_montymul(a, z, p, p0i); +} + +/* + * Bit-reversal index table. 
+ */ +static const uint16_t REV10[] = { + 0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832, + 192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928, + 96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784, + 144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976, + 48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880, + 240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904, + 72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808, + 168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000, + 24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856, + 216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952, + 120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772, + 132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964, + 36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868, + 228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 916, + 84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820, + 180, 692, 436, 948, 116, 628, 372, 884, 244, 756, 500, 1012, + 12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844, + 204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940, + 108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796, + 156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988, + 60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892, + 252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898, + 66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802, + 162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 994, + 18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850, + 210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946, + 114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778, + 138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970, + 42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874, + 234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922, + 90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826, + 186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018, + 6, 
518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838, + 198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934, + 102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790, + 150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982, + 54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886, + 246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910, + 78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814, + 174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006, + 30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862, + 222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958, + 126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769, + 129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961, + 33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865, + 225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913, + 81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817, + 177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009, + 9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841, + 201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937, + 105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793, + 153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985, + 57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889, + 249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901, + 69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805, + 165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997, + 21, 533, 277, 789, 149, 661, 405, 917, 85, 597, 341, 853, + 213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949, + 117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781, + 141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973, + 45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877, + 237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925, + 93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829, + 189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021, + 3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 
323, 835, + 195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931, + 99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787, + 147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979, + 51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883, + 243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907, + 75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811, + 171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003, + 27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859, + 219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955, + 123, 635, 379, 891, 251, 763, 507, 1019, 7, 519, 263, 775, + 135, 647, 391, 903, 71, 583, 327, 839, 199, 711, 455, 967, + 39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871, + 231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919, + 87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823, + 183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015, + 15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847, + 207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943, + 111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799, + 159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991, + 63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895, + 255, 767, 511, 1023 +}; + +/* + * Compute the roots for NTT and inverse NTT (binary case). Input + * parameter g is a primitive 2048-th root of 1 modulo p (i.e. g^1024 = + * -1 mod p). This fills gm[] and igm[] with powers of g and 1/g: + * gm[rev(i)] = g^i mod p + * igm[rev(i)] = (1/g)^i mod p + * where rev() is the "bit reversal" function over 10 bits. It fills + * the arrays only up to N = 2^logn values. + * + * The values stored in gm[] and igm[] are in Montgomery representation. + * + * p must be a prime such that p = 1 mod 2048. 
+ */ +static void +modp_mkgm2(uint32_t *gm, uint32_t *igm, unsigned logn, + uint32_t g, uint32_t p, uint32_t p0i) { + size_t u, n; + unsigned k; + uint32_t ig, x1, x2, R2; + + n = (size_t)1 << logn; + + /* + * We want g such that g^(2N) = 1 mod p, but the provided + * generator has order 2048. We must square it a few times. + */ + R2 = modp_R2(p, p0i); + g = modp_montymul(g, R2, p, p0i); + for (k = logn; k < 10; k ++) { + g = modp_montymul(g, g, p, p0i); + } + + ig = modp_div(R2, g, p, p0i, modp_R(p)); + k = 10 - logn; + x1 = x2 = modp_R(p); + for (u = 0; u < n; u ++) { + size_t v; + + v = REV10[u << k]; + gm[v] = x1; + igm[v] = x2; + x1 = modp_montymul(x1, g, p, p0i); + x2 = modp_montymul(x2, ig, p, p0i); + } +} + +/* + * Compute the NTT over a polynomial (binary case). Polynomial elements + * are a[0], a[stride], a[2 * stride]... + */ +static void +modp_NTT2_ext(uint32_t *a, size_t stride, const uint32_t *gm, unsigned logn, + uint32_t p, uint32_t p0i) { + size_t t, m, n; + + if (logn == 0) { + return; + } + n = (size_t)1 << logn; + t = n; + for (m = 1; m < n; m <<= 1) { + size_t ht, u, v1; + + ht = t >> 1; + for (u = 0, v1 = 0; u < m; u ++, v1 += t) { + uint32_t s; + size_t v; + uint32_t *r1, *r2; + + s = gm[m + u]; + r1 = a + v1 * stride; + r2 = r1 + ht * stride; + for (v = 0; v < ht; v ++, r1 += stride, r2 += stride) { + uint32_t x, y; + + x = *r1; + y = modp_montymul(*r2, s, p, p0i); + *r1 = modp_add(x, y, p); + *r2 = modp_sub(x, y, p); + } + } + t = ht; + } +} + +/* + * Compute the inverse NTT over a polynomial (binary case). 
+ */ +static void +modp_iNTT2_ext(uint32_t *a, size_t stride, const uint32_t *igm, unsigned logn, + uint32_t p, uint32_t p0i) { + size_t t, m, n, k; + uint32_t ni; + uint32_t *r; + + if (logn == 0) { + return; + } + n = (size_t)1 << logn; + t = 1; + for (m = n; m > 1; m >>= 1) { + size_t hm, dt, u, v1; + + hm = m >> 1; + dt = t << 1; + for (u = 0, v1 = 0; u < hm; u ++, v1 += dt) { + uint32_t s; + size_t v; + uint32_t *r1, *r2; + + s = igm[hm + u]; + r1 = a + v1 * stride; + r2 = r1 + t * stride; + for (v = 0; v < t; v ++, r1 += stride, r2 += stride) { + uint32_t x, y; + + x = *r1; + y = *r2; + *r1 = modp_add(x, y, p); + *r2 = modp_montymul( + modp_sub(x, y, p), s, p, p0i);; + } + } + t = dt; + } + + /* + * We need 1/n in Montgomery representation, i.e. R/n. Since + * 1 <= logn <= 10, R/n is an integer; morever, R/n <= 2^30 < p, + * thus a simple shift will do. + */ + ni = (uint32_t)1 << (31 - logn); + for (k = 0, r = a; k < n; k ++, r += stride) { + *r = modp_montymul(*r, ni, p, p0i); + } +} + +/* + * Simplified macros for NTT and iNTT (binary case) when the elements + * are consecutive in RAM. + */ +#define modp_NTT2(a, gm, logn, p, p0i) modp_NTT2_ext(a, 1, gm, logn, p, p0i) +#define modp_iNTT2(a, igm, logn, p, p0i) modp_iNTT2_ext(a, 1, igm, logn, p, p0i) + +/* + * Given polynomial f in NTT representation modulo p, compute f' of degree + * less than N/2 such that f' = f0^2 - X*f1^2, where f0 and f1 are + * polynomials of degree less than N/2 such that f = f0(X^2) + X*f1(X^2). + * + * The new polynomial is written "in place" over the first N/2 elements + * of f. + * + * If applied logn times successively on a given polynomial, the resulting + * degree-0 polynomial is the resultant of f and X^N+1 modulo p. + * + * This function applies only to the binary case; it is invoked from + * solve_NTRU_binary_depth1(). 
+ */ +static void +modp_poly_rec_res(uint32_t *f, unsigned logn, + uint32_t p, uint32_t p0i, uint32_t R2) { + size_t hn, u; + + hn = (size_t)1 << (logn - 1); + for (u = 0; u < hn; u ++) { + uint32_t w0, w1; + + w0 = f[(u << 1) + 0]; + w1 = f[(u << 1) + 1]; + f[u] = modp_montymul(modp_montymul(w0, w1, p, p0i), R2, p, p0i); + } +} + +/* ==================================================================== */ +/* + * Custom bignum implementation. + * + * This is a very reduced set of functionalities. We need to do the + * following operations: + * + * - Rebuild the resultant and the polynomial coefficients from their + * values modulo small primes (of length 31 bits each). + * + * - Compute an extended GCD between the two computed resultants. + * + * - Extract top bits and add scaled values during the successive steps + * of Babai rounding. + * + * When rebuilding values using CRT, we must also recompute the product + * of the small prime factors. We always do it one small factor at a + * time, so the "complicated" operations can be done modulo the small + * prime with the modp_* functions. CRT coefficients (inverses) are + * precomputed. + * + * All values are positive until the last step: when the polynomial + * coefficients have been rebuilt, we normalize them around 0. But then, + * only additions and subtractions on the upper few bits are needed + * afterwards. + * + * We keep big integers as arrays of 31-bit words (in uint32_t values); + * the top bit of each uint32_t is kept equal to 0. Using 31-bit words + * makes it easier to keep track of carries. When negative values are + * used, two's complement is used. + */ + +/* + * Subtract integer b from integer a. Both integers are supposed to have + * the same size. The carry (0 or 1) is returned. Source arrays a and b + * MUST be distinct. + * + * The operation is performed as described above if ctr = 1. 
If + * ctl = 0, the value a[] is unmodified, but all memory accesses are + * still performed, and the carry is computed and returned. + */ +static uint32_t +zint_sub(uint32_t *a, const uint32_t *b, size_t len, + uint32_t ctl) { + size_t u; + uint32_t cc, m; + + cc = 0; + m = -ctl; + for (u = 0; u < len; u ++) { + uint32_t aw, w; + + aw = a[u]; + w = aw - b[u] - cc; + cc = w >> 31; + aw ^= ((w & 0x7FFFFFFF) ^ aw) & m; + a[u] = aw; + } + return cc; +} + +/* + * Mutiply the provided big integer m with a small value x. + * This function assumes that x < 2^31. The carry word is returned. + */ +static uint32_t +zint_mul_small(uint32_t *m, size_t mlen, uint32_t x) { + size_t u; + uint32_t cc; + + cc = 0; + for (u = 0; u < mlen; u ++) { + uint64_t z; + + z = (uint64_t)m[u] * (uint64_t)x + cc; + m[u] = (uint32_t)z & 0x7FFFFFFF; + cc = (uint32_t)(z >> 31); + } + return cc; +} + +/* + * Reduce a big integer d modulo a small integer p. + * Rules: + * d is unsigned + * p is prime + * 2^30 < p < 2^31 + * p0i = -(1/p) mod 2^31 + * R2 = 2^62 mod p + */ +static uint32_t +zint_mod_small_unsigned(const uint32_t *d, size_t dlen, + uint32_t p, uint32_t p0i, uint32_t R2) { + uint32_t x; + size_t u; + + /* + * Algorithm: we inject words one by one, starting with the high + * word. Each step is: + * - multiply x by 2^31 + * - add new word + */ + x = 0; + u = dlen; + while (u -- > 0) { + uint32_t w; + + x = modp_montymul(x, R2, p, p0i); + w = d[u] - p; + w += p & -(w >> 31); + x = modp_add(x, w, p); + } + return x; +} + +/* + * Similar to zint_mod_small_unsigned(), except that d may be signed. + * Extra parameter is Rx = 2^(31*dlen) mod p. + */ +static uint32_t +zint_mod_small_signed(const uint32_t *d, size_t dlen, + uint32_t p, uint32_t p0i, uint32_t R2, uint32_t Rx) { + uint32_t z; + + if (dlen == 0) { + return 0; + } + z = zint_mod_small_unsigned(d, dlen, p, p0i, R2); + z = modp_sub(z, Rx & -(d[dlen - 1] >> 30), p); + return z; +} + +/* + * Add y*s to x. 
x and y initially have length 'len' words; the new x + * has length 'len+1' words. 's' must fit on 31 bits. x[] and y[] must + * not overlap. + */ +static void +zint_add_mul_small(uint32_t *x, + const uint32_t *y, size_t len, uint32_t s) { + size_t u; + uint32_t cc; + + cc = 0; + for (u = 0; u < len; u ++) { + uint32_t xw, yw; + uint64_t z; + + xw = x[u]; + yw = y[u]; + z = (uint64_t)yw * (uint64_t)s + (uint64_t)xw + (uint64_t)cc; + x[u] = (uint32_t)z & 0x7FFFFFFF; + cc = (uint32_t)(z >> 31); + } + x[len] = cc; +} + +/* + * Normalize a modular integer around 0: if x > p/2, then x is replaced + * with x - p (signed encoding with two's complement); otherwise, x is + * untouched. The two integers x and p are encoded over the same length. + */ +static void +zint_norm_zero(uint32_t *x, const uint32_t *p, size_t len) { + size_t u; + uint32_t r, bb; + + /* + * Compare x with p/2. We use the shifted version of p, and p + * is odd, so we really compare with (p-1)/2; we want to perform + * the subtraction if and only if x > (p-1)/2. + */ + r = 0; + bb = 0; + u = len; + while (u -- > 0) { + uint32_t wx, wp, cc; + + /* + * Get the two words to compare in wx and wp (both over + * 31 bits exactly). + */ + wx = x[u]; + wp = (p[u] >> 1) | (bb << 30); + bb = p[u] & 1; + + /* + * We set cc to -1, 0 or 1, depending on whether wp is + * lower than, equal to, or greater than wx. + */ + cc = wp - wx; + cc = ((-cc) >> 31) | -(cc >> 31); + + /* + * If r != 0 then it is either 1 or -1, and we keep its + * value. Otherwise, if r = 0, then we replace it with cc. + */ + r |= cc & ((r & 1) - 1); + } + + /* + * At this point, r = -1, 0 or 1, depending on whether (p-1)/2 + * is lower than, equal to, or greater than x. We thus want to + * do the subtraction only if r = -1. + */ + zint_sub(x, p, len, r >> 31); +} + +/* + * Rebuild integers from their RNS representation. There are 'num' + * integers, and each consists in 'xlen' words. 
'xx' points at that + * first word of the first integer; subsequent integers are accessed + * by adding 'xstride' repeatedly. + * + * The words of an integer are the RNS representation of that integer, + * using the provided 'primes' are moduli. This function replaces + * each integer with its multi-word value (little-endian order). + * + * If "normalize_signed" is non-zero, then the returned value is + * normalized to the -m/2..m/2 interval (where m is the product of all + * small prime moduli); two's complement is used for negative values. + */ +static void +zint_rebuild_CRT(uint32_t *xx, size_t xlen, size_t xstride, + size_t num, const small_prime *primes, int normalize_signed, + uint32_t *tmp) { + size_t u; + uint32_t *x; + + tmp[0] = primes[0].p; + for (u = 1; u < xlen; u ++) { + /* + * At the entry of each loop iteration: + * - the first u words of each array have been + * reassembled; + * - the first u words of tmp[] contains the + * product of the prime moduli processed so far. + * + * We call 'q' the product of all previous primes. + */ + uint32_t p, p0i, s, R2; + size_t v; + + p = primes[u].p; + s = primes[u].s; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + + for (v = 0, x = xx; v < num; v ++, x += xstride) { + uint32_t xp, xq, xr; + /* + * xp = the integer x modulo the prime p for this + * iteration + * xq = (x mod q) mod p + */ + xp = x[u]; + xq = zint_mod_small_unsigned(x, u, p, p0i, R2); + + /* + * New value is (x mod q) + q * (s * (xp - xq) mod p) + */ + xr = modp_montymul(s, modp_sub(xp, xq, p), p, p0i); + zint_add_mul_small(x, tmp, u, xr); + } + + /* + * Update product of primes in tmp[]. + */ + tmp[u] = zint_mul_small(tmp, u, p); + } + + /* + * Normalize the reconstructed values around 0. + */ + if (normalize_signed) { + for (u = 0, x = xx; u < num; u ++, x += xstride) { + zint_norm_zero(x, tmp, xlen); + } + } +} + +/* + * Negate a big integer conditionally: value a is replaced with -a if + * and only if ctl = 1. 
Control value ctl must be 0 or 1. + */ +static void +zint_negate(uint32_t *a, size_t len, uint32_t ctl) { + size_t u; + uint32_t cc, m; + + /* + * If ctl = 1 then we flip the bits of a by XORing with + * 0x7FFFFFFF, and we add 1 to the value. If ctl = 0 then we XOR + * with 0 and add 0, which leaves the value unchanged. + */ + cc = ctl; + m = -ctl >> 1; + for (u = 0; u < len; u ++) { + uint32_t aw; + + aw = a[u]; + aw = (aw ^ m) + cc; + a[u] = aw & 0x7FFFFFFF; + cc = aw >> 31; + } +} + +/* + * Replace a with (a*xa+b*xb)/(2^31) and b with (a*ya+b*yb)/(2^31). + * The low bits are dropped (the caller should compute the coefficients + * such that these dropped bits are all zeros). If either or both + * yields a negative value, then the value is negated. + * + * Returned value is: + * 0 both values were positive + * 1 new a had to be negated + * 2 new b had to be negated + * 3 both new a and new b had to be negated + * + * Coefficients xa, xb, ya and yb may use the full signed 32-bit range. + */ +static uint32_t +zint_co_reduce(uint32_t *a, uint32_t *b, size_t len, + int64_t xa, int64_t xb, int64_t ya, int64_t yb) { + size_t u; + int64_t cca, ccb; + uint32_t nega, negb; + + cca = 0; + ccb = 0; + for (u = 0; u < len; u ++) { + uint32_t wa, wb; + uint64_t za, zb; + + wa = a[u]; + wb = b[u]; + za = wa * (uint64_t)xa + wb * (uint64_t)xb + (uint64_t)cca; + zb = wa * (uint64_t)ya + wb * (uint64_t)yb + (uint64_t)ccb; + if (u > 0) { + a[u - 1] = (uint32_t)za & 0x7FFFFFFF; + b[u - 1] = (uint32_t)zb & 0x7FFFFFFF; + } + cca = *(int64_t *)&za >> 31; + ccb = *(int64_t *)&zb >> 31; + } + a[len - 1] = (uint32_t)cca; + b[len - 1] = (uint32_t)ccb; + + nega = (uint32_t)((uint64_t)cca >> 63); + negb = (uint32_t)((uint64_t)ccb >> 63); + zint_negate(a, len, nega); + zint_negate(b, len, negb); + return nega | (negb << 1); +} + +/* + * Finish modular reduction. 
Rules on input parameters: + * + * if neg = 1, then -m <= a < 0 + * if neg = 0, then 0 <= a < 2*m + * + * If neg = 0, then the top word of a[] is allowed to use 32 bits. + * + * Modulus m must be odd. + */ +static void +zint_finish_mod(uint32_t *a, size_t len, const uint32_t *m, uint32_t neg) { + size_t u; + uint32_t cc, xm, ym; + + /* + * First pass: compare a (assumed nonnegative) with m. Note that + * if the top word uses 32 bits, subtracting m must yield a + * value less than 2^31 since a < 2*m. + */ + cc = 0; + for (u = 0; u < len; u ++) { + cc = (a[u] - m[u] - cc) >> 31; + } + + /* + * If neg = 1 then we must add m (regardless of cc) + * If neg = 0 and cc = 0 then we must subtract m + * If neg = 0 and cc = 1 then we must do nothing + * + * In the loop below, we conditionally subtract either m or -m + * from a. Word xm is a word of m (if neg = 0) or -m (if neg = 1); + * but if neg = 0 and cc = 1, then ym = 0 and it forces mw to 0. + */ + xm = -neg >> 1; + ym = -(neg | (1 - cc)); + cc = neg; + for (u = 0; u < len; u ++) { + uint32_t aw, mw; + + aw = a[u]; + mw = (m[u] ^ xm) & ym; + aw = aw - mw - cc; + a[u] = aw & 0x7FFFFFFF; + cc = aw >> 31; + } +} + +/* + * Replace a with (a*xa+b*xb)/(2^31) mod m, and b with + * (a*ya+b*yb)/(2^31) mod m. Modulus m must be odd; m0i = -1/m[0] mod 2^31. + */ +static void +zint_co_reduce_mod(uint32_t *a, uint32_t *b, const uint32_t *m, size_t len, + uint32_t m0i, int64_t xa, int64_t xb, int64_t ya, int64_t yb) { + size_t u; + int64_t cca, ccb; + uint32_t fa, fb; + + /* + * These are actually four combined Montgomery multiplications. 
+ */ + cca = 0; + ccb = 0; + fa = ((a[0] * (uint32_t)xa + b[0] * (uint32_t)xb) * m0i) & 0x7FFFFFFF; + fb = ((a[0] * (uint32_t)ya + b[0] * (uint32_t)yb) * m0i) & 0x7FFFFFFF; + for (u = 0; u < len; u ++) { + uint32_t wa, wb; + uint64_t za, zb; + + wa = a[u]; + wb = b[u]; + za = wa * (uint64_t)xa + wb * (uint64_t)xb + + m[u] * (uint64_t)fa + (uint64_t)cca; + zb = wa * (uint64_t)ya + wb * (uint64_t)yb + + m[u] * (uint64_t)fb + (uint64_t)ccb; + if (u > 0) { + a[u - 1] = (uint32_t)za & 0x7FFFFFFF; + b[u - 1] = (uint32_t)zb & 0x7FFFFFFF; + } + cca = *(int64_t *)&za >> 31; + ccb = *(int64_t *)&zb >> 31; + } + a[len - 1] = (uint32_t)cca; + b[len - 1] = (uint32_t)ccb; + + /* + * At this point: + * -m <= a < 2*m + * -m <= b < 2*m + * (this is a case of Montgomery reduction) + * The top words of 'a' and 'b' may have a 32-th bit set. + * We want to add or subtract the modulus, as required. + */ + zint_finish_mod(a, len, m, (uint32_t)((uint64_t)cca >> 63)); + zint_finish_mod(b, len, m, (uint32_t)((uint64_t)ccb >> 63)); +} + +/* + * Compute a GCD between two positive big integers x and y. The two + * integers must be odd. Returned value is 1 if the GCD is 1, 0 + * otherwise. When 1 is returned, arrays u and v are filled with values + * such that: + * 0 <= u <= y + * 0 <= v <= x + * x*u - y*v = 1 + * x[] and y[] are unmodified. Both input values must have the same + * encoded length. Temporary array must be large enough to accommodate 4 + * extra values of that length. Arrays u, v and tmp may not overlap with + * each other, or with either x or y. + */ +static int +zint_bezout(uint32_t *u, uint32_t *v, + const uint32_t *x, const uint32_t *y, + size_t len, uint32_t *tmp) { + /* + * Algorithm is an extended binary GCD. 
We maintain 6 values + * a, b, u0, u1, v0 and v1 with the following invariants: + * + * a = x*u0 - y*v0 + * b = x*u1 - y*v1 + * 0 <= a <= x + * 0 <= b <= y + * 0 <= u0 < y + * 0 <= v0 < x + * 0 <= u1 <= y + * 0 <= v1 < x + * + * Initial values are: + * + * a = x u0 = 1 v0 = 0 + * b = y u1 = y v1 = x-1 + * + * Each iteration reduces either a or b, and maintains the + * invariants. Algorithm stops when a = b, at which point their + * common value is GCD(a,b) and (u0,v0) (or (u1,v1)) contains + * the values (u,v) we want to return. + * + * The formal definition of the algorithm is a sequence of steps: + * + * - If a is even, then: + * a <- a/2 + * u0 <- u0/2 mod y + * v0 <- v0/2 mod x + * + * - Otherwise, if b is even, then: + * b <- b/2 + * u1 <- u1/2 mod y + * v1 <- v1/2 mod x + * + * - Otherwise, if a > b, then: + * a <- (a-b)/2 + * u0 <- (u0-u1)/2 mod y + * v0 <- (v0-v1)/2 mod x + * + * - Otherwise: + * b <- (b-a)/2 + * u1 <- (u1-u0)/2 mod y + * v1 <- (v1-v0)/2 mod y + * + * We can show that the operations above preserve the invariants: + * + * - If a is even, then u0 and v0 are either both even or both + * odd (since a = x*u0 - y*v0, and x and y are both odd). + * If u0 and v0 are both even, then (u0,v0) <- (u0/2,v0/2). + * Otherwise, (u0,v0) <- ((u0+y)/2,(v0+x)/2). Either way, + * the a = x*u0 - y*v0 invariant is preserved. + * + * - The same holds for the case where b is even. + * + * - If a and b are odd, and a > b, then: + * + * a-b = x*(u0-u1) - y*(v0-v1) + * + * In that situation, if u0 < u1, then x*(u0-u1) < 0, but + * a-b > 0; therefore, it must be that v0 < v1, and the + * first part of the update is: (u0,v0) <- (u0-u1+y,v0-v1+x), + * which preserves the invariants. Otherwise, if u0 > u1, + * then u0-u1 >= 1, thus x*(u0-u1) >= x. But a <= x and + * b >= 0, hence a-b <= x. It follows that, in that case, + * v0-v1 >= 0. The first part of the update is then: + * (u0,v0) <- (u0-u1,v0-v1), which again preserves the + * invariants. 
+ * + * Either way, once the subtraction is done, the new value of + * a, which is the difference of two odd values, is even, + * and the remaining of this step is a subcase of the + * first algorithm case (i.e. when a is even). + * + * - If a and b are odd, and b > a, then the a similar + * argument holds. + * + * The values a and b start at x and y, respectively. Since x + * and y are odd, their GCD is odd, and it is easily seen that + * all steps conserve the GCD (GCD(a-b,b) = GCD(a, b); + * GCD(a/2,b) = GCD(a,b) if GCD(a,b) is odd). Moreover, either a + * or b is reduced by at least one bit at each iteration, so + * the algorithm necessarily converges on the case a = b, at + * which point the common value is the GCD. + * + * In the algorithm expressed above, when a = b, the fourth case + * applies, and sets b = 0. Since a contains the GCD of x and y, + * which are both odd, a must be odd, and subsequent iterations + * (if any) will simply divide b by 2 repeatedly, which has no + * consequence. Thus, the algorithm can run for more iterations + * than necessary; the final GCD will be in a, and the (u,v) + * coefficients will be (u0,v0). + * + * + * The presentation above is bit-by-bit. It can be sped up by + * noticing that all decisions are taken based on the low bits + * and high bits of a and b. We can extract the two top words + * and low word of each of a and b, and compute reduction + * parameters pa, pb, qa and qb such that the new values for + * a and b are: + * a' = (a*pa + b*pb) / (2^31) + * b' = (a*qa + b*qb) / (2^31) + * the two divisions being exact. The coefficients are obtained + * just from the extracted words, and may be slightly off, requiring + * an optional correction: if a' < 0, then we replace pa with -pa + * and pb with -pb. Each such step will reduce the total length + * (sum of lengths of a and b) by at least 30 bits at each + * iteration. 
+ */ + uint32_t *u0, *u1, *v0, *v1, *a, *b; + uint32_t x0i, y0i; + uint32_t num, rc; + size_t j; + + if (len == 0) { + return 0; + } + + /* + * u0 and v0 are the u and v result buffers; the four other + * values (u1, v1, a and b) are taken from tmp[]. + */ + u0 = u; + v0 = v; + u1 = tmp; + v1 = u1 + len; + a = v1 + len; + b = a + len; + + /* + * We'll need the Montgomery reduction coefficients. + */ + x0i = modp_ninv31(x[0]); + y0i = modp_ninv31(y[0]); + + /* + * Initialize a, b, u0, u1, v0 and v1. + * a = x u0 = 1 v0 = 0 + * b = y u1 = y v1 = x-1 + * Note that x is odd, so computing x-1 is easy. + */ + memcpy(a, x, len * sizeof * x); + memcpy(b, y, len * sizeof * y); + u0[0] = 1; + memset(u0 + 1, 0, (len - 1) * sizeof * u0); + memset(v0, 0, len * sizeof * v0); + memcpy(u1, y, len * sizeof * u1); + memcpy(v1, x, len * sizeof * v1); + v1[0] --; + + /* + * Each input operand may be as large as 31*len bits, and we + * reduce the total length by at least 30 bits at each iteration. + */ + for (num = 62 * (uint32_t)len + 30; num >= 30; num -= 30) { + uint32_t c0, c1; + uint32_t a0, a1, b0, b1; + uint64_t a_hi, b_hi; + uint32_t a_lo, b_lo; + int64_t pa, pb, qa, qb; + int i; + uint32_t r; + + /* + * Extract the top words of a and b. If j is the highest + * index >= 1 such that a[j] != 0 or b[j] != 0, then we + * want (a[j] << 31) + a[j-1] and (b[j] << 31) + b[j-1]. + * If a and b are down to one word each, then we use + * a[0] and b[0]. + */ + c0 = (uint32_t) -1; + c1 = (uint32_t) -1; + a0 = 0; + a1 = 0; + b0 = 0; + b1 = 0; + j = len; + while (j -- > 0) { + uint32_t aw, bw; + + aw = a[j]; + bw = b[j]; + a0 ^= (a0 ^ aw) & c0; + a1 ^= (a1 ^ aw) & c1; + b0 ^= (b0 ^ bw) & c0; + b1 ^= (b1 ^ bw) & c1; + c1 = c0; + c0 &= (((aw | bw) + 0x7FFFFFFF) >> 31) - (uint32_t)1; + } + + /* + * If c1 = 0, then we grabbed two words for a and b. + * If c1 != 0 but c0 = 0, then we grabbed one word. 
It + * is not possible that c1 != 0 and c0 != 0, because that + * would mean that both integers are zero. + */ + a1 |= a0 & c1; + a0 &= ~c1; + b1 |= b0 & c1; + b0 &= ~c1; + a_hi = ((uint64_t)a0 << 31) + a1; + b_hi = ((uint64_t)b0 << 31) + b1; + a_lo = a[0]; + b_lo = b[0]; + + /* + * Compute reduction factors: + * + * a' = a*pa + b*pb + * b' = a*qa + b*qb + * + * such that a' and b' are both multiple of 2^31, but are + * only marginally larger than a and b. + */ + pa = 1; + pb = 0; + qa = 0; + qb = 1; + for (i = 0; i < 31; i ++) { + /* + * At each iteration: + * + * a <- (a-b)/2 if: a is odd, b is odd, a_hi > b_hi + * b <- (b-a)/2 if: a is odd, b is odd, a_hi <= b_hi + * a <- a/2 if: a is even + * b <- b/2 if: a is odd, b is even + * + * We multiply a_lo and b_lo by 2 at each + * iteration, thus a division by 2 really is a + * non-multiplication by 2. + */ + uint32_t rt, oa, ob, cAB, cBA, cA; + uint64_t rz; + + /* + * rt = 1 if a_hi > b_hi, 0 otherwise. + */ + rz = b_hi - a_hi; + rt = (uint32_t)((rz ^ ((a_hi ^ b_hi) + & (a_hi ^ rz))) >> 63); + + /* + * cAB = 1 if b must be subtracted from a + * cBA = 1 if a must be subtracted from b + * cA = 1 if a must be divided by 2 + * + * Rules: + * + * cAB and cBA cannot both be 1. + * If a is not divided by 2, b is. + */ + oa = (a_lo >> i) & 1; + ob = (b_lo >> i) & 1; + cAB = oa & ob & rt; + cBA = oa & ob & ~rt; + cA = cAB | (oa ^ 1); + + /* + * Conditional subtractions. + */ + a_lo -= b_lo & -cAB; + a_hi -= b_hi & -(uint64_t)cAB; + pa -= qa & -(int64_t)cAB; + pb -= qb & -(int64_t)cAB; + b_lo -= a_lo & -cBA; + b_hi -= a_hi & -(uint64_t)cBA; + qa -= pa & -(int64_t)cBA; + qb -= pb & -(int64_t)cBA; + + /* + * Shifting. 
+ */ + a_lo += a_lo & (cA - 1); + pa += pa & ((int64_t)cA - 1); + pb += pb & ((int64_t)cA - 1); + a_hi ^= (a_hi ^ (a_hi >> 1)) & -(uint64_t)cA; + b_lo += b_lo & -cA; + qa += qa & -(int64_t)cA; + qb += qb & -(int64_t)cA; + b_hi ^= (b_hi ^ (b_hi >> 1)) & ((uint64_t)cA - 1); + } + + /* + * Apply the computed parameters to our values. We + * may have to correct pa and pb depending on the + * returned value of zint_co_reduce() (when a and/or b + * had to be negated). + */ + r = zint_co_reduce(a, b, len, pa, pb, qa, qb); + pa -= (pa + pa) & -(int64_t)(r & 1); + pb -= (pb + pb) & -(int64_t)(r & 1); + qa -= (qa + qa) & -(int64_t)(r >> 1); + qb -= (qb + qb) & -(int64_t)(r >> 1); + zint_co_reduce_mod(u0, u1, y, len, y0i, pa, pb, qa, qb); + zint_co_reduce_mod(v0, v1, x, len, x0i, pa, pb, qa, qb); + } + + /* + * At that point, array a[] should contain the GCD, and the + * results (u,v) should already be set. We check that the GCD + * is indeed 1. We also check that the two operands x and y + * are odd. + */ + rc = a[0] ^ 1; + for (j = 1; j < len; j ++) { + rc |= a[j]; + } + return (int)((1 - ((rc | -rc) >> 31)) & x[0] & y[0]); +} + +/* + * Add k*y*2^sc to x. The result is assumed to fit in the array of + * size xlen (truncation is applied if necessary). + * Scale factor 'sc' is provided as sch and scl, such that: + * sch = sc / 31 + * scl = sc % 31 + * xlen MUST NOT be lower than ylen. + * + * x[] and y[] are both signed integers, using two's complement for + * negative values. + */ +static void +zint_add_scaled_mul_small(uint32_t *x, size_t xlen, + const uint32_t *y, size_t ylen, int32_t k, + uint32_t sch, uint32_t scl) { + size_t u; + uint32_t ysign, tw; + int32_t cc; + + if (ylen == 0) { + return; + } + + ysign = -(y[ylen - 1] >> 30) >> 1; + tw = 0; + cc = 0; + for (u = sch; u < xlen; u ++) { + size_t v; + uint32_t wy, wys, ccu; + uint64_t z; + + /* + * Get the next word of y (scaled). 
+ */ + v = u - sch; + if (v < ylen) { + wy = y[v]; + } else { + wy = ysign; + } + wys = ((wy << scl) & 0x7FFFFFFF) | tw; + tw = wy >> (31 - scl); + + /* + * The expression below does not overflow. + */ + z = (uint64_t)((int64_t)wys * (int64_t)k + (int64_t)x[u] + cc); + x[u] = (uint32_t)z & 0x7FFFFFFF; + + /* + * Right-shifting the signed value z would yield + * implementation-defined results (arithmetic shift is + * not guaranteed). However, we can cast to unsigned, + * and get the next carry as an unsigned word. We can + * then convert it back to signed by using the guaranteed + * fact that 'int32_t' uses two's complement with no + * trap representation or padding bit, and with a layout + * compatible with that of 'uint32_t'. + */ + ccu = (uint32_t)(z >> 31); + cc = *(int32_t *)&ccu; + } +} + +/* + * Subtract y*2^sc from x. The result is assumed to fit in the array of + * size xlen (truncation is applied if necessary). + * Scale factor 'sc' is provided as sch and scl, such that: + * sch = sc / 31 + * scl = sc % 31 + * xlen MUST NOT be lower than ylen. + * + * x[] and y[] are both signed integers, using two's complement for + * negative values. + */ +static void +zint_sub_scaled(uint32_t *x, size_t xlen, + const uint32_t *y, size_t ylen, uint32_t sch, uint32_t scl) { + size_t u; + uint32_t ysign, tw; + uint32_t cc; + + if (ylen == 0) { + return; + } + + ysign = -(y[ylen - 1] >> 30) >> 1; + tw = 0; + cc = 0; + for (u = sch; u < xlen; u ++) { + size_t v; + uint32_t w, wy, wys; + + /* + * Get the next word of y (scaled). + */ + v = u - sch; + if (v < ylen) { + wy = y[v]; + } else { + wy = ysign; + } + wys = ((wy << scl) & 0x7FFFFFFF) | tw; + tw = wy >> (31 - scl); + + w = x[u] - wys - cc; + x[u] = w & 0x7FFFFFFF; + cc = w >> 31; + } +} + +/* + * Convert a one-word signed big integer into a signed value. 
+ */ +static inline int32_t +zint_one_to_plain(const uint32_t *x) { + uint32_t w; + + w = x[0]; + w |= (w & 0x40000000) << 1; + return *(int32_t *)&w; +} + +/* ==================================================================== */ + +/* + * Convert a polynomial to floating-point values. + * + * Each coefficient has length flen words, and starts fstride words after + * the previous. + * + * IEEE-754 binary64 values can represent values in a finite range, + * roughly 2^(-1023) to 2^(+1023); thus, if coefficients are too large, + * they should be "trimmed" by pointing not to the lowest word of each, + * but upper. + */ +static void +poly_big_to_fp(fpr *d, const uint32_t *f, size_t flen, size_t fstride, + unsigned logn) { + size_t n, u; + + n = MKN(logn); + if (flen == 0) { + for (u = 0; u < n; u ++) { + d[u] = fpr_zero; + } + return; + } + for (u = 0; u < n; u ++, f += fstride) { + size_t v; + uint32_t neg, cc, xm; + fpr x, fsc; + + /* + * Get sign of the integer; if it is negative, then we + * will load its absolute value instead, and negate the + * result. + */ + neg = -(f[flen - 1] >> 30); + xm = neg >> 1; + cc = neg & 1; + x = fpr_zero; + fsc = fpr_one; + for (v = 0; v < flen; v ++, fsc = fpr_mul(fsc, fpr_ptwo31)) { + uint32_t w; + + w = (f[v] ^ xm) + cc; + cc = w >> 31; + w &= 0x7FFFFFFF; + w -= (w << 1) & neg; + x = fpr_add(x, fpr_mul(fpr_of(*(int32_t *)&w), fsc)); + } + d[u] = x; + } +} + +/* + * Convert a polynomial to small integers. Source values are supposed + * to be one-word integers, signed over 31 bits. Returned value is 0 + * if any of the coefficients exceeds the provided limit (in absolute + * value), or 1 on success. + * + * This is not constant-time; this is not a problem here, because on + * any failure, the NTRU-solving process will be deemed to have failed + * and the (f,g) polynomials will be discarded. 
+ */ +static int +poly_big_to_small(int8_t *d, const uint32_t *s, int lim, unsigned logn) { + size_t n, u; + + n = MKN(logn); + for (u = 0; u < n; u ++) { + int32_t z; + + z = zint_one_to_plain(s + u); + if (z < -lim || z > lim) { + return 0; + } + d[u] = (int8_t)z; + } + return 1; +} + +/* + * Subtract k*f from F, where F, f and k are polynomials modulo X^N+1. + * Coefficients of polynomial k are small integers (signed values in the + * -2^31..2^31 range) scaled by 2^sc. Value sc is provided as sch = sc / 31 + * and scl = sc % 31. + * + * This function implements the basic quadratic multiplication algorithm, + * which is efficient in space (no extra buffer needed) but slow at + * high degree. + */ +static void +poly_sub_scaled(uint32_t *F, size_t Flen, size_t Fstride, + const uint32_t *f, size_t flen, size_t fstride, + const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn) { + size_t n, u; + + n = MKN(logn); + for (u = 0; u < n; u ++) { + int32_t kf; + size_t v; + uint32_t *x; + const uint32_t *y; + + kf = -k[u]; + x = F + u * Fstride; + y = f; + for (v = 0; v < n; v ++) { + zint_add_scaled_mul_small( + x, Flen, y, flen, kf, sch, scl); + if (u + v == n - 1) { + x = F; + kf = -kf; + } else { + x += Fstride; + } + y += fstride; + } + } +} + +/* + * Subtract k*f from F. Coefficients of polynomial k are small integers + * (signed values in the -2^31..2^31 range) scaled by 2^sc. This function + * assumes that the degree is large, and integers relatively small. + * The value sc is provided as sch = sc / 31 and scl = sc % 31. 
+ */ +static void +poly_sub_scaled_ntt(uint32_t *F, size_t Flen, size_t Fstride, + const uint32_t *f, size_t flen, size_t fstride, + const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn, + uint32_t *tmp) { + uint32_t *gm, *igm, *fk, *t1, *x; + const uint32_t *y; + size_t n, u, tlen; + const small_prime *primes; + + n = MKN(logn); + tlen = flen + 1; + gm = tmp; + igm = gm + MKN(logn); + fk = igm + MKN(logn); + t1 = fk + n * tlen; + + primes = PRIMES; + + /* + * Compute k*f in fk[], in RNS notation. + */ + for (u = 0; u < tlen; u ++) { + uint32_t p, p0i, R2, Rx; + size_t v; + + p = primes[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + Rx = modp_Rx((unsigned)flen, p, p0i, R2); + modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i); + + for (v = 0; v < n; v ++) { + t1[v] = modp_set(k[v], p); + } + modp_NTT2(t1, gm, logn, p, p0i); + for (v = 0, y = f, x = fk + u; + v < n; v ++, y += fstride, x += tlen) { + *x = zint_mod_small_signed(y, flen, p, p0i, R2, Rx); + } + modp_NTT2_ext(fk + u, tlen, gm, logn, p, p0i); + for (v = 0, x = fk + u; v < n; v ++, x += tlen) { + *x = modp_montymul( + modp_montymul(t1[v], *x, p, p0i), R2, p, p0i); + } + modp_iNTT2_ext(fk + u, tlen, igm, logn, p, p0i); + } + + /* + * Rebuild k*f. + */ + zint_rebuild_CRT(fk, tlen, tlen, n, primes, 1, t1); + + /* + * Subtract k*f, scaled, from F. + */ + for (u = 0, x = F, y = fk; u < n; u ++, x += Fstride, y += tlen) { + zint_sub_scaled(x, Flen, y, tlen, sch, scl); + } +} + +/* ==================================================================== */ + + +#define RNG_CONTEXT inner_shake256_context + +/* + * Get a random 8-byte integer from a SHAKE-based RNG. This function + * ensures consistent interpretation of the SHAKE output so that + * the same values will be obtained over different platforms, in case + * a known seed is used. + */ +static inline uint64_t +get_rng_u64(inner_shake256_context *rng) { + /* + * We enforce little-endian representation. 
+ */ + + uint8_t tmp[8]; + + inner_shake256_extract(rng, tmp, sizeof tmp); + return (uint64_t)tmp[0] + | ((uint64_t)tmp[1] << 8) + | ((uint64_t)tmp[2] << 16) + | ((uint64_t)tmp[3] << 24) + | ((uint64_t)tmp[4] << 32) + | ((uint64_t)tmp[5] << 40) + | ((uint64_t)tmp[6] << 48) + | ((uint64_t)tmp[7] << 56); +} + +/* + * Table below incarnates a discrete Gaussian distribution: + * D(x) = exp(-(x^2)/(2*sigma^2)) + * where sigma = 1.17*sqrt(q/(2*N)), q = 12289, and N = 1024. + * Element 0 of the table is P(x = 0). + * For k > 0, element k is P(x >= k+1 | x > 0). + * Probabilities are scaled up by 2^63. + */ +static const uint64_t gauss_1024_12289[] = { + 1283868770400643928u, 6416574995475331444u, 4078260278032692663u, + 2353523259288686585u, 1227179971273316331u, 575931623374121527u, + 242543240509105209u, 91437049221049666u, 30799446349977173u, + 9255276791179340u, 2478152334826140u, 590642893610164u, + 125206034929641u, 23590435911403u, 3948334035941u, + 586753615614u, 77391054539u, 9056793210u, + 940121950u, 86539696u, 7062824u, + 510971u, 32764u, 1862u, + 94u, 4u, 0u +}; + +/* + * Generate a random value with a Gaussian distribution centered on 0. + * The RNG must be ready for extraction (already flipped). + * + * Distribution has standard deviation 1.17*sqrt(q/(2*N)). The + * precomputed table is for N = 1024. Since the sum of two independent + * values of standard deviation sigma has standard deviation + * sigma*sqrt(2), then we can just generate more values and add them + * together for lower dimensions. + */ +static int +mkgauss(RNG_CONTEXT *rng, unsigned logn) { + unsigned u, g; + int val; + + g = 1U << (10 - logn); + val = 0; + for (u = 0; u < g; u ++) { + /* + * Each iteration generates one value with the + * Gaussian distribution for N = 1024. + * + * We use two random 64-bit values. First value + * decides on whether the generated value is 0, and, + * if not, the sign of the value. Second random 64-bit + * word is used to generate the non-zero value. 
+ * + * For constant-time code we have to read the complete + * table. This has negligible cost, compared with the + * remainder of the keygen process (solving the NTRU + * equation). + */ + uint64_t r; + uint32_t f, v, k, neg; + + /* + * First value: + * - flag 'neg' is randomly selected to be 0 or 1. + * - flag 'f' is set to 1 if the generated value is zero, + * or set to 0 otherwise. + */ + r = get_rng_u64(rng); + neg = (uint32_t)(r >> 63); + r &= ~((uint64_t)1 << 63); + f = (uint32_t)((r - gauss_1024_12289[0]) >> 63); + + /* + * We produce a new random 63-bit integer r, and go over + * the array, starting at index 1. We store in v the + * index of the first array element which is not greater + * than r, unless the flag f was already 1. + */ + v = 0; + r = get_rng_u64(rng); + r &= ~((uint64_t)1 << 63); + for (k = 1; k < (uint32_t)((sizeof gauss_1024_12289) + / (sizeof gauss_1024_12289[0])); k ++) { + uint32_t t; + + t = (uint32_t)((r - gauss_1024_12289[k]) >> 63) ^ 1; + v |= k & -(t & (f ^ 1)); + f |= t; + } + + /* + * We apply the sign ('neg' flag). If the value is zero, + * the sign has no effect. + */ + v = (v ^ -neg) + neg; + + /* + * Generated value is added to val. + */ + val += *(int32_t *)&v; + } + return val; +} + +/* + * The MAX_BL_SMALL[] and MAX_BL_LARGE[] contain the lengths, in 31-bit + * words, of intermediate values in the computation: + * + * MAX_BL_SMALL[depth]: length for the input f and g at that depth + * MAX_BL_LARGE[depth]: length for the unreduced F and G at that depth + * + * Rules: + * + * - Within an array, values grow. + * + * - The 'SMALL' array must have an entry for maximum depth, corresponding + * to the size of values used in the binary GCD. There is no such value + * for the 'LARGE' array (the binary GCD yields already reduced + * coefficients). + * + * - MAX_BL_LARGE[depth] >= MAX_BL_SMALL[depth + 1]. + * + * - Values must be large enough to handle the common cases, with some + * margins. 
+ * + * - Values must not be "too large" either because we will convert some + * integers into floating-point values by considering the top 10 words, + * i.e. 310 bits; hence, for values of length more than 10 words, we + * should take care to have the length centered on the expected size. + * + * The following average lengths, in bits, have been measured on thousands + * of random keys (fg = max length of the absolute value of coefficients + * of f and g at that depth; FG = idem for the unreduced F and G; for the + * maximum depth, F and G are the output of binary GCD, multiplied by q; + * for each value, the average and standard deviation are provided). + * + * Binary case: + * depth: 10 fg: 6307.52 (24.48) FG: 6319.66 (24.51) + * depth: 9 fg: 3138.35 (12.25) FG: 9403.29 (27.55) + * depth: 8 fg: 1576.87 ( 7.49) FG: 4703.30 (14.77) + * depth: 7 fg: 794.17 ( 4.98) FG: 2361.84 ( 9.31) + * depth: 6 fg: 400.67 ( 3.10) FG: 1188.68 ( 6.04) + * depth: 5 fg: 202.22 ( 1.87) FG: 599.81 ( 3.87) + * depth: 4 fg: 101.62 ( 1.02) FG: 303.49 ( 2.38) + * depth: 3 fg: 50.37 ( 0.53) FG: 153.65 ( 1.39) + * depth: 2 fg: 24.07 ( 0.25) FG: 78.20 ( 0.73) + * depth: 1 fg: 10.99 ( 0.08) FG: 39.82 ( 0.41) + * depth: 0 fg: 4.00 ( 0.00) FG: 19.61 ( 0.49) + * + * Integers are actually represented either in binary notation over + * 31-bit words (signed, using two's complement), or in RNS, modulo + * many small primes. These small primes are close to, but slightly + * lower than, 2^31. Use of RNS loses less than two bits, even for + * the largest values. + * + * IMPORTANT: if these values are modified, then the temporary buffer + * sizes (FALCON_KEYGEN_TEMP_*, in inner.h) must be recomputed + * accordingly. 
+ */ + +static const size_t MAX_BL_SMALL[] = { + 1, 1, 2, 2, 4, 7, 14, 27, 53, 106, 209 +}; + +static const size_t MAX_BL_LARGE[] = { + 2, 2, 5, 7, 12, 21, 40, 78, 157, 308 +}; + +/* + * Average and standard deviation for the maximum size (in bits) of + * coefficients of (f,g), depending on depth. These values are used + * to compute bounds for Babai's reduction. + */ +static const struct { + int avg; + int std; +} BITLENGTH[] = { + { 4, 0 }, + { 11, 1 }, + { 24, 1 }, + { 50, 1 }, + { 102, 1 }, + { 202, 2 }, + { 401, 4 }, + { 794, 5 }, + { 1577, 8 }, + { 3138, 13 }, + { 6308, 25 } +}; + +/* + * Minimal recursion depth at which we rebuild intermediate values + * when reconstructing f and g. + */ +#define DEPTH_INT_FG 4 + +/* + * Compute squared norm of a short vector. Returned value is saturated to + * 2^32-1 if it is not lower than 2^31. + */ +static uint32_t +poly_small_sqnorm(const int8_t *f, unsigned logn) { + size_t n, u; + uint32_t s, ng; + + n = MKN(logn); + s = 0; + ng = 0; + for (u = 0; u < n; u ++) { + int32_t z; + + z = f[u]; + s += (uint32_t)(z * z); + ng |= s; + } + return s | -(ng >> 31); +} + +/* + * Align (upwards) the provided 'data' pointer with regards to 'base' + * so that the offset is a multiple of the size of 'fpr'. + */ +static fpr * +align_fpr(void *base, void *data) { + uint8_t *cb, *cd; + size_t k, km; + + cb = base; + cd = data; + k = (size_t)(cd - cb); + km = k % sizeof(fpr); + if (km) { + k += (sizeof(fpr)) - km; + } + return (fpr *)(cb + k); +} + +/* + * Align (upwards) the provided 'data' pointer with regards to 'base' + * so that the offset is a multiple of the size of 'uint32_t'. + */ +static uint32_t * +align_u32(void *base, void *data) { + uint8_t *cb, *cd; + size_t k, km; + + cb = base; + cd = data; + k = (size_t)(cd - cb); + km = k % sizeof(uint32_t); + if (km) { + k += (sizeof(uint32_t)) - km; + } + return (uint32_t *)(cb + k); +} + +/* + * Convert a small vector to floating point. 
+ */ +static void +poly_small_to_fp(fpr *x, const int8_t *f, unsigned logn) { + size_t n, u; + + n = MKN(logn); + for (u = 0; u < n; u ++) { + x[u] = fpr_of(f[u]); + } +} + +/* + * Input: f,g of degree N = 2^logn; 'depth' is used only to get their + * individual length. + * + * Output: f',g' of degree N/2, with the length for 'depth+1'. + * + * Values are in RNS; input and/or output may also be in NTT. + */ +static void +make_fg_step(uint32_t *data, unsigned logn, unsigned depth, + int in_ntt, int out_ntt) { + size_t n, hn, u; + size_t slen, tlen; + uint32_t *fd, *gd, *fs, *gs, *gm, *igm, *t1; + const small_prime *primes; + + n = (size_t)1 << logn; + hn = n >> 1; + slen = MAX_BL_SMALL[depth]; + tlen = MAX_BL_SMALL[depth + 1]; + primes = PRIMES; + + /* + * Prepare room for the result. + */ + fd = data; + gd = fd + hn * tlen; + fs = gd + hn * tlen; + gs = fs + n * slen; + gm = gs + n * slen; + igm = gm + n; + t1 = igm + n; + memmove(fs, data, 2 * n * slen * sizeof * data); + + /* + * First slen words: we use the input values directly, and apply + * inverse NTT as we go. 
+ */ + for (u = 0; u < slen; u ++) { + uint32_t p, p0i, R2; + size_t v; + uint32_t *x; + + p = primes[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i); + + for (v = 0, x = fs + u; v < n; v ++, x += slen) { + t1[v] = *x; + } + if (!in_ntt) { + modp_NTT2(t1, gm, logn, p, p0i); + } + for (v = 0, x = fd + u; v < hn; v ++, x += tlen) { + uint32_t w0, w1; + + w0 = t1[(v << 1) + 0]; + w1 = t1[(v << 1) + 1]; + *x = modp_montymul( + modp_montymul(w0, w1, p, p0i), R2, p, p0i); + } + if (in_ntt) { + modp_iNTT2_ext(fs + u, slen, igm, logn, p, p0i); + } + + for (v = 0, x = gs + u; v < n; v ++, x += slen) { + t1[v] = *x; + } + if (!in_ntt) { + modp_NTT2(t1, gm, logn, p, p0i); + } + for (v = 0, x = gd + u; v < hn; v ++, x += tlen) { + uint32_t w0, w1; + + w0 = t1[(v << 1) + 0]; + w1 = t1[(v << 1) + 1]; + *x = modp_montymul( + modp_montymul(w0, w1, p, p0i), R2, p, p0i); + } + if (in_ntt) { + modp_iNTT2_ext(gs + u, slen, igm, logn, p, p0i); + } + + if (!out_ntt) { + modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i); + modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i); + } + } + + /* + * Since the fs and gs words have been de-NTTized, we can use the + * CRT to rebuild the values. + */ + zint_rebuild_CRT(fs, slen, slen, n, primes, 1, gm); + zint_rebuild_CRT(gs, slen, slen, n, primes, 1, gm); + + /* + * Remaining words: use modular reductions to extract the values. 
+ */ + for (u = slen; u < tlen; u ++) { + uint32_t p, p0i, R2, Rx; + size_t v; + uint32_t *x; + + p = primes[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + Rx = modp_Rx((unsigned)slen, p, p0i, R2); + modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i); + for (v = 0, x = fs; v < n; v ++, x += slen) { + t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx); + } + modp_NTT2(t1, gm, logn, p, p0i); + for (v = 0, x = fd + u; v < hn; v ++, x += tlen) { + uint32_t w0, w1; + + w0 = t1[(v << 1) + 0]; + w1 = t1[(v << 1) + 1]; + *x = modp_montymul( + modp_montymul(w0, w1, p, p0i), R2, p, p0i); + } + for (v = 0, x = gs; v < n; v ++, x += slen) { + t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx); + } + modp_NTT2(t1, gm, logn, p, p0i); + for (v = 0, x = gd + u; v < hn; v ++, x += tlen) { + uint32_t w0, w1; + + w0 = t1[(v << 1) + 0]; + w1 = t1[(v << 1) + 1]; + *x = modp_montymul( + modp_montymul(w0, w1, p, p0i), R2, p, p0i); + } + + if (!out_ntt) { + modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i); + modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i); + } + } +} + +/* + * Compute f and g at a specific depth, in RNS notation. + * + * Returned values are stored in the data[] array, at slen words per integer. + * + * Conditions: + * 0 <= depth <= logn + * + * Space use in data[]: enough room for any two successive values (f', g', + * f and g). 
+ */ +static void +make_fg(uint32_t *data, const int8_t *f, const int8_t *g, + unsigned logn, unsigned depth, int out_ntt) { + size_t n, u; + uint32_t *ft, *gt, p0; + unsigned d; + const small_prime *primes; + + n = MKN(logn); + ft = data; + gt = ft + n; + primes = PRIMES; + p0 = primes[0].p; + for (u = 0; u < n; u ++) { + ft[u] = modp_set(f[u], p0); + gt[u] = modp_set(g[u], p0); + } + + if (depth == 0 && out_ntt) { + uint32_t *gm, *igm; + uint32_t p, p0i; + + p = primes[0].p; + p0i = modp_ninv31(p); + gm = gt + n; + igm = gm + MKN(logn); + modp_mkgm2(gm, igm, logn, primes[0].g, p, p0i); + modp_NTT2(ft, gm, logn, p, p0i); + modp_NTT2(gt, gm, logn, p, p0i); + return; + } + + if (depth == 0) { + return; + } + if (depth == 1) { + make_fg_step(data, logn, 0, 0, out_ntt); + return; + } + make_fg_step(data, logn, 0, 0, 1); + for (d = 1; d + 1 < depth; d ++) { + make_fg_step(data, logn - d, d, 1, 1); + } + make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt); +} + +/* + * Solving the NTRU equation, deepest level: compute the resultants of + * f and g with X^N+1, and use binary GCD. The F and G values are + * returned in tmp[]. + * + * Returned value: 1 on success, 0 on error. + */ +static int +solve_NTRU_deepest(unsigned logn_top, + const int8_t *f, const int8_t *g, uint32_t *tmp) { + size_t len; + uint32_t *Fp, *Gp, *fp, *gp, *t1, q; + const small_prime *primes; + + len = MAX_BL_SMALL[logn_top]; + primes = PRIMES; + + Fp = tmp; + Gp = Fp + len; + fp = Gp + len; + gp = fp + len; + t1 = gp + len; + + make_fg(fp, f, g, logn_top, logn_top, 0); + + /* + * We use the CRT to rebuild the resultants as big integers. + * There are two such big integers. The resultants are always + * nonnegative. + */ + zint_rebuild_CRT(fp, len, len, 2, primes, 0, t1); + + /* + * Apply the binary GCD. The zint_bezout() function works only + * if both inputs are odd. 
+ * + * We can test on the result and return 0 because that would + * imply failure of the NTRU solving equation, and the (f,g) + * values will be abandoned in that case. + */ + if (!zint_bezout(Gp, Fp, fp, gp, len, t1)) { + return 0; + } + + /* + * Multiply the two values by the target value q. Values must + * fit in the destination arrays. + * We can again test on the returned words: a non-zero output + * of zint_mul_small() means that we exceeded our array + * capacity, and that implies failure and rejection of (f,g). + */ + q = 12289; + if (zint_mul_small(Fp, len, q) != 0 + || zint_mul_small(Gp, len, q) != 0) { + return 0; + } + + return 1; +} + +/* + * Solving the NTRU equation, intermediate level. Upon entry, the F and G + * from the previous level should be in the tmp[] array. + * This function MAY be invoked for the top-level (in which case depth = 0). + * + * Returned value: 1 on success, 0 on error. + */ +static int +solve_NTRU_intermediate(unsigned logn_top, + const int8_t *f, const int8_t *g, unsigned depth, uint32_t *tmp) { + /* + * In this function, 'logn' is the log2 of the degree for + * this step. If N = 2^logn, then: + * - the F and G values already in fk->tmp (from the deeper + * levels) have degree N/2; + * - this function should return F and G of degree N. 
+ */ + unsigned logn; + size_t n, hn, slen, dlen, llen, rlen, FGlen, u; + uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1; + fpr *rt1, *rt2, *rt3, *rt4, *rt5; + int scale_fg, minbl_fg, maxbl_fg, maxbl_FG, scale_k; + uint32_t *x, *y; + int32_t *k; + const small_prime *primes; + + logn = logn_top - depth; + n = (size_t)1 << logn; + hn = n >> 1; + + /* + * slen = size for our input f and g; also size of the reduced + * F and G we return (degree N) + * + * dlen = size of the F and G obtained from the deeper level + * (degree N/2 or N/3) + * + * llen = size for intermediary F and G before reduction (degree N) + * + * We build our non-reduced F and G as two independent halves each, + * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1). + */ + slen = MAX_BL_SMALL[depth]; + dlen = MAX_BL_SMALL[depth + 1]; + llen = MAX_BL_LARGE[depth]; + primes = PRIMES; + + /* + * Fd and Gd are the F and G from the deeper level. + */ + Fd = tmp; + Gd = Fd + dlen * hn; + + /* + * Compute the input f and g for this level. Note that we get f + * and g in RNS + NTT representation. + */ + ft = Gd + dlen * hn; + make_fg(ft, f, g, logn_top, depth, 1); + + /* + * Move the newly computed f and g to make room for our candidate + * F and G (unreduced). + */ + Ft = tmp; + Gt = Ft + n * llen; + t1 = Gt + n * llen; + memmove(t1, ft, 2 * n * slen * sizeof * ft); + ft = t1; + gt = ft + slen * n; + t1 = gt + slen * n; + + /* + * Move Fd and Gd _after_ f and g. + */ + memmove(t1, Fd, 2 * hn * dlen * sizeof * Fd); + Fd = t1; + Gd = Fd + hn * dlen; + + /* + * We reduce Fd and Gd modulo all the small primes we will need, + * and store the values in Ft and Gt (only n/2 values in each). 
+ */ + for (u = 0; u < llen; u ++) { + uint32_t p, p0i, R2, Rx; + size_t v; + uint32_t *xs, *ys, *xd, *yd; + + p = primes[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + Rx = modp_Rx((unsigned)dlen, p, p0i, R2); + for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u; + v < hn; + v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) { + *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx); + *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx); + } + } + + /* + * We do not need Fd and Gd after that point. + */ + + /* + * Compute our F and G modulo sufficiently many small primes. + */ + for (u = 0; u < llen; u ++) { + uint32_t p, p0i, R2; + uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp; + size_t v; + + /* + * All computations are done modulo p. + */ + p = primes[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + + /* + * If we processed slen words, then f and g have been + * de-NTTized, and are in RNS; we can rebuild them. + */ + if (u == slen) { + zint_rebuild_CRT(ft, slen, slen, n, primes, 1, t1); + zint_rebuild_CRT(gt, slen, slen, n, primes, 1, t1); + } + + gm = t1; + igm = gm + n; + fx = igm + n; + gx = fx + n; + + modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i); + + if (u < slen) { + for (v = 0, x = ft + u, y = gt + u; + v < n; v ++, x += slen, y += slen) { + fx[v] = *x; + gx[v] = *y; + } + modp_iNTT2_ext(ft + u, slen, igm, logn, p, p0i); + modp_iNTT2_ext(gt + u, slen, igm, logn, p, p0i); + } else { + uint32_t Rx; + + Rx = modp_Rx((unsigned)slen, p, p0i, R2); + for (v = 0, x = ft, y = gt; + v < n; v ++, x += slen, y += slen) { + fx[v] = zint_mod_small_signed(x, slen, + p, p0i, R2, Rx); + gx[v] = zint_mod_small_signed(y, slen, + p, p0i, R2, Rx); + } + modp_NTT2(fx, gm, logn, p, p0i); + modp_NTT2(gx, gm, logn, p, p0i); + } + + /* + * Get F' and G' modulo p and in NTT representation + * (they have degree n/2). These values were computed in + * a previous step, and stored in Ft and Gt. 
+ */ + Fp = gx + n; + Gp = Fp + hn; + for (v = 0, x = Ft + u, y = Gt + u; + v < hn; v ++, x += llen, y += llen) { + Fp[v] = *x; + Gp[v] = *y; + } + modp_NTT2(Fp, gm, logn - 1, p, p0i); + modp_NTT2(Gp, gm, logn - 1, p, p0i); + + /* + * Compute our F and G modulo p. + * + * General case: + * + * we divide degree by d = 2 or 3 + * f'(x^d) = N(f)(x^d) = f * adj(f) + * g'(x^d) = N(g)(x^d) = g * adj(g) + * f'*G' - g'*F' = q + * F = F'(x^d) * adj(g) + * G = G'(x^d) * adj(f) + * + * We compute things in the NTT. We group roots of phi + * such that all roots x in a group share the same x^d. + * If the roots in a group are x_1, x_2... x_d, then: + * + * N(f)(x_1^d) = f(x_1)*f(x_2)*...*f(x_d) + * + * Thus, we have: + * + * G(x_1) = f(x_2)*f(x_3)*...*f(x_d)*G'(x_1^d) + * G(x_2) = f(x_1)*f(x_3)*...*f(x_d)*G'(x_1^d) + * ... + * G(x_d) = f(x_1)*f(x_2)*...*f(x_{d-1})*G'(x_1^d) + * + * In all cases, we can thus compute F and G in NTT + * representation by a few simple multiplications. + * Moreover, in our chosen NTT representation, roots + * from the same group are consecutive in RAM. + */ + for (v = 0, x = Ft + u, y = Gt + u; v < hn; + v ++, x += (llen << 1), y += (llen << 1)) { + uint32_t ftA, ftB, gtA, gtB; + uint32_t mFp, mGp; + + ftA = fx[(v << 1) + 0]; + ftB = fx[(v << 1) + 1]; + gtA = gx[(v << 1) + 0]; + gtB = gx[(v << 1) + 1]; + mFp = modp_montymul(Fp[v], R2, p, p0i); + mGp = modp_montymul(Gp[v], R2, p, p0i); + x[0] = modp_montymul(gtB, mFp, p, p0i); + x[llen] = modp_montymul(gtA, mFp, p, p0i); + y[0] = modp_montymul(ftB, mGp, p, p0i); + y[llen] = modp_montymul(ftA, mGp, p, p0i); + } + modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i); + modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i); + } + + /* + * Rebuild F and G with the CRT. + */ + zint_rebuild_CRT(Ft, llen, llen, n, primes, 1, t1); + zint_rebuild_CRT(Gt, llen, llen, n, primes, 1, t1); + + /* + * At that point, Ft, Gt, ft and gt are consecutive in RAM (in that + * order). 
+ */ + + /* + * Apply Babai reduction to bring back F and G to size slen. + * + * We use the FFT to compute successive approximations of the + * reduction coefficient. We first isolate the top bits of + * the coefficients of f and g, and convert them to floating + * point; with the FFT, we compute adj(f), adj(g), and + * 1/(f*adj(f)+g*adj(g)). + * + * Then, we repeatedly apply the following: + * + * - Get the top bits of the coefficients of F and G into + * floating point, and use the FFT to compute: + * (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) + * + * - Convert back that value into normal representation, and + * round it to the nearest integers, yielding a polynomial k. + * Proper scaling is applied to f, g, F and G so that the + * coefficients fit on 32 bits (signed). + * + * - Subtract k*f from F and k*g from G. + * + * Under normal conditions, this process reduces the size of F + * and G by some bits at each iteration. For constant-time + * operation, we do not want to measure the actual length of + * F and G; instead, we do the following: + * + * - f and g are converted to floating-point, with some scaling + * if necessary to keep values in the representable range. + * + * - For each iteration, we _assume_ a maximum size for F and G, + * and use the values at that size. If we overreach, then + * we get zeros, which is harmless: the resulting coefficients + * of k will be 0 and the value won't be reduced. + * + * - We conservatively assume that F and G will be reduced by + * at least 25 bits at each iteration. + * + * Even when reaching the bottom of the reduction, reduction + * coefficient will remain low. If it goes out-of-range, then + * something wrong occurred and the whole NTRU solving fails. + */ + + /* + * Memory layout: + * - We need to compute and keep adj(f), adj(g), and + * 1/(f*adj(f)+g*adj(g)) (sizes N, N and N/2 fp numbers, + * respectively). + * - At each iteration we need two extra fp buffer (N fp values), + * and produce a k (N 32-bit words). 
k will be shared with one + * of the fp buffers. + * - To compute k*f and k*g efficiently (with the NTT), we need + * some extra room; we reuse the space of the temporary buffers. + * + * Arrays of 'fpr' are obtained from the temporary array itself. + * We ensure that the base is at a properly aligned offset (the + * source array tmp[] is supposed to be already aligned). + */ + + rt3 = align_fpr(tmp, t1); + rt4 = rt3 + n; + rt5 = rt4 + n; + rt1 = rt5 + (n >> 1); + k = (int32_t *)align_u32(tmp, rt1); + rt2 = align_fpr(tmp, k + n); + if (rt2 < (rt1 + n)) { + rt2 = rt1 + n; + } + t1 = (uint32_t *)k + n; + + /* + * Get f and g into rt3 and rt4 as floating-point approximations. + * + * We need to "scale down" the floating-point representation of + * coefficients when they are too big. We want to keep the value + * below 2^310 or so. Thus, when values are larger than 10 words, + * we consider only the top 10 words. Array lengths have been + * computed so that average maximum length will fall in the + * middle or the upper half of these top 10 words. + */ + rlen = slen; + if (rlen > 10) { + rlen = 10; + } + poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn); + poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn); + + /* + * Values in rt3 and rt4 are downscaled by 2^(scale_fg). + */ + scale_fg = 31 * (int)(slen - rlen); + + /* + * Estimated boundaries for the maximum size (in bits) of the + * coefficients of (f,g). We use the measured average, and + * allow for a deviation of at most six times the standard + * deviation. + */ + minbl_fg = BITLENGTH[depth].avg - 6 * BITLENGTH[depth].std; + maxbl_fg = BITLENGTH[depth].avg + 6 * BITLENGTH[depth].std; + + /* + * Compute 1/(f*adj(f)+g*adj(g)) in rt5. We also keep adj(f) + * and adj(g) in rt3 and rt4, respectively. 
+ */ + PQCLEAN_FALCON1024_AVX2_FFT(rt3, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt4, logn); + PQCLEAN_FALCON1024_AVX2_poly_invnorm2_fft(rt5, rt3, rt4, logn); + PQCLEAN_FALCON1024_AVX2_poly_adj_fft(rt3, logn); + PQCLEAN_FALCON1024_AVX2_poly_adj_fft(rt4, logn); + + /* + * Reduce F and G repeatedly. + * + * The expected maximum bit length of coefficients of F and G + * is kept in maxbl_FG, with the corresponding word length in + * FGlen. + */ + FGlen = llen; + maxbl_FG = 31 * (int)llen; + + /* + * Each reduction operation computes the reduction polynomial + * "k". We need that polynomial to have coefficients that fit + * on 32-bit signed integers, with some scaling; thus, we use + * a descending sequence of scaling values, down to zero. + * + * The size of the coefficients of k is (roughly) the difference + * between the size of the coefficients of (F,G) and the size + * of the coefficients of (f,g). Thus, the maximum size of the + * coefficients of k is, at the start, maxbl_FG - minbl_fg; + * this is our starting scale value for k. + * + * We need to estimate the size of (F,G) during the execution of + * the algorithm; we are allowed some overestimation but not too + * much (poly_big_to_fp() uses a 310-bit window). Generally + * speaking, after applying a reduction with k scaled to + * scale_k, the size of (F,G) will be size(f,g) + scale_k + dd, + * where 'dd' is a few bits to account for the fact that the + * reduction is never perfect (intuitively, dd is on the order + * of sqrt(N), so at most 5 bits; we here allow for 10 extra + * bits). + * + * The size of (f,g) is not known exactly, but maxbl_fg is an + * upper bound. + */ + scale_k = maxbl_FG - minbl_fg; + + for (;;) { + int scale_FG, dc, new_maxbl_FG; + uint32_t scl, sch; + fpr pdc, pt; + + /* + * Convert current F and G into floating-point. We apply + * scaling if the current length is more than 10 words. 
+ */ + rlen = FGlen; + if (rlen > 10) { + rlen = 10; + } + scale_FG = 31 * (int)(FGlen - rlen); + poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn); + poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn); + + /* + * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) in rt2. + */ + PQCLEAN_FALCON1024_AVX2_FFT(rt1, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt2, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(rt1, rt3, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(rt2, rt4, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(rt2, rt1, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_autoadj_fft(rt2, rt5, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(rt2, logn); + + /* + * (f,g) are scaled by 'scale_fg', meaning that the + * numbers in rt3/rt4 should be multiplied by 2^(scale_fg) + * to have their true mathematical value. + * + * (F,G) are similarly scaled by 'scale_FG'. Therefore, + * the value we computed in rt2 is scaled by + * 'scale_FG-scale_fg'. + * + * We want that value to be scaled by 'scale_k', hence we + * apply a corrective scaling. After scaling, the values + * should fit in -2^31-1..+2^31-1. + */ + dc = scale_k - scale_FG + scale_fg; + + /* + * We will need to multiply values by 2^(-dc). The value + * 'dc' is not secret, so we can compute 2^(-dc) with a + * non-constant-time process. + * (We could use ldexp(), but we prefer to avoid any + * dependency on libm. When using FP emulation, we could + * use our fpr_ldexp(), which is constant-time.) + */ + if (dc < 0) { + dc = -dc; + pt = fpr_two; + } else { + pt = fpr_onehalf; + } + pdc = fpr_one; + while (dc != 0) { + if ((dc & 1) != 0) { + pdc = fpr_mul(pdc, pt); + } + dc >>= 1; + pt = fpr_sqr(pt); + } + + for (u = 0; u < n; u ++) { + fpr xv; + + xv = fpr_mul(rt2[u], pdc); + + /* + * Sometimes the values can be out-of-bounds if + * the algorithm fails; we must not call + * fpr_rint() (and cast to int32_t) if the value + * is not in-bounds. 
Note that the test does not + * break constant-time discipline, since any + * failure here implies that we discard the current + * secret key (f,g). + */ + if (!fpr_lt(fpr_mtwo31m1, xv) + || !fpr_lt(xv, fpr_ptwo31m1)) { + return 0; + } + k[u] = (int32_t)fpr_rint(xv); + } + + /* + * Values in k[] are integers. They really are scaled + * down by maxbl_FG - minbl_fg bits. + * + * If we are at low depth, then we use the NTT to + * compute k*f and k*g. + */ + sch = (uint32_t)(scale_k / 31); + scl = (uint32_t)(scale_k % 31); + if (depth <= DEPTH_INT_FG) { + poly_sub_scaled_ntt(Ft, FGlen, llen, ft, slen, slen, + k, sch, scl, logn, t1); + poly_sub_scaled_ntt(Gt, FGlen, llen, gt, slen, slen, + k, sch, scl, logn, t1); + } else { + poly_sub_scaled(Ft, FGlen, llen, ft, slen, slen, + k, sch, scl, logn); + poly_sub_scaled(Gt, FGlen, llen, gt, slen, slen, + k, sch, scl, logn); + } + + /* + * We compute the new maximum size of (F,G), assuming that + * (f,g) has _maximal_ length (i.e. that reduction is + * "late" instead of "early". We also adjust FGlen + * accordingly. + */ + new_maxbl_FG = scale_k + maxbl_fg + 10; + if (new_maxbl_FG < maxbl_FG) { + maxbl_FG = new_maxbl_FG; + if ((int)FGlen * 31 >= maxbl_FG + 31) { + FGlen --; + } + } + + /* + * We suppose that scaling down achieves a reduction by + * at least 25 bits per iteration. We stop when we have + * done the loop with an unscaled k. + */ + if (scale_k <= 0) { + break; + } + scale_k -= 25; + if (scale_k < 0) { + scale_k = 0; + } + } + + /* + * If (F,G) length was lowered below 'slen', then we must take + * care to re-extend the sign. + */ + if (FGlen < slen) { + for (u = 0; u < n; u ++, Ft += llen, Gt += llen) { + size_t v; + uint32_t sw; + + sw = -(Ft[FGlen - 1] >> 30) >> 1; + for (v = FGlen; v < slen; v ++) { + Ft[v] = sw; + } + sw = -(Gt[FGlen - 1] >> 30) >> 1; + for (v = FGlen; v < slen; v ++) { + Gt[v] = sw; + } + } + } + + /* + * Compress encoding of all values to 'slen' words (this is the + * expected output format). 
+ */ + for (u = 0, x = tmp, y = tmp; + u < (n << 1); u ++, x += slen, y += llen) { + memmove(x, y, slen * sizeof * y); + } + return 1; +} + +/* + * Solving the NTRU equation, binary case, depth = 1. Upon entry, the + * F and G from the previous level should be in the tmp[] array. + * + * Returned value: 1 on success, 0 on error. + */ +static int +solve_NTRU_binary_depth1(unsigned logn_top, + const int8_t *f, const int8_t *g, uint32_t *tmp) { + /* + * The first half of this function is a copy of the corresponding + * part in solve_NTRU_intermediate(), for the reconstruction of + * the unreduced F and G. The second half (Babai reduction) is + * done differently, because the unreduced F and G fit in 53 bits + * of precision, allowing a much simpler process with lower RAM + * usage. + */ + unsigned depth, logn; + size_t n_top, n, hn, slen, dlen, llen, u; + uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1; + fpr *rt1, *rt2, *rt3, *rt4, *rt5, *rt6; + uint32_t *x, *y; + + depth = 1; + n_top = (size_t)1 << logn_top; + logn = logn_top - depth; + n = (size_t)1 << logn; + hn = n >> 1; + + /* + * Equations are: + * + * f' = f0^2 - X^2*f1^2 + * g' = g0^2 - X^2*g1^2 + * F' and G' are a solution to f'G' - g'F' = q (from deeper levels) + * F = F'*(g0 - X*g1) + * G = G'*(f0 - X*f1) + * + * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to + * degree N/2 (their odd-indexed coefficients are all zero). + */ + + /* + * slen = size for our input f and g; also size of the reduced + * F and G we return (degree N) + * + * dlen = size of the F and G obtained from the deeper level + * (degree N/2) + * + * llen = size for intermediary F and G before reduction (degree N) + * + * We build our non-reduced F and G as two independent halves each, + * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1). + */ + slen = MAX_BL_SMALL[depth]; + dlen = MAX_BL_SMALL[depth + 1]; + llen = MAX_BL_LARGE[depth]; + + /* + * Fd and Gd are the F and G from the deeper level. 
Ft and Gt + * are the destination arrays for the unreduced F and G. + */ + Fd = tmp; + Gd = Fd + dlen * hn; + Ft = Gd + dlen * hn; + Gt = Ft + llen * n; + + /* + * We reduce Fd and Gd modulo all the small primes we will need, + * and store the values in Ft and Gt. + */ + for (u = 0; u < llen; u ++) { + uint32_t p, p0i, R2, Rx; + size_t v; + uint32_t *xs, *ys, *xd, *yd; + + p = PRIMES[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + Rx = modp_Rx((unsigned)dlen, p, p0i, R2); + for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u; + v < hn; + v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) { + *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx); + *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx); + } + } + + /* + * Now Fd and Gd are not needed anymore; we can squeeze them out. + */ + memmove(tmp, Ft, llen * n * sizeof(uint32_t)); + Ft = tmp; + memmove(Ft + llen * n, Gt, llen * n * sizeof(uint32_t)); + Gt = Ft + llen * n; + ft = Gt + llen * n; + gt = ft + slen * n; + + t1 = gt + slen * n; + + /* + * Compute our F and G modulo sufficiently many small primes. + */ + for (u = 0; u < llen; u ++) { + uint32_t p, p0i, R2; + uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp; + unsigned e; + size_t v; + + /* + * All computations are done modulo p. + */ + p = PRIMES[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + + /* + * We recompute things from the source f and g, of full + * degree. However, we will need only the n first elements + * of the inverse NTT table (igm); the call to modp_mkgm() + * below will fill n_top elements in igm[] (thus overflowing + * into fx[]) but later code will overwrite these extra + * elements. + */ + gm = t1; + igm = gm + n_top; + fx = igm + n; + gx = fx + n_top; + modp_mkgm2(gm, igm, logn_top, PRIMES[u].g, p, p0i); + + /* + * Set ft and gt to f and g modulo p, respectively. + */ + for (v = 0; v < n_top; v ++) { + fx[v] = modp_set(f[v], p); + gx[v] = modp_set(g[v], p); + } + + /* + * Convert to NTT and compute our f and g. 
+ */ + modp_NTT2(fx, gm, logn_top, p, p0i); + modp_NTT2(gx, gm, logn_top, p, p0i); + for (e = logn_top; e > logn; e --) { + modp_poly_rec_res(fx, e, p, p0i, R2); + modp_poly_rec_res(gx, e, p, p0i, R2); + } + + /* + * From that point onward, we only need tables for + * degree n, so we can save some space. + */ + if (depth > 0) { /* always true */ + memmove(gm + n, igm, n * sizeof * igm); + igm = gm + n; + memmove(igm + n, fx, n * sizeof * ft); + fx = igm + n; + memmove(fx + n, gx, n * sizeof * gt); + gx = fx + n; + } + + /* + * Get F' and G' modulo p and in NTT representation + * (they have degree n/2). These values were computed + * in a previous step, and stored in Ft and Gt. + */ + Fp = gx + n; + Gp = Fp + hn; + for (v = 0, x = Ft + u, y = Gt + u; + v < hn; v ++, x += llen, y += llen) { + Fp[v] = *x; + Gp[v] = *y; + } + modp_NTT2(Fp, gm, logn - 1, p, p0i); + modp_NTT2(Gp, gm, logn - 1, p, p0i); + + /* + * Compute our F and G modulo p. + * + * Equations are: + * + * f'(x^2) = N(f)(x^2) = f * adj(f) + * g'(x^2) = N(g)(x^2) = g * adj(g) + * + * f'*G' - g'*F' = q + * + * F = F'(x^2) * adj(g) + * G = G'(x^2) * adj(f) + * + * The NTT representation of f is f(w) for all w which + * are roots of phi. In the binary case, as well as in + * the ternary case for all depth except the deepest, + * these roots can be grouped in pairs (w,-w), and we + * then have: + * + * f(w) = adj(f)(-w) + * f(-w) = adj(f)(w) + * + * and w^2 is then a root for phi at the half-degree. + * + * At the deepest level in the ternary case, this still + * holds, in the following sense: the roots of x^2-x+1 + * are (w,-w^2) (for w^3 = -1, and w != -1), and we + * have: + * + * f(w) = adj(f)(-w^2) + * f(-w^2) = adj(f)(w) + * + * In all case, we can thus compute F and G in NTT + * representation by a few simple multiplications. + * Moreover, the two roots for each pair are consecutive + * in our bit-reversal encoding. 
+ */ + for (v = 0, x = Ft + u, y = Gt + u; + v < hn; v ++, x += (llen << 1), y += (llen << 1)) { + uint32_t ftA, ftB, gtA, gtB; + uint32_t mFp, mGp; + + ftA = fx[(v << 1) + 0]; + ftB = fx[(v << 1) + 1]; + gtA = gx[(v << 1) + 0]; + gtB = gx[(v << 1) + 1]; + mFp = modp_montymul(Fp[v], R2, p, p0i); + mGp = modp_montymul(Gp[v], R2, p, p0i); + x[0] = modp_montymul(gtB, mFp, p, p0i); + x[llen] = modp_montymul(gtA, mFp, p, p0i); + y[0] = modp_montymul(ftB, mGp, p, p0i); + y[llen] = modp_montymul(ftA, mGp, p, p0i); + } + modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i); + modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i); + + /* + * Also save ft and gt (only up to size slen). + */ + if (u < slen) { + modp_iNTT2(fx, igm, logn, p, p0i); + modp_iNTT2(gx, igm, logn, p, p0i); + for (v = 0, x = ft + u, y = gt + u; + v < n; v ++, x += slen, y += slen) { + *x = fx[v]; + *y = gx[v]; + } + } + } + + /* + * Rebuild f, g, F and G with the CRT. Note that the elements of F + * and G are consecutive, and thus can be rebuilt in a single + * loop; similarly, the elements of f and g are consecutive. + */ + zint_rebuild_CRT(Ft, llen, llen, n << 1, PRIMES, 1, t1); + zint_rebuild_CRT(ft, slen, slen, n << 1, PRIMES, 1, t1); + + /* + * Here starts the Babai reduction, specialized for depth = 1. + * + * Candidates F and G (from Ft and Gt), and base f and g (ft and gt), + * are converted to floating point. There is no scaling, and a + * single pass is sufficient. + */ + + /* + * Convert F and G into floating point (rt1 and rt2). + */ + rt1 = align_fpr(tmp, gt + slen * n); + rt2 = rt1 + n; + poly_big_to_fp(rt1, Ft, llen, llen, logn); + poly_big_to_fp(rt2, Gt, llen, llen, logn); + + /* + * Integer representation of F and G is no longer needed, we + * can remove it. 
+ */ + memmove(tmp, ft, 2 * slen * n * sizeof * ft); + ft = tmp; + gt = ft + slen * n; + rt3 = align_fpr(tmp, gt + slen * n); + memmove(rt3, rt1, 2 * n * sizeof * rt1); + rt1 = rt3; + rt2 = rt1 + n; + rt3 = rt2 + n; + rt4 = rt3 + n; + + /* + * Convert f and g into floating point (rt3 and rt4). + */ + poly_big_to_fp(rt3, ft, slen, slen, logn); + poly_big_to_fp(rt4, gt, slen, slen, logn); + + /* + * Remove unneeded ft and gt. + */ + memmove(tmp, rt1, 4 * n * sizeof * rt1); + rt1 = (fpr *)tmp; + rt2 = rt1 + n; + rt3 = rt2 + n; + rt4 = rt3 + n; + + /* + * We now have: + * rt1 = F + * rt2 = G + * rt3 = f + * rt4 = g + * in that order in RAM. We convert all of them to FFT. + */ + PQCLEAN_FALCON1024_AVX2_FFT(rt1, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt2, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt3, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt4, logn); + + /* + * Compute: + * rt5 = F*adj(f) + G*adj(g) + * rt6 = 1 / (f*adj(f) + g*adj(g)) + * (Note that rt6 is half-length.) + */ + rt5 = rt4 + n; + rt6 = rt5 + n; + PQCLEAN_FALCON1024_AVX2_poly_add_muladj_fft(rt5, rt1, rt2, rt3, rt4, logn); + PQCLEAN_FALCON1024_AVX2_poly_invnorm2_fft(rt6, rt3, rt4, logn); + + /* + * Compute: + * rt5 = (F*adj(f)+G*adj(g)) / (f*adj(f)+g*adj(g)) + */ + PQCLEAN_FALCON1024_AVX2_poly_mul_autoadj_fft(rt5, rt6, logn); + + /* + * Compute k as the rounded version of rt5. Check that none of + * the values is larger than 2^63-1 (in absolute value) + * because that would make the fpr_rint() do something undefined; + * note that any out-of-bounds value here implies a failure and + * (f,g) will be discarded, so we can make a simple test. + */ + PQCLEAN_FALCON1024_AVX2_iFFT(rt5, logn); + for (u = 0; u < n; u ++) { + fpr z; + + z = rt5[u]; + if (!fpr_lt(z, fpr_ptwo63m1) || !fpr_lt(fpr_mtwo63m1, z)) { + return 0; + } + rt5[u] = fpr_of(fpr_rint(z)); + } + PQCLEAN_FALCON1024_AVX2_FFT(rt5, logn); + + /* + * Subtract k*f from F, and k*g from G. 
+ */ + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(rt3, rt5, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(rt4, rt5, logn); + PQCLEAN_FALCON1024_AVX2_poly_sub(rt1, rt3, logn); + PQCLEAN_FALCON1024_AVX2_poly_sub(rt2, rt4, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(rt1, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(rt2, logn); + + /* + * Convert back F and G to integers, and return. + */ + Ft = tmp; + Gt = Ft + n; + rt3 = align_fpr(tmp, Gt + n); + memmove(rt3, rt1, 2 * n * sizeof * rt1); + rt1 = rt3; + rt2 = rt1 + n; + for (u = 0; u < n; u ++) { + Ft[u] = (uint32_t)fpr_rint(rt1[u]); + Gt[u] = (uint32_t)fpr_rint(rt2[u]); + } + + return 1; +} + +/* + * Solving the NTRU equation, top level. Upon entry, the F and G + * from the previous level should be in the tmp[] array. + * + * Returned value: 1 on success, 0 on error. + */ +static int +solve_NTRU_binary_depth0(unsigned logn, + const int8_t *f, const int8_t *g, uint32_t *tmp) { + size_t n, hn, u; + uint32_t p, p0i, R2; + uint32_t *Fp, *Gp, *t1, *t2, *t3, *t4, *t5; + uint32_t *gm, *igm, *ft, *gt; + fpr *rt2, *rt3; + + n = (size_t)1 << logn; + hn = n >> 1; + + /* + * Equations are: + * + * f' = f0^2 - X^2*f1^2 + * g' = g0^2 - X^2*g1^2 + * F' and G' are a solution to f'G' - g'F' = q (from deeper levels) + * F = F'*(g0 - X*g1) + * G = G'*(f0 - X*f1) + * + * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to + * degree N/2 (their odd-indexed coefficients are all zero). + * + * Everything should fit in 31-bit integers, hence we can just use + * the first small prime p = 2147473409. + */ + p = PRIMES[0].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + + Fp = tmp; + Gp = Fp + hn; + ft = Gp + hn; + gt = ft + n; + gm = gt + n; + igm = gm + n; + + modp_mkgm2(gm, igm, logn, PRIMES[0].g, p, p0i); + + /* + * Convert F' and G' in NTT representation. 
+ */ + for (u = 0; u < hn; u ++) { + Fp[u] = modp_set(zint_one_to_plain(Fp + u), p); + Gp[u] = modp_set(zint_one_to_plain(Gp + u), p); + } + modp_NTT2(Fp, gm, logn - 1, p, p0i); + modp_NTT2(Gp, gm, logn - 1, p, p0i); + + /* + * Load f and g and convert them to NTT representation. + */ + for (u = 0; u < n; u ++) { + ft[u] = modp_set(f[u], p); + gt[u] = modp_set(g[u], p); + } + modp_NTT2(ft, gm, logn, p, p0i); + modp_NTT2(gt, gm, logn, p, p0i); + + /* + * Build the unreduced F,G in ft and gt. + */ + for (u = 0; u < n; u += 2) { + uint32_t ftA, ftB, gtA, gtB; + uint32_t mFp, mGp; + + ftA = ft[u + 0]; + ftB = ft[u + 1]; + gtA = gt[u + 0]; + gtB = gt[u + 1]; + mFp = modp_montymul(Fp[u >> 1], R2, p, p0i); + mGp = modp_montymul(Gp[u >> 1], R2, p, p0i); + ft[u + 0] = modp_montymul(gtB, mFp, p, p0i); + ft[u + 1] = modp_montymul(gtA, mFp, p, p0i); + gt[u + 0] = modp_montymul(ftB, mGp, p, p0i); + gt[u + 1] = modp_montymul(ftA, mGp, p, p0i); + } + modp_iNTT2(ft, igm, logn, p, p0i); + modp_iNTT2(gt, igm, logn, p, p0i); + + Gp = Fp + n; + t1 = Gp + n; + memmove(Fp, ft, 2 * n * sizeof * ft); + + /* + * We now need to apply the Babai reduction. At that point, + * we have F and G in two n-word arrays. + * + * We can compute F*adj(f)+G*adj(g) and f*adj(f)+g*adj(g) + * modulo p, using the NTT. We still move memory around in + * order to save RAM. + */ + t2 = t1 + n; + t3 = t2 + n; + t4 = t3 + n; + t5 = t4 + n; + + /* + * Compute the NTT tables in t1 and t2. We do not keep t2 + * (we'll recompute it later on). + */ + modp_mkgm2(t1, t2, logn, PRIMES[0].g, p, p0i); + + /* + * Convert F and G to NTT. + */ + modp_NTT2(Fp, t1, logn, p, p0i); + modp_NTT2(Gp, t1, logn, p, p0i); + + /* + * Load f and adj(f) in t4 and t5, and convert them to NTT + * representation. 
+ */ + t4[0] = t5[0] = modp_set(f[0], p); + for (u = 1; u < n; u ++) { + t4[u] = modp_set(f[u], p); + t5[n - u] = modp_set(-f[u], p); + } + modp_NTT2(t4, t1, logn, p, p0i); + modp_NTT2(t5, t1, logn, p, p0i); + + /* + * Compute F*adj(f) in t2, and f*adj(f) in t3. + */ + for (u = 0; u < n; u ++) { + uint32_t w; + + w = modp_montymul(t5[u], R2, p, p0i); + t2[u] = modp_montymul(w, Fp[u], p, p0i); + t3[u] = modp_montymul(w, t4[u], p, p0i); + } + + /* + * Load g and adj(g) in t4 and t5, and convert them to NTT + * representation. + */ + t4[0] = t5[0] = modp_set(g[0], p); + for (u = 1; u < n; u ++) { + t4[u] = modp_set(g[u], p); + t5[n - u] = modp_set(-g[u], p); + } + modp_NTT2(t4, t1, logn, p, p0i); + modp_NTT2(t5, t1, logn, p, p0i); + + /* + * Add G*adj(g) to t2, and g*adj(g) to t3. + */ + for (u = 0; u < n; u ++) { + uint32_t w; + + w = modp_montymul(t5[u], R2, p, p0i); + t2[u] = modp_add(t2[u], + modp_montymul(w, Gp[u], p, p0i), p); + t3[u] = modp_add(t3[u], + modp_montymul(w, t4[u], p, p0i), p); + } + + /* + * Convert back t2 and t3 to normal representation (normalized + * around 0), and then + * move them to t1 and t2. We first need to recompute the + * inverse table for NTT. + */ + modp_mkgm2(t1, t4, logn, PRIMES[0].g, p, p0i); + modp_iNTT2(t2, t4, logn, p, p0i); + modp_iNTT2(t3, t4, logn, p, p0i); + for (u = 0; u < n; u ++) { + t1[u] = (uint32_t)modp_norm(t2[u], p); + t2[u] = (uint32_t)modp_norm(t3[u], p); + } + + /* + * At that point, array contents are: + * + * F (NTT representation) (Fp) + * G (NTT representation) (Gp) + * F*adj(f)+G*adj(g) (t1) + * f*adj(f)+g*adj(g) (t2) + * + * We want to divide t1 by t2. The result is not integral; it + * must be rounded. We thus need to use the FFT. + */ + + /* + * Get f*adj(f)+g*adj(g) in FFT representation. Since this + * polynomial is auto-adjoint, all its coordinates in FFT + * representation are actually real, so we can truncate off + * the imaginary parts. 
+ */ + rt3 = align_fpr(tmp, t3); + for (u = 0; u < n; u ++) { + rt3[u] = fpr_of(((int32_t *)t2)[u]); + } + PQCLEAN_FALCON1024_AVX2_FFT(rt3, logn); + rt2 = align_fpr(tmp, t2); + memmove(rt2, rt3, hn * sizeof * rt3); + + /* + * Convert F*adj(f)+G*adj(g) in FFT representation. + */ + rt3 = rt2 + hn; + for (u = 0; u < n; u ++) { + rt3[u] = fpr_of(((int32_t *)t1)[u]); + } + PQCLEAN_FALCON1024_AVX2_FFT(rt3, logn); + + /* + * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) and get + * its rounded normal representation in t1. + */ + PQCLEAN_FALCON1024_AVX2_poly_div_autoadj_fft(rt3, rt2, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(rt3, logn); + for (u = 0; u < n; u ++) { + t1[u] = modp_set((int32_t)fpr_rint(rt3[u]), p); + } + + /* + * RAM contents are now: + * + * F (NTT representation) (Fp) + * G (NTT representation) (Gp) + * k (t1) + * + * We want to compute F-k*f, and G-k*g. + */ + t2 = t1 + n; + t3 = t2 + n; + t4 = t3 + n; + t5 = t4 + n; + modp_mkgm2(t2, t3, logn, PRIMES[0].g, p, p0i); + for (u = 0; u < n; u ++) { + t4[u] = modp_set(f[u], p); + t5[u] = modp_set(g[u], p); + } + modp_NTT2(t1, t2, logn, p, p0i); + modp_NTT2(t4, t2, logn, p, p0i); + modp_NTT2(t5, t2, logn, p, p0i); + for (u = 0; u < n; u ++) { + uint32_t kw; + + kw = modp_montymul(t1[u], R2, p, p0i); + Fp[u] = modp_sub(Fp[u], + modp_montymul(kw, t4[u], p, p0i), p); + Gp[u] = modp_sub(Gp[u], + modp_montymul(kw, t5[u], p, p0i), p); + } + modp_iNTT2(Fp, t3, logn, p, p0i); + modp_iNTT2(Gp, t3, logn, p, p0i); + for (u = 0; u < n; u ++) { + Fp[u] = (uint32_t)modp_norm(Fp[u], p); + Gp[u] = (uint32_t)modp_norm(Gp[u], p); + } + + return 1; +} + +/* + * Solve the NTRU equation. Returned value is 1 on success, 0 on error. + * G can be NULL, in which case that value is computed but not returned. + * If any of the coefficients of F and G exceeds lim (in absolute value), + * then 0 is returned. 
+ */ +static int +solve_NTRU(unsigned logn, int8_t *F, int8_t *G, + const int8_t *f, const int8_t *g, int lim, uint32_t *tmp) { + size_t n, u; + uint32_t *ft, *gt, *Ft, *Gt, *gm; + uint32_t p, p0i, r; + const small_prime *primes; + + n = MKN(logn); + + if (!solve_NTRU_deepest(logn, f, g, tmp)) { + return 0; + } + + /* + * For logn <= 2, we need to use solve_NTRU_intermediate() + * directly, because coefficients are a bit too large and + * do not fit the hypotheses in solve_NTRU_binary_depth0(). + */ + if (logn <= 2) { + unsigned depth; + + depth = logn; + while (depth -- > 0) { + if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) { + return 0; + } + } + } else { + unsigned depth; + + depth = logn; + while (depth -- > 2) { + if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) { + return 0; + } + } + if (!solve_NTRU_binary_depth1(logn, f, g, tmp)) { + return 0; + } + if (!solve_NTRU_binary_depth0(logn, f, g, tmp)) { + return 0; + } + } + + /* + * If no buffer has been provided for G, use a temporary one. + */ + if (G == NULL) { + G = (int8_t *)(tmp + 2 * n); + } + + /* + * Final F and G are in fk->tmp, one word per coefficient + * (signed value over 31 bits). + */ + if (!poly_big_to_small(F, tmp, lim, logn) + || !poly_big_to_small(G, tmp + n, lim, logn)) { + return 0; + } + + /* + * Verify that the NTRU equation is fulfilled. Since all elements + * have short lengths, verifying modulo a small prime p works, and + * allows using the NTT. + * + * We put Gt[] first in tmp[], and process it first, so that it does + * not overlap with G[] in case we allocated it ourselves. 
+ */ + Gt = tmp; + ft = Gt + n; + gt = ft + n; + Ft = gt + n; + gm = Ft + n; + + primes = PRIMES; + p = primes[0].p; + p0i = modp_ninv31(p); + modp_mkgm2(gm, tmp, logn, primes[0].g, p, p0i); + for (u = 0; u < n; u ++) { + Gt[u] = modp_set(G[u], p); + } + for (u = 0; u < n; u ++) { + ft[u] = modp_set(f[u], p); + gt[u] = modp_set(g[u], p); + Ft[u] = modp_set(F[u], p); + } + modp_NTT2(ft, gm, logn, p, p0i); + modp_NTT2(gt, gm, logn, p, p0i); + modp_NTT2(Ft, gm, logn, p, p0i); + modp_NTT2(Gt, gm, logn, p, p0i); + r = modp_montymul(12289, 1, p, p0i); + for (u = 0; u < n; u ++) { + uint32_t z; + + z = modp_sub(modp_montymul(ft[u], Gt[u], p, p0i), + modp_montymul(gt[u], Ft[u], p, p0i), p); + if (z != r) { + return 0; + } + } + + return 1; +} + +/* + * Generate a random polynomial with a Gaussian distribution. This function + * also makes sure that the resultant of the polynomial with phi is odd. + */ +static void +poly_small_mkgauss(RNG_CONTEXT *rng, int8_t *f, unsigned logn) { + size_t n, u; + unsigned mod2; + + n = MKN(logn); + mod2 = 0; + for (u = 0; u < n; u ++) { + int s; + +restart: + s = mkgauss(rng, logn); + + /* + * We need the coefficient to fit within -127..+127; + * realistically, this is always the case except for + * the very low degrees (N = 2 or 4), for which there + * is no real security anyway. + */ + if (s < -127 || s > 127) { + goto restart; + } + + /* + * We need the sum of all coefficients to be 1; otherwise, + * the resultant of the polynomial with X^N+1 will be even, + * and the binary GCD will fail. + */ + if (u == n - 1) { + if ((mod2 ^ (unsigned)(s & 1)) == 0) { + goto restart; + } + } else { + mod2 ^= (unsigned)(s & 1); + } + f[u] = (int8_t)s; + } +} + +/* see falcon.h */ +void +PQCLEAN_FALCON1024_AVX2_keygen(inner_shake256_context *rng, + int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h, + unsigned logn, uint8_t *tmp) { + /* + * Algorithm is the following: + * + * - Generate f and g with the Gaussian distribution. 
+ * + * - If either Res(f,phi) or Res(g,phi) is even, try again. + * + * - If ||(f,g)|| is too large, try again. + * + * - If ||B~_{f,g}|| is too large, try again. + * + * - If f is not invertible mod phi mod q, try again. + * + * - Compute h = g/f mod phi mod q. + * + * - Solve the NTRU equation fG - gF = q; if the solving fails, + * try again. Usual failure condition is when Res(f,phi) + * and Res(g,phi) are not prime to each other. + */ + size_t n, u; + uint16_t *h2, *tmp2; + RNG_CONTEXT *rc; + + n = MKN(logn); + rc = rng; + + /* + * We need to generate f and g randomly, until we find values + * such that the norm of (g,-f), and of the orthogonalized + * vector, are satisfying. The orthogonalized vector is: + * (q*adj(f)/(f*adj(f)+g*adj(g)), q*adj(g)/(f*adj(f)+g*adj(g))) + * (it is actually the (N+1)-th row of the Gram-Schmidt basis). + * + * In the binary case, coefficients of f and g are generated + * independently of each other, with a discrete Gaussian + * distribution of standard deviation 1.17*sqrt(q/(2*N)). Then, + * the two vectors have expected norm 1.17*sqrt(q), which is + * also our acceptance bound: we require both vectors to be no + * larger than that (this will be satisfied about 1/4th of the + * time, thus we expect sampling new (f,g) about 4 times for that + * step). + * + * We require that Res(f,phi) and Res(g,phi) are both odd (the + * NTRU equation solver requires it). + */ + for (;;) { + fpr *rt1, *rt2, *rt3; + fpr bnorm; + uint32_t normf, normg, norm; + int lim; + + /* + * The poly_small_mkgauss() function makes sure + * that the sum of coefficients is 1 modulo 2 + * (i.e. the resultant of the polynomial with phi + * will be odd). + */ + poly_small_mkgauss(rc, f, logn); + poly_small_mkgauss(rc, g, logn); + + /* + * Verify that all coefficients are within the bounds + * defined in max_fg_bits. This is the case with + * overwhelming probability; this guarantees that the + * key will be encodable with FALCON_COMP_TRIM. 
+ */ + lim = 1 << (PQCLEAN_FALCON1024_AVX2_max_fg_bits[logn] - 1); + for (u = 0; u < n; u ++) { + /* + * We can use non-CT tests since on any failure + * we will discard f and g. + */ + if (f[u] >= lim || f[u] <= -lim + || g[u] >= lim || g[u] <= -lim) { + lim = -1; + break; + } + } + if (lim < 0) { + continue; + } + + /* + * Bound is 1.17*sqrt(q). We compute the squared + * norms. With q = 12289, the squared bound is: + * (1.17^2)* 12289 = 16822.4121 + * Since f and g are integral, the squared norm + * of (g,-f) is an integer. + */ + normf = poly_small_sqnorm(f, logn); + normg = poly_small_sqnorm(g, logn); + norm = (normf + normg) | -((normf | normg) >> 31); + if (norm >= 16823) { + continue; + } + + /* + * We compute the orthogonalized vector norm. + */ + rt1 = (fpr *)tmp; + rt2 = rt1 + n; + rt3 = rt2 + n; + poly_small_to_fp(rt1, f, logn); + poly_small_to_fp(rt2, g, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt1, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rt2, logn); + PQCLEAN_FALCON1024_AVX2_poly_invnorm2_fft(rt3, rt1, rt2, logn); + PQCLEAN_FALCON1024_AVX2_poly_adj_fft(rt1, logn); + PQCLEAN_FALCON1024_AVX2_poly_adj_fft(rt2, logn); + PQCLEAN_FALCON1024_AVX2_poly_mulconst(rt1, fpr_q, logn); + PQCLEAN_FALCON1024_AVX2_poly_mulconst(rt2, fpr_q, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_autoadj_fft(rt1, rt3, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_autoadj_fft(rt2, rt3, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(rt1, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(rt2, logn); + bnorm = fpr_zero; + for (u = 0; u < n; u ++) { + bnorm = fpr_add(bnorm, fpr_sqr(rt1[u])); + bnorm = fpr_add(bnorm, fpr_sqr(rt2[u])); + } + if (!fpr_lt(bnorm, fpr_bnorm_max)) { + continue; + } + + /* + * Compute public key h = g/f mod X^N+1 mod q. If this + * fails, we must restart. 
+ */ + if (h == NULL) { + h2 = (uint16_t *)tmp; + tmp2 = h2 + n; + } else { + h2 = h; + tmp2 = (uint16_t *)tmp; + } + if (!PQCLEAN_FALCON1024_AVX2_compute_public(h2, f, g, logn, (uint8_t *)tmp2)) { + continue; + } + + /* + * Solve the NTRU equation to get F and G. + */ + lim = (1 << (PQCLEAN_FALCON1024_AVX2_max_FG_bits[logn] - 1)) - 1; + if (!solve_NTRU(logn, F, G, f, g, lim, (uint32_t *)tmp)) { + continue; + } + + /* + * Key pair is generated. + */ + break; + } +} diff --git a/crypto_sign/falcon-1024/avx2/pqclean.c b/crypto_sign/falcon-1024/avx2/pqclean.c new file mode 100644 index 00000000..1b254cfc --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/pqclean.c @@ -0,0 +1,386 @@ +#include "api.h" +#include "inner.h" +#include "randombytes.h" +#include +#include +/* + * Wrapper for implementing the PQClean API. + */ + + + +#define NONCELEN 40 +#define SEEDLEN 48 + +/* + * Encoding formats (nnnn = log of degree, 9 for Falcon-512, 10 for Falcon-1024) + * + * private key: + * header byte: 0101nnnn + * private f (6 or 5 bits by element, depending on degree) + * private g (6 or 5 bits by element, depending on degree) + * private F (8 bits by element) + * + * public key: + * header byte: 0000nnnn + * public h (14 bits by element) + * + * signature: + * header byte: 0011nnnn + * nonce 40 bytes + * value (12 bits by element) + * + * message + signature: + * signature length (2 bytes, big-endian) + * nonce 40 bytes + * message + * header byte: 0010nnnn + * value (12 bits by element) + * (signature length is 1+len(value), not counting the nonce) + */ + +/* see api.h */ +int +PQCLEAN_FALCON1024_AVX2_crypto_sign_keypair(unsigned char *pk, unsigned char *sk) { + union { + uint8_t b[28 * 1024]; + uint64_t dummy_u64; + fpr dummy_fpr; + } tmp; + int8_t f[1024], g[1024], F[1024], G[1024]; + uint16_t h[1024]; + unsigned char seed[SEEDLEN]; + inner_shake256_context rng; + size_t u, v; + + + /* + * Generate key pair. 
+ */ + randombytes(seed, sizeof seed); + inner_shake256_init(&rng); + inner_shake256_inject(&rng, seed, sizeof seed); + inner_shake256_flip(&rng); + PQCLEAN_FALCON1024_AVX2_keygen(&rng, f, g, F, G, h, 10, tmp.b); + inner_shake256_ctx_release(&rng); + + /* + * Encode private key. + */ + sk[0] = 0x50 + 10; + u = 1; + v = PQCLEAN_FALCON1024_AVX2_trim_i8_encode( + sk + u, PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES - u, + f, 10, PQCLEAN_FALCON1024_AVX2_max_fg_bits[10]); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON1024_AVX2_trim_i8_encode( + sk + u, PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES - u, + g, 10, PQCLEAN_FALCON1024_AVX2_max_fg_bits[10]); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON1024_AVX2_trim_i8_encode( + sk + u, PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES - u, + F, 10, PQCLEAN_FALCON1024_AVX2_max_FG_bits[10]); + if (v == 0) { + return -1; + } + u += v; + if (u != PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES) { + return -1; + } + + /* + * Encode public key. + */ + pk[0] = 0x00 + 10; + v = PQCLEAN_FALCON1024_AVX2_modq_encode( + pk + 1, PQCLEAN_FALCON1024_AVX2_CRYPTO_PUBLICKEYBYTES - 1, + h, 10); + if (v != PQCLEAN_FALCON1024_AVX2_CRYPTO_PUBLICKEYBYTES - 1) { + return -1; + } + + return 0; +} + +/* + * Compute the signature. nonce[] receives the nonce and must have length + * NONCELEN bytes. sigbuf[] receives the signature value (without nonce + * or header byte), with *sigbuflen providing the maximum value length and + * receiving the actual value length. + * + * If a signature could be computed but not encoded because it would + * exceed the output buffer size, then a new signature is computed. If + * the provided buffer size is too low, this could loop indefinitely, so + * the caller must provide a size that can accommodate signatures with a + * large enough probability. + * + * Return value: 0 on success, -1 on error. 
+ */ +static int +do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen, + const uint8_t *m, size_t mlen, const uint8_t *sk) { + union { + uint8_t b[72 * 1024]; + uint64_t dummy_u64; + fpr dummy_fpr; + } tmp; + int8_t f[1024], g[1024], F[1024], G[1024]; + union { + int16_t sig[1024]; + uint16_t hm[1024]; + } r; + unsigned char seed[SEEDLEN]; + inner_shake256_context sc; + size_t u, v; + + /* + * Decode the private key. + */ + if (sk[0] != 0x50 + 10) { + return -1; + } + u = 1; + v = PQCLEAN_FALCON1024_AVX2_trim_i8_decode( + f, 10, PQCLEAN_FALCON1024_AVX2_max_fg_bits[10], + sk + u, PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES - u); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON1024_AVX2_trim_i8_decode( + g, 10, PQCLEAN_FALCON1024_AVX2_max_fg_bits[10], + sk + u, PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES - u); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON1024_AVX2_trim_i8_decode( + F, 10, PQCLEAN_FALCON1024_AVX2_max_FG_bits[10], + sk + u, PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES - u); + if (v == 0) { + return -1; + } + u += v; + if (u != PQCLEAN_FALCON1024_AVX2_CRYPTO_SECRETKEYBYTES) { + return -1; + } + if (!PQCLEAN_FALCON1024_AVX2_complete_private(G, f, g, F, 10, tmp.b)) { + return -1; + } + + + /* + * Create a random nonce (40 bytes). + */ + randombytes(nonce, NONCELEN); + + /* + * Hash message nonce + message into a vector. + */ + inner_shake256_init(&sc); + inner_shake256_inject(&sc, nonce, NONCELEN); + inner_shake256_inject(&sc, m, mlen); + inner_shake256_flip(&sc); + PQCLEAN_FALCON1024_AVX2_hash_to_point_vartime(&sc, r.hm, 10); + inner_shake256_ctx_release(&sc); + + /* + * Initialize a RNG. + */ + randombytes(seed, sizeof seed); + inner_shake256_init(&sc); + inner_shake256_inject(&sc, seed, sizeof seed); + inner_shake256_flip(&sc); + + /* + * Compute and return the signature. This loops until a signature + * value is found that fits in the provided buffer. 
+ */ + for (;;) { + PQCLEAN_FALCON1024_AVX2_sign_dyn(r.sig, &sc, f, g, F, G, r.hm, 10, tmp.b); + v = PQCLEAN_FALCON1024_AVX2_comp_encode(sigbuf, *sigbuflen, r.sig, 10); + if (v != 0) { + inner_shake256_ctx_release(&sc); + *sigbuflen = v; + return 0; + } + } +} + +/* + * Verify a sigature. The nonce has size NONCELEN bytes. sigbuf[] + * (of size sigbuflen) contains the signature value, not including the + * header byte or nonce. Return value is 0 on success, -1 on error. + */ +static int +do_verify( + const uint8_t *nonce, const uint8_t *sigbuf, size_t sigbuflen, + const uint8_t *m, size_t mlen, const uint8_t *pk) { + union { + uint8_t b[2 * 1024]; + uint64_t dummy_u64; + fpr dummy_fpr; + } tmp; + uint16_t h[1024], hm[1024]; + int16_t sig[1024]; + inner_shake256_context sc; + + /* + * Decode public key. + */ + if (pk[0] != 0x00 + 10) { + return -1; + } + if (PQCLEAN_FALCON1024_AVX2_modq_decode(h, 10, + pk + 1, PQCLEAN_FALCON1024_AVX2_CRYPTO_PUBLICKEYBYTES - 1) + != PQCLEAN_FALCON1024_AVX2_CRYPTO_PUBLICKEYBYTES - 1) { + return -1; + } + PQCLEAN_FALCON1024_AVX2_to_ntt_monty(h, 10); + + /* + * Decode signature. + */ + if (sigbuflen == 0) { + return -1; + } + if (PQCLEAN_FALCON1024_AVX2_comp_decode(sig, 10, sigbuf, sigbuflen) != sigbuflen) { + return -1; + } + + /* + * Hash nonce + message into a vector. + */ + inner_shake256_init(&sc); + inner_shake256_inject(&sc, nonce, NONCELEN); + inner_shake256_inject(&sc, m, mlen); + inner_shake256_flip(&sc); + PQCLEAN_FALCON1024_AVX2_hash_to_point_ct(&sc, hm, 10, tmp.b); + inner_shake256_ctx_release(&sc); + + /* + * Verify signature. 
+ */ + if (!PQCLEAN_FALCON1024_AVX2_verify_raw(hm, sig, h, 10, tmp.b)) { + return -1; + } + return 0; +} + +/* see api.h */ +int +PQCLEAN_FALCON1024_AVX2_crypto_sign_signature( + uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk) { + /* + * The PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES constant is used for + * the signed message object (as produced by PQCLEAN_FALCON1024_AVX2_crypto_sign()) + * and includes a two-byte length value, so we take care here + * to only generate signatures that are two bytes shorter than + * the maximum. This is done to ensure that PQCLEAN_FALCON1024_AVX2_crypto_sign() + * and PQCLEAN_FALCON1024_AVX2_crypto_sign_signature() produce the exact same signature + * value, if used on the same message, with the same private key, + * and using the same output from randombytes() (this is for + * reproducibility of tests). + */ + size_t vlen; + + vlen = PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES - NONCELEN - 3; + if (do_sign(sig + 1, sig + 1 + NONCELEN, &vlen, m, mlen, sk) < 0) { + return -1; + } + sig[0] = 0x30 + 10; + *siglen = 1 + NONCELEN + vlen; + return 0; +} + +/* see api.h */ +int +PQCLEAN_FALCON1024_AVX2_crypto_sign_verify( + const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk) { + if (siglen < 1 + NONCELEN) { + return -1; + } + if (sig[0] != 0x30 + 10) { + return -1; + } + return do_verify(sig + 1, + sig + 1 + NONCELEN, siglen - 1 - NONCELEN, m, mlen, pk); +} + +/* see api.h */ +int +PQCLEAN_FALCON1024_AVX2_crypto_sign( + uint8_t *sm, size_t *smlen, + const uint8_t *m, size_t mlen, const uint8_t *sk) { + uint8_t *pm, *sigbuf; + size_t sigbuflen; + + /* + * Move the message to its final location; this is a memmove() so + * it handles overlaps properly. 
+ */ + memmove(sm + 2 + NONCELEN, m, mlen); + pm = sm + 2 + NONCELEN; + sigbuf = pm + 1 + mlen; + sigbuflen = PQCLEAN_FALCON1024_AVX2_CRYPTO_BYTES - NONCELEN - 3; + if (do_sign(sm + 2, sigbuf, &sigbuflen, pm, mlen, sk) < 0) { + return -1; + } + pm[mlen] = 0x20 + 10; + sigbuflen ++; + sm[0] = (uint8_t)(sigbuflen >> 8); + sm[1] = (uint8_t)sigbuflen; + *smlen = mlen + 2 + NONCELEN + sigbuflen; + return 0; +} + +/* see api.h */ +int +PQCLEAN_FALCON1024_AVX2_crypto_sign_open( + uint8_t *m, size_t *mlen, + const uint8_t *sm, size_t smlen, const uint8_t *pk) { + const uint8_t *sigbuf; + size_t pmlen, sigbuflen; + + if (smlen < 3 + NONCELEN) { + return -1; + } + sigbuflen = ((size_t)sm[0] << 8) | (size_t)sm[1]; + if (sigbuflen < 2 || sigbuflen > (smlen - NONCELEN - 2)) { + return -1; + } + sigbuflen --; + pmlen = smlen - NONCELEN - 3 - sigbuflen; + if (sm[2 + NONCELEN + pmlen] != 0x20 + 10) { + return -1; + } + sigbuf = sm + 2 + NONCELEN + pmlen + 1; + + /* + * The 2-byte length header and the one-byte signature header + * have been verified. Nonce is at sm+2, followed by the message + * itself. Message length is in pmlen. sigbuf/sigbuflen point to + * the signature value (excluding the header byte). + */ + if (do_verify(sm + 2, sigbuf, sigbuflen, + sm + 2 + NONCELEN, pmlen, pk) < 0) { + return -1; + } + + /* + * Signature is correct, we just have to copy/move the message + * to its final destination. The memmove() properly handles + * overlaps. + */ + memmove(m, sm + 2 + NONCELEN, pmlen); + *mlen = pmlen; + return 0; +} diff --git a/crypto_sign/falcon-1024/avx2/rng.c b/crypto_sign/falcon-1024/avx2/rng.c new file mode 100644 index 00000000..74207c52 --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/rng.c @@ -0,0 +1,195 @@ +#include "inner.h" +#include +/* + * PRNG and interface to the system RNG. 
+ * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + + +/* + * Include relevant system header files. For Win32, this will also need + * linking with advapi32.dll, which we trigger with an appropriate #pragma. + */ + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_get_seed(void *seed, size_t len) { + (void)seed; + if (len == 0) { + return 1; + } + return 0; +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_prng_init(prng *p, inner_shake256_context *src) { + inner_shake256_extract(src, p->state.d, 56); + PQCLEAN_FALCON1024_AVX2_prng_refill(p); +} + +/* + * PRNG based on ChaCha20. + * + * State consists in key (32 bytes) then IV (16 bytes) and block counter + * (8 bytes). 
Normally, we should not care about local endianness (this + * is for a PRNG), but for the NIST competition we need reproducible KAT + * vectors that work across architectures, so we enforce little-endian + * interpretation where applicable. Moreover, output words are "spread + * out" over the output buffer with the interleaving pattern that is + * naturally obtained from the AVX2 implementation that runs eight + * ChaCha20 instances in parallel. + * + * The block counter is XORed into the first 8 bytes of the IV. + */ +void +PQCLEAN_FALCON1024_AVX2_prng_refill(prng *p) { + + static const uint32_t CW[] = { + 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 + }; + + uint64_t cc; + size_t u; + int i; + uint32_t *sw; + union { + uint32_t w[16]; + __m256i y[2]; /* for alignment */ + } t; + __m256i state[16], init[16]; + + sw = (uint32_t *)p->state.d; + + /* + * XOR next counter values into state. + */ + cc = *(uint64_t *)(p->state.d + 48); + for (u = 0; u < 8; u ++) { + t.w[u] = (uint32_t)(cc + u); + t.w[u + 8] = (uint32_t)((cc + u) >> 32); + } + *(uint64_t *)(p->state.d + 48) = cc + 8; + + /* + * Load state. + */ + for (u = 0; u < 4; u ++) { + state[u] = init[u] = + _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)CW[u])); + } + for (u = 0; u < 10; u ++) { + state[u + 4] = init[u + 4] = + _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)sw[u])); + } + state[14] = init[14] = _mm256_xor_si256( + _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)sw[10])), + _mm256_loadu_si256((__m256i *)&t.w[0])); + state[15] = init[15] = _mm256_xor_si256( + _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)sw[11])), + _mm256_loadu_si256((__m256i *)&t.w[8])); + + /* + * Do all rounds. 
+ */ + for (i = 0; i < 10; i ++) { + +#define QROUND(a, b, c, d) do { \ + state[a] = _mm256_add_epi32(state[a], state[b]); \ + state[d] = _mm256_xor_si256(state[d], state[a]); \ + state[d] = _mm256_or_si256( \ + _mm256_slli_epi32(state[d], 16), \ + _mm256_srli_epi32(state[d], 16)); \ + state[c] = _mm256_add_epi32(state[c], state[d]); \ + state[b] = _mm256_xor_si256(state[b], state[c]); \ + state[b] = _mm256_or_si256( \ + _mm256_slli_epi32(state[b], 12), \ + _mm256_srli_epi32(state[b], 20)); \ + state[a] = _mm256_add_epi32(state[a], state[b]); \ + state[d] = _mm256_xor_si256(state[d], state[a]); \ + state[d] = _mm256_or_si256( \ + _mm256_slli_epi32(state[d], 8), \ + _mm256_srli_epi32(state[d], 24)); \ + state[c] = _mm256_add_epi32(state[c], state[d]); \ + state[b] = _mm256_xor_si256(state[b], state[c]); \ + state[b] = _mm256_or_si256( \ + _mm256_slli_epi32(state[b], 7), \ + _mm256_srli_epi32(state[b], 25)); \ + } while (0) + + QROUND( 0, 4, 8, 12); + QROUND( 1, 5, 9, 13); + QROUND( 2, 6, 10, 14); + QROUND( 3, 7, 11, 15); + QROUND( 0, 5, 10, 15); + QROUND( 1, 6, 11, 12); + QROUND( 2, 7, 8, 13); + QROUND( 3, 4, 9, 14); + +#undef QROUND + + } + + /* + * Add initial state back and encode the result in the destination + * buffer. We can dump the AVX2 values "as is" because the non-AVX2 + * code uses a compatible order of values. 
+ */ + for (u = 0; u < 16; u ++) { + _mm256_storeu_si256((__m256i *)&p->buf.d[u << 5], + _mm256_add_epi32(state[u], init[u])); + } + + + p->ptr = 0; +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_prng_get_bytes(prng *p, void *dst, size_t len) { + uint8_t *buf; + + buf = dst; + while (len > 0) { + size_t clen; + + clen = (sizeof p->buf.d) - p->ptr; + if (clen > len) { + clen = len; + } + memcpy(buf, p->buf.d, clen); + buf += clen; + len -= clen; + p->ptr += clen; + if (p->ptr == sizeof p->buf.d) { + PQCLEAN_FALCON1024_AVX2_prng_refill(p); + } + } +} diff --git a/crypto_sign/falcon-1024/avx2/sign.c b/crypto_sign/falcon-1024/avx2/sign.c new file mode 100644 index 00000000..8ef93bf8 --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/sign.c @@ -0,0 +1,1312 @@ +#include "inner.h" + +/* + * Falcon signature generation. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* =================================================================== */ + +/* + * Compute degree N from logarithm 'logn'. + */ +#define MKN(logn) ((size_t)1 << (logn)) + +/* =================================================================== */ +/* + * Binary case: + * N = 2^logn + * phi = X^N+1 + */ + +/* + * Get the size of the LDL tree for an input with polynomials of size + * 2^logn. The size is expressed in the number of elements. + */ +static inline unsigned +ffLDL_treesize(unsigned logn) { + /* + * For logn = 0 (polynomials are constant), the "tree" is a + * single element. Otherwise, the tree node has size 2^logn, and + * has two child trees for size logn-1 each. Thus, treesize s() + * must fulfill these two relations: + * + * s(0) = 1 + * s(logn) = (2^logn) + 2*s(logn-1) + */ + return (logn + 1) << logn; +} + +/* + * Inner function for ffLDL_fft(). It expects the matrix to be both + * auto-adjoint and quasicyclic; also, it uses the source operands + * as modifiable temporaries. + * + * tmp[] must have room for at least one polynomial. + */ +static void +ffLDL_fft_inner(fpr *tree, + fpr *g0, fpr *g1, unsigned logn, fpr *tmp) { + size_t n, hn; + + n = MKN(logn); + if (n == 1) { + tree[0] = g0[0]; + return; + } + hn = n >> 1; + + /* + * The LDL decomposition yields L (which is written in the tree) + * and the diagonal of D. Since d00 = g0, we just write d11 + * into tmp. + */ + PQCLEAN_FALCON1024_AVX2_poly_LDLmv_fft(tmp, tree, g0, g1, g0, logn); + + /* + * Split d00 (currently in g0) and d11 (currently in tmp). 
We + * reuse g0 and g1 as temporary storage spaces: + * d00 splits into g1, g1+hn + * d11 splits into g0, g0+hn + */ + PQCLEAN_FALCON1024_AVX2_poly_split_fft(g1, g1 + hn, g0, logn); + PQCLEAN_FALCON1024_AVX2_poly_split_fft(g0, g0 + hn, tmp, logn); + + /* + * Each split result is the first row of a new auto-adjoint + * quasicyclic matrix for the next recursive step. + */ + ffLDL_fft_inner(tree + n, + g1, g1 + hn, logn - 1, tmp); + ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1), + g0, g0 + hn, logn - 1, tmp); +} + +/* + * Compute the ffLDL tree of an auto-adjoint matrix G. The matrix + * is provided as three polynomials (FFT representation). + * + * The "tree" array is filled with the computed tree, of size + * (logn+1)*(2^logn) elements (see ffLDL_treesize()). + * + * Input arrays MUST NOT overlap, except possibly the three unmodified + * arrays g00, g01 and g11. tmp[] should have room for at least three + * polynomials of 2^logn elements each. + */ +static void +ffLDL_fft(fpr *tree, const fpr *g00, + const fpr *g01, const fpr *g11, + unsigned logn, fpr *tmp) { + size_t n, hn; + fpr *d00, *d11; + + n = MKN(logn); + if (n == 1) { + tree[0] = g00[0]; + return; + } + hn = n >> 1; + d00 = tmp; + d11 = tmp + n; + tmp += n << 1; + + memcpy(d00, g00, n * sizeof * g00); + PQCLEAN_FALCON1024_AVX2_poly_LDLmv_fft(d11, tree, g00, g01, g11, logn); + + PQCLEAN_FALCON1024_AVX2_poly_split_fft(tmp, tmp + hn, d00, logn); + PQCLEAN_FALCON1024_AVX2_poly_split_fft(d00, d00 + hn, d11, logn); + memcpy(d11, tmp, n * sizeof * tmp); + ffLDL_fft_inner(tree + n, + d11, d11 + hn, logn - 1, tmp); + ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1), + d00, d00 + hn, logn - 1, tmp); +} + +/* + * Normalize an ffLDL tree: each leaf of value x is replaced with + * sigma / sqrt(x). + */ +static void +ffLDL_binary_normalize(fpr *tree, unsigned logn) { + /* + * TODO: make an iterative version. 
+ */ + size_t n; + + n = MKN(logn); + if (n == 1) { + /* + * We actually store in the tree leaf the inverse of + * the value mandated by the specification: this + * saves a division both here and in the sampler. + */ + tree[0] = fpr_mul(fpr_sqrt(tree[0]), fpr_inv_sigma); + } else { + ffLDL_binary_normalize(tree + n, logn - 1); + ffLDL_binary_normalize(tree + n + ffLDL_treesize(logn - 1), + logn - 1); + } +} + +/* =================================================================== */ + +/* + * Convert an integer polynomial (with small values) into the + * representation with complex numbers. + */ +static void +smallints_to_fpr(fpr *r, const int8_t *t, unsigned logn) { + size_t n, u; + + n = MKN(logn); + for (u = 0; u < n; u ++) { + r[u] = fpr_of(t[u]); + } +} + +/* + * The expanded private key contains: + * - The B0 matrix (four elements) + * - The ffLDL tree + */ + +static inline size_t +skoff_b00(unsigned logn) { + (void)logn; + return 0; +} + +static inline size_t +skoff_b01(unsigned logn) { + return MKN(logn); +} + +static inline size_t +skoff_b10(unsigned logn) { + return 2 * MKN(logn); +} + +static inline size_t +skoff_b11(unsigned logn) { + return 3 * MKN(logn); +} + +static inline size_t +skoff_tree(unsigned logn) { + return 4 * MKN(logn); +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_expand_privkey(fpr *expanded_key, + const int8_t *f, const int8_t *g, + const int8_t *F, const int8_t *G, + unsigned logn, uint8_t *tmp) { + size_t n; + fpr *rf, *rg, *rF, *rG; + fpr *b00, *b01, *b10, *b11; + fpr *g00, *g01, *g11, *gxx; + fpr *tree; + + n = MKN(logn); + b00 = expanded_key + skoff_b00(logn); + b01 = expanded_key + skoff_b01(logn); + b10 = expanded_key + skoff_b10(logn); + b11 = expanded_key + skoff_b11(logn); + tree = expanded_key + skoff_tree(logn); + + /* + * We load the private key elements directly into the B0 matrix, + * since B0 = [[g, -f], [G, -F]]. 
+ */ + rf = b01; + rg = b00; + rF = b11; + rG = b10; + + smallints_to_fpr(rf, f, logn); + smallints_to_fpr(rg, g, logn); + smallints_to_fpr(rF, F, logn); + smallints_to_fpr(rG, G, logn); + + /* + * Compute the FFT for the key elements, and negate f and F. + */ + PQCLEAN_FALCON1024_AVX2_FFT(rf, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rg, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rF, logn); + PQCLEAN_FALCON1024_AVX2_FFT(rG, logn); + PQCLEAN_FALCON1024_AVX2_poly_neg(rf, logn); + PQCLEAN_FALCON1024_AVX2_poly_neg(rF, logn); + + /* + * The Gram matrix is G = B·B*. Formulas are: + * g00 = b00*adj(b00) + b01*adj(b01) + * g01 = b00*adj(b10) + b01*adj(b11) + * g10 = b10*adj(b00) + b11*adj(b01) + * g11 = b10*adj(b10) + b11*adj(b11) + * + * For historical reasons, this implementation uses + * g00, g01 and g11 (upper triangle). + */ + g00 = (fpr *)tmp; + g01 = g00 + n; + g11 = g01 + n; + gxx = g11 + n; + + memcpy(g00, b00, n * sizeof * b00); + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(g00, logn); + memcpy(gxx, b01, n * sizeof * b01); + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(gxx, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(g00, gxx, logn); + + memcpy(g01, b00, n * sizeof * b00); + PQCLEAN_FALCON1024_AVX2_poly_muladj_fft(g01, b10, logn); + memcpy(gxx, b01, n * sizeof * b01); + PQCLEAN_FALCON1024_AVX2_poly_muladj_fft(gxx, b11, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(g01, gxx, logn); + + memcpy(g11, b10, n * sizeof * b10); + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(g11, logn); + memcpy(gxx, b11, n * sizeof * b11); + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(gxx, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(g11, gxx, logn); + + /* + * Compute the Falcon tree. + */ + ffLDL_fft(tree, g00, g01, g11, logn, gxx); + + /* + * Normalize tree. + */ + ffLDL_binary_normalize(tree, logn); +} + +typedef int (*samplerZ)(void *ctx, fpr mu, fpr sigma); + +/* + * Perform Fast Fourier Sampling for target vector t. The Gram matrix + * is provided (G = [[g00, g01], [adj(g01), g11]]). 
The sampled vector + * is written over (t0,t1). The Gram matrix is modified as well. The + * tmp[] buffer must have room for four polynomials. + */ +static void +ffSampling_fft_dyntree(samplerZ samp, void *samp_ctx, + fpr *t0, fpr *t1, + fpr *g00, fpr *g01, fpr *g11, + unsigned logn, fpr *tmp) { + size_t n, hn; + fpr *z0, *z1; + + /* + * Deepest level: the LDL tree leaf value is just g00 (the + * array has length only 1 at this point); we normalize it + * with regards to sigma, then use it for sampling. + */ + if (logn == 0) { + fpr leaf; + + leaf = g00[0]; + leaf = fpr_mul(fpr_sqrt(leaf), fpr_inv_sigma); + t0[0] = fpr_of(samp(samp_ctx, t0[0], leaf)); + t1[0] = fpr_of(samp(samp_ctx, t1[0], leaf)); + return; + } + + n = (size_t)1 << logn; + hn = n >> 1; + + /* + * Decompose G into LDL. We only need d00 (identical to g00), + * d11, and l10; we do that in place. + */ + PQCLEAN_FALCON1024_AVX2_poly_LDL_fft(g00, g01, g11, logn); + + /* + * Split d00 and d11 and expand them into half-size quasi-cyclic + * Gram matrices. We also save l10 in tmp[]. + */ + PQCLEAN_FALCON1024_AVX2_poly_split_fft(tmp, tmp + hn, g00, logn); + memcpy(g00, tmp, n * sizeof * tmp); + PQCLEAN_FALCON1024_AVX2_poly_split_fft(tmp, tmp + hn, g11, logn); + memcpy(g11, tmp, n * sizeof * tmp); + memcpy(tmp, g01, n * sizeof * g01); + memcpy(g01, g00, hn * sizeof * g00); + memcpy(g01 + hn, g11, hn * sizeof * g00); + + /* + * The half-size Gram matrices for the recursive LDL tree + * building are now: + * - left sub-tree: g00, g00+hn, g01 + * - right sub-tree: g11, g11+hn, g01+hn + * l10 is in tmp[]. + */ + + /* + * We split t1 and use the first recursive call on the two + * halves, using the right sub-tree. The result is merged + * back into tmp + 2*n. 
+ */ + z1 = tmp + n; + PQCLEAN_FALCON1024_AVX2_poly_split_fft(z1, z1 + hn, t1, logn); + ffSampling_fft_dyntree(samp, samp_ctx, z1, z1 + hn, + g11, g11 + hn, g01 + hn, logn - 1, z1 + n); + PQCLEAN_FALCON1024_AVX2_poly_merge_fft(tmp + (n << 1), z1, z1 + hn, logn); + + /* + * Compute tb0 = t0 + (t1 - z1) * l10. + * At that point, l10 is in tmp, t1 is unmodified, and z1 is + * in tmp + (n << 1). The buffer in z1 is free. + * + * In the end, z1 is written over t1, and tb0 is in t0. + */ + memcpy(z1, t1, n * sizeof * t1); + PQCLEAN_FALCON1024_AVX2_poly_sub(z1, tmp + (n << 1), logn); + memcpy(t1, tmp + (n << 1), n * sizeof * tmp); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(tmp, z1, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(t0, tmp, logn); + + /* + * Second recursive invocation, on the split tb0 (currently in t0) + * and the left sub-tree. + */ + z0 = tmp; + PQCLEAN_FALCON1024_AVX2_poly_split_fft(z0, z0 + hn, t0, logn); + ffSampling_fft_dyntree(samp, samp_ctx, z0, z0 + hn, + g00, g00 + hn, g01, logn - 1, z0 + n); + PQCLEAN_FALCON1024_AVX2_poly_merge_fft(t0, z0, z0 + hn, logn); +} + +/* + * Perform Fast Fourier Sampling for target vector t and LDL tree T. + * tmp[] must have size for at least two polynomials of size 2^logn. + */ +static void +ffSampling_fft(samplerZ samp, void *samp_ctx, + fpr *z0, fpr *z1, + const fpr *tree, + const fpr *t0, const fpr *t1, unsigned logn, + fpr *tmp) { + size_t n, hn; + const fpr *tree0, *tree1; + + /* + * When logn == 2, we inline the last two recursion levels. 
+ */ + if (logn == 2) { + fpr w0, w1, w2, w3, sigma; + __m128d ww0, ww1, wa, wb, wc, wd; + __m128d wy0, wy1, wz0, wz1; + __m128d half, invsqrt8, invsqrt2, neghi, neglo; + int si0, si1, si2, si3; + + tree0 = tree + 4; + tree1 = tree + 8; + + half = _mm_set1_pd(0.5); + invsqrt8 = _mm_set1_pd(0.353553390593273762200422181052); + invsqrt2 = _mm_set1_pd(0.707106781186547524400844362105); + neghi = _mm_set_pd(-0.0, 0.0); + neglo = _mm_set_pd(0.0, -0.0); + + /* + * We split t1 into w*, then do the recursive invocation, + * with output in w*. We finally merge back into z1. + */ + ww0 = _mm_loadu_pd(&t1[0].v); + ww1 = _mm_loadu_pd(&t1[2].v); + wa = _mm_unpacklo_pd(ww0, ww1); + wb = _mm_unpackhi_pd(ww0, ww1); + wc = _mm_add_pd(wa, wb); + ww0 = _mm_mul_pd(wc, half); + wc = _mm_sub_pd(wa, wb); + wd = _mm_xor_pd(_mm_permute_pd(wc, 1), neghi); + ww1 = _mm_mul_pd(_mm_add_pd(wc, wd), invsqrt8); + + w2.v = _mm_cvtsd_f64(ww1); + w3.v = _mm_cvtsd_f64(_mm_permute_pd(ww1, 1)); + wa = ww1; + sigma = tree1[3]; + si2 = samp(samp_ctx, w2, sigma); + si3 = samp(samp_ctx, w3, sigma); + ww1 = _mm_set_pd((double)si3, (double)si2); + wa = _mm_sub_pd(wa, ww1); + wb = _mm_loadu_pd(&tree1[0].v); + wc = _mm_mul_pd(wa, wb); + wd = _mm_mul_pd(wa, _mm_permute_pd(wb, 1)); + wa = _mm_unpacklo_pd(wc, wd); + wb = _mm_unpackhi_pd(wc, wd); + ww0 = _mm_add_pd(ww0, _mm_add_pd(wa, _mm_xor_pd(wb, neglo))); + w0.v = _mm_cvtsd_f64(ww0); + w1.v = _mm_cvtsd_f64(_mm_permute_pd(ww0, 1)); + sigma = tree1[2]; + si0 = samp(samp_ctx, w0, sigma); + si1 = samp(samp_ctx, w1, sigma); + ww0 = _mm_set_pd((double)si1, (double)si0); + + wc = _mm_mul_pd( + _mm_set_pd((double)(si2 + si3), (double)(si2 - si3)), + invsqrt2); + wa = _mm_add_pd(ww0, wc); + wb = _mm_sub_pd(ww0, wc); + ww0 = _mm_unpacklo_pd(wa, wb); + ww1 = _mm_unpackhi_pd(wa, wb); + _mm_storeu_pd(&z1[0].v, ww0); + _mm_storeu_pd(&z1[2].v, ww1); + + /* + * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in w*. 
+ */ + wy0 = _mm_sub_pd(_mm_loadu_pd(&t1[0].v), ww0); + wy1 = _mm_sub_pd(_mm_loadu_pd(&t1[2].v), ww1); + wz0 = _mm_loadu_pd(&tree[0].v); + wz1 = _mm_loadu_pd(&tree[2].v); + ww0 = _mm_sub_pd(_mm_mul_pd(wy0, wz0), _mm_mul_pd(wy1, wz1)); + ww1 = _mm_add_pd(_mm_mul_pd(wy0, wz1), _mm_mul_pd(wy1, wz0)); + ww0 = _mm_add_pd(ww0, _mm_loadu_pd(&t0[0].v)); + ww1 = _mm_add_pd(ww1, _mm_loadu_pd(&t0[2].v)); + + /* + * Second recursive invocation. + */ + wa = _mm_unpacklo_pd(ww0, ww1); + wb = _mm_unpackhi_pd(ww0, ww1); + wc = _mm_add_pd(wa, wb); + ww0 = _mm_mul_pd(wc, half); + wc = _mm_sub_pd(wa, wb); + wd = _mm_xor_pd(_mm_permute_pd(wc, 1), neghi); + ww1 = _mm_mul_pd(_mm_add_pd(wc, wd), invsqrt8); + + w2.v = _mm_cvtsd_f64(ww1); + w3.v = _mm_cvtsd_f64(_mm_permute_pd(ww1, 1)); + wa = ww1; + sigma = tree0[3]; + si2 = samp(samp_ctx, w2, sigma); + si3 = samp(samp_ctx, w3, sigma); + ww1 = _mm_set_pd((double)si3, (double)si2); + wa = _mm_sub_pd(wa, ww1); + wb = _mm_loadu_pd(&tree0[0].v); + wc = _mm_mul_pd(wa, wb); + wd = _mm_mul_pd(wa, _mm_permute_pd(wb, 1)); + wa = _mm_unpacklo_pd(wc, wd); + wb = _mm_unpackhi_pd(wc, wd); + ww0 = _mm_add_pd(ww0, _mm_add_pd(wa, _mm_xor_pd(wb, neglo))); + w0.v = _mm_cvtsd_f64(ww0); + w1.v = _mm_cvtsd_f64(_mm_permute_pd(ww0, 1)); + sigma = tree0[2]; + si0 = samp(samp_ctx, w0, sigma); + si1 = samp(samp_ctx, w1, sigma); + ww0 = _mm_set_pd((double)si1, (double)si0); + + wc = _mm_mul_pd( + _mm_set_pd((double)(si2 + si3), (double)(si2 - si3)), + invsqrt2); + wa = _mm_add_pd(ww0, wc); + wb = _mm_sub_pd(ww0, wc); + ww0 = _mm_unpacklo_pd(wa, wb); + ww1 = _mm_unpackhi_pd(wa, wb); + _mm_storeu_pd(&z0[0].v, ww0); + _mm_storeu_pd(&z0[2].v, ww1); + + return; + } + + /* + * Case logn == 1 is reachable only when using Falcon-2 (the + * smallest size for which Falcon is mathematically defined, but + * of course way too insecure to be of any use). 
+ */ + if (logn == 1) { + fpr x0, x1, y0, y1, sigma; + fpr a_re, a_im, b_re, b_im, c_re, c_im; + + x0 = t1[0]; + x1 = t1[1]; + sigma = tree[3]; + z1[0] = y0 = fpr_of(samp(samp_ctx, x0, sigma)); + z1[1] = y1 = fpr_of(samp(samp_ctx, x1, sigma)); + a_re = fpr_sub(x0, y0); + a_im = fpr_sub(x1, y1); + b_re = tree[0]; + b_im = tree[1]; + c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im)); + c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re)); + x0 = fpr_add(c_re, t0[0]); + x1 = fpr_add(c_im, t0[1]); + sigma = tree[2]; + z0[0] = fpr_of(samp(samp_ctx, x0, sigma)); + z0[1] = fpr_of(samp(samp_ctx, x1, sigma)); + + return; + } + + /* + * Normal end of recursion is for logn == 0. Since the last + * steps of the recursions were inlined in the blocks above + * (when logn == 1 or 2), this case is not reachable, and is + * retained here only for documentation purposes. + + if (logn == 0) { + fpr x0, x1, sigma; + + x0 = t0[0]; + x1 = t1[0]; + sigma = tree[0]; + z0[0] = fpr_of(samp(samp_ctx, x0, sigma)); + z1[0] = fpr_of(samp(samp_ctx, x1, sigma)); + return; + } + + */ + + /* + * General recursive case (logn >= 3). + */ + + n = (size_t)1 << logn; + hn = n >> 1; + tree0 = tree + n; + tree1 = tree + n + ffLDL_treesize(logn - 1); + + /* + * We split t1 into z1 (reused as temporary storage), then do + * the recursive invocation, with output in tmp. We finally + * merge back into z1. + */ + PQCLEAN_FALCON1024_AVX2_poly_split_fft(z1, z1 + hn, t1, logn); + ffSampling_fft(samp, samp_ctx, tmp, tmp + hn, + tree1, z1, z1 + hn, logn - 1, tmp + n); + PQCLEAN_FALCON1024_AVX2_poly_merge_fft(z1, tmp, tmp + hn, logn); + + /* + * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in tmp[]. + */ + memcpy(tmp, t1, n * sizeof * t1); + PQCLEAN_FALCON1024_AVX2_poly_sub(tmp, z1, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(tmp, tree, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(tmp, t0, logn); + + /* + * Second recursive invocation. 
+ */ + PQCLEAN_FALCON1024_AVX2_poly_split_fft(z0, z0 + hn, tmp, logn); + ffSampling_fft(samp, samp_ctx, tmp, tmp + hn, + tree0, z0, z0 + hn, logn - 1, tmp + n); + PQCLEAN_FALCON1024_AVX2_poly_merge_fft(z0, tmp, tmp + hn, logn); +} + +/* + * Compute a signature: the signature contains two vectors, s1 and s2. + * The s1 vector is not returned. The squared norm of (s1,s2) is + * computed, and if it is short enough, then s2 is returned into the + * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is + * returned; the caller should then try again. This function uses an + * expanded key. + * + * tmp[] must have room for at least six polynomials. + */ +static int +do_sign_tree(samplerZ samp, void *samp_ctx, int16_t *s2, + const fpr *expanded_key, + const uint16_t *hm, + unsigned logn, fpr *tmp) { + size_t n, u; + fpr *t0, *t1, *tx, *ty; + const fpr *b00, *b01, *b10, *b11, *tree; + fpr ni; + uint32_t sqn, ng; + int16_t *s1tmp, *s2tmp; + + n = MKN(logn); + t0 = tmp; + t1 = t0 + n; + b00 = expanded_key + skoff_b00(logn); + b01 = expanded_key + skoff_b01(logn); + b10 = expanded_key + skoff_b10(logn); + b11 = expanded_key + skoff_b11(logn); + tree = expanded_key + skoff_tree(logn); + + /* + * Set the target vector to [hm, 0] (hm is the hashed message). + */ + for (u = 0; u < n; u ++) { + t0[u] = fpr_of(hm[u]); + /* This is implicit. + t1[u] = fpr_zero; + */ + } + + /* + * Apply the lattice basis to obtain the real target + * vector (after normalization with regards to modulus). + */ + PQCLEAN_FALCON1024_AVX2_FFT(t0, logn); + ni = fpr_inverse_of_q; + memcpy(t1, t0, n * sizeof * t0); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(t1, b01, logn); + PQCLEAN_FALCON1024_AVX2_poly_mulconst(t1, fpr_neg(ni), logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(t0, b11, logn); + PQCLEAN_FALCON1024_AVX2_poly_mulconst(t0, ni, logn); + + tx = t1 + n; + ty = tx + n; + + /* + * Apply sampling. Output is written back in [tx, ty]. 
+ */ + ffSampling_fft(samp, samp_ctx, tx, ty, tree, t0, t1, logn, ty + n); + + /* + * Get the lattice point corresponding to that tiny vector. + */ + memcpy(t0, tx, n * sizeof * tx); + memcpy(t1, ty, n * sizeof * ty); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(tx, b00, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(ty, b10, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(tx, ty, logn); + memcpy(ty, t0, n * sizeof * t0); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(ty, b01, logn); + + memcpy(t0, tx, n * sizeof * tx); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(t1, b11, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(t1, ty, logn); + + PQCLEAN_FALCON1024_AVX2_iFFT(t0, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(t1, logn); + + /* + * Compute the signature. + */ + s1tmp = (int16_t *)tx; + sqn = 0; + ng = 0; + for (u = 0; u < n; u ++) { + int32_t z; + + z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]); + sqn += (uint32_t)(z * z); + ng |= sqn; + s1tmp[u] = (int16_t)z; + } + sqn |= -(ng >> 31); + + /* + * With "normal" degrees (e.g. 512 or 1024), it is very + * improbable that the computed vector is not short enough; + * however, it may happen in practice for the very reduced + * versions (e.g. degree 16 or below). In that case, the caller + * will loop, and we must not write anything into s2[] because + * s2[] may overlap with the hashed message hm[] and we need + * hm[] for the next iteration. + */ + s2tmp = (int16_t *)tmp; + for (u = 0; u < n; u ++) { + s2tmp[u] = (int16_t) - fpr_rint(t1[u]); + } + if (PQCLEAN_FALCON1024_AVX2_is_short_half(sqn, s2tmp, logn)) { + memcpy(s2, s2tmp, n * sizeof * s2); + memcpy(tmp, s1tmp, n * sizeof * s1tmp); + return 1; + } + return 0; +} + +/* + * Compute a signature: the signature contains two vectors, s1 and s2. + * The s1 vector is not returned. The squared norm of (s1,s2) is + * computed, and if it is short enough, then s2 is returned into the + * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is + * returned; the caller should then try again. 
+ * + * tmp[] must have room for at least nine polynomials. + */ +static int +do_sign_dyn(samplerZ samp, void *samp_ctx, int16_t *s2, + const int8_t *f, const int8_t *g, + const int8_t *F, const int8_t *G, + const uint16_t *hm, unsigned logn, fpr *tmp) { + size_t n, u; + fpr *t0, *t1, *tx, *ty; + fpr *b00, *b01, *b10, *b11, *g00, *g01, *g11; + fpr ni; + uint32_t sqn, ng; + int16_t *s1tmp, *s2tmp; + + n = MKN(logn); + + /* + * Lattice basis is B = [[g, -f], [G, -F]]. We convert it to FFT. + */ + b00 = tmp; + b01 = b00 + n; + b10 = b01 + n; + b11 = b10 + n; + smallints_to_fpr(b01, f, logn); + smallints_to_fpr(b00, g, logn); + smallints_to_fpr(b11, F, logn); + smallints_to_fpr(b10, G, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b01, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b00, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b11, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b10, logn); + PQCLEAN_FALCON1024_AVX2_poly_neg(b01, logn); + PQCLEAN_FALCON1024_AVX2_poly_neg(b11, logn); + + /* + * Compute the Gram matrix G = B·B*. Formulas are: + * g00 = b00*adj(b00) + b01*adj(b01) + * g01 = b00*adj(b10) + b01*adj(b11) + * g10 = b10*adj(b00) + b11*adj(b01) + * g11 = b10*adj(b10) + b11*adj(b11) + * + * For historical reasons, this implementation uses + * g00, g01 and g11 (upper triangle). g10 is not kept + * since it is equal to adj(g01). + * + * We _replace_ the matrix B with the Gram matrix, but we + * must keep b01 and b11 for computing the target vector. 
+ */ + t0 = b11 + n; + t1 = t0 + n; + + memcpy(t0, b01, n * sizeof * b01); + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(t0, logn); // t0 <- b01*adj(b01) + + memcpy(t1, b00, n * sizeof * b00); + PQCLEAN_FALCON1024_AVX2_poly_muladj_fft(t1, b10, logn); // t1 <- b00*adj(b10) + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(b00, logn); // b00 <- b00*adj(b00) + PQCLEAN_FALCON1024_AVX2_poly_add(b00, t0, logn); // b00 <- g00 + memcpy(t0, b01, n * sizeof * b01); + PQCLEAN_FALCON1024_AVX2_poly_muladj_fft(b01, b11, logn); // b01 <- b01*adj(b11) + PQCLEAN_FALCON1024_AVX2_poly_add(b01, t1, logn); // b01 <- g01 + + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(b10, logn); // b10 <- b10*adj(b10) + memcpy(t1, b11, n * sizeof * b11); + PQCLEAN_FALCON1024_AVX2_poly_mulselfadj_fft(t1, logn); // t1 <- b11*adj(b11) + PQCLEAN_FALCON1024_AVX2_poly_add(b10, t1, logn); // b10 <- g11 + + /* + * We rename variables to make things clearer. The three elements + * of the Gram matrix uses the first 3*n slots of tmp[], followed + * by b11 and b01 (in that order). + */ + g00 = b00; + g01 = b01; + g11 = b10; + b01 = t0; + t0 = b01 + n; + t1 = t0 + n; + + /* + * Memory layout at that point: + * g00 g01 g11 b11 b01 t0 t1 + */ + + /* + * Set the target vector to [hm, 0] (hm is the hashed message). + */ + for (u = 0; u < n; u ++) { + t0[u] = fpr_of(hm[u]); + /* This is implicit. + t1[u] = fpr_zero; + */ + } + + /* + * Apply the lattice basis to obtain the real target + * vector (after normalization with regards to modulus). + */ + PQCLEAN_FALCON1024_AVX2_FFT(t0, logn); + ni = fpr_inverse_of_q; + memcpy(t1, t0, n * sizeof * t0); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(t1, b01, logn); + PQCLEAN_FALCON1024_AVX2_poly_mulconst(t1, fpr_neg(ni), logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(t0, b11, logn); + PQCLEAN_FALCON1024_AVX2_poly_mulconst(t0, ni, logn); + + /* + * b01 and b11 can be discarded, so we move back (t0,t1). 
+ * Memory layout is now: + * g00 g01 g11 t0 t1 + */ + memcpy(b11, t0, n * 2 * sizeof * t0); + t0 = g11 + n; + t1 = t0 + n; + + /* + * Apply sampling; result is written over (t0,t1). + */ + ffSampling_fft_dyntree(samp, samp_ctx, + t0, t1, g00, g01, g11, logn, t1 + n); + + /* + * We arrange the layout back to: + * b00 b01 b10 b11 t0 t1 + * + * We did not conserve the matrix basis, so we must recompute + * it now. + */ + b00 = tmp; + b01 = b00 + n; + b10 = b01 + n; + b11 = b10 + n; + memmove(b11 + n, t0, n * 2 * sizeof * t0); + t0 = b11 + n; + t1 = t0 + n; + smallints_to_fpr(b01, f, logn); + smallints_to_fpr(b00, g, logn); + smallints_to_fpr(b11, F, logn); + smallints_to_fpr(b10, G, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b01, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b00, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b11, logn); + PQCLEAN_FALCON1024_AVX2_FFT(b10, logn); + PQCLEAN_FALCON1024_AVX2_poly_neg(b01, logn); + PQCLEAN_FALCON1024_AVX2_poly_neg(b11, logn); + tx = t1 + n; + ty = tx + n; + + /* + * Get the lattice point corresponding to that tiny vector. + */ + memcpy(tx, t0, n * sizeof * t0); + memcpy(ty, t1, n * sizeof * t1); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(tx, b00, logn); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(ty, b10, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(tx, ty, logn); + memcpy(ty, t0, n * sizeof * t0); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(ty, b01, logn); + + memcpy(t0, tx, n * sizeof * tx); + PQCLEAN_FALCON1024_AVX2_poly_mul_fft(t1, b11, logn); + PQCLEAN_FALCON1024_AVX2_poly_add(t1, ty, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(t0, logn); + PQCLEAN_FALCON1024_AVX2_iFFT(t1, logn); + + s1tmp = (int16_t *)tx; + sqn = 0; + ng = 0; + for (u = 0; u < n; u ++) { + int32_t z; + + z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]); + sqn += (uint32_t)(z * z); + ng |= sqn; + s1tmp[u] = (int16_t)z; + } + sqn |= -(ng >> 31); + + /* + * With "normal" degrees (e.g. 
512 or 1024), it is very + * improbable that the computed vector is not short enough; + * however, it may happen in practice for the very reduced + * versions (e.g. degree 16 or below). In that case, the caller + * will loop, and we must not write anything into s2[] because + * s2[] may overlap with the hashed message hm[] and we need + * hm[] for the next iteration. + */ + s2tmp = (int16_t *)tmp; + for (u = 0; u < n; u ++) { + s2tmp[u] = (int16_t) - fpr_rint(t1[u]); + } + if (PQCLEAN_FALCON1024_AVX2_is_short_half(sqn, s2tmp, logn)) { + memcpy(s2, s2tmp, n * sizeof * s2); + memcpy(tmp, s1tmp, n * sizeof * s1tmp); + return 1; + } + return 0; +} + +/* + * Sample an integer value along a half-gaussian distribution centered + * on zero and standard deviation 1.8205, with a precision of 72 bits. + */ +int +PQCLEAN_FALCON1024_AVX2_gaussian0_sampler(prng *p) { + + /* + * High words. + */ + static const union { + uint16_t u16[16]; + __m256i ymm[1]; + } rhi15 = { + { + 0x51FB, 0x2A69, 0x113E, 0x0568, + 0x014A, 0x003B, 0x0008, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000 + } + }; + + static const union { + uint64_t u64[20]; + __m256i ymm[5]; + } rlo57 = { + { + 0x1F42ED3AC391802, 0x12B181F3F7DDB82, + 0x1CDD0934829C1FF, 0x1754377C7994AE4, + 0x1846CAEF33F1F6F, 0x14AC754ED74BD5F, + 0x024DD542B776AE4, 0x1A1FFDC65AD63DA, + 0x01F80D88A7B6428, 0x001C3FDB2040C69, + 0x00012CF24D031FB, 0x00000949F8B091F, + 0x0000003665DA998, 0x00000000EBF6EBB, + 0x0000000002F5D7E, 0x000000000007098, + 0x0000000000000C6, 0x000000000000001, + 0x000000000000000, 0x000000000000000 + } + }; + + uint64_t lo; + unsigned hi; + __m256i xhi, rhi, gthi, eqhi, eqm; + __m256i xlo, gtlo0, gtlo1, gtlo2, gtlo3, gtlo4; + __m128i t, zt; + int r; + + /* + * Get a 72-bit random value and split it into a low part + * (57 bits) and a high part (15 bits) + */ + lo = prng_get_u64(p); + hi = prng_get_u8(p); + hi = (hi << 7) | (unsigned)(lo >> 57); + lo &= 0x1FFFFFFFFFFFFFF; + + /* + * Broadcast 
the high part and compare it with the relevant + * values. We need both a "greater than" and an "equal" + * comparisons. + */ + xhi = _mm256_broadcastw_epi16(_mm_cvtsi32_si128((int32_t)hi)); + rhi = _mm256_loadu_si256(&rhi15.ymm[0]); + gthi = _mm256_cmpgt_epi16(rhi, xhi); + eqhi = _mm256_cmpeq_epi16(rhi, xhi); + + /* + * The result is the number of 72-bit values (among the list of 19) + * which are greater than the 72-bit random value. We first count + * all non-zero 16-bit elements in the first eight of gthi. Such + * elements have value -1 or 0, so we first negate them. + */ + t = _mm_srli_epi16(_mm256_castsi256_si128(gthi), 15); + zt = _mm_setzero_si128(); + t = _mm_hadd_epi16(t, zt); + t = _mm_hadd_epi16(t, zt); + t = _mm_hadd_epi16(t, zt); + r = _mm_cvtsi128_si32(t); + + /* + * We must look at the low bits for all values for which the + * high bits are an "equal" match; values 8-18 all have the + * same high bits (0). + * On 32-bit systems, 'lo' really is two registers, requiring + * some extra code. + */ + xlo = _mm256_broadcastq_epi64(_mm_cvtsi64_si128(*(int64_t *)&lo)); + gtlo0 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[0]), xlo); + gtlo1 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[1]), xlo); + gtlo2 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[2]), xlo); + gtlo3 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[3]), xlo); + gtlo4 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[4]), xlo); + + /* + * Keep only comparison results that correspond to the non-zero + * elements in eqhi. + */ + gtlo0 = _mm256_and_si256(gtlo0, _mm256_cvtepi16_epi64( + _mm256_castsi256_si128(eqhi))); + gtlo1 = _mm256_and_si256(gtlo1, _mm256_cvtepi16_epi64( + _mm256_castsi256_si128(_mm256_bsrli_epi128(eqhi, 8)))); + eqm = _mm256_permute4x64_epi64(eqhi, 0xFF); + gtlo2 = _mm256_and_si256(gtlo2, eqm); + gtlo3 = _mm256_and_si256(gtlo3, eqm); + gtlo4 = _mm256_and_si256(gtlo4, eqm); + + /* + * Add all values to count the total number of "-1" elements. 
+ * Since the first eight "high" words are all different, only + * one element (at most) in gtlo0:gtlo1 can be non-zero; however, + * if the high word of the random value is zero, then many + * elements of gtlo2:gtlo3:gtlo4 can be non-zero. + */ + gtlo0 = _mm256_or_si256(gtlo0, gtlo1); + gtlo0 = _mm256_add_epi64( + _mm256_add_epi64(gtlo0, gtlo2), + _mm256_add_epi64(gtlo3, gtlo4)); + t = _mm_add_epi64( + _mm256_castsi256_si128(gtlo0), + _mm256_extracti128_si256(gtlo0, 1)); + t = _mm_add_epi64(t, _mm_srli_si128(t, 8)); + r -= _mm_cvtsi128_si32(t); + + return r; + +} + +/* + * Sample a bit with probability exp(-x) for some x >= 0. + */ +static int +BerExp(prng *p, fpr x, fpr ccs) { + int s, i; + fpr r; + uint32_t sw, w; + uint64_t z; + + /* + * Reduce x modulo log(2): x = s*log(2) + r, with s an integer, + * and 0 <= r < log(2). Since x >= 0, we can use fpr_trunc(). + */ + s = (int)fpr_trunc(fpr_mul(x, fpr_inv_log2)); + r = fpr_sub(x, fpr_mul(fpr_of(s), fpr_log2)); + + /* + * It may happen (quite rarely) that s >= 64; if sigma = 1.2 + * (the minimum value for sigma), r = 0 and b = 1, then we get + * s >= 64 if the half-Gaussian produced a z >= 13, which happens + * with probability about 0.000000000230383991, which is + * approximatively equal to 2^(-32). In any case, if s >= 64, + * then BerExp will be non-zero with probability less than + * 2^(-64), so we can simply saturate s at 63. + */ + sw = (uint32_t)s; + sw ^= (sw ^ 63) & -((63 - sw) >> 31); + s = (int)sw; + + /* + * Compute exp(-r); we know that 0 <= r < log(2) at this point, so + * we can use fpr_expm_p63(), which yields a result scaled to 2^63. + * We scale it up to 2^64, then right-shift it by s bits because + * we really want exp(-x) = 2^(-s)*exp(-r). + * + * The "-1" operation makes sure that the value fits on 64 bits + * (i.e. if r = 0, we may get 2^64, and we prefer 2^64-1 in that + * case). The bias is negligible since fpr_expm_p63() only computes + * with 51 bits of precision or so. 
+ */ + z = ((fpr_expm_p63(r, ccs) << 1) - 1) >> s; + + /* + * Sample a bit with probability exp(-x). Since x = s*log(2) + r, + * exp(-x) = 2^-s * exp(-r), we compare lazily exp(-x) with the + * PRNG output to limit its consumption, the sign of the difference + * yields the expected result. + */ + i = 64; + do { + i -= 8; + w = prng_get_u8(p) - ((uint32_t)(z >> i) & 0xFF); + } while (!w && i > 0); + return (int)(w >> 31); +} + +/* + * The sampler produces a random integer that follows a discrete Gaussian + * distribution, centered on mu, and with standard deviation sigma. The + * provided parameter isigma is equal to 1/sigma. + * + * The value of sigma MUST lie between 1 and 2 (i.e. isigma lies between + * 0.5 and 1); in Falcon, sigma should always be between 1.2 and 1.9. + */ +int +PQCLEAN_FALCON1024_AVX2_sampler(void *ctx, fpr mu, fpr isigma) { + sampler_context *spc; + int s, z0, z, b; + fpr r, dss, ccs, x; + + spc = ctx; + + /* + * Center is mu. We compute mu = s + r where s is an integer + * and 0 <= r < 1. + */ + s = (int)fpr_floor(mu); + r = fpr_sub(mu, fpr_of(s)); + + /* + * dss = 1/(2*sigma^2) = 0.5*(isigma^2). + */ + dss = fpr_half(fpr_sqr(isigma)); + + /* + * ccs = sigma_min / sigma = sigma_min * isigma. + */ + ccs = fpr_mul(isigma, spc->sigma_min); + + /* + * We now need to sample on center r. + */ + for (;;) { + /* + * Sample z for a Gaussian distribution. Then get a + * random bit b to turn the sampling into a bimodal + * distribution: if b = 1, we use z+1, otherwise we + * use -z. We thus have two situations: + * + * - b = 1: z >= 1 and sampled against a Gaussian + * centered on 1. + * - b = 0: z <= 0 and sampled against a Gaussian + * centered on 0. + */ + z0 = PQCLEAN_FALCON1024_AVX2_gaussian0_sampler(&spc->p); + b = (int)prng_get_u8(&spc->p) & 1; + z = b + ((b << 1) - 1) * z0; + + /* + * Rejection sampling. We want a Gaussian centered on r; + * but we sampled against a Gaussian centered on b (0 or + * 1). 
But we know that z is always in the range where + * our sampling distribution is greater than the Gaussian + * distribution, so rejection works. + * + * We got z with distribution: + * G(z) = exp(-((z-b)^2)/(2*sigma0^2)) + * We target distribution: + * S(z) = exp(-((z-r)^2)/(2*sigma^2)) + * Rejection sampling works by keeping the value z with + * probability S(z)/G(z), and starting again otherwise. + * This requires S(z) <= G(z), which is the case here. + * Thus, we simply need to keep our z with probability: + * P = exp(-x) + * where: + * x = ((z-r)^2)/(2*sigma^2) - ((z-b)^2)/(2*sigma0^2) + * + * Here, we scale up the Bernouilli distribution, which + * makes rejection more probable, but makes rejection + * rate sufficiently decorrelated from the Gaussian + * center and standard deviation that the whole sampler + * can be said to be constant-time. + */ + x = fpr_mul(fpr_sqr(fpr_sub(fpr_of(z), r)), dss); + x = fpr_sub(x, fpr_mul(fpr_of(z0 * z0), fpr_inv_2sqrsigma0)); + if (BerExp(&spc->p, x, ccs)) { + /* + * Rejection sampling was centered on r, but the + * actual center is mu = s + r. + */ + return s + z; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_sign_tree(int16_t *sig, inner_shake256_context *rng, + const fpr *expanded_key, + const uint16_t *hm, unsigned logn, uint8_t *tmp) { + fpr *ftmp; + + ftmp = (fpr *)tmp; + for (;;) { + /* + * Signature produces short vectors s1 and s2. The + * signature is acceptable only if the aggregate vector + * s1,s2 is short; we must use the same bound as the + * verifier. + * + * If the signature is acceptable, then we return only s2 + * (the verifier recomputes s1 from s2, the hashed message, + * and the public key). + */ + sampler_context spc; + samplerZ samp; + void *samp_ctx; + + /* + * Normal sampling. We use a fast PRNG seeded from our + * SHAKE context ('rng'). 
+ */ + if (logn == 10) { + spc.sigma_min = fpr_sigma_min_10; + } else { + spc.sigma_min = fpr_sigma_min_9; + } + PQCLEAN_FALCON1024_AVX2_prng_init(&spc.p, rng); + samp = PQCLEAN_FALCON1024_AVX2_sampler; + samp_ctx = &spc; + + /* + * Do the actual signature. + */ + if (do_sign_tree(samp, samp_ctx, sig, + expanded_key, hm, logn, ftmp)) { + break; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_sign_dyn(int16_t *sig, inner_shake256_context *rng, + const int8_t *f, const int8_t *g, + const int8_t *F, const int8_t *G, + const uint16_t *hm, unsigned logn, uint8_t *tmp) { + fpr *ftmp; + + ftmp = (fpr *)tmp; + for (;;) { + /* + * Signature produces short vectors s1 and s2. The + * signature is acceptable only if the aggregate vector + * s1,s2 is short; we must use the same bound as the + * verifier. + * + * If the signature is acceptable, then we return only s2 + * (the verifier recomputes s1 from s2, the hashed message, + * and the public key). + */ + sampler_context spc; + samplerZ samp; + void *samp_ctx; + + /* + * Normal sampling. We use a fast PRNG seeded from our + * SHAKE context ('rng'). + */ + if (logn == 10) { + spc.sigma_min = fpr_sigma_min_10; + } else { + spc.sigma_min = fpr_sigma_min_9; + } + PQCLEAN_FALCON1024_AVX2_prng_init(&spc.p, rng); + samp = PQCLEAN_FALCON1024_AVX2_sampler; + samp_ctx = &spc; + + /* + * Do the actual signature. + */ + if (do_sign_dyn(samp, samp_ctx, sig, + f, g, F, G, hm, logn, ftmp)) { + break; + } + } +} diff --git a/crypto_sign/falcon-1024/avx2/vrfy.c b/crypto_sign/falcon-1024/avx2/vrfy.c new file mode 100644 index 00000000..cf3e8e65 --- /dev/null +++ b/crypto_sign/falcon-1024/avx2/vrfy.c @@ -0,0 +1,853 @@ +#include "inner.h" + +/* + * Falcon signature verification. 
+ * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* ===================================================================== */ +/* + * Constants for NTT. + * + * n = 2^logn (2 <= n <= 1024) + * phi = X^n + 1 + * q = 12289 + * q0i = -1/q mod 2^16 + * R = 2^16 mod q + * R2 = 2^32 mod q + */ + +#define Q 12289 +#define Q0I 12287 +#define R 4091 +#define R2 10952 + +/* + * Table for NTT, binary case: + * GMb[x] = R*(g^rev(x)) mod q + * where g = 7 (it is a 2048-th primitive root of 1 modulo q) + * and rev() is the bit-reversal function over 10 bits. 
+ */ +static const uint16_t GMb[] = { + 4091, 7888, 11060, 11208, 6960, 4342, 6275, 9759, + 1591, 6399, 9477, 5266, 586, 5825, 7538, 9710, + 1134, 6407, 1711, 965, 7099, 7674, 3743, 6442, + 10414, 8100, 1885, 1688, 1364, 10329, 10164, 9180, + 12210, 6240, 997, 117, 4783, 4407, 1549, 7072, + 2829, 6458, 4431, 8877, 7144, 2564, 5664, 4042, + 12189, 432, 10751, 1237, 7610, 1534, 3983, 7863, + 2181, 6308, 8720, 6570, 4843, 1690, 14, 3872, + 5569, 9368, 12163, 2019, 7543, 2315, 4673, 7340, + 1553, 1156, 8401, 11389, 1020, 2967, 10772, 7045, + 3316, 11236, 5285, 11578, 10637, 10086, 9493, 6180, + 9277, 6130, 3323, 883, 10469, 489, 1502, 2851, + 11061, 9729, 2742, 12241, 4970, 10481, 10078, 1195, + 730, 1762, 3854, 2030, 5892, 10922, 9020, 5274, + 9179, 3604, 3782, 10206, 3180, 3467, 4668, 2446, + 7613, 9386, 834, 7703, 6836, 3403, 5351, 12276, + 3580, 1739, 10820, 9787, 10209, 4070, 12250, 8525, + 10401, 2749, 7338, 10574, 6040, 943, 9330, 1477, + 6865, 9668, 3585, 6633, 12145, 4063, 3684, 7680, + 8188, 6902, 3533, 9807, 6090, 727, 10099, 7003, + 6945, 1949, 9731, 10559, 6057, 378, 7871, 8763, + 8901, 9229, 8846, 4551, 9589, 11664, 7630, 8821, + 5680, 4956, 6251, 8388, 10156, 8723, 2341, 3159, + 1467, 5460, 8553, 7783, 2649, 2320, 9036, 6188, + 737, 3698, 4699, 5753, 9046, 3687, 16, 914, + 5186, 10531, 4552, 1964, 3509, 8436, 7516, 5381, + 10733, 3281, 7037, 1060, 2895, 7156, 8887, 5357, + 6409, 8197, 2962, 6375, 5064, 6634, 5625, 278, + 932, 10229, 8927, 7642, 351, 9298, 237, 5858, + 7692, 3146, 12126, 7586, 2053, 11285, 3802, 5204, + 4602, 1748, 11300, 340, 3711, 4614, 300, 10993, + 5070, 10049, 11616, 12247, 7421, 10707, 5746, 5654, + 3835, 5553, 1224, 8476, 9237, 3845, 250, 11209, + 4225, 6326, 9680, 12254, 4136, 2778, 692, 8808, + 6410, 6718, 10105, 10418, 3759, 7356, 11361, 8433, + 6437, 3652, 6342, 8978, 5391, 2272, 6476, 7416, + 8418, 10824, 11986, 5733, 876, 7030, 2167, 2436, + 3442, 9217, 8206, 4858, 5964, 2746, 7178, 1434, + 7389, 8879, 10661, 11457, 4220, 
1432, 10832, 4328, + 8557, 1867, 9454, 2416, 3816, 9076, 686, 5393, + 2523, 4339, 6115, 619, 937, 2834, 7775, 3279, + 2363, 7488, 6112, 5056, 824, 10204, 11690, 1113, + 2727, 9848, 896, 2028, 5075, 2654, 10464, 7884, + 12169, 5434, 3070, 6400, 9132, 11672, 12153, 4520, + 1273, 9739, 11468, 9937, 10039, 9720, 2262, 9399, + 11192, 315, 4511, 1158, 6061, 6751, 11865, 357, + 7367, 4550, 983, 8534, 8352, 10126, 7530, 9253, + 4367, 5221, 3999, 8777, 3161, 6990, 4130, 11652, + 3374, 11477, 1753, 292, 8681, 2806, 10378, 12188, + 5800, 11811, 3181, 1988, 1024, 9340, 2477, 10928, + 4582, 6750, 3619, 5503, 5233, 2463, 8470, 7650, + 7964, 6395, 1071, 1272, 3474, 11045, 3291, 11344, + 8502, 9478, 9837, 1253, 1857, 6233, 4720, 11561, + 6034, 9817, 3339, 1797, 2879, 6242, 5200, 2114, + 7962, 9353, 11363, 5475, 6084, 9601, 4108, 7323, + 10438, 9471, 1271, 408, 6911, 3079, 360, 8276, + 11535, 9156, 9049, 11539, 850, 8617, 784, 7919, + 8334, 12170, 1846, 10213, 12184, 7827, 11903, 5600, + 9779, 1012, 721, 2784, 6676, 6552, 5348, 4424, + 6816, 8405, 9959, 5150, 2356, 5552, 5267, 1333, + 8801, 9661, 7308, 5788, 4910, 909, 11613, 4395, + 8238, 6686, 4302, 3044, 2285, 12249, 1963, 9216, + 4296, 11918, 695, 4371, 9793, 4884, 2411, 10230, + 2650, 841, 3890, 10231, 7248, 8505, 11196, 6688, + 4059, 6060, 3686, 4722, 11853, 5816, 7058, 6868, + 11137, 7926, 4894, 12284, 4102, 3908, 3610, 6525, + 7938, 7982, 11977, 6755, 537, 4562, 1623, 8227, + 11453, 7544, 906, 11816, 9548, 10858, 9703, 2815, + 11736, 6813, 6979, 819, 8903, 6271, 10843, 348, + 7514, 8339, 6439, 694, 852, 5659, 2781, 3716, + 11589, 3024, 1523, 8659, 4114, 10738, 3303, 5885, + 2978, 7289, 11884, 9123, 9323, 11830, 98, 2526, + 2116, 4131, 11407, 1844, 3645, 3916, 8133, 2224, + 10871, 8092, 9651, 5989, 7140, 8480, 1670, 159, + 10923, 4918, 128, 7312, 725, 9157, 5006, 6393, + 3494, 6043, 10972, 6181, 11838, 3423, 10514, 7668, + 3693, 6658, 6905, 11953, 10212, 11922, 9101, 8365, + 5110, 45, 2400, 1921, 4377, 2720, 1695, 51, + 
2808, 650, 1896, 9997, 9971, 11980, 8098, 4833, + 4135, 4257, 5838, 4765, 10985, 11532, 590, 12198, + 482, 12173, 2006, 7064, 10018, 3912, 12016, 10519, + 11362, 6954, 2210, 284, 5413, 6601, 3865, 10339, + 11188, 6231, 517, 9564, 11281, 3863, 1210, 4604, + 8160, 11447, 153, 7204, 5763, 5089, 9248, 12154, + 11748, 1354, 6672, 179, 5532, 2646, 5941, 12185, + 862, 3158, 477, 7279, 5678, 7914, 4254, 302, + 2893, 10114, 6890, 9560, 9647, 11905, 4098, 9824, + 10269, 1353, 10715, 5325, 6254, 3951, 1807, 6449, + 5159, 1308, 8315, 3404, 1877, 1231, 112, 6398, + 11724, 12272, 7286, 1459, 12274, 9896, 3456, 800, + 1397, 10678, 103, 7420, 7976, 936, 764, 632, + 7996, 8223, 8445, 7758, 10870, 9571, 2508, 1946, + 6524, 10158, 1044, 4338, 2457, 3641, 1659, 4139, + 4688, 9733, 11148, 3946, 2082, 5261, 2036, 11850, + 7636, 12236, 5366, 2380, 1399, 7720, 2100, 3217, + 10912, 8898, 7578, 11995, 2791, 1215, 3355, 2711, + 2267, 2004, 8568, 10176, 3214, 2337, 1750, 4729, + 4997, 7415, 6315, 12044, 4374, 7157, 4844, 211, + 8003, 10159, 9290, 11481, 1735, 2336, 5793, 9875, + 8192, 986, 7527, 1401, 870, 3615, 8465, 2756, + 9770, 2034, 10168, 3264, 6132, 54, 2880, 4763, + 11805, 3074, 8286, 9428, 4881, 6933, 1090, 10038, + 2567, 708, 893, 6465, 4962, 10024, 2090, 5718, + 10743, 780, 4733, 4623, 2134, 2087, 4802, 884, + 5372, 5795, 5938, 4333, 6559, 7549, 5269, 10664, + 4252, 3260, 5917, 10814, 5768, 9983, 8096, 7791, + 6800, 7491, 6272, 1907, 10947, 6289, 11803, 6032, + 11449, 1171, 9201, 7933, 2479, 7970, 11337, 7062, + 8911, 6728, 6542, 8114, 8828, 6595, 3545, 4348, + 4610, 2205, 6999, 8106, 5560, 10390, 9321, 2499, + 2413, 7272, 6881, 10582, 9308, 9437, 3554, 3326, + 5991, 11969, 3415, 12283, 9838, 12063, 4332, 7830, + 11329, 6605, 12271, 2044, 11611, 7353, 11201, 11582, + 3733, 8943, 9978, 1627, 7168, 3935, 5050, 2762, + 7496, 10383, 755, 1654, 12053, 4952, 10134, 4394, + 6592, 7898, 7497, 8904, 12029, 3581, 10748, 5674, + 10358, 4901, 7414, 8771, 710, 6764, 8462, 7193, + 5371, 7274, 
11084, 290, 7864, 6827, 11822, 2509, + 6578, 4026, 5807, 1458, 5721, 5762, 4178, 2105, + 11621, 4852, 8897, 2856, 11510, 9264, 2520, 8776, + 7011, 2647, 1898, 7039, 5950, 11163, 5488, 6277, + 9182, 11456, 633, 10046, 11554, 5633, 9587, 2333, + 7008, 7084, 5047, 7199, 9865, 8997, 569, 6390, + 10845, 9679, 8268, 11472, 4203, 1997, 2, 9331, + 162, 6182, 2000, 3649, 9792, 6363, 7557, 6187, + 8510, 9935, 5536, 9019, 3706, 12009, 1452, 3067, + 5494, 9692, 4865, 6019, 7106, 9610, 4588, 10165, + 6261, 5887, 2652, 10172, 1580, 10379, 4638, 9949 +}; + +/* + * Table for inverse NTT, binary case: + * iGMb[x] = R*((1/g)^rev(x)) mod q + * Since g = 7, 1/g = 8778 mod 12289. + */ +static const uint16_t iGMb[] = { + 4091, 4401, 1081, 1229, 2530, 6014, 7947, 5329, + 2579, 4751, 6464, 11703, 7023, 2812, 5890, 10698, + 3109, 2125, 1960, 10925, 10601, 10404, 4189, 1875, + 5847, 8546, 4615, 5190, 11324, 10578, 5882, 11155, + 8417, 12275, 10599, 7446, 5719, 3569, 5981, 10108, + 4426, 8306, 10755, 4679, 11052, 1538, 11857, 100, + 8247, 6625, 9725, 5145, 3412, 7858, 5831, 9460, + 5217, 10740, 7882, 7506, 12172, 11292, 6049, 79, + 13, 6938, 8886, 5453, 4586, 11455, 2903, 4676, + 9843, 7621, 8822, 9109, 2083, 8507, 8685, 3110, + 7015, 3269, 1367, 6397, 10259, 8435, 10527, 11559, + 11094, 2211, 1808, 7319, 48, 9547, 2560, 1228, + 9438, 10787, 11800, 1820, 11406, 8966, 6159, 3012, + 6109, 2796, 2203, 1652, 711, 7004, 1053, 8973, + 5244, 1517, 9322, 11269, 900, 3888, 11133, 10736, + 4949, 7616, 9974, 4746, 10270, 126, 2921, 6720, + 6635, 6543, 1582, 4868, 42, 673, 2240, 7219, + 1296, 11989, 7675, 8578, 11949, 989, 10541, 7687, + 7085, 8487, 1004, 10236, 4703, 163, 9143, 4597, + 6431, 12052, 2991, 11938, 4647, 3362, 2060, 11357, + 12011, 6664, 5655, 7225, 5914, 9327, 4092, 5880, + 6932, 3402, 5133, 9394, 11229, 5252, 9008, 1556, + 6908, 4773, 3853, 8780, 10325, 7737, 1758, 7103, + 11375, 12273, 8602, 3243, 6536, 7590, 8591, 11552, + 6101, 3253, 9969, 9640, 4506, 3736, 6829, 10822, + 9130, 9948, 
3566, 2133, 3901, 6038, 7333, 6609, + 3468, 4659, 625, 2700, 7738, 3443, 3060, 3388, + 3526, 4418, 11911, 6232, 1730, 2558, 10340, 5344, + 5286, 2190, 11562, 6199, 2482, 8756, 5387, 4101, + 4609, 8605, 8226, 144, 5656, 8704, 2621, 5424, + 10812, 2959, 11346, 6249, 1715, 4951, 9540, 1888, + 3764, 39, 8219, 2080, 2502, 1469, 10550, 8709, + 5601, 1093, 3784, 5041, 2058, 8399, 11448, 9639, + 2059, 9878, 7405, 2496, 7918, 11594, 371, 7993, + 3073, 10326, 40, 10004, 9245, 7987, 5603, 4051, + 7894, 676, 11380, 7379, 6501, 4981, 2628, 3488, + 10956, 7022, 6737, 9933, 7139, 2330, 3884, 5473, + 7865, 6941, 5737, 5613, 9505, 11568, 11277, 2510, + 6689, 386, 4462, 105, 2076, 10443, 119, 3955, + 4370, 11505, 3672, 11439, 750, 3240, 3133, 754, + 4013, 11929, 9210, 5378, 11881, 11018, 2818, 1851, + 4966, 8181, 2688, 6205, 6814, 926, 2936, 4327, + 10175, 7089, 6047, 9410, 10492, 8950, 2472, 6255, + 728, 7569, 6056, 10432, 11036, 2452, 2811, 3787, + 945, 8998, 1244, 8815, 11017, 11218, 5894, 4325, + 4639, 3819, 9826, 7056, 6786, 8670, 5539, 7707, + 1361, 9812, 2949, 11265, 10301, 9108, 478, 6489, + 101, 1911, 9483, 3608, 11997, 10536, 812, 8915, + 637, 8159, 5299, 9128, 3512, 8290, 7068, 7922, + 3036, 4759, 2163, 3937, 3755, 11306, 7739, 4922, + 11932, 424, 5538, 6228, 11131, 7778, 11974, 1097, + 2890, 10027, 2569, 2250, 2352, 821, 2550, 11016, + 7769, 136, 617, 3157, 5889, 9219, 6855, 120, + 4405, 1825, 9635, 7214, 10261, 11393, 2441, 9562, + 11176, 599, 2085, 11465, 7233, 6177, 4801, 9926, + 9010, 4514, 9455, 11352, 11670, 6174, 7950, 9766, + 6896, 11603, 3213, 8473, 9873, 2835, 10422, 3732, + 7961, 1457, 10857, 8069, 832, 1628, 3410, 4900, + 10855, 5111, 9543, 6325, 7431, 4083, 3072, 8847, + 9853, 10122, 5259, 11413, 6556, 303, 1465, 3871, + 4873, 5813, 10017, 6898, 3311, 5947, 8637, 5852, + 3856, 928, 4933, 8530, 1871, 2184, 5571, 5879, + 3481, 11597, 9511, 8153, 35, 2609, 5963, 8064, + 1080, 12039, 8444, 3052, 3813, 11065, 6736, 8454, + 2340, 7651, 1910, 10709, 2117, 9637, 
6402, 6028, + 2124, 7701, 2679, 5183, 6270, 7424, 2597, 6795, + 9222, 10837, 280, 8583, 3270, 6753, 2354, 3779, + 6102, 4732, 5926, 2497, 8640, 10289, 6107, 12127, + 2958, 12287, 10292, 8086, 817, 4021, 2610, 1444, + 5899, 11720, 3292, 2424, 5090, 7242, 5205, 5281, + 9956, 2702, 6656, 735, 2243, 11656, 833, 3107, + 6012, 6801, 1126, 6339, 5250, 10391, 9642, 5278, + 3513, 9769, 3025, 779, 9433, 3392, 7437, 668, + 10184, 8111, 6527, 6568, 10831, 6482, 8263, 5711, + 9780, 467, 5462, 4425, 11999, 1205, 5015, 6918, + 5096, 3827, 5525, 11579, 3518, 4875, 7388, 1931, + 6615, 1541, 8708, 260, 3385, 4792, 4391, 5697, + 7895, 2155, 7337, 236, 10635, 11534, 1906, 4793, + 9527, 7239, 8354, 5121, 10662, 2311, 3346, 8556, + 707, 1088, 4936, 678, 10245, 18, 5684, 960, + 4459, 7957, 226, 2451, 6, 8874, 320, 6298, + 8963, 8735, 2852, 2981, 1707, 5408, 5017, 9876, + 9790, 2968, 1899, 6729, 4183, 5290, 10084, 7679, + 7941, 8744, 5694, 3461, 4175, 5747, 5561, 3378, + 5227, 952, 4319, 9810, 4356, 3088, 11118, 840, + 6257, 486, 6000, 1342, 10382, 6017, 4798, 5489, + 4498, 4193, 2306, 6521, 1475, 6372, 9029, 8037, + 1625, 7020, 4740, 5730, 7956, 6351, 6494, 6917, + 11405, 7487, 10202, 10155, 7666, 7556, 11509, 1546, + 6571, 10199, 2265, 7327, 5824, 11396, 11581, 9722, + 2251, 11199, 5356, 7408, 2861, 4003, 9215, 484, + 7526, 9409, 12235, 6157, 9025, 2121, 10255, 2519, + 9533, 3824, 8674, 11419, 10888, 4762, 11303, 4097, + 2414, 6496, 9953, 10554, 808, 2999, 2130, 4286, + 12078, 7445, 5132, 7915, 245, 5974, 4874, 7292, + 7560, 10539, 9952, 9075, 2113, 3721, 10285, 10022, + 9578, 8934, 11074, 9498, 294, 4711, 3391, 1377, + 9072, 10189, 4569, 10890, 9909, 6923, 53, 4653, + 439, 10253, 7028, 10207, 8343, 1141, 2556, 7601, + 8150, 10630, 8648, 9832, 7951, 11245, 2131, 5765, + 10343, 9781, 2718, 1419, 4531, 3844, 4066, 4293, + 11657, 11525, 11353, 4313, 4869, 12186, 1611, 10892, + 11489, 8833, 2393, 15, 10830, 5003, 17, 565, + 5891, 12177, 11058, 10412, 8885, 3974, 10981, 7130, + 5840, 10482, 
8338, 6035, 6964, 1574, 10936, 2020, + 2465, 8191, 384, 2642, 2729, 5399, 2175, 9396, + 11987, 8035, 4375, 6611, 5010, 11812, 9131, 11427, + 104, 6348, 9643, 6757, 12110, 5617, 10935, 541, + 135, 3041, 7200, 6526, 5085, 12136, 842, 4129, + 7685, 11079, 8426, 1008, 2725, 11772, 6058, 1101, + 1950, 8424, 5688, 6876, 12005, 10079, 5335, 927, + 1770, 273, 8377, 2271, 5225, 10283, 116, 11807, + 91, 11699, 757, 1304, 7524, 6451, 8032, 8154, + 7456, 4191, 309, 2318, 2292, 10393, 11639, 9481, + 12238, 10594, 9569, 7912, 10368, 9889, 12244, 7179, + 3924, 3188, 367, 2077, 336, 5384, 5631, 8596, + 4621, 1775, 8866, 451, 6108, 1317, 6246, 8795, + 5896, 7283, 3132, 11564, 4977, 12161, 7371, 1366, + 12130, 10619, 3809, 5149, 6300, 2638, 4197, 1418, + 10065, 4156, 8373, 8644, 10445, 882, 8158, 10173, + 9763, 12191, 459, 2966, 3166, 405, 5000, 9311, + 6404, 8986, 1551, 8175, 3630, 10766, 9265, 700, + 8573, 9508, 6630, 11437, 11595, 5850, 3950, 4775, + 11941, 1446, 6018, 3386, 11470, 5310, 5476, 553, + 9474, 2586, 1431, 2741, 473, 11383, 4745, 836, + 4062, 10666, 7727, 11752, 5534, 312, 4307, 4351, + 5764, 8679, 8381, 8187, 5, 7395, 4363, 1152, + 5421, 5231, 6473, 436, 7567, 8603, 6229, 8230 +}; + +/* + * Reduce a small signed integer modulo q. The source integer MUST + * be between -q/2 and +q/2. + */ +static inline uint32_t +mq_conv_small(int x) { + /* + * If x < 0, the cast to uint32_t will set the high bit to 1. + */ + uint32_t y; + + y = (uint32_t)x; + y += Q & -(y >> 31); + return y; +} + +/* + * Addition modulo q. Operands must be in the 0..q-1 range. + */ +static inline uint32_t +mq_add(uint32_t x, uint32_t y) { + /* + * We compute x + y - q. If the result is negative, then the + * high bit will be set, and 'd >> 31' will be equal to 1; + * thus '-(d >> 31)' will be an all-one pattern. Otherwise, + * it will be an all-zero pattern. In other words, this + * implements a conditional addition of q. 
+ */ + uint32_t d; + + d = x + y - Q; + d += Q & -(d >> 31); + return d; +} + +/* + * Subtraction modulo q. Operands must be in the 0..q-1 range. + */ +static inline uint32_t +mq_sub(uint32_t x, uint32_t y) { + /* + * As in mq_add(), we use a conditional addition to ensure the + * result is in the 0..q-1 range. + */ + uint32_t d; + + d = x - y; + d += Q & -(d >> 31); + return d; +} + +/* + * Division by 2 modulo q. Operand must be in the 0..q-1 range. + */ +static inline uint32_t +mq_rshift1(uint32_t x) { + x += Q & -(x & 1); + return (x >> 1); +} + +/* + * Montgomery multiplication modulo q. If we set R = 2^16 mod q, then + * this function computes: x * y / R mod q + * Operands must be in the 0..q-1 range. + */ +static inline uint32_t +mq_montymul(uint32_t x, uint32_t y) { + uint32_t z, w; + + /* + * We compute x*y + k*q with a value of k chosen so that the 16 + * low bits of the result are 0. We can then shift the value. + * After the shift, result may still be larger than q, but it + * will be lower than 2*q, so a conditional subtraction works. + */ + + z = x * y; + w = ((z * Q0I) & 0xFFFF) * Q; + + /* + * When adding z and w, the result will have its low 16 bits + * equal to 0. Since x, y and z are lower than q, the sum will + * be no more than (2^15 - 1) * q + (q - 1)^2, which will + * fit on 29 bits. + */ + z = (z + w) >> 16; + + /* + * After the shift, analysis shows that the value will be less + * than 2q. We do a subtraction then conditional subtraction to + * ensure the result is in the expected range. + */ + z -= Q; + z += Q & -(z >> 31); + return z; +} + +/* + * Montgomery squaring (computes (x^2)/R). + */ +static inline uint32_t +mq_montysqr(uint32_t x) { + return mq_montymul(x, x); +} + +/* + * Divide x by y modulo q = 12289. + */ +static inline uint32_t +mq_div_12289(uint32_t x, uint32_t y) { + /* + * We invert y by computing y^(q-2) mod q. 
+ * + * We use the following addition chain for exponent e = 12287: + * + * e0 = 1 + * e1 = 2 * e0 = 2 + * e2 = e1 + e0 = 3 + * e3 = e2 + e1 = 5 + * e4 = 2 * e3 = 10 + * e5 = 2 * e4 = 20 + * e6 = 2 * e5 = 40 + * e7 = 2 * e6 = 80 + * e8 = 2 * e7 = 160 + * e9 = e8 + e2 = 163 + * e10 = e9 + e8 = 323 + * e11 = 2 * e10 = 646 + * e12 = 2 * e11 = 1292 + * e13 = e12 + e9 = 1455 + * e14 = 2 * e13 = 2910 + * e15 = 2 * e14 = 5820 + * e16 = e15 + e10 = 6143 + * e17 = 2 * e16 = 12286 + * e18 = e17 + e0 = 12287 + * + * Additions on exponents are converted to Montgomery + * multiplications. We define all intermediate results as so + * many local variables, and let the C compiler work out which + * must be kept around. + */ + uint32_t y0, y1, y2, y3, y4, y5, y6, y7, y8, y9; + uint32_t y10, y11, y12, y13, y14, y15, y16, y17, y18; + + y0 = mq_montymul(y, R2); + y1 = mq_montysqr(y0); + y2 = mq_montymul(y1, y0); + y3 = mq_montymul(y2, y1); + y4 = mq_montysqr(y3); + y5 = mq_montysqr(y4); + y6 = mq_montysqr(y5); + y7 = mq_montysqr(y6); + y8 = mq_montysqr(y7); + y9 = mq_montymul(y8, y2); + y10 = mq_montymul(y9, y8); + y11 = mq_montysqr(y10); + y12 = mq_montysqr(y11); + y13 = mq_montymul(y12, y9); + y14 = mq_montysqr(y13); + y15 = mq_montysqr(y14); + y16 = mq_montymul(y15, y10); + y17 = mq_montysqr(y16); + y18 = mq_montymul(y17, y0); + + /* + * Final multiplication with x, which is not in Montgomery + * representation, computes the correct division result. + */ + return mq_montymul(y18, x); +} + +/* + * Compute NTT on a ring element. 
+ */ +static void +mq_NTT(uint16_t *a, unsigned logn) { + size_t n, t, m; + + n = (size_t)1 << logn; + t = n; + for (m = 1; m < n; m <<= 1) { + size_t ht, i, j1; + + ht = t >> 1; + for (i = 0, j1 = 0; i < m; i ++, j1 += t) { + size_t j, j2; + uint32_t s; + + s = GMb[m + i]; + j2 = j1 + ht; + for (j = j1; j < j2; j ++) { + uint32_t u, v; + + u = a[j]; + v = mq_montymul(a[j + ht], s); + a[j] = (uint16_t)mq_add(u, v); + a[j + ht] = (uint16_t)mq_sub(u, v); + } + } + t = ht; + } +} + +/* + * Compute the inverse NTT on a ring element, binary case. + */ +static void +mq_iNTT(uint16_t *a, unsigned logn) { + size_t n, t, m; + uint32_t ni; + + n = (size_t)1 << logn; + t = 1; + m = n; + while (m > 1) { + size_t hm, dt, i, j1; + + hm = m >> 1; + dt = t << 1; + for (i = 0, j1 = 0; i < hm; i ++, j1 += dt) { + size_t j, j2; + uint32_t s; + + j2 = j1 + t; + s = iGMb[hm + i]; + for (j = j1; j < j2; j ++) { + uint32_t u, v, w; + + u = a[j]; + v = a[j + t]; + a[j] = (uint16_t)mq_add(u, v); + w = mq_sub(u, v); + a[j + t] = (uint16_t) + mq_montymul(w, s); + } + } + t = dt; + m = hm; + } + + /* + * To complete the inverse NTT, we must now divide all values by + * n (the vector size). We thus need the inverse of n, i.e. we + * need to divide 1 by 2 logn times. But we also want it in + * Montgomery representation, i.e. we also want to multiply it + * by R = 2^16. In the common case, this should be a simple right + * shift. The loop below is generic and works also in corner cases; + * its computation time is negligible. + */ + ni = R; + for (m = n; m > 1; m >>= 1) { + ni = mq_rshift1(ni); + } + for (m = 0; m < n; m ++) { + a[m] = (uint16_t)mq_montymul(a[m], ni); + } +} + +/* + * Convert a polynomial (mod q) to Montgomery representation. 
+ */ +static void +mq_poly_tomonty(uint16_t *f, unsigned logn) { + size_t u, n; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + f[u] = (uint16_t)mq_montymul(f[u], R2); + } +} + +/* + * Multiply two polynomials together (NTT representation, and using + * a Montgomery multiplication). Result f*g is written over f. + */ +static void +mq_poly_montymul_ntt(uint16_t *f, const uint16_t *g, unsigned logn) { + size_t u, n; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + f[u] = (uint16_t)mq_montymul(f[u], g[u]); + } +} + +/* + * Subtract polynomial g from polynomial f. + */ +static void +mq_poly_sub(uint16_t *f, const uint16_t *g, unsigned logn) { + size_t u, n; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + f[u] = (uint16_t)mq_sub(f[u], g[u]); + } +} + +/* ===================================================================== */ + +/* see inner.h */ +void +PQCLEAN_FALCON1024_AVX2_to_ntt_monty(uint16_t *h, unsigned logn) { + mq_NTT(h, logn); + mq_poly_tomonty(h, logn); +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_verify_raw(const uint16_t *c0, const int16_t *s2, + const uint16_t *h, unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + + n = (size_t)1 << logn; + tt = (uint16_t *)tmp; + + /* + * Reduce s2 elements modulo q ([0..q-1] range). + */ + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u]; + w += Q & -(w >> 31); + tt[u] = (uint16_t)w; + } + + /* + * Compute -s1 = s2*h - c0 mod phi mod q (in tt[]). + */ + mq_NTT(tt, logn); + mq_poly_montymul_ntt(tt, h, logn); + mq_iNTT(tt, logn); + mq_poly_sub(tt, c0, logn); + + /* + * Normalize -s1 elements into the [-q/2..q/2] range. + */ + for (u = 0; u < n; u ++) { + int32_t w; + + w = (int32_t)tt[u]; + w -= (int32_t)(Q & -(((Q >> 1) - (uint32_t)w) >> 31)); + ((int16_t *)tt)[u] = (int16_t)w; + } + + /* + * Signature is valid if and only if the aggregate (-s1,s2) vector + * is short enough. 
+ */ + return PQCLEAN_FALCON1024_AVX2_is_short((int16_t *)tt, s2, logn); +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_compute_public(uint16_t *h, + const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + + n = (size_t)1 << logn; + tt = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + tt[u] = (uint16_t)mq_conv_small(f[u]); + h[u] = (uint16_t)mq_conv_small(g[u]); + } + mq_NTT(h, logn); + mq_NTT(tt, logn); + for (u = 0; u < n; u ++) { + if (tt[u] == 0) { + return 0; + } + h[u] = (uint16_t)mq_div_12289(h[u], tt[u]); + } + mq_iNTT(h, logn); + return 1; +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_complete_private(int8_t *G, + const int8_t *f, const int8_t *g, const int8_t *F, + unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *t1, *t2; + + n = (size_t)1 << logn; + t1 = (uint16_t *)tmp; + t2 = t1 + n; + for (u = 0; u < n; u ++) { + t1[u] = (uint16_t)mq_conv_small(g[u]); + t2[u] = (uint16_t)mq_conv_small(F[u]); + } + mq_NTT(t1, logn); + mq_NTT(t2, logn); + mq_poly_tomonty(t1, logn); + mq_poly_montymul_ntt(t1, t2, logn); + for (u = 0; u < n; u ++) { + t2[u] = (uint16_t)mq_conv_small(f[u]); + } + mq_NTT(t2, logn); + for (u = 0; u < n; u ++) { + if (t2[u] == 0) { + return 0; + } + t1[u] = (uint16_t)mq_div_12289(t1[u], t2[u]); + } + mq_iNTT(t1, logn); + for (u = 0; u < n; u ++) { + uint32_t w; + int32_t gi; + + w = t1[u]; + w -= (Q & ~ -((w - (Q >> 1)) >> 31)); + gi = *(int32_t *)&w; + if (gi < -127 || gi > +127) { + return 0; + } + G[u] = (int8_t)gi; + } + return 1; +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_is_invertible( + const int16_t *s2, unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + uint32_t r; + + n = (size_t)1 << logn; + tt = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u]; + w += Q & -(w >> 31); + tt[u] = (uint16_t)w; + } + mq_NTT(tt, logn); + r = 0; + for (u = 0; u < n; u ++) { + r |= (uint32_t)(tt[u] - 1); + } + return (int)(1u - (r 
>> 31)); +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_verify_recover(uint16_t *h, + const uint16_t *c0, const int16_t *s1, const int16_t *s2, + unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + uint32_t r; + + n = (size_t)1 << logn; + + /* + * Reduce elements of s1 and s2 modulo q; then write s2 into tt[] + * and c0 - s1 into h[]. + */ + tt = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u]; + w += Q & -(w >> 31); + tt[u] = (uint16_t)w; + + w = (uint32_t)s1[u]; + w += Q & -(w >> 31); + w = mq_sub(c0[u], w); + h[u] = (uint16_t)w; + } + + /* + * Compute h = (c0 - s1) / s2. If one of the coefficients of s2 + * is zero (in NTT representation) then the operation fails. We + * keep that information into a flag so that we do not deviate + * from strict constant-time processing; if all coefficients of + * s2 are non-zero, then the high bit of r will be zero. + */ + mq_NTT(tt, logn); + mq_NTT(h, logn); + r = 0; + for (u = 0; u < n; u ++) { + r |= (uint32_t)(tt[u] - 1); + h[u] = (uint16_t)mq_div_12289(h[u], tt[u]); + } + mq_iNTT(h, logn); + + /* + * Signature is acceptable if and only if it is short enough, + * and s2 was invertible mod phi mod q. The caller must still + * check that the rebuilt public key matches the expected + * value (e.g. through a hash). 
+ */ + r = ~r & (uint32_t) - PQCLEAN_FALCON1024_AVX2_is_short(s1, s2, logn); + return (int)(r >> 31); +} + +/* see inner.h */ +int +PQCLEAN_FALCON1024_AVX2_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp) { + uint16_t *s2; + size_t u, n; + uint32_t r; + + n = (size_t)1 << logn; + s2 = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)sig[u]; + w += Q & -(w >> 31); + s2[u] = (uint16_t)w; + } + mq_NTT(s2, logn); + r = 0; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u] - 1u; + r += (w >> 31); + } + return (int)r; +} diff --git a/crypto_sign/falcon-1024/clean/LICENSE b/crypto_sign/falcon-1024/clean/LICENSE index bf2aeb7d..12c7b56c 100644 --- a/crypto_sign/falcon-1024/clean/LICENSE +++ b/crypto_sign/falcon-1024/clean/LICENSE @@ -1,3 +1,4 @@ +\ MIT License Copyright (c) 2017-2019 Falcon Project @@ -20,3 +21,4 @@ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/crypto_sign/falcon-1024/clean/Makefile b/crypto_sign/falcon-1024/clean/Makefile index e0ca9482..d958ea0e 100644 --- a/crypto_sign/falcon-1024/clean/Makefile +++ b/crypto_sign/falcon-1024/clean/Makefile @@ -1,10 +1,10 @@ # This Makefile can be used with GNU Make or BSD Make -LIB=libfalcon-1024_clean.a +LIB=libfalcon1024_clean.a -SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c -OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o -HEADERS = api.h fpr.h inner.h +SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c +OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o +HEADERS = api.h fpr.h inner.h CFLAGS=-O3 -Wall -Wconversion -Wextra -Wpedantic -Wvla -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) diff --git a/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake b/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake index 1b21baad..5bf6b36f 100644 --- a/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake +++ b/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake @@ -1,8 +1,8 @@ # This Makefile can be used with Microsoft Visual Studio's nmake using the command: # nmake /f Makefile.Microsoft_nmake -LIBRARY=libfalcon-1024_clean.lib -OBJECTS=codec.obj common.obj fft.obj fpr.obj keygen.obj pqclean.obj rng.obj sign.obj vrfy.obj +LIBRARY=libfalcon1024_clean.lib +OBJECTS=codec.obj common.obj fft.obj fpr.obj keygen.obj pqclean.obj rng.obj sign.obj vrfy.obj # Warning C4146 is raised when a unary minus operator is applied to an # unsigned type; this has nonetheless been standard and portable for as @@ -16,7 +16,7 @@ all: $(LIBRARY) $(OBJECTS): *.h $(LIBRARY): $(OBJECTS) - LIB.EXE /NOLOGO /WX /OUT:$@ $** + LIB.EXE /NOLOGO /WX /OUT:$@ $** clean: -DEL $(OBJECTS) diff --git a/crypto_sign/falcon-1024/clean/codec.c b/crypto_sign/falcon-1024/clean/codec.c index 70856aff..c5ab4938 100644 --- 
a/crypto_sign/falcon-1024/clean/codec.c +++ b/crypto_sign/falcon-1024/clean/codec.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Encoding/decoding of keys and signatures. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* see inner.h */ size_t diff --git a/crypto_sign/falcon-1024/clean/common.c b/crypto_sign/falcon-1024/clean/common.c index bb2d7ece..2e3005b2 100644 --- a/crypto_sign/falcon-1024/clean/common.c +++ b/crypto_sign/falcon-1024/clean/common.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Support functions for signatures (hash-to-point, norm). * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* see inner.h */ void diff --git a/crypto_sign/falcon-1024/clean/fft.c b/crypto_sign/falcon-1024/clean/fft.c index c64cedc5..a25bac4e 100644 --- a/crypto_sign/falcon-1024/clean/fft.c +++ b/crypto_sign/falcon-1024/clean/fft.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * FFT code. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* * Rules for complex number macros: diff --git a/crypto_sign/falcon-1024/clean/fpr.c b/crypto_sign/falcon-1024/clean/fpr.c index ff3eda4a..091462a7 100644 --- a/crypto_sign/falcon-1024/clean/fpr.c +++ b/crypto_sign/falcon-1024/clean/fpr.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Floating-point operations. * @@ -32,7 +34,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* @@ -1631,4 +1632,3 @@ const fpr fpr_p2_tab[] = { 4571153621781053440U, 4566650022153682944U }; - diff --git a/crypto_sign/falcon-1024/clean/fpr.h b/crypto_sign/falcon-1024/clean/fpr.h index 004bc0df..dd7e15c2 100644 --- a/crypto_sign/falcon-1024/clean/fpr.h +++ b/crypto_sign/falcon-1024/clean/fpr.h @@ -1,3 +1,6 @@ +#ifndef PQCLEAN_FALCON1024_CLEAN_FPR_H +#define PQCLEAN_FALCON1024_CLEAN_FPR_H + /* * Floating-point operations. 
* @@ -467,4 +470,4 @@ extern const fpr fpr_gm_tab[]; extern const fpr fpr_p2_tab[]; /* ====================================================================== */ - +#endif diff --git a/crypto_sign/falcon-1024/clean/inner.h b/crypto_sign/falcon-1024/clean/inner.h index a10ecd3d..5b0477ac 100644 --- a/crypto_sign/falcon-1024/clean/inner.h +++ b/crypto_sign/falcon-1024/clean/inner.h @@ -1,5 +1,6 @@ -#ifndef FALCON_INNER_H__ -#define FALCON_INNER_H__ +#ifndef PQCLEAN_FALCON1024_CLEAN_INNER_H +#define PQCLEAN_FALCON1024_CLEAN_INNER_H + /* * Internal functions for Falcon. This is not the API intended to be @@ -72,8 +73,8 @@ * proper, or integer-based emulation is used, the set_fpu_cw() * function does nothing, so it can be called systematically. */ - - +#include "fips202.h" +#include "fpr.h" #include #include #include @@ -115,7 +116,6 @@ set_fpu_cw(unsigned x) { */ -#include "fips202.h" #define inner_shake256_context shake256incctx #define inner_shake256_init(sc) shake256_inc_init(sc) @@ -438,7 +438,6 @@ int PQCLEAN_FALCON1024_CLEAN_verify_recover(uint16_t *h, * fpr fpr_mtwo63m1 -(2^63-1) * fpr fpr_ptwo63 2^63 */ -#include "fpr.h" /* ==================================================================== */ /* diff --git a/crypto_sign/falcon-1024/clean/keygen.c b/crypto_sign/falcon-1024/clean/keygen.c index e987b3a5..2d47412d 100644 --- a/crypto_sign/falcon-1024/clean/keygen.c +++ b/crypto_sign/falcon-1024/clean/keygen.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Falcon key pair generation. 
* @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" #define MKN(logn) ((size_t)1 << (logn)) @@ -2207,7 +2208,6 @@ get_rng_u64(inner_shake256_context *rng) { | ((uint64_t)tmp[7] << 56); } - /* * Table below incarnates a discrete Gaussian distribution: * D(x) = exp(-(x^2)/(2*sigma^2)) diff --git a/crypto_sign/falcon-1024/clean/pqclean.c b/crypto_sign/falcon-1024/clean/pqclean.c index b6f48a0a..487efd52 100644 --- a/crypto_sign/falcon-1024/clean/pqclean.c +++ b/crypto_sign/falcon-1024/clean/pqclean.c @@ -1,16 +1,16 @@ +#include "api.h" +#include "inner.h" +#include "randombytes.h" +#include +#include /* * Wrapper for implementing the PQClean API. */ -#include -#include -#include "api.h" -#include "inner.h" #define NONCELEN 40 - -#include "randombytes.h" +#define SEEDLEN 48 /* * Encoding formats (nnnn = log of degree, 9 for Falcon-512, 10 for Falcon-1024) @@ -41,19 +41,19 @@ /* see api.h */ int -PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair( - uint8_t *pk, uint8_t *sk) { +PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair(unsigned char *pk, unsigned char *sk) { union { - uint8_t b[FALCON_KEYGEN_TEMP_10]; + uint8_t b[28 * 1024]; uint64_t dummy_u64; fpr dummy_fpr; } tmp; - int8_t f[1024], g[1024], F[1024]; + int8_t f[1024], g[1024], F[1024], G[1024]; uint16_t h[1024]; - unsigned char seed[48]; + unsigned char seed[SEEDLEN]; inner_shake256_context rng; size_t u, v; + /* * Generate key pair. 
*/ @@ -61,7 +61,7 @@ PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair( inner_shake256_init(&rng); inner_shake256_inject(&rng, seed, sizeof seed); inner_shake256_flip(&rng); - PQCLEAN_FALCON1024_CLEAN_keygen(&rng, f, g, F, NULL, h, 10, tmp.b); + PQCLEAN_FALCON1024_CLEAN_keygen(&rng, f, g, F, G, h, 10, tmp.b); inner_shake256_ctx_release(&rng); /* @@ -135,7 +135,7 @@ do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen, int16_t sig[1024]; uint16_t hm[1024]; } r; - unsigned char seed[48]; + unsigned char seed[SEEDLEN]; inner_shake256_context sc; size_t u, v; @@ -174,6 +174,7 @@ do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen, return -1; } + /* * Create a random nonce (40 bytes). */ @@ -186,7 +187,7 @@ do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen, inner_shake256_inject(&sc, nonce, NONCELEN); inner_shake256_inject(&sc, m, mlen); inner_shake256_flip(&sc); - PQCLEAN_FALCON1024_CLEAN_hash_to_point_ct(&sc, r.hm, 10, tmp.b); + PQCLEAN_FALCON1024_CLEAN_hash_to_point_vartime(&sc, r.hm, 10); inner_shake256_ctx_release(&sc); /* @@ -279,11 +280,11 @@ PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature( const uint8_t *m, size_t mlen, const uint8_t *sk) { /* * The PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES constant is used for - * the signed message object (as produced by crypto_sign()) + * the signed message object (as produced by PQCLEAN_FALCON1024_CLEAN_crypto_sign()) * and includes a two-byte length value, so we take care here * to only generate signatures that are two bytes shorter than - * the maximum. This is done to ensure that crypto_sign() - * and crypto_sign_signature() produce the exact same signature + * the maximum. This is done to ensure that PQCLEAN_FALCON1024_CLEAN_crypto_sign() + * and PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature() produce the exact same signature * value, if used on the same message, with the same private key, * and using the same output from randombytes() (this is for * reproducibility of tests). 
diff --git a/crypto_sign/falcon-1024/clean/rng.c b/crypto_sign/falcon-1024/clean/rng.c index 8238306c..f5739a8f 100644 --- a/crypto_sign/falcon-1024/clean/rng.c +++ b/crypto_sign/falcon-1024/clean/rng.c @@ -1,3 +1,5 @@ +#include "inner.h" +#include /* * PRNG and interface to the system RNG. * @@ -29,10 +31,22 @@ * @author Thomas Pornin */ -#include -#include "inner.h" +/* + * Include relevant system header files. For Win32, this will also need + * linking with advapi32.dll, which we trigger with an appropriate #pragma. + */ + +/* see inner.h */ +int +PQCLEAN_FALCON1024_CLEAN_get_seed(void *seed, size_t len) { + (void)seed; + if (len == 0) { + return 1; + } + return 0; +} /* see inner.h */ void @@ -46,9 +60,6 @@ PQCLEAN_FALCON1024_CLEAN_prng_init(prng *p, inner_shake256_context *src) { uint64_t th, tl; int i; - uint32_t *d32 = (uint32_t *) p->state.d; - uint64_t *d64 = (uint64_t *) p->state.d; - inner_shake256_extract(src, tmp, 56); for (i = 0; i < 14; i ++) { uint32_t w; @@ -57,11 +68,11 @@ PQCLEAN_FALCON1024_CLEAN_prng_init(prng *p, inner_shake256_context *src) { | ((uint32_t)tmp[(i << 2) + 1] << 8) | ((uint32_t)tmp[(i << 2) + 2] << 16) | ((uint32_t)tmp[(i << 2) + 3] << 24); - d32[i] = w; + *(uint32_t *)(p->state.d + (i << 2)) = w; } - tl = d32[48 / sizeof(uint32_t)]; - th = d32[52 / sizeof(uint32_t)]; - d64[48 / sizeof(uint64_t)] = tl + (th << 32); + tl = *(uint32_t *)(p->state.d + 48); + th = *(uint32_t *)(p->state.d + 52); + *(uint64_t *)(p->state.d + 48) = tl + (th << 32); PQCLEAN_FALCON1024_CLEAN_prng_refill(p); } @@ -88,14 +99,12 @@ PQCLEAN_FALCON1024_CLEAN_prng_refill(prng *p) { uint64_t cc; size_t u; - uint32_t *d32 = (uint32_t *) p->state.d; - uint64_t *d64 = (uint64_t *) p->state.d; /* * State uses local endianness. Only the output bytes must be * converted to little endian (if used on a big-endian machine). 
*/ - cc = d64[48 / sizeof(uint64_t)]; + cc = *(uint64_t *)(p->state.d + 48); for (u = 0; u < 8; u ++) { uint32_t state[16]; size_t v; @@ -139,10 +148,12 @@ PQCLEAN_FALCON1024_CLEAN_prng_refill(prng *p) { state[v] += CW[v]; } for (v = 4; v < 14; v ++) { - state[v] += d32[v - 4]; + state[v] += ((uint32_t *)p->state.d)[v - 4]; } - state[14] += d32[10] ^ (uint32_t)cc; - state[15] += d32[11] ^ (uint32_t)(cc >> 32); + state[14] += ((uint32_t *)p->state.d)[10] + ^ (uint32_t)cc; + state[15] += ((uint32_t *)p->state.d)[11] + ^ (uint32_t)(cc >> 32); cc ++; /* @@ -160,7 +171,7 @@ PQCLEAN_FALCON1024_CLEAN_prng_refill(prng *p) { (uint8_t)(state[v] >> 24); } } - d64[48 / sizeof(uint64_t)] = cc; + *(uint64_t *)(p->state.d + 48) = cc; p->ptr = 0; diff --git a/crypto_sign/falcon-1024/clean/sign.c b/crypto_sign/falcon-1024/clean/sign.c index 56518bf5..fb05cdad 100644 --- a/crypto_sign/falcon-1024/clean/sign.c +++ b/crypto_sign/falcon-1024/clean/sign.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Falcon signature generation. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* =================================================================== */ @@ -1081,8 +1082,8 @@ BerExp(prng *p, fpr x, fpr ccs) { int PQCLEAN_FALCON1024_CLEAN_sampler(void *ctx, fpr mu, fpr isigma) { sampler_context *spc; - int s; - fpr r, dss, ccs; + int s, z0, z, b; + fpr r, dss, ccs, x; spc = ctx; @@ -1107,9 +1108,6 @@ PQCLEAN_FALCON1024_CLEAN_sampler(void *ctx, fpr mu, fpr isigma) { * We now need to sample on center r. */ for (;;) { - int z0, z, b; - fpr x; - /* * Sample z for a Gaussian distribution. Then get a * random bit b to turn the sampling into a bimodal diff --git a/crypto_sign/falcon-1024/clean/vrfy.c b/crypto_sign/falcon-1024/clean/vrfy.c index 780127cf..93f2d526 100644 --- a/crypto_sign/falcon-1024/clean/vrfy.c +++ b/crypto_sign/falcon-1024/clean/vrfy.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Falcon signature verification. 
* @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* ===================================================================== */ /* diff --git a/crypto_sign/falcon-512/META.yml b/crypto_sign/falcon-512/META.yml index 51e8ac2e..fa45b552 100644 --- a/crypto_sign/falcon-512/META.yml +++ b/crypto_sign/falcon-512/META.yml @@ -20,4 +20,13 @@ auxiliary-submitters: - Zhenfei Zhang implementations: - name: clean - version: 20190920 + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + - name: avx2 + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + supported_platforms: + - architecture: x86_64 + operating_systems: + - Linux + - Darwin + required_flags: + - avx2 diff --git a/crypto_sign/falcon-512/avx2/LICENSE b/crypto_sign/falcon-512/avx2/LICENSE new file mode 100644 index 00000000..12c7b56c --- /dev/null +++ b/crypto_sign/falcon-512/avx2/LICENSE @@ -0,0 +1,24 @@ +\ +MIT License + +Copyright (c) 2017-2019 Falcon Project + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/crypto_sign/falcon-512/avx2/Makefile b/crypto_sign/falcon-512/avx2/Makefile new file mode 100644 index 00000000..9f3ca4a7 --- /dev/null +++ b/crypto_sign/falcon-512/avx2/Makefile @@ -0,0 +1,24 @@ +# This Makefile can be used with GNU Make or BSD Make + +LIB=libfalcon512_avx2.a + +SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c +OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o +HEADERS = api.h fpr.h inner.h + +CFLAGS=-O3 -Wconversion -mavx2 -Wall -Wextra -Wpedantic -Wvla -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) + +all: $(LIB) + +%.o: %.s $(HEADERS) + $(AS) -o $@ $< + +%.o: %.c $(HEADERS) + $(CC) $(CFLAGS) -c -o $@ $< + +$(LIB): $(OBJECTS) + $(AR) -r $@ $(OBJECTS) + +clean: + $(RM) $(OBJECTS) + $(RM) $(LIB) diff --git a/crypto_sign/falcon-512/avx2/api.h b/crypto_sign/falcon-512/avx2/api.h new file mode 100644 index 00000000..fd8f55cd --- /dev/null +++ b/crypto_sign/falcon-512/avx2/api.h @@ -0,0 +1,80 @@ +#ifndef PQCLEAN_FALCON512_AVX2_API_H +#define PQCLEAN_FALCON512_AVX2_API_H + +#include +#include + +#define PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES 1281 +#define PQCLEAN_FALCON512_AVX2_CRYPTO_PUBLICKEYBYTES 897 +#define PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES 690 + +#define PQCLEAN_FALCON512_AVX2_CRYPTO_ALGNAME "Falcon-512" + +/* + * Generate a new key pair. Public key goes into pk[], private key in sk[]. + * Key sizes are exact (in bytes): + * public (pk): PQCLEAN_FALCON512_AVX2_CRYPTO_PUBLICKEYBYTES + * private (sk): PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES + * + * Return value: 0 on success, -1 on error. 
+ */ +int PQCLEAN_FALCON512_AVX2_crypto_sign_keypair( + uint8_t *pk, uint8_t *sk); + +/* + * Compute a signature on a provided message (m, mlen), with a given + * private key (sk). Signature is written in sig[], with length written + * into *siglen. Signature length is variable; maximum signature length + * (in bytes) is PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES. + * + * sig[], m[] and sk[] may overlap each other arbitrarily. + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON512_AVX2_crypto_sign_signature( + uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk); + +/* + * Verify a signature (sig, siglen) on a message (m, mlen) with a given + * public key (pk). + * + * sig[], m[] and pk[] may overlap each other arbitrarily. + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON512_AVX2_crypto_sign_verify( + const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk); + +/* + * Compute a signature on a message and pack the signature and message + * into a single object, written into sm[]. The length of that output is + * written in *smlen; that length may be larger than the message length + * (mlen) by up to PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES. + * + * sm[] and m[] may overlap each other arbitrarily; however, sm[] shall + * not overlap with sk[]. + * + * Return value: 0 on success, -1 on error. + */ +int PQCLEAN_FALCON512_AVX2_crypto_sign( + uint8_t *sm, size_t *smlen, + const uint8_t *m, size_t mlen, const uint8_t *sk); + +/* + * Open a signed message object (sm, smlen) and verify the signature; + * on success, the message itself is written into m[] and its length + * into *mlen. The message is shorter than the signed message object, + * but the size difference depends on the signature value; the difference + * may range up to PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES. + * + * m[], sm[] and pk[] may overlap each other arbitrarily. + * + * Return value: 0 on success, -1 on error. 
+ */ +int PQCLEAN_FALCON512_AVX2_crypto_sign_open( + uint8_t *m, size_t *mlen, + const uint8_t *sm, size_t smlen, const uint8_t *pk); + +#endif diff --git a/crypto_sign/falcon-512/avx2/codec.c b/crypto_sign/falcon-512/avx2/codec.c new file mode 100644 index 00000000..8b64ed5b --- /dev/null +++ b/crypto_sign/falcon-512/avx2/codec.c @@ -0,0 +1,555 @@ +#include "inner.h" + +/* + * Encoding/decoding of keys and signatures. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_modq_encode( + void *out, size_t max_out_len, + const uint16_t *x, unsigned logn) { + size_t n, out_len, u; + uint8_t *buf; + uint32_t acc; + int acc_len; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + if (x[u] >= 12289) { + return 0; + } + } + out_len = ((n * 14) + 7) >> 3; + if (out == NULL) { + return out_len; + } + if (out_len > max_out_len) { + return 0; + } + buf = out; + acc = 0; + acc_len = 0; + for (u = 0; u < n; u ++) { + acc = (acc << 14) | x[u]; + acc_len += 14; + while (acc_len >= 8) { + acc_len -= 8; + *buf ++ = (uint8_t)(acc >> acc_len); + } + } + if (acc_len > 0) { + *buf = (uint8_t)(acc << (8 - acc_len)); + } + return out_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_modq_decode( + uint16_t *x, unsigned logn, + const void *in, size_t max_in_len) { + size_t n, in_len, u; + const uint8_t *buf; + uint32_t acc; + int acc_len; + + n = (size_t)1 << logn; + in_len = ((n * 14) + 7) >> 3; + if (in_len > max_in_len) { + return 0; + } + buf = in; + acc = 0; + acc_len = 0; + u = 0; + while (u < n) { + acc = (acc << 8) | (*buf ++); + acc_len += 8; + if (acc_len >= 14) { + unsigned w; + + acc_len -= 14; + w = (acc >> acc_len) & 0x3FFF; + if (w >= 12289) { + return 0; + } + x[u ++] = (uint16_t)w; + } + } + if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) { + return 0; + } + return in_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_trim_i16_encode( + void *out, size_t max_out_len, + const int16_t *x, unsigned logn, unsigned bits) { + size_t n, u, out_len; + int minv, maxv; + uint8_t *buf; + uint32_t acc, mask; + unsigned acc_len; + + n = (size_t)1 << logn; + maxv = (1 << (bits - 1)) - 1; + minv = -maxv; + for (u = 0; u < n; u ++) { + if (x[u] < minv || x[u] > maxv) { + return 0; + } + } + out_len = ((n * bits) + 7) >> 3; + if (out == NULL) { + return out_len; + 
} + if (out_len > max_out_len) { + return 0; + } + buf = out; + acc = 0; + acc_len = 0; + mask = ((uint32_t)1 << bits) - 1; + for (u = 0; u < n; u ++) { + acc = (acc << bits) | ((uint16_t)x[u] & mask); + acc_len += bits; + while (acc_len >= 8) { + acc_len -= 8; + *buf ++ = (uint8_t)(acc >> acc_len); + } + } + if (acc_len > 0) { + *buf ++ = (uint8_t)(acc << (8 - acc_len)); + } + return out_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_trim_i16_decode( + int16_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len) { + size_t n, in_len; + const uint8_t *buf; + size_t u; + uint32_t acc, mask1, mask2; + unsigned acc_len; + + n = (size_t)1 << logn; + in_len = ((n * bits) + 7) >> 3; + if (in_len > max_in_len) { + return 0; + } + buf = in; + u = 0; + acc = 0; + acc_len = 0; + mask1 = ((uint32_t)1 << bits) - 1; + mask2 = (uint32_t)1 << (bits - 1); + while (u < n) { + acc = (acc << 8) | *buf ++; + acc_len += 8; + while (acc_len >= bits && u < n) { + uint32_t w; + + acc_len -= bits; + w = (acc >> acc_len) & mask1; + w |= -(w & mask2); + if (w == -mask2) { + /* + * The -2^(bits-1) value is forbidden. + */ + return 0; + } + w |= -(w & mask2); + x[u ++] = (int16_t) * (int32_t *)&w; + } + } + if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) { + /* + * Extra bits in the last byte must be zero. 
+ */ + return 0; + } + return in_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_trim_i8_encode( + void *out, size_t max_out_len, + const int8_t *x, unsigned logn, unsigned bits) { + size_t n, u, out_len; + int minv, maxv; + uint8_t *buf; + uint32_t acc, mask; + unsigned acc_len; + + n = (size_t)1 << logn; + maxv = (1 << (bits - 1)) - 1; + minv = -maxv; + for (u = 0; u < n; u ++) { + if (x[u] < minv || x[u] > maxv) { + return 0; + } + } + out_len = ((n * bits) + 7) >> 3; + if (out == NULL) { + return out_len; + } + if (out_len > max_out_len) { + return 0; + } + buf = out; + acc = 0; + acc_len = 0; + mask = ((uint32_t)1 << bits) - 1; + for (u = 0; u < n; u ++) { + acc = (acc << bits) | ((uint8_t)x[u] & mask); + acc_len += bits; + while (acc_len >= 8) { + acc_len -= 8; + *buf ++ = (uint8_t)(acc >> acc_len); + } + } + if (acc_len > 0) { + *buf ++ = (uint8_t)(acc << (8 - acc_len)); + } + return out_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_trim_i8_decode( + int8_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len) { + size_t n, in_len; + const uint8_t *buf; + size_t u; + uint32_t acc, mask1, mask2; + unsigned acc_len; + + n = (size_t)1 << logn; + in_len = ((n * bits) + 7) >> 3; + if (in_len > max_in_len) { + return 0; + } + buf = in; + u = 0; + acc = 0; + acc_len = 0; + mask1 = ((uint32_t)1 << bits) - 1; + mask2 = (uint32_t)1 << (bits - 1); + while (u < n) { + acc = (acc << 8) | *buf ++; + acc_len += 8; + while (acc_len >= bits && u < n) { + uint32_t w; + + acc_len -= bits; + w = (acc >> acc_len) & mask1; + w |= -(w & mask2); + if (w == -mask2) { + /* + * The -2^(bits-1) value is forbidden. + */ + return 0; + } + x[u ++] = (int8_t) * (int32_t *)&w; + } + } + if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) { + /* + * Extra bits in the last byte must be zero. 
+ */ + return 0; + } + return in_len; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_comp_encode( + void *out, size_t max_out_len, + const int16_t *x, unsigned logn) { + uint8_t *buf; + size_t n, u, v; + uint32_t acc; + unsigned acc_len; + + n = (size_t)1 << logn; + buf = out; + + /* + * Make sure that all values are within the -2047..+2047 range. + */ + for (u = 0; u < n; u ++) { + if (x[u] < -2047 || x[u] > +2047) { + return 0; + } + } + + acc = 0; + acc_len = 0; + v = 0; + for (u = 0; u < n; u ++) { + int t; + unsigned w; + + /* + * Get sign and absolute value of next integer; push the + * sign bit. + */ + acc <<= 1; + t = x[u]; + if (t < 0) { + t = -t; + acc |= 1; + } + w = (unsigned)t; + + /* + * Push the low 7 bits of the absolute value. + */ + acc <<= 7; + acc |= w & 127u; + w >>= 7; + + /* + * We pushed exactly 8 bits. + */ + acc_len += 8; + + /* + * Push as many zeros as necessary, then a one. Since the + * absolute value is at most 2047, w can only range up to + * 15 at this point, thus we will add at most 16 bits + * here. With the 8 bits above and possibly up to 7 bits + * from previous iterations, we may go up to 31 bits, which + * will fit in the accumulator, which is an uint32_t. + */ + acc <<= (w + 1); + acc |= 1; + acc_len += w + 1; + + /* + * Produce all full bytes. + */ + while (acc_len >= 8) { + acc_len -= 8; + if (buf != NULL) { + if (v >= max_out_len) { + return 0; + } + buf[v] = (uint8_t)(acc >> acc_len); + } + v ++; + } + } + + /* + * Flush remaining bits (if any). 
+ */ + if (acc_len > 0) { + if (buf != NULL) { + if (v >= max_out_len) { + return 0; + } + buf[v] = (uint8_t)(acc << (8 - acc_len)); + } + v ++; + } + + return v; +} + +/* see inner.h */ +size_t +PQCLEAN_FALCON512_AVX2_comp_decode( + int16_t *x, unsigned logn, + const void *in, size_t max_in_len) { + const uint8_t *buf; + size_t n, u, v; + uint32_t acc; + unsigned acc_len; + + n = (size_t)1 << logn; + buf = in; + acc = 0; + acc_len = 0; + v = 0; + for (u = 0; u < n; u ++) { + unsigned b, s, m; + + /* + * Get next eight bits: sign and low seven bits of the + * absolute value. + */ + if (v >= max_in_len) { + return 0; + } + acc = (acc << 8) | (uint32_t)buf[v ++]; + b = acc >> acc_len; + s = b & 128; + m = b & 127; + + /* + * Get next bits until a 1 is reached. + */ + for (;;) { + if (acc_len == 0) { + if (v >= max_in_len) { + return 0; + } + acc = (acc << 8) | (uint32_t)buf[v ++]; + acc_len = 8; + } + acc_len --; + if (((acc >> acc_len) & 1) != 0) { + break; + } + m += 128; + if (m > 2047) { + return 0; + } + } + x[u] = (int16_t) m; + if (s) { + x[u] = (int16_t) - x[u]; + } + } + return v; +} + +/* + * Key elements and signatures are polynomials with small integer + * coefficients. Here are some statistics gathered over many + * generated key pairs (10000 or more for each degree): + * + * log(n) n max(f,g) std(f,g) max(F,G) std(F,G) + * 1 2 129 56.31 143 60.02 + * 2 4 123 40.93 160 46.52 + * 3 8 97 28.97 159 38.01 + * 4 16 100 21.48 154 32.50 + * 5 32 71 15.41 151 29.36 + * 6 64 59 11.07 138 27.77 + * 7 128 39 7.91 144 27.00 + * 8 256 32 5.63 148 26.61 + * 9 512 22 4.00 137 26.46 + * 10 1024 15 2.84 146 26.41 + * + * We want a compact storage format for private key, and, as part of + * key generation, we are allowed to reject some keys which would + * otherwise be fine (this does not induce any noticeable vulnerability + * as long as we reject only a small proportion of possible keys). 
+ * Hence, we enforce at key generation time maximum values for the + * elements of f, g, F and G, so that their encoding can be expressed + * in fixed-width values. Limits have been chosen so that generated + * keys are almost always within bounds, thus not impacting neither + * security or performance. + * + * IMPORTANT: the code assumes that all coefficients of f, g, F and G + * ultimately fit in the -127..+127 range. Thus, none of the elements + * of max_fg_bits[] and max_FG_bits[] shall be greater than 8. + */ + +const uint8_t PQCLEAN_FALCON512_AVX2_max_fg_bits[] = { + 0, /* unused */ + 8, + 8, + 8, + 8, + 8, + 7, + 7, + 6, + 6, + 5 +}; + +const uint8_t PQCLEAN_FALCON512_AVX2_max_FG_bits[] = { + 0, /* unused */ + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8 +}; + +/* + * When generating a new key pair, we can always reject keys which + * feature an abnormally large coefficient. This can also be done for + * signatures, albeit with some care: in case the signature process is + * used in a derandomized setup (explicitly seeded with the message and + * private key), we have to follow the specification faithfully, and the + * specification only enforces a limit on the L2 norm of the signature + * vector. The limit on the L2 norm implies that the absolute value of + * a coefficient of the signature cannot be more than the following: + * + * log(n) n max sig coeff (theoretical) + * 1 2 412 + * 2 4 583 + * 3 8 824 + * 4 16 1166 + * 5 32 1649 + * 6 64 2332 + * 7 128 3299 + * 8 256 4665 + * 9 512 6598 + * 10 1024 9331 + * + * However, the largest observed signature coefficients during our + * experiments was 1077 (in absolute value), hence we can assume that, + * with overwhelming probability, signature coefficients will fit + * in -2047..2047, i.e. 12 bits. 
+ */ + +const uint8_t PQCLEAN_FALCON512_AVX2_max_sig_bits[] = { + 0, /* unused */ + 10, + 11, + 11, + 12, + 12, + 12, + 12, + 12, + 12, + 12 +}; diff --git a/crypto_sign/falcon-512/avx2/common.c b/crypto_sign/falcon-512/avx2/common.c new file mode 100644 index 00000000..7c19e7db --- /dev/null +++ b/crypto_sign/falcon-512/avx2/common.c @@ -0,0 +1,294 @@ +#include "inner.h" + +/* + * Support functions for signatures (hash-to-point, norm). + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_hash_to_point_vartime( + inner_shake256_context *sc, + uint16_t *x, unsigned logn) { + /* + * This is the straightforward per-the-spec implementation. 
It + * is not constant-time, thus it might reveal information on the + * plaintext (at least, enough to check the plaintext against a + * list of potential plaintexts) in a scenario where the + * attacker does not have access to the signature value or to + * the public key, but knows the nonce (without knowledge of the + * nonce, the hashed output cannot be matched against potential + * plaintexts). + */ + size_t n; + + n = (size_t)1 << logn; + while (n > 0) { + uint8_t buf[2]; + uint32_t w; + + inner_shake256_extract(sc, (void *)buf, sizeof buf); + w = ((unsigned)buf[0] << 8) | (unsigned)buf[1]; + if (w < 61445) { + while (w >= 12289) { + w -= 12289; + } + *x ++ = (uint16_t)w; + n --; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_hash_to_point_ct( + inner_shake256_context *sc, + uint16_t *x, unsigned logn, uint8_t *tmp) { + /* + * Each 16-bit sample is a value in 0..65535. The value is + * kept if it falls in 0..61444 (because 61445 = 5*12289) + * and rejected otherwise; thus, each sample has probability + * about 0.93758 of being selected. + * + * We want to oversample enough to be sure that we will + * have enough values with probability at least 1 - 2^(-256). + * Depending on degree N, this leads to the following + * required oversampling: + * + * logn n oversampling + * 1 2 65 + * 2 4 67 + * 3 8 71 + * 4 16 77 + * 5 32 86 + * 6 64 100 + * 7 128 122 + * 8 256 154 + * 9 512 205 + * 10 1024 287 + * + * If logn >= 7, then the provided temporary buffer is large + * enough. Otherwise, we use a stack buffer of 63 entries + * (i.e. 126 bytes) for the values that do not fit in tmp[]. + */ + + static const uint16_t overtab[] = { + 0, /* unused */ + 65, + 67, + 71, + 77, + 86, + 100, + 122, + 154, + 205, + 287 + }; + + unsigned n, n2, u, m, p, over; + uint16_t *tt1, tt2[63]; + + /* + * We first generate m 16-bit value. Values 0..n-1 go to x[]. + * Values n..2*n-1 go to tt1[]. Values 2*n and later go to tt2[]. 
+ * We also reduce modulo q the values; rejected values are set + * to 0xFFFF. + */ + n = 1U << logn; + n2 = n << 1; + over = overtab[logn]; + m = n + over; + tt1 = (uint16_t *)tmp; + for (u = 0; u < m; u ++) { + uint8_t buf[2]; + uint32_t w, wr; + + inner_shake256_extract(sc, buf, sizeof buf); + w = ((uint32_t)buf[0] << 8) | (uint32_t)buf[1]; + wr = w - ((uint32_t)24578 & (((w - 24578) >> 31) - 1)); + wr = wr - ((uint32_t)24578 & (((wr - 24578) >> 31) - 1)); + wr = wr - ((uint32_t)12289 & (((wr - 12289) >> 31) - 1)); + wr |= ((w - 61445) >> 31) - 1; + if (u < n) { + x[u] = (uint16_t)wr; + } else if (u < n2) { + tt1[u - n] = (uint16_t)wr; + } else { + tt2[u - n2] = (uint16_t)wr; + } + } + + /* + * Now we must "squeeze out" the invalid values. We do this in + * a logarithmic sequence of passes; each pass computes where a + * value should go, and moves it down by 'p' slots if necessary, + * where 'p' uses an increasing powers-of-two scale. It can be + * shown that in all cases where the loop decides that a value + * has to be moved down by p slots, the destination slot is + * "free" (i.e. contains an invalid value). + */ + for (p = 1; p <= over; p <<= 1) { + unsigned v; + + /* + * In the loop below: + * + * - v contains the index of the final destination of + * the value; it is recomputed dynamically based on + * whether values are valid or not. + * + * - u is the index of the value we consider ("source"); + * its address is s. + * + * - The loop may swap the value with the one at index + * u-p. The address of the swap destination is d. + */ + v = 0; + for (u = 0; u < m; u ++) { + uint16_t *s, *d; + unsigned j, sv, dv, mk; + + if (u < n) { + s = &x[u]; + } else if (u < n2) { + s = &tt1[u - n]; + } else { + s = &tt2[u - n2]; + } + sv = *s; + + /* + * The value in sv should ultimately go to + * address v, i.e. jump back by u-v slots. + */ + j = u - v; + + /* + * We increment v for the next iteration, but + * only if the source value is valid. 
The mask + * 'mk' is -1 if the value is valid, 0 otherwise, + * so we _subtract_ mk. + */ + mk = (sv >> 15) - 1U; + v -= mk; + + /* + * In this loop we consider jumps by p slots; if + * u < p then there is nothing more to do. + */ + if (u < p) { + continue; + } + + /* + * Destination for the swap: value at address u-p. + */ + if ((u - p) < n) { + d = &x[u - p]; + } else if ((u - p) < n2) { + d = &tt1[(u - p) - n]; + } else { + d = &tt2[(u - p) - n2]; + } + dv = *d; + + /* + * The swap should be performed only if the source + * is valid AND the jump j has its 'p' bit set. + */ + mk &= -(((j & p) + 0x1FF) >> 9); + + *s = (uint16_t)(sv ^ (mk & (sv ^ dv))); + *d = (uint16_t)(dv ^ (mk & (sv ^ dv))); + } + } +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_is_short( + const int16_t *s1, const int16_t *s2, unsigned logn) { + /* + * We use the l2-norm. Code below uses only 32-bit operations to + * compute the square of the norm with saturation to 2^32-1 if + * the value exceeds 2^31-1. + */ + size_t n, u; + uint32_t s, ng; + + n = (size_t)1 << logn; + s = 0; + ng = 0; + for (u = 0; u < n; u ++) { + int32_t z; + + z = s1[u]; + s += (uint32_t)(z * z); + ng |= s; + z = s2[u]; + s += (uint32_t)(z * z); + ng |= s; + } + s |= -(ng >> 31); + + /* + * Acceptance bound on the l2-norm is: + * 1.2*1.55*sqrt(q)*sqrt(2*N) + * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024). + */ + return s < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn)); +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_is_short_half( + uint32_t sqn, const int16_t *s2, unsigned logn) { + size_t n, u; + uint32_t ng; + + n = (size_t)1 << logn; + ng = -(sqn >> 31); + for (u = 0; u < n; u ++) { + int32_t z; + + z = s2[u]; + sqn += (uint32_t)(z * z); + ng |= sqn; + } + sqn |= -(ng >> 31); + + /* + * Acceptance bound on the l2-norm is: + * 1.2*1.55*sqrt(q)*sqrt(2*N) + * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024). 
+ */ + return sqn < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn)); +} diff --git a/crypto_sign/falcon-512/avx2/fft.c b/crypto_sign/falcon-512/avx2/fft.c new file mode 100644 index 00000000..e92af44b --- /dev/null +++ b/crypto_sign/falcon-512/avx2/fft.c @@ -0,0 +1,1109 @@ +#include "inner.h" + +/* + * FFT code. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* + * Rules for complex number macros: + * -------------------------------- + * + * Operand order is: destination, source1, source2... + * + * Each operand is a real and an imaginary part. + * + * All overlaps are allowed. + */ + +/* + * Addition of two complex numbers (d = a + b). 
+ */ +#define FPC_ADD(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_re, fpct_im; \ + fpct_re = fpr_add(a_re, b_re); \ + fpct_im = fpr_add(a_im, b_im); \ + (d_re) = fpct_re; \ + (d_im) = fpct_im; \ + } while (0) + +/* + * Subtraction of two complex numbers (d = a - b). + */ +#define FPC_SUB(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_re, fpct_im; \ + fpct_re = fpr_sub(a_re, b_re); \ + fpct_im = fpr_sub(a_im, b_im); \ + (d_re) = fpct_re; \ + (d_im) = fpct_im; \ + } while (0) + +/* + * Multplication of two complex numbers (d = a * b). + */ +#define FPC_MUL(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_b_re, fpct_b_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_b_re = (b_re); \ + fpct_b_im = (b_im); \ + fpct_d_re = fpr_sub( \ + fpr_mul(fpct_a_re, fpct_b_re), \ + fpr_mul(fpct_a_im, fpct_b_im)); \ + fpct_d_im = fpr_add( \ + fpr_mul(fpct_a_re, fpct_b_im), \ + fpr_mul(fpct_a_im, fpct_b_re)); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Squaring of a complex number (d = a * a). + */ +#define FPC_SQR(d_re, d_im, a_re, a_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_d_re = fpr_sub(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \ + fpct_d_im = fpr_double(fpr_mul(fpct_a_re, fpct_a_im)); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Inversion of a complex number (d = 1 / a). + */ +#define FPC_INV(d_re, d_im, a_re, a_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpr fpct_m; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_m = fpr_add(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \ + fpct_m = fpr_inv(fpct_m); \ + fpct_d_re = fpr_mul(fpct_a_re, fpct_m); \ + fpct_d_im = fpr_mul(fpr_neg(fpct_a_im), fpct_m); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Division of complex numbers (d = a / b). 
+ */ +#define FPC_DIV(d_re, d_im, a_re, a_im, b_re, b_im) do { \ + fpr fpct_a_re, fpct_a_im; \ + fpr fpct_b_re, fpct_b_im; \ + fpr fpct_d_re, fpct_d_im; \ + fpr fpct_m; \ + fpct_a_re = (a_re); \ + fpct_a_im = (a_im); \ + fpct_b_re = (b_re); \ + fpct_b_im = (b_im); \ + fpct_m = fpr_add(fpr_sqr(fpct_b_re), fpr_sqr(fpct_b_im)); \ + fpct_m = fpr_inv(fpct_m); \ + fpct_b_re = fpr_mul(fpct_b_re, fpct_m); \ + fpct_b_im = fpr_mul(fpr_neg(fpct_b_im), fpct_m); \ + fpct_d_re = fpr_sub( \ + fpr_mul(fpct_a_re, fpct_b_re), \ + fpr_mul(fpct_a_im, fpct_b_im)); \ + fpct_d_im = fpr_add( \ + fpr_mul(fpct_a_re, fpct_b_im), \ + fpr_mul(fpct_a_im, fpct_b_re)); \ + (d_re) = fpct_d_re; \ + (d_im) = fpct_d_im; \ + } while (0) + +/* + * Let w = exp(i*pi/N); w is a primitive 2N-th root of 1. We define the + * values w_j = w^(2j+1) for all j from 0 to N-1: these are the roots + * of X^N+1 in the field of complex numbers. A crucial property is that + * w_{N-1-j} = conj(w_j) = 1/w_j for all j. + * + * FFT representation of a polynomial f (taken modulo X^N+1) is the + * set of values f(w_j). Since f is real, conj(f(w_j)) = f(conj(w_j)), + * thus f(w_{N-1-j}) = conj(f(w_j)). We thus store only half the values, + * for j = 0 to N/2-1; the other half can be recomputed easily when (if) + * needed. A consequence is that FFT representation has the same size + * as normal representation: N/2 complex numbers use N real numbers (each + * complex number is the combination of a real and an imaginary part). + * + * We use a specific ordering which makes computations easier. Let rev() + * be the bit-reversal function over log(N) bits. For j in 0..N/2-1, we + * store the real and imaginary parts of f(w_j) in slots: + * + * Re(f(w_j)) -> slot rev(j)/2 + * Im(f(w_j)) -> slot rev(j)/2+N/2 + * + * (Note that rev(j) is even for j < N/2.) 
+ */ + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_FFT(fpr *f, unsigned logn) { + /* + * FFT algorithm in bit-reversal order uses the following + * iterative algorithm: + * + * t = N + * for m = 1; m < N; m *= 2: + * ht = t/2 + * for i1 = 0; i1 < m; i1 ++: + * j1 = i1 * t + * s = GM[m + i1] + * for j = j1; j < (j1 + ht); j ++: + * x = f[j] + * y = s * f[j + ht] + * f[j] = x + y + * f[j + ht] = x - y + * t = ht + * + * GM[k] contains w^rev(k) for primitive root w = exp(i*pi/N). + * + * In the description above, f[] is supposed to contain complex + * numbers. In our in-memory representation, the real and + * imaginary parts of f[k] are in array slots k and k+N/2. + * + * We only keep the first half of the complex numbers. We can + * see that after the first iteration, the first and second halves + * of the array of complex numbers have separate lives, so we + * simply ignore the second part. + */ + + unsigned u; + size_t t, n, hn, m; + + /* + * First iteration: compute f[j] + i * f[j+N/2] for all j < N/2 + * (because GM[1] = w^rev(1) = w^(N/2) = i). + * In our chosen representation, this is a no-op: everything is + * already where it should be. + */ + + /* + * Subsequent iterations are truncated to use only the first + * half of values. 
+ */ + n = (size_t)1 << logn; + hn = n >> 1; + t = hn; + for (u = 1, m = 2; u < logn; u ++, m <<= 1) { + size_t ht, hm, i1, j1; + + ht = t >> 1; + hm = m >> 1; + for (i1 = 0, j1 = 0; i1 < hm; i1 ++, j1 += t) { + size_t j, j2; + + j2 = j1 + ht; + if (ht >= 4) { + __m256d s_re, s_im; + + s_re = _mm256_set1_pd( + fpr_gm_tab[((m + i1) << 1) + 0].v); + s_im = _mm256_set1_pd( + fpr_gm_tab[((m + i1) << 1) + 1].v); + for (j = j1; j < j2; j += 4) { + __m256d x_re, x_im, y_re, y_im; + __m256d z_re, z_im; + + x_re = _mm256_loadu_pd(&f[j].v); + x_im = _mm256_loadu_pd(&f[j + hn].v); + z_re = _mm256_loadu_pd(&f[j + ht].v); + z_im = _mm256_loadu_pd(&f[j + ht + hn].v); + y_re = FMSUB(z_re, s_re, + _mm256_mul_pd(z_im, s_im)); + y_im = FMADD(z_re, s_im, + _mm256_mul_pd(z_im, s_re)); + _mm256_storeu_pd(&f[j].v, + _mm256_add_pd(x_re, y_re)); + _mm256_storeu_pd(&f[j + hn].v, + _mm256_add_pd(x_im, y_im)); + _mm256_storeu_pd(&f[j + ht].v, + _mm256_sub_pd(x_re, y_re)); + _mm256_storeu_pd(&f[j + ht + hn].v, + _mm256_sub_pd(x_im, y_im)); + } + } else { + fpr s_re, s_im; + + s_re = fpr_gm_tab[((m + i1) << 1) + 0]; + s_im = fpr_gm_tab[((m + i1) << 1) + 1]; + for (j = j1; j < j2; j ++) { + fpr x_re, x_im, y_re, y_im; + + x_re = f[j]; + x_im = f[j + hn]; + y_re = f[j + ht]; + y_im = f[j + ht + hn]; + FPC_MUL(y_re, y_im, + y_re, y_im, s_re, s_im); + FPC_ADD(f[j], f[j + hn], + x_re, x_im, y_re, y_im); + FPC_SUB(f[j + ht], f[j + ht + hn], + x_re, x_im, y_re, y_im); + } + } + } + t = ht; + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_iFFT(fpr *f, unsigned logn) { + /* + * Inverse FFT algorithm in bit-reversal order uses the following + * iterative algorithm: + * + * t = 1 + * for m = N; m > 1; m /= 2: + * hm = m/2 + * dt = t*2 + * for i1 = 0; i1 < hm; i1 ++: + * j1 = i1 * dt + * s = iGM[hm + i1] + * for j = j1; j < (j1 + t); j ++: + * x = f[j] + * y = f[j + t] + * f[j] = x + y + * f[j + t] = s * (x - y) + * t = dt + * for i1 = 0; i1 < N; i1 ++: + * f[i1] = f[i1] / N + * + * iGM[k] 
contains (1/w)^rev(k) for primitive root w = exp(i*pi/N) + * (actually, iGM[k] = 1/GM[k] = conj(GM[k])). + * + * In the main loop (not counting the final division loop), in + * all iterations except the last, the first and second half of f[] + * (as an array of complex numbers) are separate. In our chosen + * representation, we do not keep the second half. + * + * The last iteration recombines the recomputed half with the + * implicit half, and should yield only real numbers since the + * target polynomial is real; moreover, s = i at that step. + * Thus, when considering x and y: + * y = conj(x) since the final f[j] must be real + * Therefore, f[j] is filled with 2*Re(x), and f[j + t] is + * filled with 2*Im(x). + * But we already have Re(x) and Im(x) in array slots j and j+t + * in our chosen representation. That last iteration is thus a + * simple doubling of the values in all the array. + * + * We make the last iteration a no-op by tweaking the final + * division into a division by N/2, not N. 
+ */ + size_t u, n, hn, t, m; + + n = (size_t)1 << logn; + t = 1; + m = n; + hn = n >> 1; + for (u = logn; u > 1; u --) { + size_t hm, dt, i1, j1; + + hm = m >> 1; + dt = t << 1; + for (i1 = 0, j1 = 0; j1 < hn; i1 ++, j1 += dt) { + size_t j, j2; + + j2 = j1 + t; + if (t >= 4) { + __m256d s_re, s_im; + + s_re = _mm256_set1_pd( + fpr_gm_tab[((hm + i1) << 1) + 0].v); + s_im = _mm256_set1_pd( + fpr_gm_tab[((hm + i1) << 1) + 1].v); + for (j = j1; j < j2; j += 4) { + __m256d x_re, x_im, y_re, y_im; + __m256d z_re, z_im; + + x_re = _mm256_loadu_pd(&f[j].v); + x_im = _mm256_loadu_pd(&f[j + hn].v); + y_re = _mm256_loadu_pd(&f[j + t].v); + y_im = _mm256_loadu_pd(&f[j + t + hn].v); + _mm256_storeu_pd(&f[j].v, + _mm256_add_pd(x_re, y_re)); + _mm256_storeu_pd(&f[j + hn].v, + _mm256_add_pd(x_im, y_im)); + x_re = _mm256_sub_pd(y_re, x_re); + x_im = _mm256_sub_pd(x_im, y_im); + z_re = FMSUB(x_im, s_im, + _mm256_mul_pd(x_re, s_re)); + z_im = FMADD(x_re, s_im, + _mm256_mul_pd(x_im, s_re)); + _mm256_storeu_pd(&f[j + t].v, z_re); + _mm256_storeu_pd(&f[j + t + hn].v, z_im); + } + } else { + fpr s_re, s_im; + + s_re = fpr_gm_tab[((hm + i1) << 1) + 0]; + s_im = fpr_neg(fpr_gm_tab[((hm + i1) << 1) + 1]); + for (j = j1; j < j2; j ++) { + fpr x_re, x_im, y_re, y_im; + + x_re = f[j]; + x_im = f[j + hn]; + y_re = f[j + t]; + y_im = f[j + t + hn]; + FPC_ADD(f[j], f[j + hn], + x_re, x_im, y_re, y_im); + FPC_SUB(x_re, x_im, + x_re, x_im, y_re, y_im); + FPC_MUL(f[j + t], f[j + t + hn], + x_re, x_im, s_re, s_im); + } + } + } + t = dt; + m = hm; + } + + /* + * Last iteration is a no-op, provided that we divide by N/2 + * instead of N. We need to make a special case for logn = 0. 
+ */ + if (logn > 0) { + fpr ni; + + ni = fpr_p2_tab[logn]; + for (u = 0; u < n; u ++) { + f[u] = fpr_mul(f[u], ni); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_add( + fpr *a, const fpr *b, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_add_pd( + _mm256_loadu_pd(&a[u].v), + _mm256_loadu_pd(&b[u].v))); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_add(a[u], b[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_sub( + fpr *a, const fpr *b, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_sub_pd( + _mm256_loadu_pd(&a[u].v), + _mm256_loadu_pd(&b[u].v))); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_sub(a[u], b[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_neg(fpr *a, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + __m256d s; + + s = _mm256_set1_pd(-0.0); + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_xor_pd(_mm256_loadu_pd(&a[u].v), s)); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_neg(a[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_adj_fft(fpr *a, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 8) { + __m256d s; + + s = _mm256_set1_pd(-0.0); + for (u = (n >> 1); u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_xor_pd(_mm256_loadu_pd(&a[u].v), s)); + } + } else { + for (u = (n >> 1); u < n; u ++) { + a[u] = fpr_neg(a[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_mul_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = 
_mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + c_re = FMSUB( + a_re, b_re, _mm256_mul_pd(a_im, b_im)); + c_im = FMADD( + a_re, b_im, _mm256_mul_pd(a_im, b_re)); + _mm256_storeu_pd(&a[u].v, c_re); + _mm256_storeu_pd(&a[u + hn].v, c_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im, b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = b[u + hn]; + FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_muladj_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = _mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + c_re = FMADD( + a_re, b_re, _mm256_mul_pd(a_im, b_im)); + c_im = FMSUB( + a_im, b_re, _mm256_mul_pd(a_re, b_im)); + _mm256_storeu_pd(&a[u].v, c_re); + _mm256_storeu_pd(&a[u + hn].v, c_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im, b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = fpr_neg(b[u + hn]); + FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(fpr *a, unsigned logn) { + /* + * Since each coefficient is multiplied with its own conjugate, + * the result contains only real values. 
+ */ + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d zero; + + zero = _mm256_setzero_pd(); + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + _mm256_storeu_pd(&a[u].v, + FMADD(a_re, a_re, + _mm256_mul_pd(a_im, a_im))); + _mm256_storeu_pd(&a[u + hn].v, zero); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im; + + a_re = a[u]; + a_im = a[u + hn]; + a[u] = fpr_add(fpr_sqr(a_re), fpr_sqr(a_im)); + a[u + hn] = fpr_zero; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_mulconst(fpr *a, fpr x, unsigned logn) { + size_t n, u; + + n = (size_t)1 << logn; + if (n >= 4) { + __m256d x4; + + x4 = _mm256_set1_pd(x.v); + for (u = 0; u < n; u += 4) { + _mm256_storeu_pd(&a[u].v, + _mm256_mul_pd(x4, _mm256_loadu_pd(&a[u].v))); + } + } else { + for (u = 0; u < n; u ++) { + a[u] = fpr_mul(a[u], x); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_div_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im, t; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = _mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + t = _mm256_div_pd(one, + FMADD(b_re, b_re, + _mm256_mul_pd(b_im, b_im))); + b_re = _mm256_mul_pd(b_re, t); + b_im = _mm256_mul_pd(b_im, t); + c_re = FMADD( + a_re, b_re, _mm256_mul_pd(a_im, b_im)); + c_im = FMSUB( + a_im, b_re, _mm256_mul_pd(a_re, b_im)); + _mm256_storeu_pd(&a[u].v, c_re); + _mm256_storeu_pd(&a[u + hn].v, c_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im, b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = b[u + hn]; + FPC_DIV(a[u], a[u + hn], a_re, a_im, b_re, b_im); + } + } +} + +/* see inner.h */ +void 
+PQCLEAN_FALCON512_AVX2_poly_invnorm2_fft(fpr *d, + const fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, b_re, b_im, dv; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + b_re = _mm256_loadu_pd(&b[u].v); + b_im = _mm256_loadu_pd(&b[u + hn].v); + dv = _mm256_div_pd(one, + _mm256_add_pd( + FMADD(a_re, a_re, + _mm256_mul_pd(a_im, a_im)), + FMADD(b_re, b_re, + _mm256_mul_pd(b_im, b_im)))); + _mm256_storeu_pd(&d[u].v, dv); + } + } else { + for (u = 0; u < hn; u ++) { + fpr a_re, a_im; + fpr b_re, b_im; + + a_re = a[u]; + a_im = a[u + hn]; + b_re = b[u]; + b_im = b[u + hn]; + d[u] = fpr_inv(fpr_add( + fpr_add(fpr_sqr(a_re), fpr_sqr(a_im)), + fpr_add(fpr_sqr(b_re), fpr_sqr(b_im)))); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_add_muladj_fft(fpr *d, + const fpr *F, const fpr *G, + const fpr *f, const fpr *g, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d F_re, F_im, G_re, G_im; + __m256d f_re, f_im, g_re, g_im; + __m256d a_re, a_im, b_re, b_im; + + F_re = _mm256_loadu_pd(&F[u].v); + F_im = _mm256_loadu_pd(&F[u + hn].v); + G_re = _mm256_loadu_pd(&G[u].v); + G_im = _mm256_loadu_pd(&G[u + hn].v); + f_re = _mm256_loadu_pd(&f[u].v); + f_im = _mm256_loadu_pd(&f[u + hn].v); + g_re = _mm256_loadu_pd(&g[u].v); + g_im = _mm256_loadu_pd(&g[u + hn].v); + + a_re = FMADD(F_re, f_re, + _mm256_mul_pd(F_im, f_im)); + a_im = FMSUB(F_im, f_re, + _mm256_mul_pd(F_re, f_im)); + b_re = FMADD(G_re, g_re, + _mm256_mul_pd(G_im, g_im)); + b_im = FMSUB(G_im, g_re, + _mm256_mul_pd(G_re, g_im)); + _mm256_storeu_pd(&d[u].v, + _mm256_add_pd(a_re, b_re)); + _mm256_storeu_pd(&d[u + hn].v, + _mm256_add_pd(a_im, b_im)); + } + } else { + for (u = 0; u < hn; u ++) { + fpr F_re, F_im, G_re, G_im; + fpr 
f_re, f_im, g_re, g_im; + fpr a_re, a_im, b_re, b_im; + + F_re = F[u]; + F_im = F[u + hn]; + G_re = G[u]; + G_im = G[u + hn]; + f_re = f[u]; + f_im = f[u + hn]; + g_re = g[u]; + g_im = g[u + hn]; + + FPC_MUL(a_re, a_im, F_re, F_im, f_re, fpr_neg(f_im)); + FPC_MUL(b_re, b_im, G_re, G_im, g_re, fpr_neg(g_im)); + d[u] = fpr_add(a_re, b_re); + d[u + hn] = fpr_add(a_im, b_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_mul_autoadj_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + for (u = 0; u < hn; u += 4) { + __m256d a_re, a_im, bv; + + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + bv = _mm256_loadu_pd(&b[u].v); + _mm256_storeu_pd(&a[u].v, + _mm256_mul_pd(a_re, bv)); + _mm256_storeu_pd(&a[u + hn].v, + _mm256_mul_pd(a_im, bv)); + } + } else { + for (u = 0; u < hn; u ++) { + a[u] = fpr_mul(a[u], b[u]); + a[u + hn] = fpr_mul(a[u + hn], b[u]); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_div_autoadj_fft( + fpr *a, const fpr *b, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d ib, a_re, a_im; + + ib = _mm256_div_pd(one, _mm256_loadu_pd(&b[u].v)); + a_re = _mm256_loadu_pd(&a[u].v); + a_im = _mm256_loadu_pd(&a[u + hn].v); + _mm256_storeu_pd(&a[u].v, _mm256_mul_pd(a_re, ib)); + _mm256_storeu_pd(&a[u + hn].v, _mm256_mul_pd(a_im, ib)); + } + } else { + for (u = 0; u < hn; u ++) { + fpr ib; + + ib = fpr_inv(b[u]); + a[u] = fpr_mul(a[u], ib); + a[u + hn] = fpr_mul(a[u + hn], ib); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_LDL_fft( + const fpr *g00, + fpr *g01, fpr *g11, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d g00_re, g00_im, g01_re, 
g01_im, g11_re, g11_im; + __m256d t, mu_re, mu_im, xi_re, xi_im; + + g00_re = _mm256_loadu_pd(&g00[u].v); + g00_im = _mm256_loadu_pd(&g00[u + hn].v); + g01_re = _mm256_loadu_pd(&g01[u].v); + g01_im = _mm256_loadu_pd(&g01[u + hn].v); + g11_re = _mm256_loadu_pd(&g11[u].v); + g11_im = _mm256_loadu_pd(&g11[u + hn].v); + + t = _mm256_div_pd(one, + FMADD(g00_re, g00_re, + _mm256_mul_pd(g00_im, g00_im))); + g00_re = _mm256_mul_pd(g00_re, t); + g00_im = _mm256_mul_pd(g00_im, t); + mu_re = FMADD(g01_re, g00_re, + _mm256_mul_pd(g01_im, g00_im)); + mu_im = FMSUB(g01_re, g00_im, + _mm256_mul_pd(g01_im, g00_re)); + xi_re = FMSUB(mu_re, g01_re, + _mm256_mul_pd(mu_im, g01_im)); + xi_im = FMADD(mu_im, g01_re, + _mm256_mul_pd(mu_re, g01_im)); + _mm256_storeu_pd(&g11[u].v, + _mm256_sub_pd(g11_re, xi_re)); + _mm256_storeu_pd(&g11[u + hn].v, + _mm256_add_pd(g11_im, xi_im)); + _mm256_storeu_pd(&g01[u].v, mu_re); + _mm256_storeu_pd(&g01[u + hn].v, mu_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im; + fpr mu_re, mu_im; + + g00_re = g00[u]; + g00_im = g00[u + hn]; + g01_re = g01[u]; + g01_im = g01[u + hn]; + g11_re = g11[u]; + g11_im = g11[u + hn]; + FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im); + FPC_MUL(g01_re, g01_im, + mu_re, mu_im, g01_re, fpr_neg(g01_im)); + FPC_SUB(g11[u], g11[u + hn], + g11_re, g11_im, g01_re, g01_im); + g01[u] = mu_re; + g01[u + hn] = fpr_neg(mu_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_LDLmv_fft( + fpr *d11, fpr *l10, + const fpr *g00, const fpr *g01, + const fpr *g11, unsigned logn) { + size_t n, hn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + if (n >= 8) { + __m256d one; + + one = _mm256_set1_pd(1.0); + for (u = 0; u < hn; u += 4) { + __m256d g00_re, g00_im, g01_re, g01_im, g11_re, g11_im; + __m256d t, mu_re, mu_im, xi_re, xi_im; + + g00_re = _mm256_loadu_pd(&g00[u].v); + g00_im = _mm256_loadu_pd(&g00[u + hn].v); + g01_re = _mm256_loadu_pd(&g01[u].v); + g01_im = 
_mm256_loadu_pd(&g01[u + hn].v); + g11_re = _mm256_loadu_pd(&g11[u].v); + g11_im = _mm256_loadu_pd(&g11[u + hn].v); + + t = _mm256_div_pd(one, + FMADD(g00_re, g00_re, + _mm256_mul_pd(g00_im, g00_im))); + g00_re = _mm256_mul_pd(g00_re, t); + g00_im = _mm256_mul_pd(g00_im, t); + mu_re = FMADD(g01_re, g00_re, + _mm256_mul_pd(g01_im, g00_im)); + mu_im = FMSUB(g01_re, g00_im, + _mm256_mul_pd(g01_im, g00_re)); + xi_re = FMSUB(mu_re, g01_re, + _mm256_mul_pd(mu_im, g01_im)); + xi_im = FMADD(mu_im, g01_re, + _mm256_mul_pd(mu_re, g01_im)); + _mm256_storeu_pd(&d11[u].v, + _mm256_sub_pd(g11_re, xi_re)); + _mm256_storeu_pd(&d11[u + hn].v, + _mm256_add_pd(g11_im, xi_im)); + _mm256_storeu_pd(&l10[u].v, mu_re); + _mm256_storeu_pd(&l10[u + hn].v, mu_im); + } + } else { + for (u = 0; u < hn; u ++) { + fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im; + fpr mu_re, mu_im; + + g00_re = g00[u]; + g00_im = g00[u + hn]; + g01_re = g01[u]; + g01_im = g01[u + hn]; + g11_re = g11[u]; + g11_im = g11[u + hn]; + FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im); + FPC_MUL(g01_re, g01_im, + mu_re, mu_im, g01_re, fpr_neg(g01_im)); + FPC_SUB(d11[u], d11[u + hn], + g11_re, g11_im, g01_re, g01_im); + l10[u] = mu_re; + l10[u + hn] = fpr_neg(mu_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_split_fft( + fpr *f0, fpr *f1, + const fpr *f, unsigned logn) { + /* + * The FFT representation we use is in bit-reversed order + * (element i contains f(w^(rev(i))), where rev() is the + * bit-reversal function over the ring degree. This changes + * indexes with regards to the Falcon specification. 
+ */ + size_t n, hn, qn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + qn = hn >> 1; + + if (n >= 8) { + __m256d half, sv; + + half = _mm256_set1_pd(0.5); + sv = _mm256_set_pd(-0.0, 0.0, -0.0, 0.0); + for (u = 0; u < qn; u += 2) { + __m256d ab_re, ab_im, ff0, ff1, ff2, ff3, gmt; + + ab_re = _mm256_loadu_pd(&f[(u << 1)].v); + ab_im = _mm256_loadu_pd(&f[(u << 1) + hn].v); + ff0 = _mm256_mul_pd(_mm256_hadd_pd(ab_re, ab_im), half); + ff0 = _mm256_permute4x64_pd(ff0, 0xD8); + _mm_storeu_pd(&f0[u].v, + _mm256_extractf128_pd(ff0, 0)); + _mm_storeu_pd(&f0[u + qn].v, + _mm256_extractf128_pd(ff0, 1)); + + ff1 = _mm256_mul_pd(_mm256_hsub_pd(ab_re, ab_im), half); + gmt = _mm256_loadu_pd(&fpr_gm_tab[(u + hn) << 1].v); + ff2 = _mm256_shuffle_pd(ff1, ff1, 0x5); + ff3 = _mm256_hadd_pd( + _mm256_mul_pd(ff1, gmt), + _mm256_xor_pd(_mm256_mul_pd(ff2, gmt), sv)); + ff3 = _mm256_permute4x64_pd(ff3, 0xD8); + _mm_storeu_pd(&f1[u].v, + _mm256_extractf128_pd(ff3, 0)); + _mm_storeu_pd(&f1[u + qn].v, + _mm256_extractf128_pd(ff3, 1)); + } + } else { + f0[0] = f[0]; + f1[0] = f[hn]; + + for (u = 0; u < qn; u ++) { + fpr a_re, a_im, b_re, b_im; + fpr t_re, t_im; + + a_re = f[(u << 1) + 0]; + a_im = f[(u << 1) + 0 + hn]; + b_re = f[(u << 1) + 1]; + b_im = f[(u << 1) + 1 + hn]; + + FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im); + f0[u] = fpr_half(t_re); + f0[u + qn] = fpr_half(t_im); + + FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im); + FPC_MUL(t_re, t_im, t_re, t_im, + fpr_gm_tab[((u + hn) << 1) + 0], + fpr_neg(fpr_gm_tab[((u + hn) << 1) + 1])); + f1[u] = fpr_half(t_re); + f1[u + qn] = fpr_half(t_im); + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_poly_merge_fft( + fpr *f, + const fpr *f0, const fpr *f1, unsigned logn) { + size_t n, hn, qn, u; + + n = (size_t)1 << logn; + hn = n >> 1; + qn = hn >> 1; + + if (n >= 16) { + for (u = 0; u < qn; u += 4) { + __m256d a_re, a_im, b_re, b_im, c_re, c_im; + __m256d gm1, gm2, g_re, g_im; + __m256d t_re, t_im, u_re, u_im; + __m256d tu1_re, tu2_re, 
tu1_im, tu2_im; + + a_re = _mm256_loadu_pd(&f0[u].v); + a_im = _mm256_loadu_pd(&f0[u + qn].v); + c_re = _mm256_loadu_pd(&f1[u].v); + c_im = _mm256_loadu_pd(&f1[u + qn].v); + + gm1 = _mm256_loadu_pd(&fpr_gm_tab[(u + hn) << 1].v); + gm2 = _mm256_loadu_pd(&fpr_gm_tab[(u + 2 + hn) << 1].v); + g_re = _mm256_unpacklo_pd(gm1, gm2); + g_im = _mm256_unpackhi_pd(gm1, gm2); + g_re = _mm256_permute4x64_pd(g_re, 0xD8); + g_im = _mm256_permute4x64_pd(g_im, 0xD8); + + b_re = FMSUB( + c_re, g_re, _mm256_mul_pd(c_im, g_im)); + b_im = FMADD( + c_re, g_im, _mm256_mul_pd(c_im, g_re)); + + t_re = _mm256_add_pd(a_re, b_re); + t_im = _mm256_add_pd(a_im, b_im); + u_re = _mm256_sub_pd(a_re, b_re); + u_im = _mm256_sub_pd(a_im, b_im); + + tu1_re = _mm256_unpacklo_pd(t_re, u_re); + tu2_re = _mm256_unpackhi_pd(t_re, u_re); + tu1_im = _mm256_unpacklo_pd(t_im, u_im); + tu2_im = _mm256_unpackhi_pd(t_im, u_im); + _mm256_storeu_pd(&f[(u << 1)].v, + _mm256_permute2f128_pd(tu1_re, tu2_re, 0x20)); + _mm256_storeu_pd(&f[(u << 1) + 4].v, + _mm256_permute2f128_pd(tu1_re, tu2_re, 0x31)); + _mm256_storeu_pd(&f[(u << 1) + hn].v, + _mm256_permute2f128_pd(tu1_im, tu2_im, 0x20)); + _mm256_storeu_pd(&f[(u << 1) + 4 + hn].v, + _mm256_permute2f128_pd(tu1_im, tu2_im, 0x31)); + } + } else { + f[0] = f0[0]; + f[hn] = f1[0]; + + for (u = 0; u < qn; u ++) { + fpr a_re, a_im, b_re, b_im; + fpr t_re, t_im; + + a_re = f0[u]; + a_im = f0[u + qn]; + FPC_MUL(b_re, b_im, f1[u], f1[u + qn], + fpr_gm_tab[((u + hn) << 1) + 0], + fpr_gm_tab[((u + hn) << 1) + 1]); + FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im); + f[(u << 1) + 0] = t_re; + f[(u << 1) + 0 + hn] = t_im; + FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im); + f[(u << 1) + 1] = t_re; + f[(u << 1) + 1 + hn] = t_im; + } + } +} diff --git a/crypto_sign/falcon-512/avx2/fpr.c b/crypto_sign/falcon-512/avx2/fpr.c new file mode 100644 index 00000000..2f04a35d --- /dev/null +++ b/crypto_sign/falcon-512/avx2/fpr.c @@ -0,0 +1,1078 @@ +#include "inner.h" + +/* + * Floating-point 
operations. + * + * This file implements the non-inline functions declared in + * fpr.h, as well as the constants for FFT / iFFT. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + + +const fpr fpr_gm_tab[] = { + {0}, {0}, /* unused */ + {-0.000000000000000000000000000}, { 1.000000000000000000000000000}, + { 0.707106781186547524400844362}, { 0.707106781186547524400844362}, + {-0.707106781186547524400844362}, { 0.707106781186547524400844362}, + { 0.923879532511286756128183189}, { 0.382683432365089771728459984}, + {-0.382683432365089771728459984}, { 0.923879532511286756128183189}, + { 0.382683432365089771728459984}, { 0.923879532511286756128183189}, + {-0.923879532511286756128183189}, { 0.382683432365089771728459984}, + { 0.980785280403230449126182236}, { 0.195090322016128267848284868}, + {-0.195090322016128267848284868}, { 0.980785280403230449126182236}, + { 0.555570233019602224742830814}, { 0.831469612302545237078788378}, + {-0.831469612302545237078788378}, { 0.555570233019602224742830814}, + { 0.831469612302545237078788378}, { 0.555570233019602224742830814}, + {-0.555570233019602224742830814}, { 0.831469612302545237078788378}, + { 0.195090322016128267848284868}, { 0.980785280403230449126182236}, + {-0.980785280403230449126182236}, { 0.195090322016128267848284868}, + { 0.995184726672196886244836953}, { 0.098017140329560601994195564}, + {-0.098017140329560601994195564}, { 0.995184726672196886244836953}, + { 0.634393284163645498215171613}, { 0.773010453362736960810906610}, + {-0.773010453362736960810906610}, { 0.634393284163645498215171613}, + { 0.881921264348355029712756864}, { 0.471396736825997648556387626}, + {-0.471396736825997648556387626}, { 0.881921264348355029712756864}, + { 0.290284677254462367636192376}, { 0.956940335732208864935797887}, + {-0.956940335732208864935797887}, { 0.290284677254462367636192376}, + { 0.956940335732208864935797887}, { 0.290284677254462367636192376}, + {-0.290284677254462367636192376}, { 0.956940335732208864935797887}, + { 0.471396736825997648556387626}, { 0.881921264348355029712756864}, + 
{-0.881921264348355029712756864}, { 0.471396736825997648556387626}, + { 0.773010453362736960810906610}, { 0.634393284163645498215171613}, + {-0.634393284163645498215171613}, { 0.773010453362736960810906610}, + { 0.098017140329560601994195564}, { 0.995184726672196886244836953}, + {-0.995184726672196886244836953}, { 0.098017140329560601994195564}, + { 0.998795456205172392714771605}, { 0.049067674327418014254954977}, + {-0.049067674327418014254954977}, { 0.998795456205172392714771605}, + { 0.671558954847018400625376850}, { 0.740951125354959091175616897}, + {-0.740951125354959091175616897}, { 0.671558954847018400625376850}, + { 0.903989293123443331586200297}, { 0.427555093430282094320966857}, + {-0.427555093430282094320966857}, { 0.903989293123443331586200297}, + { 0.336889853392220050689253213}, { 0.941544065183020778412509403}, + {-0.941544065183020778412509403}, { 0.336889853392220050689253213}, + { 0.970031253194543992603984207}, { 0.242980179903263889948274162}, + {-0.242980179903263889948274162}, { 0.970031253194543992603984207}, + { 0.514102744193221726593693839}, { 0.857728610000272069902269984}, + {-0.857728610000272069902269984}, { 0.514102744193221726593693839}, + { 0.803207531480644909806676513}, { 0.595699304492433343467036529}, + {-0.595699304492433343467036529}, { 0.803207531480644909806676513}, + { 0.146730474455361751658850130}, { 0.989176509964780973451673738}, + {-0.989176509964780973451673738}, { 0.146730474455361751658850130}, + { 0.989176509964780973451673738}, { 0.146730474455361751658850130}, + {-0.146730474455361751658850130}, { 0.989176509964780973451673738}, + { 0.595699304492433343467036529}, { 0.803207531480644909806676513}, + {-0.803207531480644909806676513}, { 0.595699304492433343467036529}, + { 0.857728610000272069902269984}, { 0.514102744193221726593693839}, + {-0.514102744193221726593693839}, { 0.857728610000272069902269984}, + { 0.242980179903263889948274162}, { 0.970031253194543992603984207}, + {-0.970031253194543992603984207}, { 
0.242980179903263889948274162}, + { 0.941544065183020778412509403}, { 0.336889853392220050689253213}, + {-0.336889853392220050689253213}, { 0.941544065183020778412509403}, + { 0.427555093430282094320966857}, { 0.903989293123443331586200297}, + {-0.903989293123443331586200297}, { 0.427555093430282094320966857}, + { 0.740951125354959091175616897}, { 0.671558954847018400625376850}, + {-0.671558954847018400625376850}, { 0.740951125354959091175616897}, + { 0.049067674327418014254954977}, { 0.998795456205172392714771605}, + {-0.998795456205172392714771605}, { 0.049067674327418014254954977}, + { 0.999698818696204220115765650}, { 0.024541228522912288031734529}, + {-0.024541228522912288031734529}, { 0.999698818696204220115765650}, + { 0.689540544737066924616730630}, { 0.724247082951466920941069243}, + {-0.724247082951466920941069243}, { 0.689540544737066924616730630}, + { 0.914209755703530654635014829}, { 0.405241314004989870908481306}, + {-0.405241314004989870908481306}, { 0.914209755703530654635014829}, + { 0.359895036534988148775104572}, { 0.932992798834738887711660256}, + {-0.932992798834738887711660256}, { 0.359895036534988148775104572}, + { 0.975702130038528544460395766}, { 0.219101240156869797227737547}, + {-0.219101240156869797227737547}, { 0.975702130038528544460395766}, + { 0.534997619887097210663076905}, { 0.844853565249707073259571205}, + {-0.844853565249707073259571205}, { 0.534997619887097210663076905}, + { 0.817584813151583696504920884}, { 0.575808191417845300745972454}, + {-0.575808191417845300745972454}, { 0.817584813151583696504920884}, + { 0.170961888760301226363642357}, { 0.985277642388941244774018433}, + {-0.985277642388941244774018433}, { 0.170961888760301226363642357}, + { 0.992479534598709998156767252}, { 0.122410675199216198498704474}, + {-0.122410675199216198498704474}, { 0.992479534598709998156767252}, + { 0.615231590580626845484913563}, { 0.788346427626606262009164705}, + {-0.788346427626606262009164705}, { 0.615231590580626845484913563}, + { 
0.870086991108711418652292404}, { 0.492898192229784036873026689}, + {-0.492898192229784036873026689}, { 0.870086991108711418652292404}, + { 0.266712757474898386325286515}, { 0.963776065795439866686464356}, + {-0.963776065795439866686464356}, { 0.266712757474898386325286515}, + { 0.949528180593036667195936074}, { 0.313681740398891476656478846}, + {-0.313681740398891476656478846}, { 0.949528180593036667195936074}, + { 0.449611329654606600046294579}, { 0.893224301195515320342416447}, + {-0.893224301195515320342416447}, { 0.449611329654606600046294579}, + { 0.757208846506484547575464054}, { 0.653172842953776764084203014}, + {-0.653172842953776764084203014}, { 0.757208846506484547575464054}, + { 0.073564563599667423529465622}, { 0.997290456678690216135597140}, + {-0.997290456678690216135597140}, { 0.073564563599667423529465622}, + { 0.997290456678690216135597140}, { 0.073564563599667423529465622}, + {-0.073564563599667423529465622}, { 0.997290456678690216135597140}, + { 0.653172842953776764084203014}, { 0.757208846506484547575464054}, + {-0.757208846506484547575464054}, { 0.653172842953776764084203014}, + { 0.893224301195515320342416447}, { 0.449611329654606600046294579}, + {-0.449611329654606600046294579}, { 0.893224301195515320342416447}, + { 0.313681740398891476656478846}, { 0.949528180593036667195936074}, + {-0.949528180593036667195936074}, { 0.313681740398891476656478846}, + { 0.963776065795439866686464356}, { 0.266712757474898386325286515}, + {-0.266712757474898386325286515}, { 0.963776065795439866686464356}, + { 0.492898192229784036873026689}, { 0.870086991108711418652292404}, + {-0.870086991108711418652292404}, { 0.492898192229784036873026689}, + { 0.788346427626606262009164705}, { 0.615231590580626845484913563}, + {-0.615231590580626845484913563}, { 0.788346427626606262009164705}, + { 0.122410675199216198498704474}, { 0.992479534598709998156767252}, + {-0.992479534598709998156767252}, { 0.122410675199216198498704474}, + { 0.985277642388941244774018433}, { 
0.170961888760301226363642357}, + {-0.170961888760301226363642357}, { 0.985277642388941244774018433}, + { 0.575808191417845300745972454}, { 0.817584813151583696504920884}, + {-0.817584813151583696504920884}, { 0.575808191417845300745972454}, + { 0.844853565249707073259571205}, { 0.534997619887097210663076905}, + {-0.534997619887097210663076905}, { 0.844853565249707073259571205}, + { 0.219101240156869797227737547}, { 0.975702130038528544460395766}, + {-0.975702130038528544460395766}, { 0.219101240156869797227737547}, + { 0.932992798834738887711660256}, { 0.359895036534988148775104572}, + {-0.359895036534988148775104572}, { 0.932992798834738887711660256}, + { 0.405241314004989870908481306}, { 0.914209755703530654635014829}, + {-0.914209755703530654635014829}, { 0.405241314004989870908481306}, + { 0.724247082951466920941069243}, { 0.689540544737066924616730630}, + {-0.689540544737066924616730630}, { 0.724247082951466920941069243}, + { 0.024541228522912288031734529}, { 0.999698818696204220115765650}, + {-0.999698818696204220115765650}, { 0.024541228522912288031734529}, + { 0.999924701839144540921646491}, { 0.012271538285719926079408262}, + {-0.012271538285719926079408262}, { 0.999924701839144540921646491}, + { 0.698376249408972853554813503}, { 0.715730825283818654125532623}, + {-0.715730825283818654125532623}, { 0.698376249408972853554813503}, + { 0.919113851690057743908477789}, { 0.393992040061048108596188661}, + {-0.393992040061048108596188661}, { 0.919113851690057743908477789}, + { 0.371317193951837543411934967}, { 0.928506080473215565937167396}, + {-0.928506080473215565937167396}, { 0.371317193951837543411934967}, + { 0.978317370719627633106240097}, { 0.207111376192218549708116020}, + {-0.207111376192218549708116020}, { 0.978317370719627633106240097}, + { 0.545324988422046422313987347}, { 0.838224705554838043186996856}, + {-0.838224705554838043186996856}, { 0.545324988422046422313987347}, + { 0.824589302785025264474803737}, { 0.565731810783613197389765011}, + 
{-0.565731810783613197389765011}, { 0.824589302785025264474803737}, + { 0.183039887955140958516532578}, { 0.983105487431216327180301155}, + {-0.983105487431216327180301155}, { 0.183039887955140958516532578}, + { 0.993906970002356041546922813}, { 0.110222207293883058807899140}, + {-0.110222207293883058807899140}, { 0.993906970002356041546922813}, + { 0.624859488142386377084072816}, { 0.780737228572094478301588484}, + {-0.780737228572094478301588484}, { 0.624859488142386377084072816}, + { 0.876070094195406607095844268}, { 0.482183772079122748517344481}, + {-0.482183772079122748517344481}, { 0.876070094195406607095844268}, + { 0.278519689385053105207848526}, { 0.960430519415565811199035138}, + {-0.960430519415565811199035138}, { 0.278519689385053105207848526}, + { 0.953306040354193836916740383}, { 0.302005949319228067003463232}, + {-0.302005949319228067003463232}, { 0.953306040354193836916740383}, + { 0.460538710958240023633181487}, { 0.887639620402853947760181617}, + {-0.887639620402853947760181617}, { 0.460538710958240023633181487}, + { 0.765167265622458925888815999}, { 0.643831542889791465068086063}, + {-0.643831542889791465068086063}, { 0.765167265622458925888815999}, + { 0.085797312344439890461556332}, { 0.996312612182778012627226190}, + {-0.996312612182778012627226190}, { 0.085797312344439890461556332}, + { 0.998118112900149207125155861}, { 0.061320736302208577782614593}, + {-0.061320736302208577782614593}, { 0.998118112900149207125155861}, + { 0.662415777590171761113069817}, { 0.749136394523459325469203257}, + {-0.749136394523459325469203257}, { 0.662415777590171761113069817}, + { 0.898674465693953843041976744}, { 0.438616238538527637647025738}, + {-0.438616238538527637647025738}, { 0.898674465693953843041976744}, + { 0.325310292162262934135954708}, { 0.945607325380521325730945387}, + {-0.945607325380521325730945387}, { 0.325310292162262934135954708}, + { 0.966976471044852109087220226}, { 0.254865659604514571553980779}, + {-0.254865659604514571553980779}, { 
0.966976471044852109087220226}, + { 0.503538383725717558691867071}, { 0.863972856121586737918147054}, + {-0.863972856121586737918147054}, { 0.503538383725717558691867071}, + { 0.795836904608883536262791915}, { 0.605511041404325513920626941}, + {-0.605511041404325513920626941}, { 0.795836904608883536262791915}, + { 0.134580708507126186316358409}, { 0.990902635427780025108237011}, + {-0.990902635427780025108237011}, { 0.134580708507126186316358409}, + { 0.987301418157858382399815802}, { 0.158858143333861441684385360}, + {-0.158858143333861441684385360}, { 0.987301418157858382399815802}, + { 0.585797857456438860328080838}, { 0.810457198252594791726703434}, + {-0.810457198252594791726703434}, { 0.585797857456438860328080838}, + { 0.851355193105265142261290312}, { 0.524589682678468906215098464}, + {-0.524589682678468906215098464}, { 0.851355193105265142261290312}, + { 0.231058108280671119643236018}, { 0.972939952205560145467720114}, + {-0.972939952205560145467720114}, { 0.231058108280671119643236018}, + { 0.937339011912574923201899593}, { 0.348418680249434568419308588}, + {-0.348418680249434568419308588}, { 0.937339011912574923201899593}, + { 0.416429560097637182562598911}, { 0.909167983090522376563884788}, + {-0.909167983090522376563884788}, { 0.416429560097637182562598911}, + { 0.732654271672412834615546649}, { 0.680600997795453050594430464}, + {-0.680600997795453050594430464}, { 0.732654271672412834615546649}, + { 0.036807222941358832324332691}, { 0.999322384588349500896221011}, + {-0.999322384588349500896221011}, { 0.036807222941358832324332691}, + { 0.999322384588349500896221011}, { 0.036807222941358832324332691}, + {-0.036807222941358832324332691}, { 0.999322384588349500896221011}, + { 0.680600997795453050594430464}, { 0.732654271672412834615546649}, + {-0.732654271672412834615546649}, { 0.680600997795453050594430464}, + { 0.909167983090522376563884788}, { 0.416429560097637182562598911}, + {-0.416429560097637182562598911}, { 0.909167983090522376563884788}, + { 
0.348418680249434568419308588}, { 0.937339011912574923201899593}, + {-0.937339011912574923201899593}, { 0.348418680249434568419308588}, + { 0.972939952205560145467720114}, { 0.231058108280671119643236018}, + {-0.231058108280671119643236018}, { 0.972939952205560145467720114}, + { 0.524589682678468906215098464}, { 0.851355193105265142261290312}, + {-0.851355193105265142261290312}, { 0.524589682678468906215098464}, + { 0.810457198252594791726703434}, { 0.585797857456438860328080838}, + {-0.585797857456438860328080838}, { 0.810457198252594791726703434}, + { 0.158858143333861441684385360}, { 0.987301418157858382399815802}, + {-0.987301418157858382399815802}, { 0.158858143333861441684385360}, + { 0.990902635427780025108237011}, { 0.134580708507126186316358409}, + {-0.134580708507126186316358409}, { 0.990902635427780025108237011}, + { 0.605511041404325513920626941}, { 0.795836904608883536262791915}, + {-0.795836904608883536262791915}, { 0.605511041404325513920626941}, + { 0.863972856121586737918147054}, { 0.503538383725717558691867071}, + {-0.503538383725717558691867071}, { 0.863972856121586737918147054}, + { 0.254865659604514571553980779}, { 0.966976471044852109087220226}, + {-0.966976471044852109087220226}, { 0.254865659604514571553980779}, + { 0.945607325380521325730945387}, { 0.325310292162262934135954708}, + {-0.325310292162262934135954708}, { 0.945607325380521325730945387}, + { 0.438616238538527637647025738}, { 0.898674465693953843041976744}, + {-0.898674465693953843041976744}, { 0.438616238538527637647025738}, + { 0.749136394523459325469203257}, { 0.662415777590171761113069817}, + {-0.662415777590171761113069817}, { 0.749136394523459325469203257}, + { 0.061320736302208577782614593}, { 0.998118112900149207125155861}, + {-0.998118112900149207125155861}, { 0.061320736302208577782614593}, + { 0.996312612182778012627226190}, { 0.085797312344439890461556332}, + {-0.085797312344439890461556332}, { 0.996312612182778012627226190}, + { 0.643831542889791465068086063}, { 
0.765167265622458925888815999}, + {-0.765167265622458925888815999}, { 0.643831542889791465068086063}, + { 0.887639620402853947760181617}, { 0.460538710958240023633181487}, + {-0.460538710958240023633181487}, { 0.887639620402853947760181617}, + { 0.302005949319228067003463232}, { 0.953306040354193836916740383}, + {-0.953306040354193836916740383}, { 0.302005949319228067003463232}, + { 0.960430519415565811199035138}, { 0.278519689385053105207848526}, + {-0.278519689385053105207848526}, { 0.960430519415565811199035138}, + { 0.482183772079122748517344481}, { 0.876070094195406607095844268}, + {-0.876070094195406607095844268}, { 0.482183772079122748517344481}, + { 0.780737228572094478301588484}, { 0.624859488142386377084072816}, + {-0.624859488142386377084072816}, { 0.780737228572094478301588484}, + { 0.110222207293883058807899140}, { 0.993906970002356041546922813}, + {-0.993906970002356041546922813}, { 0.110222207293883058807899140}, + { 0.983105487431216327180301155}, { 0.183039887955140958516532578}, + {-0.183039887955140958516532578}, { 0.983105487431216327180301155}, + { 0.565731810783613197389765011}, { 0.824589302785025264474803737}, + {-0.824589302785025264474803737}, { 0.565731810783613197389765011}, + { 0.838224705554838043186996856}, { 0.545324988422046422313987347}, + {-0.545324988422046422313987347}, { 0.838224705554838043186996856}, + { 0.207111376192218549708116020}, { 0.978317370719627633106240097}, + {-0.978317370719627633106240097}, { 0.207111376192218549708116020}, + { 0.928506080473215565937167396}, { 0.371317193951837543411934967}, + {-0.371317193951837543411934967}, { 0.928506080473215565937167396}, + { 0.393992040061048108596188661}, { 0.919113851690057743908477789}, + {-0.919113851690057743908477789}, { 0.393992040061048108596188661}, + { 0.715730825283818654125532623}, { 0.698376249408972853554813503}, + {-0.698376249408972853554813503}, { 0.715730825283818654125532623}, + { 0.012271538285719926079408262}, { 0.999924701839144540921646491}, + 
{-0.999924701839144540921646491}, { 0.012271538285719926079408262}, + { 0.999981175282601142656990438}, { 0.006135884649154475359640235}, + {-0.006135884649154475359640235}, { 0.999981175282601142656990438}, + { 0.702754744457225302452914421}, { 0.711432195745216441522130290}, + {-0.711432195745216441522130290}, { 0.702754744457225302452914421}, + { 0.921514039342041943465396332}, { 0.388345046698826291624993541}, + {-0.388345046698826291624993541}, { 0.921514039342041943465396332}, + { 0.377007410216418256726567823}, { 0.926210242138311341974793388}, + {-0.926210242138311341974793388}, { 0.377007410216418256726567823}, + { 0.979569765685440534439326110}, { 0.201104634842091911558443546}, + {-0.201104634842091911558443546}, { 0.979569765685440534439326110}, + { 0.550457972936604802977289893}, { 0.834862874986380056304401383}, + {-0.834862874986380056304401383}, { 0.550457972936604802977289893}, + { 0.828045045257755752067527592}, { 0.560661576197336023839710223}, + {-0.560661576197336023839710223}, { 0.828045045257755752067527592}, + { 0.189068664149806212754997837}, { 0.981963869109555264072848154}, + {-0.981963869109555264072848154}, { 0.189068664149806212754997837}, + { 0.994564570734255452119106243}, { 0.104121633872054579120943880}, + {-0.104121633872054579120943880}, { 0.994564570734255452119106243}, + { 0.629638238914927025372981341}, { 0.776888465673232450040827983}, + {-0.776888465673232450040827983}, { 0.629638238914927025372981341}, + { 0.879012226428633477831323711}, { 0.476799230063322133342158117}, + {-0.476799230063322133342158117}, { 0.879012226428633477831323711}, + { 0.284407537211271843618310615}, { 0.958703474895871555374645792}, + {-0.958703474895871555374645792}, { 0.284407537211271843618310615}, + { 0.955141168305770721498157712}, { 0.296150888243623824121786128}, + {-0.296150888243623824121786128}, { 0.955141168305770721498157712}, + { 0.465976495767966177902756065}, { 0.884797098430937780104007041}, + {-0.884797098430937780104007041}, { 
0.465976495767966177902756065}, + { 0.769103337645579639346626069}, { 0.639124444863775743801488193}, + {-0.639124444863775743801488193}, { 0.769103337645579639346626069}, + { 0.091908956497132728624990979}, { 0.995767414467659793982495643}, + {-0.995767414467659793982495643}, { 0.091908956497132728624990979}, + { 0.998475580573294752208559038}, { 0.055195244349689939809447526}, + {-0.055195244349689939809447526}, { 0.998475580573294752208559038}, + { 0.666999922303637506650154222}, { 0.745057785441465962407907310}, + {-0.745057785441465962407907310}, { 0.666999922303637506650154222}, + { 0.901348847046022014570746093}, { 0.433093818853151968484222638}, + {-0.433093818853151968484222638}, { 0.901348847046022014570746093}, + { 0.331106305759876401737190737}, { 0.943593458161960361495301445}, + {-0.943593458161960361495301445}, { 0.331106305759876401737190737}, + { 0.968522094274417316221088329}, { 0.248927605745720168110682816}, + {-0.248927605745720168110682816}, { 0.968522094274417316221088329}, + { 0.508830142543107036931749324}, { 0.860866938637767279344583877}, + {-0.860866938637767279344583877}, { 0.508830142543107036931749324}, + { 0.799537269107905033500246232}, { 0.600616479383868926653875896}, + {-0.600616479383868926653875896}, { 0.799537269107905033500246232}, + { 0.140658239332849230714788846}, { 0.990058210262297105505906464}, + {-0.990058210262297105505906464}, { 0.140658239332849230714788846}, + { 0.988257567730749491404792538}, { 0.152797185258443427720336613}, + {-0.152797185258443427720336613}, { 0.988257567730749491404792538}, + { 0.590759701858874228423887908}, { 0.806847553543799272206514313}, + {-0.806847553543799272206514313}, { 0.590759701858874228423887908}, + { 0.854557988365400520767862276}, { 0.519355990165589587361829932}, + {-0.519355990165589587361829932}, { 0.854557988365400520767862276}, + { 0.237023605994367206867735915}, { 0.971503890986251775537099622}, + {-0.971503890986251775537099622}, { 0.237023605994367206867735915}, + { 
0.939459223602189911962669246}, { 0.342660717311994397592781983}, + {-0.342660717311994397592781983}, { 0.939459223602189911962669246}, + { 0.422000270799799685941287941}, { 0.906595704514915365332960588}, + {-0.906595704514915365332960588}, { 0.422000270799799685941287941}, + { 0.736816568877369875090132520}, { 0.676092703575315960360419228}, + {-0.676092703575315960360419228}, { 0.736816568877369875090132520}, + { 0.042938256934940823077124540}, { 0.999077727752645382888781997}, + {-0.999077727752645382888781997}, { 0.042938256934940823077124540}, + { 0.999529417501093163079703322}, { 0.030674803176636625934021028}, + {-0.030674803176636625934021028}, { 0.999529417501093163079703322}, + { 0.685083667772700381362052545}, { 0.728464390448225196492035438}, + {-0.728464390448225196492035438}, { 0.685083667772700381362052545}, + { 0.911706032005429851404397325}, { 0.410843171057903942183466675}, + {-0.410843171057903942183466675}, { 0.911706032005429851404397325}, + { 0.354163525420490382357395796}, { 0.935183509938947577642207480}, + {-0.935183509938947577642207480}, { 0.354163525420490382357395796}, + { 0.974339382785575860518721668}, { 0.225083911359792835991642120}, + {-0.225083911359792835991642120}, { 0.974339382785575860518721668}, + { 0.529803624686294668216054671}, { 0.848120344803297251279133563}, + {-0.848120344803297251279133563}, { 0.529803624686294668216054671}, + { 0.814036329705948361654516690}, { 0.580813958095764545075595272}, + {-0.580813958095764545075595272}, { 0.814036329705948361654516690}, + { 0.164913120489969921418189113}, { 0.986308097244598647863297524}, + {-0.986308097244598647863297524}, { 0.164913120489969921418189113}, + { 0.991709753669099522860049931}, { 0.128498110793793172624415589}, + {-0.128498110793793172624415589}, { 0.991709753669099522860049931}, + { 0.610382806276309452716352152}, { 0.792106577300212351782342879}, + {-0.792106577300212351782342879}, { 0.610382806276309452716352152}, + { 0.867046245515692651480195629}, { 
0.498227666972781852410983869}, + {-0.498227666972781852410983869}, { 0.867046245515692651480195629}, + { 0.260794117915275518280186509}, { 0.965394441697689374550843858}, + {-0.965394441697689374550843858}, { 0.260794117915275518280186509}, + { 0.947585591017741134653387321}, { 0.319502030816015677901518272}, + {-0.319502030816015677901518272}, { 0.947585591017741134653387321}, + { 0.444122144570429231642069418}, { 0.895966249756185155914560282}, + {-0.895966249756185155914560282}, { 0.444122144570429231642069418}, + { 0.753186799043612482483430486}, { 0.657806693297078656931182264}, + {-0.657806693297078656931182264}, { 0.753186799043612482483430486}, + { 0.067443919563664057897972422}, { 0.997723066644191609848546728}, + {-0.997723066644191609848546728}, { 0.067443919563664057897972422}, + { 0.996820299291165714972629398}, { 0.079682437971430121147120656}, + {-0.079682437971430121147120656}, { 0.996820299291165714972629398}, + { 0.648514401022112445084560551}, { 0.761202385484261814029709836}, + {-0.761202385484261814029709836}, { 0.648514401022112445084560551}, + { 0.890448723244757889952150560}, { 0.455083587126343823535869268}, + {-0.455083587126343823535869268}, { 0.890448723244757889952150560}, + { 0.307849640041534893682063646}, { 0.951435020969008369549175569}, + {-0.951435020969008369549175569}, { 0.307849640041534893682063646}, + { 0.962121404269041595429604316}, { 0.272621355449948984493347477}, + {-0.272621355449948984493347477}, { 0.962121404269041595429604316}, + { 0.487550160148435954641485027}, { 0.873094978418290098636085973}, + {-0.873094978418290098636085973}, { 0.487550160148435954641485027}, + { 0.784556597155575233023892575}, { 0.620057211763289178646268191}, + {-0.620057211763289178646268191}, { 0.784556597155575233023892575}, + { 0.116318630911904767252544319}, { 0.993211949234794533104601012}, + {-0.993211949234794533104601012}, { 0.116318630911904767252544319}, + { 0.984210092386929073193874387}, { 0.177004220412148756196839844}, + 
{-0.177004220412148756196839844}, { 0.984210092386929073193874387}, + { 0.570780745886967280232652864}, { 0.821102514991104679060430820}, + {-0.821102514991104679060430820}, { 0.570780745886967280232652864}, + { 0.841554977436898409603499520}, { 0.540171472729892881297845480}, + {-0.540171472729892881297845480}, { 0.841554977436898409603499520}, + { 0.213110319916091373967757518}, { 0.977028142657754351485866211}, + {-0.977028142657754351485866211}, { 0.213110319916091373967757518}, + { 0.930766961078983731944872340}, { 0.365612997804773870011745909}, + {-0.365612997804773870011745909}, { 0.930766961078983731944872340}, + { 0.399624199845646828544117031}, { 0.916679059921042663116457013}, + {-0.916679059921042663116457013}, { 0.399624199845646828544117031}, + { 0.720002507961381629076682999}, { 0.693971460889654009003734389}, + {-0.693971460889654009003734389}, { 0.720002507961381629076682999}, + { 0.018406729905804820927366313}, { 0.999830581795823422015722275}, + {-0.999830581795823422015722275}, { 0.018406729905804820927366313}, + { 0.999830581795823422015722275}, { 0.018406729905804820927366313}, + {-0.018406729905804820927366313}, { 0.999830581795823422015722275}, + { 0.693971460889654009003734389}, { 0.720002507961381629076682999}, + {-0.720002507961381629076682999}, { 0.693971460889654009003734389}, + { 0.916679059921042663116457013}, { 0.399624199845646828544117031}, + {-0.399624199845646828544117031}, { 0.916679059921042663116457013}, + { 0.365612997804773870011745909}, { 0.930766961078983731944872340}, + {-0.930766961078983731944872340}, { 0.365612997804773870011745909}, + { 0.977028142657754351485866211}, { 0.213110319916091373967757518}, + {-0.213110319916091373967757518}, { 0.977028142657754351485866211}, + { 0.540171472729892881297845480}, { 0.841554977436898409603499520}, + {-0.841554977436898409603499520}, { 0.540171472729892881297845480}, + { 0.821102514991104679060430820}, { 0.570780745886967280232652864}, + {-0.570780745886967280232652864}, { 
0.821102514991104679060430820}, + { 0.177004220412148756196839844}, { 0.984210092386929073193874387}, + {-0.984210092386929073193874387}, { 0.177004220412148756196839844}, + { 0.993211949234794533104601012}, { 0.116318630911904767252544319}, + {-0.116318630911904767252544319}, { 0.993211949234794533104601012}, + { 0.620057211763289178646268191}, { 0.784556597155575233023892575}, + {-0.784556597155575233023892575}, { 0.620057211763289178646268191}, + { 0.873094978418290098636085973}, { 0.487550160148435954641485027}, + {-0.487550160148435954641485027}, { 0.873094978418290098636085973}, + { 0.272621355449948984493347477}, { 0.962121404269041595429604316}, + {-0.962121404269041595429604316}, { 0.272621355449948984493347477}, + { 0.951435020969008369549175569}, { 0.307849640041534893682063646}, + {-0.307849640041534893682063646}, { 0.951435020969008369549175569}, + { 0.455083587126343823535869268}, { 0.890448723244757889952150560}, + {-0.890448723244757889952150560}, { 0.455083587126343823535869268}, + { 0.761202385484261814029709836}, { 0.648514401022112445084560551}, + {-0.648514401022112445084560551}, { 0.761202385484261814029709836}, + { 0.079682437971430121147120656}, { 0.996820299291165714972629398}, + {-0.996820299291165714972629398}, { 0.079682437971430121147120656}, + { 0.997723066644191609848546728}, { 0.067443919563664057897972422}, + {-0.067443919563664057897972422}, { 0.997723066644191609848546728}, + { 0.657806693297078656931182264}, { 0.753186799043612482483430486}, + {-0.753186799043612482483430486}, { 0.657806693297078656931182264}, + { 0.895966249756185155914560282}, { 0.444122144570429231642069418}, + {-0.444122144570429231642069418}, { 0.895966249756185155914560282}, + { 0.319502030816015677901518272}, { 0.947585591017741134653387321}, + {-0.947585591017741134653387321}, { 0.319502030816015677901518272}, + { 0.965394441697689374550843858}, { 0.260794117915275518280186509}, + {-0.260794117915275518280186509}, { 0.965394441697689374550843858}, + { 
0.498227666972781852410983869}, { 0.867046245515692651480195629}, + {-0.867046245515692651480195629}, { 0.498227666972781852410983869}, + { 0.792106577300212351782342879}, { 0.610382806276309452716352152}, + {-0.610382806276309452716352152}, { 0.792106577300212351782342879}, + { 0.128498110793793172624415589}, { 0.991709753669099522860049931}, + {-0.991709753669099522860049931}, { 0.128498110793793172624415589}, + { 0.986308097244598647863297524}, { 0.164913120489969921418189113}, + {-0.164913120489969921418189113}, { 0.986308097244598647863297524}, + { 0.580813958095764545075595272}, { 0.814036329705948361654516690}, + {-0.814036329705948361654516690}, { 0.580813958095764545075595272}, + { 0.848120344803297251279133563}, { 0.529803624686294668216054671}, + {-0.529803624686294668216054671}, { 0.848120344803297251279133563}, + { 0.225083911359792835991642120}, { 0.974339382785575860518721668}, + {-0.974339382785575860518721668}, { 0.225083911359792835991642120}, + { 0.935183509938947577642207480}, { 0.354163525420490382357395796}, + {-0.354163525420490382357395796}, { 0.935183509938947577642207480}, + { 0.410843171057903942183466675}, { 0.911706032005429851404397325}, + {-0.911706032005429851404397325}, { 0.410843171057903942183466675}, + { 0.728464390448225196492035438}, { 0.685083667772700381362052545}, + {-0.685083667772700381362052545}, { 0.728464390448225196492035438}, + { 0.030674803176636625934021028}, { 0.999529417501093163079703322}, + {-0.999529417501093163079703322}, { 0.030674803176636625934021028}, + { 0.999077727752645382888781997}, { 0.042938256934940823077124540}, + {-0.042938256934940823077124540}, { 0.999077727752645382888781997}, + { 0.676092703575315960360419228}, { 0.736816568877369875090132520}, + {-0.736816568877369875090132520}, { 0.676092703575315960360419228}, + { 0.906595704514915365332960588}, { 0.422000270799799685941287941}, + {-0.422000270799799685941287941}, { 0.906595704514915365332960588}, + { 0.342660717311994397592781983}, { 
0.939459223602189911962669246}, + {-0.939459223602189911962669246}, { 0.342660717311994397592781983}, + { 0.971503890986251775537099622}, { 0.237023605994367206867735915}, + {-0.237023605994367206867735915}, { 0.971503890986251775537099622}, + { 0.519355990165589587361829932}, { 0.854557988365400520767862276}, + {-0.854557988365400520767862276}, { 0.519355990165589587361829932}, + { 0.806847553543799272206514313}, { 0.590759701858874228423887908}, + {-0.590759701858874228423887908}, { 0.806847553543799272206514313}, + { 0.152797185258443427720336613}, { 0.988257567730749491404792538}, + {-0.988257567730749491404792538}, { 0.152797185258443427720336613}, + { 0.990058210262297105505906464}, { 0.140658239332849230714788846}, + {-0.140658239332849230714788846}, { 0.990058210262297105505906464}, + { 0.600616479383868926653875896}, { 0.799537269107905033500246232}, + {-0.799537269107905033500246232}, { 0.600616479383868926653875896}, + { 0.860866938637767279344583877}, { 0.508830142543107036931749324}, + {-0.508830142543107036931749324}, { 0.860866938637767279344583877}, + { 0.248927605745720168110682816}, { 0.968522094274417316221088329}, + {-0.968522094274417316221088329}, { 0.248927605745720168110682816}, + { 0.943593458161960361495301445}, { 0.331106305759876401737190737}, + {-0.331106305759876401737190737}, { 0.943593458161960361495301445}, + { 0.433093818853151968484222638}, { 0.901348847046022014570746093}, + {-0.901348847046022014570746093}, { 0.433093818853151968484222638}, + { 0.745057785441465962407907310}, { 0.666999922303637506650154222}, + {-0.666999922303637506650154222}, { 0.745057785441465962407907310}, + { 0.055195244349689939809447526}, { 0.998475580573294752208559038}, + {-0.998475580573294752208559038}, { 0.055195244349689939809447526}, + { 0.995767414467659793982495643}, { 0.091908956497132728624990979}, + {-0.091908956497132728624990979}, { 0.995767414467659793982495643}, + { 0.639124444863775743801488193}, { 0.769103337645579639346626069}, + 
{-0.769103337645579639346626069}, { 0.639124444863775743801488193}, + { 0.884797098430937780104007041}, { 0.465976495767966177902756065}, + {-0.465976495767966177902756065}, { 0.884797098430937780104007041}, + { 0.296150888243623824121786128}, { 0.955141168305770721498157712}, + {-0.955141168305770721498157712}, { 0.296150888243623824121786128}, + { 0.958703474895871555374645792}, { 0.284407537211271843618310615}, + {-0.284407537211271843618310615}, { 0.958703474895871555374645792}, + { 0.476799230063322133342158117}, { 0.879012226428633477831323711}, + {-0.879012226428633477831323711}, { 0.476799230063322133342158117}, + { 0.776888465673232450040827983}, { 0.629638238914927025372981341}, + {-0.629638238914927025372981341}, { 0.776888465673232450040827983}, + { 0.104121633872054579120943880}, { 0.994564570734255452119106243}, + {-0.994564570734255452119106243}, { 0.104121633872054579120943880}, + { 0.981963869109555264072848154}, { 0.189068664149806212754997837}, + {-0.189068664149806212754997837}, { 0.981963869109555264072848154}, + { 0.560661576197336023839710223}, { 0.828045045257755752067527592}, + {-0.828045045257755752067527592}, { 0.560661576197336023839710223}, + { 0.834862874986380056304401383}, { 0.550457972936604802977289893}, + {-0.550457972936604802977289893}, { 0.834862874986380056304401383}, + { 0.201104634842091911558443546}, { 0.979569765685440534439326110}, + {-0.979569765685440534439326110}, { 0.201104634842091911558443546}, + { 0.926210242138311341974793388}, { 0.377007410216418256726567823}, + {-0.377007410216418256726567823}, { 0.926210242138311341974793388}, + { 0.388345046698826291624993541}, { 0.921514039342041943465396332}, + {-0.921514039342041943465396332}, { 0.388345046698826291624993541}, + { 0.711432195745216441522130290}, { 0.702754744457225302452914421}, + {-0.702754744457225302452914421}, { 0.711432195745216441522130290}, + { 0.006135884649154475359640235}, { 0.999981175282601142656990438}, + {-0.999981175282601142656990438}, { 
0.006135884649154475359640235}, + { 0.999995293809576171511580126}, { 0.003067956762965976270145365}, + {-0.003067956762965976270145365}, { 0.999995293809576171511580126}, + { 0.704934080375904908852523758}, { 0.709272826438865651316533772}, + {-0.709272826438865651316533772}, { 0.704934080375904908852523758}, + { 0.922701128333878570437264227}, { 0.385516053843918864075607949}, + {-0.385516053843918864075607949}, { 0.922701128333878570437264227}, + { 0.379847208924051170576281147}, { 0.925049240782677590302371869}, + {-0.925049240782677590302371869}, { 0.379847208924051170576281147}, + { 0.980182135968117392690210009}, { 0.198098410717953586179324918}, + {-0.198098410717953586179324918}, { 0.980182135968117392690210009}, + { 0.553016705580027531764226988}, { 0.833170164701913186439915922}, + {-0.833170164701913186439915922}, { 0.553016705580027531764226988}, + { 0.829761233794523042469023765}, { 0.558118531220556115693702964}, + {-0.558118531220556115693702964}, { 0.829761233794523042469023765}, + { 0.192080397049892441679288205}, { 0.981379193313754574318224190}, + {-0.981379193313754574318224190}, { 0.192080397049892441679288205}, + { 0.994879330794805620591166107}, { 0.101069862754827824987887585}, + {-0.101069862754827824987887585}, { 0.994879330794805620591166107}, + { 0.632018735939809021909403706}, { 0.774953106594873878359129282}, + {-0.774953106594873878359129282}, { 0.632018735939809021909403706}, + { 0.880470889052160770806542929}, { 0.474100214650550014398580015}, + {-0.474100214650550014398580015}, { 0.880470889052160770806542929}, + { 0.287347459544729526477331841}, { 0.957826413027532890321037029}, + {-0.957826413027532890321037029}, { 0.287347459544729526477331841}, + { 0.956045251349996443270479823}, { 0.293219162694258650606608599}, + {-0.293219162694258650606608599}, { 0.956045251349996443270479823}, + { 0.468688822035827933697617870}, { 0.883363338665731594736308015}, + {-0.883363338665731594736308015}, { 0.468688822035827933697617870}, + { 
0.771060524261813773200605759}, { 0.636761861236284230413943435}, + {-0.636761861236284230413943435}, { 0.771060524261813773200605759}, + { 0.094963495329638998938034312}, { 0.995480755491926941769171600}, + {-0.995480755491926941769171600}, { 0.094963495329638998938034312}, + { 0.998640218180265222418199049}, { 0.052131704680283321236358216}, + {-0.052131704680283321236358216}, { 0.998640218180265222418199049}, + { 0.669282588346636065720696366}, { 0.743007952135121693517362293}, + {-0.743007952135121693517362293}, { 0.669282588346636065720696366}, + { 0.902673318237258806751502391}, { 0.430326481340082633908199031}, + {-0.430326481340082633908199031}, { 0.902673318237258806751502391}, + { 0.333999651442009404650865481}, { 0.942573197601446879280758735}, + {-0.942573197601446879280758735}, { 0.333999651442009404650865481}, + { 0.969281235356548486048290738}, { 0.245955050335794611599924709}, + {-0.245955050335794611599924709}, { 0.969281235356548486048290738}, + { 0.511468850437970399504391001}, { 0.859301818357008404783582139}, + {-0.859301818357008404783582139}, { 0.511468850437970399504391001}, + { 0.801376171723140219430247777}, { 0.598160706996342311724958652}, + {-0.598160706996342311724958652}, { 0.801376171723140219430247777}, + { 0.143695033150294454819773349}, { 0.989622017463200834623694454}, + {-0.989622017463200834623694454}, { 0.143695033150294454819773349}, + { 0.988721691960323767604516485}, { 0.149764534677321517229695737}, + {-0.149764534677321517229695737}, { 0.988721691960323767604516485}, + { 0.593232295039799808047809426}, { 0.805031331142963597922659282}, + {-0.805031331142963597922659282}, { 0.593232295039799808047809426}, + { 0.856147328375194481019630732}, { 0.516731799017649881508753876}, + {-0.516731799017649881508753876}, { 0.856147328375194481019630732}, + { 0.240003022448741486568922365}, { 0.970772140728950302138169611}, + {-0.970772140728950302138169611}, { 0.240003022448741486568922365}, + { 0.940506070593268323787291309}, { 
0.339776884406826857828825803}, + {-0.339776884406826857828825803}, { 0.940506070593268323787291309}, + { 0.424779681209108833357226189}, { 0.905296759318118774354048329}, + {-0.905296759318118774354048329}, { 0.424779681209108833357226189}, + { 0.738887324460615147933116508}, { 0.673829000378756060917568372}, + {-0.673829000378756060917568372}, { 0.738887324460615147933116508}, + { 0.046003182130914628814301788}, { 0.998941293186856850633930266}, + {-0.998941293186856850633930266}, { 0.046003182130914628814301788}, + { 0.999618822495178597116830637}, { 0.027608145778965741612354872}, + {-0.027608145778965741612354872}, { 0.999618822495178597116830637}, + { 0.687315340891759108199186948}, { 0.726359155084345976817494315}, + {-0.726359155084345976817494315}, { 0.687315340891759108199186948}, + { 0.912962190428398164628018233}, { 0.408044162864978680820747499}, + {-0.408044162864978680820747499}, { 0.912962190428398164628018233}, + { 0.357030961233430032614954036}, { 0.934092550404258914729877883}, + {-0.934092550404258914729877883}, { 0.357030961233430032614954036}, + { 0.975025345066994146844913468}, { 0.222093620973203534094094721}, + {-0.222093620973203534094094721}, { 0.975025345066994146844913468}, + { 0.532403127877197971442805218}, { 0.846490938774052078300544488}, + {-0.846490938774052078300544488}, { 0.532403127877197971442805218}, + { 0.815814410806733789010772660}, { 0.578313796411655563342245019}, + {-0.578313796411655563342245019}, { 0.815814410806733789010772660}, + { 0.167938294974731178054745536}, { 0.985797509167567424700995000}, + {-0.985797509167567424700995000}, { 0.167938294974731178054745536}, + { 0.992099313142191757112085445}, { 0.125454983411546238542336453}, + {-0.125454983411546238542336453}, { 0.992099313142191757112085445}, + { 0.612810082429409703935211936}, { 0.790230221437310055030217152}, + {-0.790230221437310055030217152}, { 0.612810082429409703935211936}, + { 0.868570705971340895340449876}, { 0.495565261825772531150266670}, + 
{-0.495565261825772531150266670}, { 0.868570705971340895340449876}, + { 0.263754678974831383611349322}, { 0.964589793289812723836432159}, + {-0.964589793289812723836432159}, { 0.263754678974831383611349322}, + { 0.948561349915730288158494826}, { 0.316593375556165867243047035}, + {-0.316593375556165867243047035}, { 0.948561349915730288158494826}, + { 0.446868840162374195353044389}, { 0.894599485631382678433072126}, + {-0.894599485631382678433072126}, { 0.446868840162374195353044389}, + { 0.755201376896536527598710756}, { 0.655492852999615385312679701}, + {-0.655492852999615385312679701}, { 0.755201376896536527598710756}, + { 0.070504573389613863027351471}, { 0.997511456140303459699448390}, + {-0.997511456140303459699448390}, { 0.070504573389613863027351471}, + { 0.997060070339482978987989949}, { 0.076623861392031492278332463}, + {-0.076623861392031492278332463}, { 0.997060070339482978987989949}, + { 0.650846684996380915068975573}, { 0.759209188978388033485525443}, + {-0.759209188978388033485525443}, { 0.650846684996380915068975573}, + { 0.891840709392342727796478697}, { 0.452349587233770874133026703}, + {-0.452349587233770874133026703}, { 0.891840709392342727796478697}, + { 0.310767152749611495835997250}, { 0.950486073949481721759926101}, + {-0.950486073949481721759926101}, { 0.310767152749611495835997250}, + { 0.962953266873683886347921481}, { 0.269668325572915106525464462}, + {-0.269668325572915106525464462}, { 0.962953266873683886347921481}, + { 0.490226483288291154229598449}, { 0.871595086655951034842481435}, + {-0.871595086655951034842481435}, { 0.490226483288291154229598449}, + { 0.786455213599085757522319464}, { 0.617647307937803932403979402}, + {-0.617647307937803932403979402}, { 0.786455213599085757522319464}, + { 0.119365214810991364593637790}, { 0.992850414459865090793563344}, + {-0.992850414459865090793563344}, { 0.119365214810991364593637790}, + { 0.984748501801904218556553176}, { 0.173983873387463827950700807}, + {-0.173983873387463827950700807}, { 
0.984748501801904218556553176}, + { 0.573297166698042212820171239}, { 0.819347520076796960824689637}, + {-0.819347520076796960824689637}, { 0.573297166698042212820171239}, + { 0.843208239641845437161743865}, { 0.537587076295645482502214932}, + {-0.537587076295645482502214932}, { 0.843208239641845437161743865}, + { 0.216106797076219509948385131}, { 0.976369731330021149312732194}, + {-0.976369731330021149312732194}, { 0.216106797076219509948385131}, + { 0.931884265581668106718557199}, { 0.362755724367397216204854462}, + {-0.362755724367397216204854462}, { 0.931884265581668106718557199}, + { 0.402434650859418441082533934}, { 0.915448716088267819566431292}, + {-0.915448716088267819566431292}, { 0.402434650859418441082533934}, + { 0.722128193929215321243607198}, { 0.691759258364157774906734132}, + {-0.691759258364157774906734132}, { 0.722128193929215321243607198}, + { 0.021474080275469507418374898}, { 0.999769405351215321657617036}, + {-0.999769405351215321657617036}, { 0.021474080275469507418374898}, + { 0.999882347454212525633049627}, { 0.015339206284988101044151868}, + {-0.015339206284988101044151868}, { 0.999882347454212525633049627}, + { 0.696177131491462944788582591}, { 0.717870045055731736211325329}, + {-0.717870045055731736211325329}, { 0.696177131491462944788582591}, + { 0.917900775621390457642276297}, { 0.396809987416710328595290911}, + {-0.396809987416710328595290911}, { 0.917900775621390457642276297}, + { 0.368466829953372331712746222}, { 0.929640895843181265457918066}, + {-0.929640895843181265457918066}, { 0.368466829953372331712746222}, + { 0.977677357824509979943404762}, { 0.210111836880469621717489972}, + {-0.210111836880469621717489972}, { 0.977677357824509979943404762}, + { 0.542750784864515906586768661}, { 0.839893794195999504583383987}, + {-0.839893794195999504583383987}, { 0.542750784864515906586768661}, + { 0.822849781375826332046780034}, { 0.568258952670131549790548489}, + {-0.568258952670131549790548489}, { 0.822849781375826332046780034}, + { 
0.180022901405699522679906590}, { 0.983662419211730274396237776}, + {-0.983662419211730274396237776}, { 0.180022901405699522679906590}, + { 0.993564135520595333782021697}, { 0.113270952177564349018228733}, + {-0.113270952177564349018228733}, { 0.993564135520595333782021697}, + { 0.622461279374149972519166721}, { 0.782650596166575738458949301}, + {-0.782650596166575738458949301}, { 0.622461279374149972519166721}, + { 0.874586652278176112634431897}, { 0.484869248000791101822951699}, + {-0.484869248000791101822951699}, { 0.874586652278176112634431897}, + { 0.275571819310958163076425168}, { 0.961280485811320641748659653}, + {-0.961280485811320641748659653}, { 0.275571819310958163076425168}, + { 0.952375012719765858529893608}, { 0.304929229735402406490728633}, + {-0.304929229735402406490728633}, { 0.952375012719765858529893608}, + { 0.457813303598877221904961155}, { 0.889048355854664562540777729}, + {-0.889048355854664562540777729}, { 0.457813303598877221904961155}, + { 0.763188417263381271704838297}, { 0.646176012983316364832802220}, + {-0.646176012983316364832802220}, { 0.763188417263381271704838297}, + { 0.082740264549375693111987083}, { 0.996571145790554847093566910}, + {-0.996571145790554847093566910}, { 0.082740264549375693111987083}, + { 0.997925286198596012623025462}, { 0.064382630929857460819324537}, + {-0.064382630929857460819324537}, { 0.997925286198596012623025462}, + { 0.660114342067420478559490747}, { 0.751165131909686411205819422}, + {-0.751165131909686411205819422}, { 0.660114342067420478559490747}, + { 0.897324580705418281231391836}, { 0.441371268731716692879988968}, + {-0.441371268731716692879988968}, { 0.897324580705418281231391836}, + { 0.322407678801069848384807478}, { 0.946600913083283570044599823}, + {-0.946600913083283570044599823}, { 0.322407678801069848384807478}, + { 0.966190003445412555433832961}, { 0.257831102162159005614471295}, + {-0.257831102162159005614471295}, { 0.966190003445412555433832961}, + { 0.500885382611240786241285004}, { 
0.865513624090569082825488358}, + {-0.865513624090569082825488358}, { 0.500885382611240786241285004}, + { 0.793975477554337164895083757}, { 0.607949784967773667243642671}, + {-0.607949784967773667243642671}, { 0.793975477554337164895083757}, + { 0.131540028702883111103387493}, { 0.991310859846115418957349799}, + {-0.991310859846115418957349799}, { 0.131540028702883111103387493}, + { 0.986809401814185476970235952}, { 0.161886393780111837641387995}, + {-0.161886393780111837641387995}, { 0.986809401814185476970235952}, + { 0.583308652937698294392830961}, { 0.812250586585203913049744181}, + {-0.812250586585203913049744181}, { 0.583308652937698294392830961}, + { 0.849741768000852489471268395}, { 0.527199134781901348464274575}, + {-0.527199134781901348464274575}, { 0.849741768000852489471268395}, + { 0.228072083170885739254457379}, { 0.973644249650811925318383912}, + {-0.973644249650811925318383912}, { 0.228072083170885739254457379}, + { 0.936265667170278246576310996}, { 0.351292756085567125601307623}, + {-0.351292756085567125601307623}, { 0.936265667170278246576310996}, + { 0.413638312238434547471944324}, { 0.910441292258067196934095369}, + {-0.910441292258067196934095369}, { 0.413638312238434547471944324}, + { 0.730562769227827561177758850}, { 0.682845546385248068164596123}, + {-0.682845546385248068164596123}, { 0.730562769227827561177758850}, + { 0.033741171851377584833716112}, { 0.999430604555461772019008327}, + {-0.999430604555461772019008327}, { 0.033741171851377584833716112}, + { 0.999204758618363895492950001}, { 0.039872927587739811128578738}, + {-0.039872927587739811128578738}, { 0.999204758618363895492950001}, + { 0.678350043129861486873655042}, { 0.734738878095963464563223604}, + {-0.734738878095963464563223604}, { 0.678350043129861486873655042}, + { 0.907886116487666212038681480}, { 0.419216888363223956433010020}, + {-0.419216888363223956433010020}, { 0.907886116487666212038681480}, + { 0.345541324963989065539191723}, { 0.938403534063108112192420774}, + 
{-0.938403534063108112192420774}, { 0.345541324963989065539191723}, + { 0.972226497078936305708321144}, { 0.234041958583543423191242045}, + {-0.234041958583543423191242045}, { 0.972226497078936305708321144}, + { 0.521975292937154342694258318}, { 0.852960604930363657746588082}, + {-0.852960604930363657746588082}, { 0.521975292937154342694258318}, + { 0.808656181588174991946968128}, { 0.588281548222645304786439813}, + {-0.588281548222645304786439813}, { 0.808656181588174991946968128}, + { 0.155828397654265235743101486}, { 0.987784141644572154230969032}, + {-0.987784141644572154230969032}, { 0.155828397654265235743101486}, + { 0.990485084256457037998682243}, { 0.137620121586486044948441663}, + {-0.137620121586486044948441663}, { 0.990485084256457037998682243}, + { 0.603066598540348201693430617}, { 0.797690840943391108362662755}, + {-0.797690840943391108362662755}, { 0.603066598540348201693430617}, + { 0.862423956111040538690933878}, { 0.506186645345155291048942344}, + {-0.506186645345155291048942344}, { 0.862423956111040538690933878}, + { 0.251897818154216950498106628}, { 0.967753837093475465243391912}, + {-0.967753837093475465243391912}, { 0.251897818154216950498106628}, + { 0.944604837261480265659265493}, { 0.328209843579092526107916817}, + {-0.328209843579092526107916817}, { 0.944604837261480265659265493}, + { 0.435857079922255491032544080}, { 0.900015892016160228714535267}, + {-0.900015892016160228714535267}, { 0.435857079922255491032544080}, + { 0.747100605980180144323078847}, { 0.664710978203344868130324985}, + {-0.664710978203344868130324985}, { 0.747100605980180144323078847}, + { 0.058258264500435759613979782}, { 0.998301544933892840738782163}, + {-0.998301544933892840738782163}, { 0.058258264500435759613979782}, + { 0.996044700901251989887944810}, { 0.088853552582524596561586535}, + {-0.088853552582524596561586535}, { 0.996044700901251989887944810}, + { 0.641481012808583151988739898}, { 0.767138911935820381181694573}, + {-0.767138911935820381181694573}, { 
0.641481012808583151988739898}, + { 0.886222530148880631647990821}, { 0.463259783551860197390719637}, + {-0.463259783551860197390719637}, { 0.886222530148880631647990821}, + { 0.299079826308040476750336973}, { 0.954228095109105629780430732}, + {-0.954228095109105629780430732}, { 0.299079826308040476750336973}, + { 0.959571513081984528335528181}, { 0.281464937925757984095231007}, + {-0.281464937925757984095231007}, { 0.959571513081984528335528181}, + { 0.479493757660153026679839798}, { 0.877545290207261291668470750}, + {-0.877545290207261291668470750}, { 0.479493757660153026679839798}, + { 0.778816512381475953374724325}, { 0.627251815495144113509622565}, + {-0.627251815495144113509622565}, { 0.778816512381475953374724325}, + { 0.107172424956808849175529148}, { 0.994240449453187946358413442}, + {-0.994240449453187946358413442}, { 0.107172424956808849175529148}, + { 0.982539302287441255907040396}, { 0.186055151663446648105438304}, + {-0.186055151663446648105438304}, { 0.982539302287441255907040396}, + { 0.563199344013834115007363772}, { 0.826321062845663480311195452}, + {-0.826321062845663480311195452}, { 0.563199344013834115007363772}, + { 0.836547727223511984524285790}, { 0.547894059173100165608820571}, + {-0.547894059173100165608820571}, { 0.836547727223511984524285790}, + { 0.204108966092816874181696950}, { 0.978948175319062194715480124}, + {-0.978948175319062194715480124}, { 0.204108966092816874181696950}, + { 0.927362525650401087274536959}, { 0.374164062971457997104393020}, + {-0.374164062971457997104393020}, { 0.927362525650401087274536959}, + { 0.391170384302253888687512949}, { 0.920318276709110566440076541}, + {-0.920318276709110566440076541}, { 0.391170384302253888687512949}, + { 0.713584868780793592903125099}, { 0.700568793943248366792866380}, + {-0.700568793943248366792866380}, { 0.713584868780793592903125099}, + { 0.009203754782059819315102378}, { 0.999957644551963866333120920}, + {-0.999957644551963866333120920}, { 0.009203754782059819315102378}, + { 
0.999957644551963866333120920}, { 0.009203754782059819315102378}, + {-0.009203754782059819315102378}, { 0.999957644551963866333120920}, + { 0.700568793943248366792866380}, { 0.713584868780793592903125099}, + {-0.713584868780793592903125099}, { 0.700568793943248366792866380}, + { 0.920318276709110566440076541}, { 0.391170384302253888687512949}, + {-0.391170384302253888687512949}, { 0.920318276709110566440076541}, + { 0.374164062971457997104393020}, { 0.927362525650401087274536959}, + {-0.927362525650401087274536959}, { 0.374164062971457997104393020}, + { 0.978948175319062194715480124}, { 0.204108966092816874181696950}, + {-0.204108966092816874181696950}, { 0.978948175319062194715480124}, + { 0.547894059173100165608820571}, { 0.836547727223511984524285790}, + {-0.836547727223511984524285790}, { 0.547894059173100165608820571}, + { 0.826321062845663480311195452}, { 0.563199344013834115007363772}, + {-0.563199344013834115007363772}, { 0.826321062845663480311195452}, + { 0.186055151663446648105438304}, { 0.982539302287441255907040396}, + {-0.982539302287441255907040396}, { 0.186055151663446648105438304}, + { 0.994240449453187946358413442}, { 0.107172424956808849175529148}, + {-0.107172424956808849175529148}, { 0.994240449453187946358413442}, + { 0.627251815495144113509622565}, { 0.778816512381475953374724325}, + {-0.778816512381475953374724325}, { 0.627251815495144113509622565}, + { 0.877545290207261291668470750}, { 0.479493757660153026679839798}, + {-0.479493757660153026679839798}, { 0.877545290207261291668470750}, + { 0.281464937925757984095231007}, { 0.959571513081984528335528181}, + {-0.959571513081984528335528181}, { 0.281464937925757984095231007}, + { 0.954228095109105629780430732}, { 0.299079826308040476750336973}, + {-0.299079826308040476750336973}, { 0.954228095109105629780430732}, + { 0.463259783551860197390719637}, { 0.886222530148880631647990821}, + {-0.886222530148880631647990821}, { 0.463259783551860197390719637}, + { 0.767138911935820381181694573}, { 
0.641481012808583151988739898}, + {-0.641481012808583151988739898}, { 0.767138911935820381181694573}, + { 0.088853552582524596561586535}, { 0.996044700901251989887944810}, + {-0.996044700901251989887944810}, { 0.088853552582524596561586535}, + { 0.998301544933892840738782163}, { 0.058258264500435759613979782}, + {-0.058258264500435759613979782}, { 0.998301544933892840738782163}, + { 0.664710978203344868130324985}, { 0.747100605980180144323078847}, + {-0.747100605980180144323078847}, { 0.664710978203344868130324985}, + { 0.900015892016160228714535267}, { 0.435857079922255491032544080}, + {-0.435857079922255491032544080}, { 0.900015892016160228714535267}, + { 0.328209843579092526107916817}, { 0.944604837261480265659265493}, + {-0.944604837261480265659265493}, { 0.328209843579092526107916817}, + { 0.967753837093475465243391912}, { 0.251897818154216950498106628}, + {-0.251897818154216950498106628}, { 0.967753837093475465243391912}, + { 0.506186645345155291048942344}, { 0.862423956111040538690933878}, + {-0.862423956111040538690933878}, { 0.506186645345155291048942344}, + { 0.797690840943391108362662755}, { 0.603066598540348201693430617}, + {-0.603066598540348201693430617}, { 0.797690840943391108362662755}, + { 0.137620121586486044948441663}, { 0.990485084256457037998682243}, + {-0.990485084256457037998682243}, { 0.137620121586486044948441663}, + { 0.987784141644572154230969032}, { 0.155828397654265235743101486}, + {-0.155828397654265235743101486}, { 0.987784141644572154230969032}, + { 0.588281548222645304786439813}, { 0.808656181588174991946968128}, + {-0.808656181588174991946968128}, { 0.588281548222645304786439813}, + { 0.852960604930363657746588082}, { 0.521975292937154342694258318}, + {-0.521975292937154342694258318}, { 0.852960604930363657746588082}, + { 0.234041958583543423191242045}, { 0.972226497078936305708321144}, + {-0.972226497078936305708321144}, { 0.234041958583543423191242045}, + { 0.938403534063108112192420774}, { 0.345541324963989065539191723}, + 
{-0.345541324963989065539191723}, { 0.938403534063108112192420774}, + { 0.419216888363223956433010020}, { 0.907886116487666212038681480}, + {-0.907886116487666212038681480}, { 0.419216888363223956433010020}, + { 0.734738878095963464563223604}, { 0.678350043129861486873655042}, + {-0.678350043129861486873655042}, { 0.734738878095963464563223604}, + { 0.039872927587739811128578738}, { 0.999204758618363895492950001}, + {-0.999204758618363895492950001}, { 0.039872927587739811128578738}, + { 0.999430604555461772019008327}, { 0.033741171851377584833716112}, + {-0.033741171851377584833716112}, { 0.999430604555461772019008327}, + { 0.682845546385248068164596123}, { 0.730562769227827561177758850}, + {-0.730562769227827561177758850}, { 0.682845546385248068164596123}, + { 0.910441292258067196934095369}, { 0.413638312238434547471944324}, + {-0.413638312238434547471944324}, { 0.910441292258067196934095369}, + { 0.351292756085567125601307623}, { 0.936265667170278246576310996}, + {-0.936265667170278246576310996}, { 0.351292756085567125601307623}, + { 0.973644249650811925318383912}, { 0.228072083170885739254457379}, + {-0.228072083170885739254457379}, { 0.973644249650811925318383912}, + { 0.527199134781901348464274575}, { 0.849741768000852489471268395}, + {-0.849741768000852489471268395}, { 0.527199134781901348464274575}, + { 0.812250586585203913049744181}, { 0.583308652937698294392830961}, + {-0.583308652937698294392830961}, { 0.812250586585203913049744181}, + { 0.161886393780111837641387995}, { 0.986809401814185476970235952}, + {-0.986809401814185476970235952}, { 0.161886393780111837641387995}, + { 0.991310859846115418957349799}, { 0.131540028702883111103387493}, + {-0.131540028702883111103387493}, { 0.991310859846115418957349799}, + { 0.607949784967773667243642671}, { 0.793975477554337164895083757}, + {-0.793975477554337164895083757}, { 0.607949784967773667243642671}, + { 0.865513624090569082825488358}, { 0.500885382611240786241285004}, + {-0.500885382611240786241285004}, { 
0.865513624090569082825488358}, + { 0.257831102162159005614471295}, { 0.966190003445412555433832961}, + {-0.966190003445412555433832961}, { 0.257831102162159005614471295}, + { 0.946600913083283570044599823}, { 0.322407678801069848384807478}, + {-0.322407678801069848384807478}, { 0.946600913083283570044599823}, + { 0.441371268731716692879988968}, { 0.897324580705418281231391836}, + {-0.897324580705418281231391836}, { 0.441371268731716692879988968}, + { 0.751165131909686411205819422}, { 0.660114342067420478559490747}, + {-0.660114342067420478559490747}, { 0.751165131909686411205819422}, + { 0.064382630929857460819324537}, { 0.997925286198596012623025462}, + {-0.997925286198596012623025462}, { 0.064382630929857460819324537}, + { 0.996571145790554847093566910}, { 0.082740264549375693111987083}, + {-0.082740264549375693111987083}, { 0.996571145790554847093566910}, + { 0.646176012983316364832802220}, { 0.763188417263381271704838297}, + {-0.763188417263381271704838297}, { 0.646176012983316364832802220}, + { 0.889048355854664562540777729}, { 0.457813303598877221904961155}, + {-0.457813303598877221904961155}, { 0.889048355854664562540777729}, + { 0.304929229735402406490728633}, { 0.952375012719765858529893608}, + {-0.952375012719765858529893608}, { 0.304929229735402406490728633}, + { 0.961280485811320641748659653}, { 0.275571819310958163076425168}, + {-0.275571819310958163076425168}, { 0.961280485811320641748659653}, + { 0.484869248000791101822951699}, { 0.874586652278176112634431897}, + {-0.874586652278176112634431897}, { 0.484869248000791101822951699}, + { 0.782650596166575738458949301}, { 0.622461279374149972519166721}, + {-0.622461279374149972519166721}, { 0.782650596166575738458949301}, + { 0.113270952177564349018228733}, { 0.993564135520595333782021697}, + {-0.993564135520595333782021697}, { 0.113270952177564349018228733}, + { 0.983662419211730274396237776}, { 0.180022901405699522679906590}, + {-0.180022901405699522679906590}, { 0.983662419211730274396237776}, + { 
0.568258952670131549790548489}, { 0.822849781375826332046780034}, + {-0.822849781375826332046780034}, { 0.568258952670131549790548489}, + { 0.839893794195999504583383987}, { 0.542750784864515906586768661}, + {-0.542750784864515906586768661}, { 0.839893794195999504583383987}, + { 0.210111836880469621717489972}, { 0.977677357824509979943404762}, + {-0.977677357824509979943404762}, { 0.210111836880469621717489972}, + { 0.929640895843181265457918066}, { 0.368466829953372331712746222}, + {-0.368466829953372331712746222}, { 0.929640895843181265457918066}, + { 0.396809987416710328595290911}, { 0.917900775621390457642276297}, + {-0.917900775621390457642276297}, { 0.396809987416710328595290911}, + { 0.717870045055731736211325329}, { 0.696177131491462944788582591}, + {-0.696177131491462944788582591}, { 0.717870045055731736211325329}, + { 0.015339206284988101044151868}, { 0.999882347454212525633049627}, + {-0.999882347454212525633049627}, { 0.015339206284988101044151868}, + { 0.999769405351215321657617036}, { 0.021474080275469507418374898}, + {-0.021474080275469507418374898}, { 0.999769405351215321657617036}, + { 0.691759258364157774906734132}, { 0.722128193929215321243607198}, + {-0.722128193929215321243607198}, { 0.691759258364157774906734132}, + { 0.915448716088267819566431292}, { 0.402434650859418441082533934}, + {-0.402434650859418441082533934}, { 0.915448716088267819566431292}, + { 0.362755724367397216204854462}, { 0.931884265581668106718557199}, + {-0.931884265581668106718557199}, { 0.362755724367397216204854462}, + { 0.976369731330021149312732194}, { 0.216106797076219509948385131}, + {-0.216106797076219509948385131}, { 0.976369731330021149312732194}, + { 0.537587076295645482502214932}, { 0.843208239641845437161743865}, + {-0.843208239641845437161743865}, { 0.537587076295645482502214932}, + { 0.819347520076796960824689637}, { 0.573297166698042212820171239}, + {-0.573297166698042212820171239}, { 0.819347520076796960824689637}, + { 0.173983873387463827950700807}, { 
0.984748501801904218556553176}, + {-0.984748501801904218556553176}, { 0.173983873387463827950700807}, + { 0.992850414459865090793563344}, { 0.119365214810991364593637790}, + {-0.119365214810991364593637790}, { 0.992850414459865090793563344}, + { 0.617647307937803932403979402}, { 0.786455213599085757522319464}, + {-0.786455213599085757522319464}, { 0.617647307937803932403979402}, + { 0.871595086655951034842481435}, { 0.490226483288291154229598449}, + {-0.490226483288291154229598449}, { 0.871595086655951034842481435}, + { 0.269668325572915106525464462}, { 0.962953266873683886347921481}, + {-0.962953266873683886347921481}, { 0.269668325572915106525464462}, + { 0.950486073949481721759926101}, { 0.310767152749611495835997250}, + {-0.310767152749611495835997250}, { 0.950486073949481721759926101}, + { 0.452349587233770874133026703}, { 0.891840709392342727796478697}, + {-0.891840709392342727796478697}, { 0.452349587233770874133026703}, + { 0.759209188978388033485525443}, { 0.650846684996380915068975573}, + {-0.650846684996380915068975573}, { 0.759209188978388033485525443}, + { 0.076623861392031492278332463}, { 0.997060070339482978987989949}, + {-0.997060070339482978987989949}, { 0.076623861392031492278332463}, + { 0.997511456140303459699448390}, { 0.070504573389613863027351471}, + {-0.070504573389613863027351471}, { 0.997511456140303459699448390}, + { 0.655492852999615385312679701}, { 0.755201376896536527598710756}, + {-0.755201376896536527598710756}, { 0.655492852999615385312679701}, + { 0.894599485631382678433072126}, { 0.446868840162374195353044389}, + {-0.446868840162374195353044389}, { 0.894599485631382678433072126}, + { 0.316593375556165867243047035}, { 0.948561349915730288158494826}, + {-0.948561349915730288158494826}, { 0.316593375556165867243047035}, + { 0.964589793289812723836432159}, { 0.263754678974831383611349322}, + {-0.263754678974831383611349322}, { 0.964589793289812723836432159}, + { 0.495565261825772531150266670}, { 0.868570705971340895340449876}, + 
{-0.868570705971340895340449876}, { 0.495565261825772531150266670}, + { 0.790230221437310055030217152}, { 0.612810082429409703935211936}, + {-0.612810082429409703935211936}, { 0.790230221437310055030217152}, + { 0.125454983411546238542336453}, { 0.992099313142191757112085445}, + {-0.992099313142191757112085445}, { 0.125454983411546238542336453}, + { 0.985797509167567424700995000}, { 0.167938294974731178054745536}, + {-0.167938294974731178054745536}, { 0.985797509167567424700995000}, + { 0.578313796411655563342245019}, { 0.815814410806733789010772660}, + {-0.815814410806733789010772660}, { 0.578313796411655563342245019}, + { 0.846490938774052078300544488}, { 0.532403127877197971442805218}, + {-0.532403127877197971442805218}, { 0.846490938774052078300544488}, + { 0.222093620973203534094094721}, { 0.975025345066994146844913468}, + {-0.975025345066994146844913468}, { 0.222093620973203534094094721}, + { 0.934092550404258914729877883}, { 0.357030961233430032614954036}, + {-0.357030961233430032614954036}, { 0.934092550404258914729877883}, + { 0.408044162864978680820747499}, { 0.912962190428398164628018233}, + {-0.912962190428398164628018233}, { 0.408044162864978680820747499}, + { 0.726359155084345976817494315}, { 0.687315340891759108199186948}, + {-0.687315340891759108199186948}, { 0.726359155084345976817494315}, + { 0.027608145778965741612354872}, { 0.999618822495178597116830637}, + {-0.999618822495178597116830637}, { 0.027608145778965741612354872}, + { 0.998941293186856850633930266}, { 0.046003182130914628814301788}, + {-0.046003182130914628814301788}, { 0.998941293186856850633930266}, + { 0.673829000378756060917568372}, { 0.738887324460615147933116508}, + {-0.738887324460615147933116508}, { 0.673829000378756060917568372}, + { 0.905296759318118774354048329}, { 0.424779681209108833357226189}, + {-0.424779681209108833357226189}, { 0.905296759318118774354048329}, + { 0.339776884406826857828825803}, { 0.940506070593268323787291309}, + {-0.940506070593268323787291309}, { 
0.339776884406826857828825803}, + { 0.970772140728950302138169611}, { 0.240003022448741486568922365}, + {-0.240003022448741486568922365}, { 0.970772140728950302138169611}, + { 0.516731799017649881508753876}, { 0.856147328375194481019630732}, + {-0.856147328375194481019630732}, { 0.516731799017649881508753876}, + { 0.805031331142963597922659282}, { 0.593232295039799808047809426}, + {-0.593232295039799808047809426}, { 0.805031331142963597922659282}, + { 0.149764534677321517229695737}, { 0.988721691960323767604516485}, + {-0.988721691960323767604516485}, { 0.149764534677321517229695737}, + { 0.989622017463200834623694454}, { 0.143695033150294454819773349}, + {-0.143695033150294454819773349}, { 0.989622017463200834623694454}, + { 0.598160706996342311724958652}, { 0.801376171723140219430247777}, + {-0.801376171723140219430247777}, { 0.598160706996342311724958652}, + { 0.859301818357008404783582139}, { 0.511468850437970399504391001}, + {-0.511468850437970399504391001}, { 0.859301818357008404783582139}, + { 0.245955050335794611599924709}, { 0.969281235356548486048290738}, + {-0.969281235356548486048290738}, { 0.245955050335794611599924709}, + { 0.942573197601446879280758735}, { 0.333999651442009404650865481}, + {-0.333999651442009404650865481}, { 0.942573197601446879280758735}, + { 0.430326481340082633908199031}, { 0.902673318237258806751502391}, + {-0.902673318237258806751502391}, { 0.430326481340082633908199031}, + { 0.743007952135121693517362293}, { 0.669282588346636065720696366}, + {-0.669282588346636065720696366}, { 0.743007952135121693517362293}, + { 0.052131704680283321236358216}, { 0.998640218180265222418199049}, + {-0.998640218180265222418199049}, { 0.052131704680283321236358216}, + { 0.995480755491926941769171600}, { 0.094963495329638998938034312}, + {-0.094963495329638998938034312}, { 0.995480755491926941769171600}, + { 0.636761861236284230413943435}, { 0.771060524261813773200605759}, + {-0.771060524261813773200605759}, { 0.636761861236284230413943435}, + { 
0.883363338665731594736308015}, { 0.468688822035827933697617870}, + {-0.468688822035827933697617870}, { 0.883363338665731594736308015}, + { 0.293219162694258650606608599}, { 0.956045251349996443270479823}, + {-0.956045251349996443270479823}, { 0.293219162694258650606608599}, + { 0.957826413027532890321037029}, { 0.287347459544729526477331841}, + {-0.287347459544729526477331841}, { 0.957826413027532890321037029}, + { 0.474100214650550014398580015}, { 0.880470889052160770806542929}, + {-0.880470889052160770806542929}, { 0.474100214650550014398580015}, + { 0.774953106594873878359129282}, { 0.632018735939809021909403706}, + {-0.632018735939809021909403706}, { 0.774953106594873878359129282}, + { 0.101069862754827824987887585}, { 0.994879330794805620591166107}, + {-0.994879330794805620591166107}, { 0.101069862754827824987887585}, + { 0.981379193313754574318224190}, { 0.192080397049892441679288205}, + {-0.192080397049892441679288205}, { 0.981379193313754574318224190}, + { 0.558118531220556115693702964}, { 0.829761233794523042469023765}, + {-0.829761233794523042469023765}, { 0.558118531220556115693702964}, + { 0.833170164701913186439915922}, { 0.553016705580027531764226988}, + {-0.553016705580027531764226988}, { 0.833170164701913186439915922}, + { 0.198098410717953586179324918}, { 0.980182135968117392690210009}, + {-0.980182135968117392690210009}, { 0.198098410717953586179324918}, + { 0.925049240782677590302371869}, { 0.379847208924051170576281147}, + {-0.379847208924051170576281147}, { 0.925049240782677590302371869}, + { 0.385516053843918864075607949}, { 0.922701128333878570437264227}, + {-0.922701128333878570437264227}, { 0.385516053843918864075607949}, + { 0.709272826438865651316533772}, { 0.704934080375904908852523758}, + {-0.704934080375904908852523758}, { 0.709272826438865651316533772}, + { 0.003067956762965976270145365}, { 0.999995293809576171511580126}, + {-0.999995293809576171511580126}, { 0.003067956762965976270145365} +}; + +const fpr fpr_p2_tab[] = { + { 
2.00000000000 }, + { 1.00000000000 }, + { 0.50000000000 }, + { 0.25000000000 }, + { 0.12500000000 }, + { 0.06250000000 }, + { 0.03125000000 }, + { 0.01562500000 }, + { 0.00781250000 }, + { 0.00390625000 }, + { 0.00195312500 } +}; diff --git a/crypto_sign/falcon-512/avx2/fpr.h b/crypto_sign/falcon-512/avx2/fpr.h new file mode 100644 index 00000000..5c7df25c --- /dev/null +++ b/crypto_sign/falcon-512/avx2/fpr.h @@ -0,0 +1,349 @@ +#ifndef PQCLEAN_FALCON512_AVX2_FPR_H +#define PQCLEAN_FALCON512_AVX2_FPR_H + +/* + * Floating-point operations. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* ====================================================================== */ + +#include +#include + +#define FMADD(a, b, c) _mm256_add_pd(_mm256_mul_pd(a, b), c) +#define FMSUB(a, b, c) _mm256_sub_pd(_mm256_mul_pd(a, b), c) + +/* + * We wrap the native 'double' type into a structure so that the C compiler + * complains if we inadvertently use raw arithmetic operators on the 'fpr' + * type instead of using the inline functions below. This should have no + * extra runtime cost, since all the functions below are 'inline'. + */ +typedef struct { + double v; +} fpr; + +static inline fpr +FPR(double v) { + fpr x; + + x.v = v; + return x; +} + +static inline fpr +fpr_of(int64_t i) { + return FPR((double)i); +} + +static const fpr fpr_q = { 12289.0 }; +static const fpr fpr_inverse_of_q = { 1.0 / 12289.0 }; +static const fpr fpr_inv_2sqrsigma0 = { .150865048875372721532312163019 }; +static const fpr fpr_inv_sigma = { .005819826392951607426919370871 }; +static const fpr fpr_sigma_min_9 = { 1.291500756233514568549480827642 }; +static const fpr fpr_sigma_min_10 = { 1.311734375905083682667395805765 }; +static const fpr fpr_log2 = { 0.69314718055994530941723212146 }; +static const fpr fpr_inv_log2 = { 1.4426950408889634073599246810 }; +static const fpr fpr_bnorm_max = { 16822.4121 }; +static const fpr fpr_zero = { 0.0 }; +static const fpr fpr_one = { 1.0 }; +static const fpr fpr_two = { 2.0 }; +static const fpr fpr_onehalf = { 0.5 }; +static const fpr fpr_invsqrt2 = { 0.707106781186547524400844362105 }; +static const fpr fpr_invsqrt8 = { 0.353553390593273762200422181052 }; +static const fpr fpr_ptwo31 = { 2147483648.0 }; +static const fpr fpr_ptwo31m1 = { 2147483647.0 }; +static const fpr fpr_mtwo31m1 = { -2147483647.0 }; +static const fpr fpr_ptwo63m1 = { 9223372036854775807.0 }; +static const fpr fpr_mtwo63m1 = { -9223372036854775807.0 }; +static const fpr 
fpr_ptwo63 = { 9223372036854775808.0 }; + +static inline int64_t +fpr_rint(fpr x) { + /* + * We do not want to use llrint() since it might be not + * constant-time. + * + * Suppose that x >= 0. If x >= 2^52, then it is already an + * integer. Otherwise, if x < 2^52, then computing x+2^52 will + * yield a value that will be rounded to the nearest integer + * with exactly the right rules (round-to-nearest-even). + * + * In order to have constant-time processing, we must do the + * computation for both x >= 0 and x < 0 cases, and use a + * cast to an integer to access the sign and select the proper + * value. Such casts also allow us to find out if |x| < 2^52. + */ + int64_t sx, tx, rp, rn, m; + uint32_t ub; + + sx = (int64_t)(x.v - 1.0); + tx = (int64_t)x.v; + rp = (int64_t)(x.v + 4503599627370496.0) - 4503599627370496; + rn = (int64_t)(x.v - 4503599627370496.0) + 4503599627370496; + + /* + * If tx >= 2^52 or tx < -2^52, then result is tx. + * Otherwise, if sx >= 0, then result is rp. + * Otherwise, result is rn. We use the fact that when x is + * close to 0 (|x| <= 0.25) then both rp and rn are correct; + * and if x is not close to 0, then trunc(x-1.0) yields the + * appropriate sign. + */ + + /* + * Clamp rp to zero if tx < 0. + * Clamp rn to zero if tx >= 0. + */ + m = sx >> 63; + rn &= m; + rp &= ~m; + + /* + * Get the 12 upper bits of tx; if they are not all zeros or + * all ones, then tx >= 2^52 or tx < -2^52, and we clamp both + * rp and rn to zero. Otherwise, we clamp tx to zero. + */ + ub = (uint32_t)((uint64_t)tx >> 52); + m = -(int64_t)((((ub + 1) & 0xFFF) - 2) >> 31); + rp &= m; + rn &= m; + tx &= ~m; + + /* + * Only one of tx, rn or rp (at most) can be non-zero at this + * point. + */ + return tx | rn | rp; +} + +static inline int64_t +fpr_floor(fpr x) { + int64_t r; + + /* + * The cast performs a trunc() (rounding toward 0) and thus is + * wrong by 1 for most negative values. 
The correction below is + * constant-time as long as the compiler turns the + * floating-point conversion result into a 0/1 integer without a + * conditional branch or another non-constant-time construction. + * This should hold on all modern architectures with an FPU (and + * if it is false on a given arch, then chances are that the FPU + * itself is not constant-time, making the point moot). + */ + r = (int64_t)x.v; + return r - (x.v < (double)r); +} + +static inline int64_t +fpr_trunc(fpr x) { + return (int64_t)x.v; +} + +static inline fpr +fpr_add(fpr x, fpr y) { + return FPR(x.v + y.v); +} + +static inline fpr +fpr_sub(fpr x, fpr y) { + return FPR(x.v - y.v); +} + +static inline fpr +fpr_neg(fpr x) { + return FPR(-x.v); +} + +static inline fpr +fpr_half(fpr x) { + return FPR(x.v * 0.5); +} + +static inline fpr +fpr_double(fpr x) { + return FPR(x.v + x.v); +} + +static inline fpr +fpr_mul(fpr x, fpr y) { + return FPR(x.v * y.v); +} + +static inline fpr +fpr_sqr(fpr x) { + return FPR(x.v * x.v); +} + +static inline fpr +fpr_inv(fpr x) { + return FPR(1.0 / x.v); +} + +static inline fpr +fpr_div(fpr x, fpr y) { + return FPR(x.v / y.v); +} + +static inline void +fpr_sqrt_avx2(double *t) { + __m128d x; + + x = _mm_load1_pd(t); + x = _mm_sqrt_pd(x); + _mm_storel_pd(t, x); +} + +static inline fpr +fpr_sqrt(fpr x) { + /* + * We prefer not to have a dependency on libm when it can be + * avoided. On x86, calling the sqrt() libm function inlines + * the relevant opcode (fsqrt or sqrtsd, depending on whether + * the 387 FPU or SSE2 is used for floating-point operations) + * but then makes an optional call to the library function + * for proper error handling, in case the operand is negative. + * + * To avoid this dependency, we use intrinsics or inline assembly + * on recognized platforms: + * + * - If AVX2 is explicitly enabled, then we use SSE2 intrinsics. + * + * - On GCC/Clang with SSE maths, we use SSE2 intrinsics. 
+ * + * - On GCC/Clang on i386, or MSVC on i386, we use inline assembly + * to call the 387 FPU fsqrt opcode. + * + * - On GCC/Clang/XLC on PowerPC, we use inline assembly to call + * the fsqrt opcode (Clang needs a special hack). + * + * - On GCC/Clang on ARM with hardware floating-point, we use + * inline assembly to call the vqsrt.f64 opcode. Due to a + * complex ecosystem of compilers and assembly syntaxes, we + * have to call it "fsqrt" or "fsqrtd", depending on case. + * + * If the platform is not recognized, a call to the system + * library function sqrt() is performed. On some compilers, this + * may actually inline the relevant opcode, and call the library + * function only when the input is invalid (e.g. negative); + * Falcon never actually calls sqrt() on a negative value, but + * the dependency to libm will still be there. + */ + + fpr_sqrt_avx2(&x.v); + return x; +} + +static inline int +fpr_lt(fpr x, fpr y) { + return x.v < y.v; +} + +static inline uint64_t +fpr_expm_p63(fpr x, fpr ccs) { + /* + * Polynomial approximation of exp(-x) is taken from FACCT: + * https://eprint.iacr.org/2018/1234 + * Specifically, values are extracted from the implementation + * referenced from the FACCT article, and available at: + * https://github.com/raykzhao/gaussian + * Tests over more than 24 billions of random inputs in the + * 0..log(2) range have never shown a deviation larger than + * 2^(-50) from the true mathematical value. + */ + + + /* + * AVX2 implementation uses more operations than Horner's method, + * but with a lower expression tree depth. This helps because + * additions and multiplications have a latency of 4 cycles on + * a Skylake, but the CPU can issue two of them per cycle. 
+ */ + + static const union { + double d[12]; + __m256d v[3]; + } c = { + { + 0.999999999999994892974086724280, + 0.500000000000019206858326015208, + 0.166666666666984014666397229121, + 0.041666666666110491190622155955, + 0.008333333327800835146903501993, + 0.001388888894063186997887560103, + 0.000198412739277311890541063977, + 0.000024801566833585381209939524, + 0.000002755586350219122514855659, + 0.000000275607356160477811864927, + 0.000000025299506379442070029551, + 0.000000002073772366009083061987 + } + }; + + double d1, d2, d4, d8, y; + __m256d d14, d58, d9c; + + d1 = -x.v; + d2 = d1 * d1; + d4 = d2 * d2; + d8 = d4 * d4; + d14 = _mm256_set_pd(d4, d2 * d1, d2, d1); + d58 = _mm256_mul_pd(d14, _mm256_set1_pd(d4)); + d9c = _mm256_mul_pd(d14, _mm256_set1_pd(d8)); + d14 = _mm256_mul_pd(d14, _mm256_loadu_pd(&c.d[0])); + d58 = FMADD(d58, _mm256_loadu_pd(&c.d[4]), d14); + d9c = FMADD(d9c, _mm256_loadu_pd(&c.d[8]), d58); + d9c = _mm256_hadd_pd(d9c, d9c); + y = 1.0 + _mm_cvtsd_f64(_mm256_castpd256_pd128(d9c)) // _mm256_cvtsd_f64(d9c) + + _mm_cvtsd_f64(_mm256_extractf128_pd(d9c, 1)); + y *= ccs.v; + + /* + * Final conversion goes through int64_t first, because that's what + * the underlying opcode (vcvttsd2si) will do, and we know that the + * result will fit, since x >= 0 and ccs < 1. If we did the + * conversion directly to uint64_t, then the compiler would add some + * extra code to cover the case of a source value of 2^63 or more, + * and though the alternate path would never be exercised, the + * extra comparison would cost us some cycles. 
+ */ + return (uint64_t)(int64_t)(y * fpr_ptwo63.v); + +} + +#define fpr_gm_tab PQCLEAN_FALCON512_AVX2_fpr_gm_tab +extern const fpr fpr_gm_tab[]; + +#define fpr_p2_tab PQCLEAN_FALCON512_AVX2_fpr_p2_tab +extern const fpr fpr_p2_tab[]; + +/* ====================================================================== */ +#endif diff --git a/crypto_sign/falcon-512/avx2/inner.h b/crypto_sign/falcon-512/avx2/inner.h new file mode 100644 index 00000000..22c34564 --- /dev/null +++ b/crypto_sign/falcon-512/avx2/inner.h @@ -0,0 +1,826 @@ +#ifndef PQCLEAN_FALCON512_AVX2_INNER_H +#define PQCLEAN_FALCON512_AVX2_INNER_H + + +/* + * Internal functions for Falcon. This is not the API intended to be + * used by applications; instead, this internal API provides all the + * primitives on which wrappers build to provide external APIs. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + +/* + * IMPORTANT API RULES + * ------------------- + * + * This API has some non-trivial usage rules: + * + * + * - All public functions (i.e. the non-static ones) must be referenced + * with the PQCLEAN_FALCON512_AVX2_ macro (e.g. PQCLEAN_FALCON512_AVX2_verify_raw for the verify_raw() + * function). That macro adds a prefix to the name, which is + * configurable with the FALCON_PREFIX macro. This allows compiling + * the code into a specific "namespace" and potentially including + * several versions of this code into a single application (e.g. to + * have an AVX2 and a non-AVX2 variants and select the one to use at + * runtime based on availability of AVX2 opcodes). + * + * - Functions that need temporary buffers expects them as a final + * tmp[] array of type uint8_t*, with a size which is documented for + * each function. However, most have some alignment requirements, + * because they will use the array to store 16-bit, 32-bit or 64-bit + * values (e.g. uint64_t or double). The caller must ensure proper + * alignment. What happens on unaligned access depends on the + * underlying architecture, ranging from a slight time penalty + * to immediate termination of the process. + * + * - Some functions rely on specific rounding rules and precision for + * floating-point numbers. On some systems (in particular 32-bit x86 + * with the 387 FPU), this requires setting an hardware control + * word. 
The caller MUST use set_fpu_cw() to ensure proper precision: + * + * oldcw = set_fpu_cw(2); + * PQCLEAN_FALCON512_AVX2_sign_dyn(...); + * set_fpu_cw(oldcw); + * + * On systems where the native floating-point precision is already + * proper, or integer-based emulation is used, the set_fpu_cw() + * function does nothing, so it can be called systematically. + */ +#include "fips202.h" +#include "fpr.h" +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +/* + * Some computations with floating-point elements, in particular + * rounding to the nearest integer, rely on operations using _exactly_ + * the precision of IEEE-754 binary64 type (i.e. 52 bits). On 32-bit + * x86, the 387 FPU may be used (depending on the target OS) and, in + * that case, may use more precision bits (i.e. 64 bits, for an 80-bit + * total type length); to prevent miscomputations, we define an explicit + * function that modifies the precision in the FPU control word. + * + * set_fpu_cw() sets the precision to the provided value, and returns + * the previously set precision; callers are supposed to restore the + * previous precision on exit. The correct (52-bit) precision is + * configured with the value "2". On unsupported compilers, or on + * targets other than 32-bit x86, or when the native 'double' type is + * not used, the set_fpu_cw() function does nothing at all. + */ +static inline unsigned +set_fpu_cw(unsigned x) { + return x; +} + + + + +/* ==================================================================== */ +/* + * SHAKE256 implementation (shake.c). + * + * API is defined to be easily replaced with the fips202.h API defined + * as part of PQClean.
+ */ + + + +#define inner_shake256_context shake256incctx +#define inner_shake256_init(sc) shake256_inc_init(sc) +#define inner_shake256_inject(sc, in, len) shake256_inc_absorb(sc, in, len) +#define inner_shake256_flip(sc) shake256_inc_finalize(sc) +#define inner_shake256_extract(sc, out, len) shake256_inc_squeeze(out, len, sc) +#define inner_shake256_ctx_release(sc) shake256_inc_ctx_release(sc) + + +/* ==================================================================== */ +/* + * Encoding/decoding functions (codec.c). + * + * Encoding functions take as parameters an output buffer (out) with + * a given maximum length (max_out_len); returned value is the actual + * number of bytes which have been written. If the output buffer is + * not large enough, then 0 is returned (some bytes may have been + * written to the buffer). If 'out' is NULL, then 'max_out_len' is + * ignored; instead, the function computes and returns the actual + * required output length (in bytes). + * + * Decoding functions take as parameters an input buffer (in) with + * its maximum length (max_in_len); returned value is the actual number + * of bytes that have been read from the buffer. If the provided length + * is too short, then 0 is returned. + * + * Values to encode or decode are vectors of integers, with N = 2^logn + * elements. + * + * Three encoding formats are defined: + * + * - modq: sequence of values modulo 12289, each encoded over exactly + * 14 bits. The encoder and decoder verify that integers are within + * the valid range (0..12288). Values are arrays of uint16. + * + * - trim: sequence of signed integers, a specified number of bits + * each. The number of bits is provided as parameter and includes + * the sign bit. Each integer x must be such that |x| < 2^(bits-1) + * (which means that the -2^(bits-1) value is forbidden); encode and + * decode functions check that property. 
Values are arrays of + * int16_t or int8_t, corresponding to names 'trim_i16' and + * 'trim_i8', respectively. + * + * - comp: variable-length encoding for signed integers; each integer + * uses a minimum of 9 bits, possibly more. This is normally used + * only for signatures. + * + */ + +size_t PQCLEAN_FALCON512_AVX2_modq_encode(void *out, size_t max_out_len, + const uint16_t *x, unsigned logn); +size_t PQCLEAN_FALCON512_AVX2_trim_i16_encode(void *out, size_t max_out_len, + const int16_t *x, unsigned logn, unsigned bits); +size_t PQCLEAN_FALCON512_AVX2_trim_i8_encode(void *out, size_t max_out_len, + const int8_t *x, unsigned logn, unsigned bits); +size_t PQCLEAN_FALCON512_AVX2_comp_encode(void *out, size_t max_out_len, + const int16_t *x, unsigned logn); + +size_t PQCLEAN_FALCON512_AVX2_modq_decode(uint16_t *x, unsigned logn, + const void *in, size_t max_in_len); +size_t PQCLEAN_FALCON512_AVX2_trim_i16_decode(int16_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len); +size_t PQCLEAN_FALCON512_AVX2_trim_i8_decode(int8_t *x, unsigned logn, unsigned bits, + const void *in, size_t max_in_len); +size_t PQCLEAN_FALCON512_AVX2_comp_decode(int16_t *x, unsigned logn, + const void *in, size_t max_in_len); + +/* + * Number of bits for key elements, indexed by logn (1 to 10). This + * is at most 8 bits for all degrees, but some degrees may have shorter + * elements. + */ +extern const uint8_t PQCLEAN_FALCON512_AVX2_max_fg_bits[]; +extern const uint8_t PQCLEAN_FALCON512_AVX2_max_FG_bits[]; + +/* + * Maximum size, in bits, of elements in a signature, indexed by logn + * (1 to 10). The size includes the sign bit. + */ +extern const uint8_t PQCLEAN_FALCON512_AVX2_max_sig_bits[]; + +/* ==================================================================== */ +/* + * Support functions used for both signature generation and signature + * verification (common.c). + */ + +/* + * From a SHAKE256 context (must be already flipped), produce a new + * point. 
This is the non-constant-time version, which may leak enough + * information to serve as a stop condition on a brute force attack on + * the hashed message (provided that the nonce value is known). + */ +void PQCLEAN_FALCON512_AVX2_hash_to_point_vartime(inner_shake256_context *sc, + uint16_t *x, unsigned logn); + +/* + * From a SHAKE256 context (must be already flipped), produce a new + * point. The temporary buffer (tmp) must have room for 2*2^logn bytes. + * This function is constant-time but is typically more expensive than + * PQCLEAN_FALCON512_AVX2_hash_to_point_vartime(). + * + * tmp[] must have 16-bit alignment. + */ +void PQCLEAN_FALCON512_AVX2_hash_to_point_ct(inner_shake256_context *sc, + uint16_t *x, unsigned logn, uint8_t *tmp); + +/* + * Tell whether a given vector (2N coordinates, in two halves) is + * acceptable as a signature. This compares the appropriate norm of the + * vector with the acceptance bound. Returned value is 1 on success + * (vector is short enough to be acceptable), 0 otherwise. + */ +int PQCLEAN_FALCON512_AVX2_is_short(const int16_t *s1, const int16_t *s2, unsigned logn); + +/* + * Tell whether a given vector (2N coordinates, in two halves) is + * acceptable as a signature. Instead of the first half s1, this + * function receives the "saturated squared norm" of s1, i.e. the + * sum of the squares of the coordinates of s1 (saturated at 2^32-1 + * if the sum exceeds 2^31-1). + * + * Returned value is 1 on success (vector is short enough to be + * acceptable), 0 otherwise. + */ +int PQCLEAN_FALCON512_AVX2_is_short_half(uint32_t sqn, const int16_t *s2, unsigned logn); + +/* ==================================================================== */ +/* + * Signature verification functions (vrfy.c). + */ + +/* + * Convert a public key to NTT + Montgomery format. Conversion is done + * in place. 
+ */ +void PQCLEAN_FALCON512_AVX2_to_ntt_monty(uint16_t *h, unsigned logn); + +/* + * Internal signature verification code: + * c0[] contains the hashed nonce+message + * s2[] is the decoded signature + * h[] contains the public key, in NTT + Montgomery format + * logn is the degree log + * tmp[] temporary, must have at least 2*2^logn bytes + * Returned value is 1 on success, 0 on error. + * + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON512_AVX2_verify_raw(const uint16_t *c0, const int16_t *s2, + const uint16_t *h, unsigned logn, uint8_t *tmp); + +/* + * Compute the public key h[], given the private key elements f[] and + * g[]. This computes h = g/f mod phi mod q, where phi is the polynomial + * modulus. This function returns 1 on success, 0 on error (an error is + * reported if f is not invertible mod phi mod q). + * + * The tmp[] array must have room for at least 2*2^logn elements. + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON512_AVX2_compute_public(uint16_t *h, + const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp); + +/* + * Recompute the fourth private key element. Private key consists in + * four polynomials with small coefficients f, g, F and G, which are + * such that fG - gF = q mod phi; furthermore, f is invertible modulo + * phi and modulo q. This function recomputes G from f, g and F. + * + * The tmp[] array must have room for at least 4*2^logn bytes. + * + * Returned value is 1 in success, 0 on error (f not invertible). + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON512_AVX2_complete_private(int8_t *G, + const int8_t *f, const int8_t *g, const int8_t *F, + unsigned logn, uint8_t *tmp); + +/* + * Test whether a given polynomial is invertible modulo phi and q. + * Polynomial coefficients are small integers. + * + * tmp[] must have 16-bit alignment. 
+ */ +int PQCLEAN_FALCON512_AVX2_is_invertible( + const int16_t *s2, unsigned logn, uint8_t *tmp); + +/* + * Count the number of elements of value zero in the NTT representation + * of the given polynomial: this is the number of primitive 2n-th roots + * of unity (modulo q = 12289) that are roots of the provided polynomial + * (taken modulo q). + * + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON512_AVX2_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp); + +/* + * Internal signature verification with public key recovery: + * h[] receives the public key (NOT in NTT/Montgomery format) + * c0[] contains the hashed nonce+message + * s1[] is the first signature half + * s2[] is the second signature half + * logn is the degree log + * tmp[] temporary, must have at least 2*2^logn bytes + * Returned value is 1 on success, 0 on error. Success is returned if + * the signature is a short enough vector; in that case, the public + * key has been written to h[]. However, the caller must still + * verify that h[] is the correct value (e.g. with regards to a known + * hash of the public key). + * + * h[] may not overlap with any of the other arrays. + * + * tmp[] must have 16-bit alignment. + */ +int PQCLEAN_FALCON512_AVX2_verify_recover(uint16_t *h, + const uint16_t *c0, const int16_t *s1, const int16_t *s2, + unsigned logn, uint8_t *tmp); + +/* ==================================================================== */ +/* + * Implementation of floating-point real numbers (fpr.h, fpr.c). + */ + +/* + * Real numbers are implemented by an extra header file, included below. + * This is meant to support pluggable implementations. The default + * implementation relies on the C type 'double'. 
+ * + * The included file must define the following types, functions and + * constants: + * + * fpr + * type for a real number + * + * fpr fpr_of(int64_t i) + * cast an integer into a real number; source must be in the + * -(2^63-1)..+(2^63-1) range + * + * fpr fpr_scaled(int64_t i, int sc) + * compute i*2^sc as a real number; source 'i' must be in the + * -(2^63-1)..+(2^63-1) range + * + * fpr fpr_ldexp(fpr x, int e) + * compute x*2^e + * + * int64_t fpr_rint(fpr x) + * round x to the nearest integer; x must be in the -(2^63-1) + * to +(2^63-1) range + * + * int64_t fpr_trunc(fpr x) + * round to an integer; this rounds towards zero; value must + * be in the -(2^63-1) to +(2^63-1) range + * + * fpr fpr_add(fpr x, fpr y) + * compute x + y + * + * fpr fpr_sub(fpr x, fpr y) + * compute x - y + * + * fpr fpr_neg(fpr x) + * compute -x + * + * fpr fpr_half(fpr x) + * compute x/2 + * + * fpr fpr_double(fpr x) + * compute x*2 + * + * fpr fpr_mul(fpr x, fpr y) + * compute x * y + * + * fpr fpr_sqr(fpr x) + * compute x * x + * + * fpr fpr_inv(fpr x) + * compute 1/x + * + * fpr fpr_div(fpr x, fpr y) + * compute x/y + * + * fpr fpr_sqrt(fpr x) + * compute the square root of x + * + * int fpr_lt(fpr x, fpr y) + * return 1 if x < y, 0 otherwise + * + * uint64_t fpr_expm_p63(fpr x) + * return exp(-x), assuming that 0 <= x < log(2). Returned value + * is scaled to 63 bits (i.e. it really returns 2^63*exp(-x), + * rounded to the nearest integer). Computation should have a + * precision of at least 45 bits.
+ * + * const fpr fpr_gm_tab[] + * array of constants for FFT / iFFT + * + * const fpr fpr_p2_tab[] + * precomputed powers of 2 (by index, 0 to 10) + * + * Constants of type 'fpr': + * + * fpr fpr_q 12289 + * fpr fpr_inverse_of_q 1/12289 + * fpr fpr_inv_2sqrsigma0 1/(2*(1.8205^2)) + * fpr fpr_inv_sigma 1/(1.55*sqrt(12289)) + * fpr fpr_sigma_min_9 1.291500756233514568549480827642 + * fpr fpr_sigma_min_10 1.311734375905083682667395805765 + * fpr fpr_log2 log(2) + * fpr fpr_inv_log2 1/log(2) + * fpr fpr_bnorm_max 16822.4121 + * fpr fpr_zero 0 + * fpr fpr_one 1 + * fpr fpr_two 2 + * fpr fpr_onehalf 0.5 + * fpr fpr_ptwo31 2^31 + * fpr fpr_ptwo31m1 2^31-1 + * fpr fpr_mtwo31m1 -(2^31-1) + * fpr fpr_ptwo63m1 2^63-1 + * fpr fpr_mtwo63m1 -(2^63-1) + * fpr fpr_ptwo63 2^63 + */ + +/* ==================================================================== */ +/* + * RNG (rng.c). + * + * A PRNG based on ChaCha20 is implemented; it is seeded from a SHAKE256 + * context (flipped) and is used for bulk pseudorandom generation. + * A system-dependent seed generator is also provided. + */ + +/* + * Obtain a random seed from the system RNG. + * + * Returned value is 1 on success, 0 on error. + */ +int PQCLEAN_FALCON512_AVX2_get_seed(void *seed, size_t seed_len); + +/* + * Structure for a PRNG. This includes a large buffer so that values + * get generated in advance. The 'state' is used to keep the current + * PRNG algorithm state (contents depend on the selected algorithm). + * + * The unions with 'dummy_u64' are there to ensure proper alignment for + * 64-bit direct access. + */ +typedef struct { + union { + uint8_t d[512]; /* MUST be 512, exactly */ + uint64_t dummy_u64; + } buf; + size_t ptr; + union { + uint8_t d[256]; + uint64_t dummy_u64; + } state; + int type; +} prng; + +/* + * Instantiate a PRNG. That PRNG will feed over the provided SHAKE256 + * context (in "flipped" state) to obtain its initial state. 
+ */ +void PQCLEAN_FALCON512_AVX2_prng_init(prng *p, inner_shake256_context *src); + +/* + * Refill the PRNG buffer. This is normally invoked automatically, and + * is declared here only so that prng_get_u64() may be inlined. + */ +void PQCLEAN_FALCON512_AVX2_prng_refill(prng *p); + +/* + * Get some bytes from a PRNG. + */ +void PQCLEAN_FALCON512_AVX2_prng_get_bytes(prng *p, void *dst, size_t len); + +/* + * Get a 64-bit random value from a PRNG. + */ +static inline uint64_t +prng_get_u64(prng *p) { + size_t u; + + /* + * If there are less than 9 bytes in the buffer, we refill it. + * This means that we may drop the last few bytes, but this allows + * for faster extraction code. Also, it means that we never leave + * an empty buffer. + */ + u = p->ptr; + if (u >= (sizeof p->buf.d) - 9) { + PQCLEAN_FALCON512_AVX2_prng_refill(p); + u = 0; + } + p->ptr = u + 8; + + return (uint64_t)p->buf.d[u + 0] + | ((uint64_t)p->buf.d[u + 1] << 8) + | ((uint64_t)p->buf.d[u + 2] << 16) + | ((uint64_t)p->buf.d[u + 3] << 24) + | ((uint64_t)p->buf.d[u + 4] << 32) + | ((uint64_t)p->buf.d[u + 5] << 40) + | ((uint64_t)p->buf.d[u + 6] << 48) + | ((uint64_t)p->buf.d[u + 7] << 56); +} + +/* + * Get an 8-bit random value from a PRNG. + */ +static inline unsigned +prng_get_u8(prng *p) { + unsigned v; + + v = p->buf.d[p->ptr ++]; + if (p->ptr == sizeof p->buf.d) { + PQCLEAN_FALCON512_AVX2_prng_refill(p); + } + return v; +} + +/* ==================================================================== */ +/* + * FFT (falcon-fft.c). + * + * A real polynomial is represented as an array of N 'fpr' elements. + * The FFT representation of a real polynomial contains N/2 complex + * elements; each is stored as two real numbers, for the real and + * imaginary parts, respectively. See falcon-fft.c for details on the + * internal representation. 
+ */ + +/* + * Compute FFT in-place: the source array should contain a real + * polynomial (N coefficients); its storage area is reused to store + * the FFT representation of that polynomial (N/2 complex numbers). + * + * 'logn' MUST lie between 1 and 10 (inclusive). + */ +void PQCLEAN_FALCON512_AVX2_FFT(fpr *f, unsigned logn); + +/* + * Compute the inverse FFT in-place: the source array should contain the + * FFT representation of a real polynomial (N/2 elements); the resulting + * real polynomial (N coefficients of type 'fpr') is written over the + * array. + * + * 'logn' MUST lie between 1 and 10 (inclusive). + */ +void PQCLEAN_FALCON512_AVX2_iFFT(fpr *f, unsigned logn); + +/* + * Add polynomial b to polynomial a. a and b MUST NOT overlap. This + * function works in both normal and FFT representations. + */ +void PQCLEAN_FALCON512_AVX2_poly_add(fpr *a, const fpr *b, unsigned logn); + +/* + * Subtract polynomial b from polynomial a. a and b MUST NOT overlap. This + * function works in both normal and FFT representations. + */ +void PQCLEAN_FALCON512_AVX2_poly_sub(fpr *a, const fpr *b, unsigned logn); + +/* + * Negate polynomial a. This function works in both normal and FFT + * representations. + */ +void PQCLEAN_FALCON512_AVX2_poly_neg(fpr *a, unsigned logn); + +/* + * Compute adjoint of polynomial a. This function works only in FFT + * representation. + */ +void PQCLEAN_FALCON512_AVX2_poly_adj_fft(fpr *a, unsigned logn); + +/* + * Multiply polynomial a with polynomial b. a and b MUST NOT overlap. + * This function works only in FFT representation. + */ +void PQCLEAN_FALCON512_AVX2_poly_mul_fft(fpr *a, const fpr *b, unsigned logn); + +/* + * Multiply polynomial a with the adjoint of polynomial b. a and b MUST NOT + * overlap. This function works only in FFT representation. + */ +void PQCLEAN_FALCON512_AVX2_poly_muladj_fft(fpr *a, const fpr *b, unsigned logn); + +/* + * Multiply polynomial with its own adjoint. This function works only in FFT + * representation. 
+ */ +void PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(fpr *a, unsigned logn); + +/* + * Multiply polynomial with a real constant. This function works in both + * normal and FFT representations. + */ +void PQCLEAN_FALCON512_AVX2_poly_mulconst(fpr *a, fpr x, unsigned logn); + +/* + * Divide polynomial a by polynomial b, modulo X^N+1 (FFT representation). + * a and b MUST NOT overlap. + */ +void PQCLEAN_FALCON512_AVX2_poly_div_fft(fpr *a, const fpr *b, unsigned logn); + +/* + * Given f and g (in FFT representation), compute 1/(f*adj(f)+g*adj(g)) + * (also in FFT representation). Since the result is auto-adjoint, all its + * coordinates in FFT representation are real; as such, only the first N/2 + * values of d[] are filled (the imaginary parts are skipped). + * + * Array d MUST NOT overlap with either a or b. + */ +void PQCLEAN_FALCON512_AVX2_poly_invnorm2_fft(fpr *d, + const fpr *a, const fpr *b, unsigned logn); + +/* + * Given F, G, f and g (in FFT representation), compute F*adj(f)+G*adj(g) + * (also in FFT representation). Destination d MUST NOT overlap with + * any of the source arrays. + */ +void PQCLEAN_FALCON512_AVX2_poly_add_muladj_fft(fpr *d, + const fpr *F, const fpr *G, + const fpr *f, const fpr *g, unsigned logn); + +/* + * Multiply polynomial a by polynomial b, where b is autoadjoint. Both + * a and b are in FFT representation. Since b is autoadjoint, all its + * FFT coefficients are real, and the array b contains only N/2 elements. + * a and b MUST NOT overlap. + */ +void PQCLEAN_FALCON512_AVX2_poly_mul_autoadj_fft(fpr *a, + const fpr *b, unsigned logn); + +/* + * Divide polynomial a by polynomial b, where b is autoadjoint. Both + * a and b are in FFT representation. Since b is autoadjoint, all its + * FFT coefficients are real, and the array b contains only N/2 elements. + * a and b MUST NOT overlap. 
+ */ +void PQCLEAN_FALCON512_AVX2_poly_div_autoadj_fft(fpr *a, + const fpr *b, unsigned logn); + +/* + * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT + * representation. On input, g00, g01 and g11 are provided (where the + * matrix G = [[g00, g01], [adj(g01), g11]]). On output, the d00, l10 + * and d11 values are written in g00, g01 and g11, respectively + * (with D = [[d00, 0], [0, d11]] and L = [[1, 0], [l10, 1]]). + * (In fact, d00 = g00, so the g00 operand is left unmodified.) + */ +void PQCLEAN_FALCON512_AVX2_poly_LDL_fft(const fpr *g00, + fpr *g01, fpr *g11, unsigned logn); + +/* + * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT + * representation. This is identical to poly_LDL_fft() except that + * g00, g01 and g11 are unmodified; the outputs d11 and l10 are written + * in two other separate buffers provided as extra parameters. + */ +void PQCLEAN_FALCON512_AVX2_poly_LDLmv_fft(fpr *d11, fpr *l10, + const fpr *g00, const fpr *g01, + const fpr *g11, unsigned logn); + +/* + * Apply "split" operation on a polynomial in FFT representation: + * f = f0(x^2) + x*f1(x^2), for half-size polynomials f0 and f1 + * (polynomials modulo X^(N/2)+1). f0, f1 and f MUST NOT overlap. + */ +void PQCLEAN_FALCON512_AVX2_poly_split_fft(fpr *f0, fpr *f1, + const fpr *f, unsigned logn); + +/* + * Apply "merge" operation on two polynomials in FFT representation: + * given f0 and f1, polynomials modulo X^(N/2)+1, this function computes + * f = f0(x^2) + x*f1(x^2), in FFT representation modulo X^N+1. + * f MUST NOT overlap with either f0 or f1. + */ +void PQCLEAN_FALCON512_AVX2_poly_merge_fft(fpr *f, + const fpr *f0, const fpr *f1, unsigned logn); + +/* ==================================================================== */ +/* + * Key pair generation. + */ + +/* + * Required sizes of the temporary buffer (in bytes). + * + * This size is 28*2^logn bytes, except for degrees 2 and 4 (logn = 1 + * or 2) where it is slightly greater.
+ */ +#define FALCON_KEYGEN_TEMP_1 136 +#define FALCON_KEYGEN_TEMP_2 272 +#define FALCON_KEYGEN_TEMP_3 224 +#define FALCON_KEYGEN_TEMP_4 448 +#define FALCON_KEYGEN_TEMP_5 896 +#define FALCON_KEYGEN_TEMP_6 1792 +#define FALCON_KEYGEN_TEMP_7 3584 +#define FALCON_KEYGEN_TEMP_8 7168 +#define FALCON_KEYGEN_TEMP_9 14336 +#define FALCON_KEYGEN_TEMP_10 28672 + +/* + * Generate a new key pair. Randomness is extracted from the provided + * SHAKE256 context, which must have already been seeded and flipped. + * The tmp[] array must have suitable size (see FALCON_KEYGEN_TEMP_* + * macros) and be aligned for the uint32_t, uint64_t and fpr types. + * + * The private key elements are written in f, g, F and G, and the + * public key is written in h. Either or both of G and h may be NULL, + * in which case the corresponding element is not returned (they can + * be recomputed from f, g and F). + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON512_AVX2_keygen(inner_shake256_context *rng, + int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h, + unsigned logn, uint8_t *tmp); + +/* ==================================================================== */ +/* + * Signature generation. + */ + +/* + * Expand a private key into the B0 matrix in FFT representation and + * the LDL tree. All the values are written in 'expanded_key', for + * a total of (8*logn+40)*2^logn bytes. + * + * The tmp[] array must have room for at least 48*2^logn bytes. + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON512_AVX2_expand_privkey(fpr *expanded_key, + const int8_t *f, const int8_t *g, const int8_t *F, const int8_t *G, + unsigned logn, uint8_t *tmp); + +/* + * Compute a signature over the provided hashed message (hm); the + * signature value is one short vector. 
This function uses an + * expanded key (as generated by PQCLEAN_FALCON512_AVX2_expand_privkey()). + * + * The sig[] and hm[] buffers may overlap. + * + * On successful output, the start of the tmp[] buffer contains the s1 + * vector (as int16_t elements). + * + * The minimal size (in bytes) of tmp[] is 48*2^logn bytes. + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON512_AVX2_sign_tree(int16_t *sig, inner_shake256_context *rng, + const fpr *expanded_key, + const uint16_t *hm, unsigned logn, uint8_t *tmp); + +/* + * Compute a signature over the provided hashed message (hm); the + * signature value is one short vector. This function uses a raw + * key and dynamically recomputes the B0 matrix and LDL tree; this + * saves RAM since there is no need for an expanded key, but + * increases the signature cost. + * + * The sig[] and hm[] buffers may overlap. + * + * On successful output, the start of the tmp[] buffer contains the s1 + * vector (as int16_t elements). + * + * The minimal size (in bytes) of tmp[] is 72*2^logn bytes. + * + * tmp[] must have 64-bit alignment. + * This function uses floating-point rounding (see set_fpu_cw()). + */ +void PQCLEAN_FALCON512_AVX2_sign_dyn(int16_t *sig, inner_shake256_context *rng, + const int8_t *f, const int8_t *g, + const int8_t *F, const int8_t *G, + const uint16_t *hm, unsigned logn, uint8_t *tmp); + +/* + * Internal sampler engine. Exported for tests. + * + * sampler_context wraps around a source of random numbers (PRNG) and + * the sigma_min value (nominally dependent on the degree). + * + * sampler() takes as parameters: + * ctx pointer to the sampler_context structure + * mu center for the distribution + * isigma inverse of the distribution standard deviation + * It returns an integer sampled along the Gaussian distribution centered + * on mu and of standard deviation sigma = 1/isigma.
+ * + * gaussian0_sampler() takes as parameter a pointer to a PRNG, and + * returns an integer sampled along a half-Gaussian with standard + * deviation sigma0 = 1.8205 (center is 0, returned value is + * nonnegative). + */ + +typedef struct { + prng p; + fpr sigma_min; +} sampler_context; + +int PQCLEAN_FALCON512_AVX2_sampler(void *ctx, fpr mu, fpr isigma); + +int PQCLEAN_FALCON512_AVX2_gaussian0_sampler(prng *p); + +/* ==================================================================== */ + +#endif diff --git a/crypto_sign/falcon-512/avx2/keygen.c b/crypto_sign/falcon-512/avx2/keygen.c new file mode 100644 index 00000000..818755f8 --- /dev/null +++ b/crypto_sign/falcon-512/avx2/keygen.c @@ -0,0 +1,4231 @@ +#include "inner.h" + +/* + * Falcon key pair generation. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +#define MKN(logn) ((size_t)1 << (logn)) + +/* ==================================================================== */ +/* + * Modular arithmetics. + * + * We implement a few functions for computing modulo a small integer p. + * + * All functions require that 2^30 < p < 2^31. Moreover, operands must + * be in the 0..p-1 range. + * + * Modular addition and subtraction work for all such p. + * + * Montgomery multiplication requires that p is odd, and must be provided + * with an additional value p0i = -1/p mod 2^31. See below for some basics + * on Montgomery multiplication. + * + * Division computes an inverse modulo p by an exponentiation (with + * exponent p-2): this works only if p is prime. Multiplication + * requirements also apply, i.e. p must be odd and p0i must be provided. + * + * The NTT and inverse NTT need all of the above, and also that + * p = 1 mod 2048. + * + * ----------------------------------------------------------------------- + * + * We use Montgomery representation with 31-bit values: + * + * Let R = 2^31 mod p. When 2^30 < p < 2^31, R = 2^31 - p. + * Montgomery representation of an integer x modulo p is x*R mod p. + * + * Montgomery multiplication computes (x*y)/R mod p for + * operands x and y. Therefore: + * + * - if operands are x*R and y*R (Montgomery representations of x and + * y), then Montgomery multiplication computes (x*R*y*R)/R = (x*y)*R + * mod p, which is the Montgomery representation of the product x*y; + * + * - if operands are x*R and y (or x and y*R), then Montgomery + * multiplication returns x*y mod p: mixed-representation + * multiplications yield results in normal representation. + * + * To convert to Montgomery representation, we multiply by R, which is done + * by Montgomery-multiplying by R^2. Stand-alone conversion back from + * Montgomery representation is Montgomery-multiplication by 1. 
+ */ + +/* + * Precomputed small primes. Each element contains the following: + * + * p The prime itself. + * + * g A primitive root of phi = X^N+1 (in field Z_p). + * + * s The inverse of the product of all previous primes in the array, + * computed modulo p and in Montgomery representation. + * + * All primes are such that p = 1 mod 2048, and are lower than 2^31. They + * are listed in decreasing order. + */ + +typedef struct { + uint32_t p; + uint32_t g; + uint32_t s; +} small_prime; + +static const small_prime PRIMES[] = { + { 2147473409, 383167813, 10239 }, + { 2147389441, 211808905, 471403745 }, + { 2147387393, 37672282, 1329335065 }, + { 2147377153, 1977035326, 968223422 }, + { 2147358721, 1067163706, 132460015 }, + { 2147352577, 1606082042, 598693809 }, + { 2147346433, 2033915641, 1056257184 }, + { 2147338241, 1653770625, 421286710 }, + { 2147309569, 631200819, 1111201074 }, + { 2147297281, 2038364663, 1042003613 }, + { 2147295233, 1962540515, 19440033 }, + { 2147239937, 2100082663, 353296760 }, + { 2147235841, 1991153006, 1703918027 }, + { 2147217409, 516405114, 1258919613 }, + { 2147205121, 409347988, 1089726929 }, + { 2147196929, 927788991, 1946238668 }, + { 2147178497, 1136922411, 1347028164 }, + { 2147100673, 868626236, 701164723 }, + { 2147082241, 1897279176, 617820870 }, + { 2147074049, 1888819123, 158382189 }, + { 2147051521, 25006327, 522758543 }, + { 2147043329, 327546255, 37227845 }, + { 2147039233, 766324424, 1133356428 }, + { 2146988033, 1862817362, 73861329 }, + { 2146963457, 404622040, 653019435 }, + { 2146959361, 1936581214, 995143093 }, + { 2146938881, 1559770096, 634921513 }, + { 2146908161, 422623708, 1985060172 }, + { 2146885633, 1751189170, 298238186 }, + { 2146871297, 578919515, 291810829 }, + { 2146846721, 1114060353, 915902322 }, + { 2146834433, 2069565474, 47859524 }, + { 2146818049, 1552824584, 646281055 }, + { 2146775041, 1906267847, 1597832891 }, + { 2146756609, 1847414714, 1228090888 }, + { 2146744321, 1818792070, 1176377637 }, 
+ { 2146738177, 1118066398, 1054971214 }, + { 2146736129, 52057278, 933422153 }, + { 2146713601, 592259376, 1406621510 }, + { 2146695169, 263161877, 1514178701 }, + { 2146656257, 685363115, 384505091 }, + { 2146650113, 927727032, 537575289 }, + { 2146646017, 52575506, 1799464037 }, + { 2146643969, 1276803876, 1348954416 }, + { 2146603009, 814028633, 1521547704 }, + { 2146572289, 1846678872, 1310832121 }, + { 2146547713, 919368090, 1019041349 }, + { 2146508801, 671847612, 38582496 }, + { 2146492417, 283911680, 532424562 }, + { 2146490369, 1780044827, 896447978 }, + { 2146459649, 327980850, 1327906900 }, + { 2146447361, 1310561493, 958645253 }, + { 2146441217, 412148926, 287271128 }, + { 2146437121, 293186449, 2009822534 }, + { 2146430977, 179034356, 1359155584 }, + { 2146418689, 1517345488, 1790248672 }, + { 2146406401, 1615820390, 1584833571 }, + { 2146404353, 826651445, 607120498 }, + { 2146379777, 3816988, 1897049071 }, + { 2146363393, 1221409784, 1986921567 }, + { 2146355201, 1388081168, 849968120 }, + { 2146336769, 1803473237, 1655544036 }, + { 2146312193, 1023484977, 273671831 }, + { 2146293761, 1074591448, 467406983 }, + { 2146283521, 831604668, 1523950494 }, + { 2146203649, 712865423, 1170834574 }, + { 2146154497, 1764991362, 1064856763 }, + { 2146142209, 627386213, 1406840151 }, + { 2146127873, 1638674429, 2088393537 }, + { 2146099201, 1516001018, 690673370 }, + { 2146093057, 1294931393, 315136610 }, + { 2146091009, 1942399533, 973539425 }, + { 2146078721, 1843461814, 2132275436 }, + { 2146060289, 1098740778, 360423481 }, + { 2146048001, 1617213232, 1951981294 }, + { 2146041857, 1805783169, 2075683489 }, + { 2146019329, 272027909, 1753219918 }, + { 2145986561, 1206530344, 2034028118 }, + { 2145976321, 1243769360, 1173377644 }, + { 2145964033, 887200839, 1281344586 }, + { 2145906689, 1651026455, 906178216 }, + { 2145875969, 1673238256, 1043521212 }, + { 2145871873, 1226591210, 1399796492 }, + { 2145841153, 1465353397, 1324527802 }, + { 2145832961, 
1150638905, 554084759 }, + { 2145816577, 221601706, 427340863 }, + { 2145785857, 608896761, 316590738 }, + { 2145755137, 1712054942, 1684294304 }, + { 2145742849, 1302302867, 724873116 }, + { 2145728513, 516717693, 431671476 }, + { 2145699841, 524575579, 1619722537 }, + { 2145691649, 1925625239, 982974435 }, + { 2145687553, 463795662, 1293154300 }, + { 2145673217, 771716636, 881778029 }, + { 2145630209, 1509556977, 837364988 }, + { 2145595393, 229091856, 851648427 }, + { 2145587201, 1796903241, 635342424 }, + { 2145525761, 715310882, 1677228081 }, + { 2145495041, 1040930522, 200685896 }, + { 2145466369, 949804237, 1809146322 }, + { 2145445889, 1673903706, 95316881 }, + { 2145390593, 806941852, 1428671135 }, + { 2145372161, 1402525292, 159350694 }, + { 2145361921, 2124760298, 1589134749 }, + { 2145359873, 1217503067, 1561543010 }, + { 2145355777, 338341402, 83865711 }, + { 2145343489, 1381532164, 641430002 }, + { 2145325057, 1883895478, 1528469895 }, + { 2145318913, 1335370424, 65809740 }, + { 2145312769, 2000008042, 1919775760 }, + { 2145300481, 961450962, 1229540578 }, + { 2145282049, 910466767, 1964062701 }, + { 2145232897, 816527501, 450152063 }, + { 2145218561, 1435128058, 1794509700 }, + { 2145187841, 33505311, 1272467582 }, + { 2145181697, 269767433, 1380363849 }, + { 2145175553, 56386299, 1316870546 }, + { 2145079297, 2106880293, 1391797340 }, + { 2145021953, 1347906152, 720510798 }, + { 2145015809, 206769262, 1651459955 }, + { 2145003521, 1885513236, 1393381284 }, + { 2144960513, 1810381315, 31937275 }, + { 2144944129, 1306487838, 2019419520 }, + { 2144935937, 37304730, 1841489054 }, + { 2144894977, 1601434616, 157985831 }, + { 2144888833, 98749330, 2128592228 }, + { 2144880641, 1772327002, 2076128344 }, + { 2144864257, 1404514762, 2029969964 }, + { 2144827393, 801236594, 406627220 }, + { 2144806913, 349217443, 1501080290 }, + { 2144796673, 1542656776, 2084736519 }, + { 2144778241, 1210734884, 1746416203 }, + { 2144759809, 1146598851, 716464489 }, + { 
2144757761, 286328400, 1823728177 }, + { 2144729089, 1347555695, 1836644881 }, + { 2144727041, 1795703790, 520296412 }, + { 2144696321, 1302475157, 852964281 }, + { 2144667649, 1075877614, 504992927 }, + { 2144573441, 198765808, 1617144982 }, + { 2144555009, 321528767, 155821259 }, + { 2144550913, 814139516, 1819937644 }, + { 2144536577, 571143206, 962942255 }, + { 2144524289, 1746733766, 2471321 }, + { 2144512001, 1821415077, 124190939 }, + { 2144468993, 917871546, 1260072806 }, + { 2144458753, 378417981, 1569240563 }, + { 2144421889, 175229668, 1825620763 }, + { 2144409601, 1699216963, 351648117 }, + { 2144370689, 1071885991, 958186029 }, + { 2144348161, 1763151227, 540353574 }, + { 2144335873, 1060214804, 919598847 }, + { 2144329729, 663515846, 1448552668 }, + { 2144327681, 1057776305, 590222840 }, + { 2144309249, 1705149168, 1459294624 }, + { 2144296961, 325823721, 1649016934 }, + { 2144290817, 738775789, 447427206 }, + { 2144243713, 962347618, 893050215 }, + { 2144237569, 1655257077, 900860862 }, + { 2144161793, 242206694, 1567868672 }, + { 2144155649, 769415308, 1247993134 }, + { 2144137217, 320492023, 515841070 }, + { 2144120833, 1639388522, 770877302 }, + { 2144071681, 1761785233, 964296120 }, + { 2144065537, 419817825, 204564472 }, + { 2144028673, 666050597, 2091019760 }, + { 2144010241, 1413657615, 1518702610 }, + { 2143952897, 1238327946, 475672271 }, + { 2143940609, 307063413, 1176750846 }, + { 2143918081, 2062905559, 786785803 }, + { 2143899649, 1338112849, 1562292083 }, + { 2143891457, 68149545, 87166451 }, + { 2143885313, 921750778, 394460854 }, + { 2143854593, 719766593, 133877196 }, + { 2143836161, 1149399850, 1861591875 }, + { 2143762433, 1848739366, 1335934145 }, + { 2143756289, 1326674710, 102999236 }, + { 2143713281, 808061791, 1156900308 }, + { 2143690753, 388399459, 1926468019 }, + { 2143670273, 1427891374, 1756689401 }, + { 2143666177, 1912173949, 986629565 }, + { 2143645697, 2041160111, 371842865 }, + { 2143641601, 1279906897, 2023974350 }, 
+ { 2143635457, 720473174, 1389027526 }, + { 2143621121, 1298309455, 1732632006 }, + { 2143598593, 1548762216, 1825417506 }, + { 2143567873, 620475784, 1073787233 }, + { 2143561729, 1932954575, 949167309 }, + { 2143553537, 354315656, 1652037534 }, + { 2143541249, 577424288, 1097027618 }, + { 2143531009, 357862822, 478640055 }, + { 2143522817, 2017706025, 1550531668 }, + { 2143506433, 2078127419, 1824320165 }, + { 2143488001, 613475285, 1604011510 }, + { 2143469569, 1466594987, 502095196 }, + { 2143426561, 1115430331, 1044637111 }, + { 2143383553, 9778045, 1902463734 }, + { 2143377409, 1557401276, 2056861771 }, + { 2143363073, 652036455, 1965915971 }, + { 2143260673, 1464581171, 1523257541 }, + { 2143246337, 1876119649, 764541916 }, + { 2143209473, 1614992673, 1920672844 }, + { 2143203329, 981052047, 2049774209 }, + { 2143160321, 1847355533, 728535665 }, + { 2143129601, 965558457, 603052992 }, + { 2143123457, 2140817191, 8348679 }, + { 2143100929, 1547263683, 694209023 }, + { 2143092737, 643459066, 1979934533 }, + { 2143082497, 188603778, 2026175670 }, + { 2143062017, 1657329695, 377451099 }, + { 2143051777, 114967950, 979255473 }, + { 2143025153, 1698431342, 1449196896 }, + { 2143006721, 1862741675, 1739650365 }, + { 2142996481, 756660457, 996160050 }, + { 2142976001, 927864010, 1166847574 }, + { 2142965761, 905070557, 661974566 }, + { 2142916609, 40932754, 1787161127 }, + { 2142892033, 1987985648, 675335382 }, + { 2142885889, 797497211, 1323096997 }, + { 2142871553, 2068025830, 1411877159 }, + { 2142861313, 1217177090, 1438410687 }, + { 2142830593, 409906375, 1767860634 }, + { 2142803969, 1197788993, 359782919 }, + { 2142785537, 643817365, 513932862 }, + { 2142779393, 1717046338, 218943121 }, + { 2142724097, 89336830, 416687049 }, + { 2142707713, 5944581, 1356813523 }, + { 2142658561, 887942135, 2074011722 }, + { 2142638081, 151851972, 1647339939 }, + { 2142564353, 1691505537, 1483107336 }, + { 2142533633, 1989920200, 1135938817 }, + { 2142529537, 959263126, 
1531961857 }, + { 2142527489, 453251129, 1725566162 }, + { 2142502913, 1536028102, 182053257 }, + { 2142498817, 570138730, 701443447 }, + { 2142416897, 326965800, 411931819 }, + { 2142363649, 1675665410, 1517191733 }, + { 2142351361, 968529566, 1575712703 }, + { 2142330881, 1384953238, 1769087884 }, + { 2142314497, 1977173242, 1833745524 }, + { 2142289921, 95082313, 1714775493 }, + { 2142283777, 109377615, 1070584533 }, + { 2142277633, 16960510, 702157145 }, + { 2142263297, 553850819, 431364395 }, + { 2142208001, 241466367, 2053967982 }, + { 2142164993, 1795661326, 1031836848 }, + { 2142097409, 1212530046, 712772031 }, + { 2142087169, 1763869720, 822276067 }, + { 2142078977, 644065713, 1765268066 }, + { 2142074881, 112671944, 643204925 }, + { 2142044161, 1387785471, 1297890174 }, + { 2142025729, 783885537, 1000425730 }, + { 2142011393, 905662232, 1679401033 }, + { 2141974529, 799788433, 468119557 }, + { 2141943809, 1932544124, 449305555 }, + { 2141933569, 1527403256, 841867925 }, + { 2141931521, 1247076451, 743823916 }, + { 2141902849, 1199660531, 401687910 }, + { 2141890561, 150132350, 1720336972 }, + { 2141857793, 1287438162, 663880489 }, + { 2141833217, 618017731, 1819208266 }, + { 2141820929, 999578638, 1403090096 }, + { 2141786113, 81834325, 1523542501 }, + { 2141771777, 120001928, 463556492 }, + { 2141759489, 122455485, 2124928282 }, + { 2141749249, 141986041, 940339153 }, + { 2141685761, 889088734, 477141499 }, + { 2141673473, 324212681, 1122558298 }, + { 2141669377, 1175806187, 1373818177 }, + { 2141655041, 1113654822, 296887082 }, + { 2141587457, 991103258, 1585913875 }, + { 2141583361, 1401451409, 1802457360 }, + { 2141575169, 1571977166, 712760980 }, + { 2141546497, 1107849376, 1250270109 }, + { 2141515777, 196544219, 356001130 }, + { 2141495297, 1733571506, 1060744866 }, + { 2141483009, 321552363, 1168297026 }, + { 2141458433, 505818251, 733225819 }, + { 2141360129, 1026840098, 948342276 }, + { 2141325313, 945133744, 2129965998 }, + { 2141317121, 
1871100260, 1843844634 }, + { 2141286401, 1790639498, 1750465696 }, + { 2141267969, 1376858592, 186160720 }, + { 2141255681, 2129698296, 1876677959 }, + { 2141243393, 2138900688, 1340009628 }, + { 2141214721, 1933049835, 1087819477 }, + { 2141212673, 1898664939, 1786328049 }, + { 2141202433, 990234828, 940682169 }, + { 2141175809, 1406392421, 993089586 }, + { 2141165569, 1263518371, 289019479 }, + { 2141073409, 1485624211, 507864514 }, + { 2141052929, 1885134788, 311252465 }, + { 2141040641, 1285021247, 280941862 }, + { 2141028353, 1527610374, 375035110 }, + { 2141011969, 1400626168, 164696620 }, + { 2140999681, 632959608, 966175067 }, + { 2140997633, 2045628978, 1290889438 }, + { 2140993537, 1412755491, 375366253 }, + { 2140942337, 719477232, 785367828 }, + { 2140925953, 45224252, 836552317 }, + { 2140917761, 1157376588, 1001839569 }, + { 2140887041, 278480752, 2098732796 }, + { 2140837889, 1663139953, 924094810 }, + { 2140788737, 802501511, 2045368990 }, + { 2140766209, 1820083885, 1800295504 }, + { 2140764161, 1169561905, 2106792035 }, + { 2140696577, 127781498, 1885987531 }, + { 2140684289, 16014477, 1098116827 }, + { 2140653569, 665960598, 1796728247 }, + { 2140594177, 1043085491, 377310938 }, + { 2140579841, 1732838211, 1504505945 }, + { 2140569601, 302071939, 358291016 }, + { 2140567553, 192393733, 1909137143 }, + { 2140557313, 406595731, 1175330270 }, + { 2140549121, 1748850918, 525007007 }, + { 2140477441, 499436566, 1031159814 }, + { 2140469249, 1886004401, 1029951320 }, + { 2140426241, 1483168100, 1676273461 }, + { 2140420097, 1779917297, 846024476 }, + { 2140413953, 522948893, 1816354149 }, + { 2140383233, 1931364473, 1296921241 }, + { 2140366849, 1917356555, 147196204 }, + { 2140354561, 16466177, 1349052107 }, + { 2140348417, 1875366972, 1860485634 }, + { 2140323841, 456498717, 1790256483 }, + { 2140321793, 1629493973, 150031888 }, + { 2140315649, 1904063898, 395510935 }, + { 2140280833, 1784104328, 831417909 }, + { 2140250113, 256087139, 697349101 }, 
+ { 2140229633, 388553070, 243875754 }, + { 2140223489, 747459608, 1396270850 }, + { 2140200961, 507423743, 1895572209 }, + { 2140162049, 580106016, 2045297469 }, + { 2140149761, 712426444, 785217995 }, + { 2140137473, 1441607584, 536866543 }, + { 2140119041, 346538902, 1740434653 }, + { 2140090369, 282642885, 21051094 }, + { 2140076033, 1407456228, 319910029 }, + { 2140047361, 1619330500, 1488632070 }, + { 2140041217, 2089408064, 2012026134 }, + { 2140008449, 1705524800, 1613440760 }, + { 2139924481, 1846208233, 1280649481 }, + { 2139906049, 989438755, 1185646076 }, + { 2139867137, 1522314850, 372783595 }, + { 2139842561, 1681587377, 216848235 }, + { 2139826177, 2066284988, 1784999464 }, + { 2139824129, 480888214, 1513323027 }, + { 2139789313, 847937200, 858192859 }, + { 2139783169, 1642000434, 1583261448 }, + { 2139770881, 940699589, 179702100 }, + { 2139768833, 315623242, 964612676 }, + { 2139666433, 331649203, 764666914 }, + { 2139641857, 2118730799, 1313764644 }, + { 2139635713, 519149027, 519212449 }, + { 2139598849, 1526413634, 1769667104 }, + { 2139574273, 551148610, 820739925 }, + { 2139568129, 1386800242, 472447405 }, + { 2139549697, 813760130, 1412328531 }, + { 2139537409, 1615286260, 1609362979 }, + { 2139475969, 1352559299, 1696720421 }, + { 2139455489, 1048691649, 1584935400 }, + { 2139432961, 836025845, 950121150 }, + { 2139424769, 1558281165, 1635486858 }, + { 2139406337, 1728402143, 1674423301 }, + { 2139396097, 1727715782, 1483470544 }, + { 2139383809, 1092853491, 1741699084 }, + { 2139369473, 690776899, 1242798709 }, + { 2139351041, 1768782380, 2120712049 }, + { 2139334657, 1739968247, 1427249225 }, + { 2139332609, 1547189119, 623011170 }, + { 2139310081, 1346827917, 1605466350 }, + { 2139303937, 369317948, 828392831 }, + { 2139301889, 1560417239, 1788073219 }, + { 2139283457, 1303121623, 595079358 }, + { 2139248641, 1354555286, 573424177 }, + { 2139240449, 60974056, 885781403 }, + { 2139222017, 355573421, 1221054839 }, + { 2139215873, 566477826, 
1724006500 }, + { 2139150337, 871437673, 1609133294 }, + { 2139144193, 1478130914, 1137491905 }, + { 2139117569, 1854880922, 964728507 }, + { 2139076609, 202405335, 756508944 }, + { 2139062273, 1399715741, 884826059 }, + { 2139045889, 1051045798, 1202295476 }, + { 2139033601, 1707715206, 632234634 }, + { 2139006977, 2035853139, 231626690 }, + { 2138951681, 183867876, 838350879 }, + { 2138945537, 1403254661, 404460202 }, + { 2138920961, 310865011, 1282911681 }, + { 2138910721, 1328496553, 103472415 }, + { 2138904577, 78831681, 993513549 }, + { 2138902529, 1319697451, 1055904361 }, + { 2138816513, 384338872, 1706202469 }, + { 2138810369, 1084868275, 405677177 }, + { 2138787841, 401181788, 1964773901 }, + { 2138775553, 1850532988, 1247087473 }, + { 2138767361, 874261901, 1576073565 }, + { 2138757121, 1187474742, 993541415 }, + { 2138748929, 1782458888, 1043206483 }, + { 2138744833, 1221500487, 800141243 }, + { 2138738689, 413465368, 1450660558 }, + { 2138695681, 739045140, 342611472 }, + { 2138658817, 1355845756, 672674190 }, + { 2138644481, 608379162, 1538874380 }, + { 2138632193, 1444914034, 686911254 }, + { 2138607617, 484707818, 1435142134 }, + { 2138591233, 539460669, 1290458549 }, + { 2138572801, 2093538990, 2011138646 }, + { 2138552321, 1149786988, 1076414907 }, + { 2138546177, 840688206, 2108985273 }, + { 2138533889, 209669619, 198172413 }, + { 2138523649, 1975879426, 1277003968 }, + { 2138490881, 1351891144, 1976858109 }, + { 2138460161, 1817321013, 1979278293 }, + { 2138429441, 1950077177, 203441928 }, + { 2138400769, 908970113, 628395069 }, + { 2138398721, 219890864, 758486760 }, + { 2138376193, 1306654379, 977554090 }, + { 2138351617, 298822498, 2004708503 }, + { 2138337281, 441457816, 1049002108 }, + { 2138320897, 1517731724, 1442269609 }, + { 2138290177, 1355911197, 1647139103 }, + { 2138234881, 531313247, 1746591962 }, + { 2138214401, 1899410930, 781416444 }, + { 2138202113, 1813477173, 1622508515 }, + { 2138191873, 1086458299, 1025408615 }, + { 
2138183681, 1998800427, 827063290 }, + { 2138173441, 1921308898, 749670117 }, + { 2138103809, 1620902804, 2126787647 }, + { 2138099713, 828647069, 1892961817 }, + { 2138085377, 179405355, 1525506535 }, + { 2138060801, 615683235, 1259580138 }, + { 2138044417, 2030277840, 1731266562 }, + { 2138042369, 2087222316, 1627902259 }, + { 2138032129, 126388712, 1108640984 }, + { 2138011649, 715026550, 1017980050 }, + { 2137993217, 1693714349, 1351778704 }, + { 2137888769, 1289762259, 1053090405 }, + { 2137853953, 199991890, 1254192789 }, + { 2137833473, 941421685, 896995556 }, + { 2137817089, 750416446, 1251031181 }, + { 2137792513, 798075119, 368077456 }, + { 2137786369, 878543495, 1035375025 }, + { 2137767937, 9351178, 1156563902 }, + { 2137755649, 1382297614, 1686559583 }, + { 2137724929, 1345472850, 1681096331 }, + { 2137704449, 834666929, 630551727 }, + { 2137673729, 1646165729, 1892091571 }, + { 2137620481, 778943821, 48456461 }, + { 2137618433, 1730837875, 1713336725 }, + { 2137581569, 805610339, 1378891359 }, + { 2137538561, 204342388, 1950165220 }, + { 2137526273, 1947629754, 1500789441 }, + { 2137516033, 719902645, 1499525372 }, + { 2137491457, 230451261, 556382829 }, + { 2137440257, 979573541, 412760291 }, + { 2137374721, 927841248, 1954137185 }, + { 2137362433, 1243778559, 861024672 }, + { 2137313281, 1341338501, 980638386 }, + { 2137311233, 937415182, 1793212117 }, + { 2137255937, 795331324, 1410253405 }, + { 2137243649, 150756339, 1966999887 }, + { 2137182209, 163346914, 1939301431 }, + { 2137171969, 1952552395, 758913141 }, + { 2137159681, 570788721, 218668666 }, + { 2137147393, 1896656810, 2045670345 }, + { 2137141249, 358493842, 518199643 }, + { 2137139201, 1505023029, 674695848 }, + { 2137133057, 27911103, 830956306 }, + { 2137122817, 439771337, 1555268614 }, + { 2137116673, 790988579, 1871449599 }, + { 2137110529, 432109234, 811805080 }, + { 2137102337, 1357900653, 1184997641 }, + { 2137098241, 515119035, 1715693095 }, + { 2137090049, 408575203, 2085660657 
}, + { 2137085953, 2097793407, 1349626963 }, + { 2137055233, 1556739954, 1449960883 }, + { 2137030657, 1545758650, 1369303716 }, + { 2136987649, 332602570, 103875114 }, + { 2136969217, 1499989506, 1662964115 }, + { 2136924161, 857040753, 4738842 }, + { 2136895489, 1948872712, 570436091 }, + { 2136893441, 58969960, 1568349634 }, + { 2136887297, 2127193379, 273612548 }, + { 2136850433, 111208983, 1181257116 }, + { 2136809473, 1627275942, 1680317971 }, + { 2136764417, 1574888217, 14011331 }, + { 2136741889, 14011055, 1129154251 }, + { 2136727553, 35862563, 1838555253 }, + { 2136721409, 310235666, 1363928244 }, + { 2136698881, 1612429202, 1560383828 }, + { 2136649729, 1138540131, 800014364 }, + { 2136606721, 602323503, 1433096652 }, + { 2136563713, 182209265, 1919611038 }, + { 2136555521, 324156477, 165591039 }, + { 2136549377, 195513113, 217165345 }, + { 2136526849, 1050768046, 939647887 }, + { 2136508417, 1886286237, 1619926572 }, + { 2136477697, 609647664, 35065157 }, + { 2136471553, 679352216, 1452259468 }, + { 2136457217, 128630031, 824816521 }, + { 2136422401, 19787464, 1526049830 }, + { 2136420353, 698316836, 1530623527 }, + { 2136371201, 1651862373, 1804812805 }, + { 2136334337, 326596005, 336977082 }, + { 2136322049, 63253370, 1904972151 }, + { 2136297473, 312176076, 172182411 }, + { 2136248321, 381261841, 369032670 }, + { 2136242177, 358688773, 1640007994 }, + { 2136229889, 512677188, 75585225 }, + { 2136219649, 2095003250, 1970086149 }, + { 2136207361, 1909650722, 537760675 }, + { 2136176641, 1334616195, 1533487619 }, + { 2136158209, 2096285632, 1793285210 }, + { 2136143873, 1897347517, 293843959 }, + { 2136133633, 923586222, 1022655978 }, + { 2136096769, 1464868191, 1515074410 }, + { 2136094721, 2020679520, 2061636104 }, + { 2136076289, 290798503, 1814726809 }, + { 2136041473, 156415894, 1250757633 }, + { 2135996417, 297459940, 1132158924 }, + { 2135955457, 538755304, 1688831340 }, + { 0, 0, 0 } +}; + +/* + * Reduce a small signed integer modulo a small 
prime. The source
 * value x MUST be such that -p < x < p. The result is in the 0..p-1
 * range (p is added back, via a branchless mask, when x is negative).
 */
static inline uint32_t
modp_set(int32_t x, uint32_t p) {
    uint32_t w;

    w = (uint32_t)x;
    /* If x was negative, w >> 31 == 1 and the mask adds p back. */
    w += p & -(w >> 31);
    return w;
}

/*
 * Normalize a modular integer around 0: map x (in 0..p-1) to the
 * signed representative in -(p-1)/2..+(p-1)/2. Constant-time: the
 * comparison against (p+1)/2 is folded into a mask.
 */
static inline int32_t
modp_norm(uint32_t x, uint32_t p) {
    return (int32_t)(x - (p & (((x - ((p + 1) >> 1)) >> 31) - 1)));
}

/*
 * Compute -1/p mod 2^31. This works for all odd integers p that fit
 * on 31 bits. Uses Newton/Hensel iteration: each step doubles the
 * number of correct low bits of the inverse (2 -> 4 -> 8 -> 16 -> 32).
 */
static uint32_t
modp_ninv31(uint32_t p) {
    uint32_t y;

    y = 2 - p;
    y *= 2 - p * y;
    y *= 2 - p * y;
    y *= 2 - p * y;
    y *= 2 - p * y;
    /* y is now 1/p mod 2^32; keep 31 bits of its negation. */
    return (uint32_t)0x7FFFFFFF & -y;
}

/*
 * Compute R = 2^31 mod p.
 */
static inline uint32_t
modp_R(uint32_t p) {
    /*
     * Since 2^30 < p < 2^31, we know that 2^31 mod p is simply
     * 2^31 - p.
     */
    return ((uint32_t)1 << 31) - p;
}

/*
 * Addition modulo p. Operands must be in 0..p-1; constant-time
 * conditional correction via mask.
 */
static inline uint32_t
modp_add(uint32_t a, uint32_t b, uint32_t p) {
    uint32_t d;

    d = a + b - p;
    /* If the subtraction underflowed (bit 31 set), add p back. */
    d += p & -(d >> 31);
    return d;
}

/*
 * Subtraction modulo p. Operands must be in 0..p-1; constant-time
 * conditional correction via mask.
 */
static inline uint32_t
modp_sub(uint32_t a, uint32_t b, uint32_t p) {
    uint32_t d;

    d = a - b;
    d += p & -(d >> 31);
    return d;
}

/*
 * Halving modulo p.
 */
/* unused
static inline uint32_t
modp_half(uint32_t a, uint32_t p)
{
    a += p & -(a & 1);
    return a >> 1;
}
*/

/*
 * Montgomery multiplication modulo p: returns (a*b)/R mod p where
 * R = 2^31. The 'p0i' value is -1/p mod 2^31. It is required that p
 * is an odd integer. Constant-time.
 */
static inline uint32_t
modp_montymul(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i) {
    uint64_t z, w;
    uint32_t d;

    z = (uint64_t)a * (uint64_t)b;
    /* w is the multiple of p that clears the low 31 bits of z. */
    w = ((z * p0i) & (uint64_t)0x7FFFFFFF) * p;
    /* (z + w) >> 31 is in 0..2p-1; subtract p and correct. */
    d = (uint32_t)((z + w) >> 31) - p;
    d += p & -(d >> 31);
    return d;
}

/*
 * Compute R2 = 2^62 mod p.
 */
static uint32_t
modp_R2(uint32_t p, uint32_t p0i) {
    uint32_t z;

    /*
     * Compute z = 2^31 mod p (this is the value 1 in Montgomery
     * representation), then double it with an addition.
     */
    z = modp_R(p);
    z = modp_add(z, z, p);

    /*
     * Square it five times to obtain 2^32 in Montgomery representation
     * (i.e. 2^63 mod p).
     */
    z = modp_montymul(z, z, p, p0i);
    z = modp_montymul(z, z, p, p0i);
    z = modp_montymul(z, z, p, p0i);
    z = modp_montymul(z, z, p, p0i);
    z = modp_montymul(z, z, p, p0i);

    /*
     * Halve the value mod p to get 2^62 (z is made even first by
     * conditionally adding the odd modulus p).
     */
    z = (z + (p & -(z & 1))) >> 1;
    return z;
}

/*
 * Compute 2^(31*x) modulo p. This works for integers x up to 2^11.
 * p must be prime such that 2^30 < p < 2^31; p0i must be equal to
 * -1/p mod 2^31; R2 must be equal to 2^62 mod p.
 */
static inline uint32_t
modp_Rx(unsigned x, uint32_t p, uint32_t p0i, uint32_t R2) {
    int i;
    uint32_t r, z;

    /*
     * 2^(31*x) = (2^31)*(2^(31*(x-1))); i.e. we want the Montgomery
     * representation of (2^31)^e mod p, where e = x-1.
     * R2 is 2^31 in Montgomery representation.
     * Square-and-multiply over the bits of e.
     */
    x --;
    r = R2;
    z = modp_R(p);
    for (i = 0; (1U << i) <= x; i ++) {
        if ((x & (1U << i)) != 0) {
            z = modp_montymul(z, r, p, p0i);
        }
        r = modp_montymul(r, r, p, p0i);
    }
    return z;
}

/*
 * Division modulo p. If the divisor (b) is 0, then 0 is returned.
 * This function computes proper results only when p is prime.
 * Parameters:
 *   a     dividend
 *   b     divisor
 *   p     odd prime modulus
 *   p0i   -1/p mod 2^31
 *   R     2^31 mod p
 */
static uint32_t
modp_div(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i, uint32_t R) {
    uint32_t z, e;
    int i;

    /*
     * Compute b^(p-2) mod p (Fermat inversion) with a constant-time
     * square-and-multiply: the multiply result is selected by mask.
     */
    e = p - 2;
    z = R;
    for (i = 30; i >= 0; i --) {
        uint32_t z2;

        z = modp_montymul(z, z, p, p0i);
        z2 = modp_montymul(z, b, p, p0i);
        z ^= (z ^ z2) & -(uint32_t)((e >> i) & 1);
    }

    /*
     * The loop above just assumed that b was in Montgomery
     * representation, i.e.
really contained b*R; under that + * assumption, it returns 1/b in Montgomery representation, + * which is R/b. But we gave it b in normal representation, + * so the loop really returned R/(b/R) = R^2/b. + * + * We want a/b, so we need one Montgomery multiplication with a, + * which also remove one of the R factors, and another such + * multiplication to remove the second R factor. + */ + z = modp_montymul(z, 1, p, p0i); + return modp_montymul(a, z, p, p0i); +} + +/* + * Bit-reversal index table. + */ +static const uint16_t REV10[] = { + 0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832, + 192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928, + 96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784, + 144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976, + 48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880, + 240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904, + 72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808, + 168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000, + 24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856, + 216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952, + 120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772, + 132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964, + 36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868, + 228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 916, + 84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820, + 180, 692, 436, 948, 116, 628, 372, 884, 244, 756, 500, 1012, + 12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844, + 204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940, + 108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796, + 156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988, + 60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892, + 252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898, + 66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802, + 162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 
994, + 18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850, + 210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946, + 114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778, + 138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970, + 42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874, + 234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922, + 90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826, + 186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018, + 6, 518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838, + 198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934, + 102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790, + 150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982, + 54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886, + 246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910, + 78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814, + 174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006, + 30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862, + 222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958, + 126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769, + 129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961, + 33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865, + 225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913, + 81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817, + 177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009, + 9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841, + 201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937, + 105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793, + 153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985, + 57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889, + 249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901, + 69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805, + 165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997, + 21, 533, 277, 789, 149, 661, 405, 917, 
85, 597, 341, 853, + 213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949, + 117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781, + 141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973, + 45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877, + 237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925, + 93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829, + 189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021, + 3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 323, 835, + 195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931, + 99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787, + 147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979, + 51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883, + 243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907, + 75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811, + 171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003, + 27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859, + 219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955, + 123, 635, 379, 891, 251, 763, 507, 1019, 7, 519, 263, 775, + 135, 647, 391, 903, 71, 583, 327, 839, 199, 711, 455, 967, + 39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871, + 231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919, + 87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823, + 183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015, + 15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847, + 207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943, + 111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799, + 159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991, + 63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895, + 255, 767, 511, 1023 +}; + +/* + * Compute the roots for NTT and inverse NTT (binary case). Input + * parameter g is a primitive 2048-th root of 1 modulo p (i.e. g^1024 = + * -1 mod p). 
This fills gm[] and igm[] with powers of g and 1/g:
 *   gm[rev(i)] = g^i mod p
 *   igm[rev(i)] = (1/g)^i mod p
 * where rev() is the "bit reversal" function over 10 bits. It fills
 * the arrays only up to N = 2^logn values.
 *
 * The values stored in gm[] and igm[] are in Montgomery representation.
 *
 * p must be a prime such that p = 1 mod 2048.
 */
static void
modp_mkgm2(uint32_t *gm, uint32_t *igm, unsigned logn,
           uint32_t g, uint32_t p, uint32_t p0i) {
    size_t u, n;
    unsigned k;
    uint32_t ig, x1, x2, R2;

    n = (size_t)1 << logn;

    /*
     * We want g such that g^(2N) = 1 mod p, but the provided
     * generator has order 2048. We must square it a few times.
     * The first montymul by R2 converts g to Montgomery form.
     */
    R2 = modp_R2(p, p0i);
    g = modp_montymul(g, R2, p, p0i);
    for (k = logn; k < 10; k ++) {
        g = modp_montymul(g, g, p, p0i);
    }

    /* ig = 1/g (Montgomery form): R2/g removes one R factor. */
    ig = modp_div(R2, g, p, p0i, modp_R(p));
    k = 10 - logn;
    /* x1, x2 start at R = Montgomery representation of 1. */
    x1 = x2 = modp_R(p);
    for (u = 0; u < n; u ++) {
        size_t v;

        /* Scale the 10-bit reversal table down to logn bits. */
        v = REV10[u << k];
        gm[v] = x1;
        igm[v] = x2;
        x1 = modp_montymul(x1, g, p, p0i);
        x2 = modp_montymul(x2, ig, p, p0i);
    }
}

/*
 * Compute the NTT over a polynomial (binary case). Polynomial elements
 * are a[0], a[stride], a[2 * stride]... The gm[] table must have been
 * produced by modp_mkgm2(); transformation is done in place.
 */
static void
modp_NTT2_ext(uint32_t *a, size_t stride, const uint32_t *gm, unsigned logn,
              uint32_t p, uint32_t p0i) {
    size_t t, m, n;

    if (logn == 0) {
        return;
    }
    n = (size_t)1 << logn;
    t = n;
    /* Iterative Cooley-Tukey: m doubles, butterfly half-span ht halves. */
    for (m = 1; m < n; m <<= 1) {
        size_t ht, u, v1;

        ht = t >> 1;
        for (u = 0, v1 = 0; u < m; u ++, v1 += t) {
            uint32_t s;
            size_t v;
            uint32_t *r1, *r2;

            s = gm[m + u];
            r1 = a + v1 * stride;
            r2 = r1 + ht * stride;
            for (v = 0; v < ht; v ++, r1 += stride, r2 += stride) {
                uint32_t x, y;

                x = *r1;
                y = modp_montymul(*r2, s, p, p0i);
                *r1 = modp_add(x, y, p);
                *r2 = modp_sub(x, y, p);
            }
        }
        t = ht;
    }
}

/*
 * Compute the inverse NTT over a polynomial (binary case).
+ */ +static void +modp_iNTT2_ext(uint32_t *a, size_t stride, const uint32_t *igm, unsigned logn, + uint32_t p, uint32_t p0i) { + size_t t, m, n, k; + uint32_t ni; + uint32_t *r; + + if (logn == 0) { + return; + } + n = (size_t)1 << logn; + t = 1; + for (m = n; m > 1; m >>= 1) { + size_t hm, dt, u, v1; + + hm = m >> 1; + dt = t << 1; + for (u = 0, v1 = 0; u < hm; u ++, v1 += dt) { + uint32_t s; + size_t v; + uint32_t *r1, *r2; + + s = igm[hm + u]; + r1 = a + v1 * stride; + r2 = r1 + t * stride; + for (v = 0; v < t; v ++, r1 += stride, r2 += stride) { + uint32_t x, y; + + x = *r1; + y = *r2; + *r1 = modp_add(x, y, p); + *r2 = modp_montymul( + modp_sub(x, y, p), s, p, p0i);; + } + } + t = dt; + } + + /* + * We need 1/n in Montgomery representation, i.e. R/n. Since + * 1 <= logn <= 10, R/n is an integer; morever, R/n <= 2^30 < p, + * thus a simple shift will do. + */ + ni = (uint32_t)1 << (31 - logn); + for (k = 0, r = a; k < n; k ++, r += stride) { + *r = modp_montymul(*r, ni, p, p0i); + } +} + +/* + * Simplified macros for NTT and iNTT (binary case) when the elements + * are consecutive in RAM. + */ +#define modp_NTT2(a, gm, logn, p, p0i) modp_NTT2_ext(a, 1, gm, logn, p, p0i) +#define modp_iNTT2(a, igm, logn, p, p0i) modp_iNTT2_ext(a, 1, igm, logn, p, p0i) + +/* + * Given polynomial f in NTT representation modulo p, compute f' of degree + * less than N/2 such that f' = f0^2 - X*f1^2, where f0 and f1 are + * polynomials of degree less than N/2 such that f = f0(X^2) + X*f1(X^2). + * + * The new polynomial is written "in place" over the first N/2 elements + * of f. + * + * If applied logn times successively on a given polynomial, the resulting + * degree-0 polynomial is the resultant of f and X^N+1 modulo p. + * + * This function applies only to the binary case; it is invoked from + * solve_NTRU_binary_depth1(). 
 */
static void
modp_poly_rec_res(uint32_t *f, unsigned logn,
    uint32_t p, uint32_t p0i, uint32_t R2) {
    size_t hn, u;

    hn = (size_t)1 << (logn - 1);
    for (u = 0; u < hn; u ++) {
        uint32_t w0, w1;

        w0 = f[(u << 1) + 0];
        w1 = f[(u << 1) + 1];
        /*
         * One Montgomery multiplication yields w0*w1/R; the extra
         * multiplication by R2 (= 2^62 mod p) restores Montgomery
         * representation.
         */
        f[u] = modp_montymul(modp_montymul(w0, w1, p, p0i), R2, p, p0i);
    }
}

/* ==================================================================== */
/*
 * Custom bignum implementation.
 *
 * This is a very reduced set of functionalities. We need to do the
 * following operations:
 *
 *  - Rebuild the resultant and the polynomial coefficients from their
 *    values modulo small primes (of length 31 bits each).
 *
 *  - Compute an extended GCD between the two computed resultants.
 *
 *  - Extract top bits and add scaled values during the successive steps
 *    of Babai rounding.
 *
 * When rebuilding values using CRT, we must also recompute the product
 * of the small prime factors. We always do it one small factor at a
 * time, so the "complicated" operations can be done modulo the small
 * prime with the modp_* functions. CRT coefficients (inverses) are
 * precomputed.
 *
 * All values are positive until the last step: when the polynomial
 * coefficients have been rebuilt, we normalize them around 0. But then,
 * only additions and subtractions on the upper few bits are needed
 * afterwards.
 *
 * We keep big integers as arrays of 31-bit words (in uint32_t values);
 * the top bit of each uint32_t is kept equal to 0. Using 31-bit words
 * makes it easier to keep track of carries. When negative values are
 * used, two's complement is used.
 */

/*
 * Subtract integer b from integer a. Both integers are supposed to have
 * the same size. The carry (0 or 1) is returned. Source arrays a and b
 * MUST be distinct.
 *
 * The operation is performed as described above if ctl = 1.
If
 * ctl = 0, the value a[] is unmodified, but all memory accesses are
 * still performed, and the carry is computed and returned.
 * (Constant-time conditional subtraction: memory traffic does not
 * depend on ctl.)
 */
static uint32_t
zint_sub(uint32_t *a, const uint32_t *b, size_t len,
    uint32_t ctl) {
    size_t u;
    uint32_t cc, m;

    cc = 0;
    /* m = all-ones mask when ctl = 1, zero when ctl = 0. */
    m = -ctl;
    for (u = 0; u < len; u ++) {
        uint32_t aw, w;

        aw = a[u];
        /* Words are 31 bits; bit 31 of w is the borrow. */
        w = aw - b[u] - cc;
        cc = w >> 31;
        /* Branch-free select: keep aw (ctl=0) or take new word (ctl=1). */
        aw ^= ((w & 0x7FFFFFFF) ^ aw) & m;
        a[u] = aw;
    }
    return cc;
}

/*
 * Mutiply the provided big integer m with a small value x.
 * This function assumes that x < 2^31. The carry word is returned.
 */
static uint32_t
zint_mul_small(uint32_t *m, size_t mlen, uint32_t x) {
    size_t u;
    uint32_t cc;

    cc = 0;
    for (u = 0; u < mlen; u ++) {
        uint64_t z;

        /* 31x31-bit product + 31-bit carry fits in 64 bits. */
        z = (uint64_t)m[u] * (uint64_t)x + cc;
        m[u] = (uint32_t)z & 0x7FFFFFFF;
        cc = (uint32_t)(z >> 31);
    }
    return cc;
}

/*
 * Reduce a big integer d modulo a small integer p.
 * Rules:
 *  d is unsigned
 *  p is prime
 *  2^30 < p < 2^31
 *  p0i = -(1/p) mod 2^31
 *  R2 = 2^62 mod p
 */
static uint32_t
zint_mod_small_unsigned(const uint32_t *d, size_t dlen,
    uint32_t p, uint32_t p0i, uint32_t R2) {
    uint32_t x;
    size_t u;

    /*
     * Algorithm: we inject words one by one, starting with the high
     * word. Each step is:
     *  - multiply x by 2^31
     *  - add new word
     */
    x = 0;
    u = dlen;
    while (u -- > 0) {
        uint32_t w;

        /* Montgomery mul by R2 = multiply x by 2^31 mod p. */
        x = modp_montymul(x, R2, p, p0i);
        /* Branch-free reduction of d[u] into [0, p). */
        w = d[u] - p;
        w += p & -(w >> 31);
        x = modp_add(x, w, p);
    }
    return x;
}

/*
 * Similar to zint_mod_small_unsigned(), except that d may be signed.
 * Extra parameter is Rx = 2^(31*dlen) mod p.
 */
static uint32_t
zint_mod_small_signed(const uint32_t *d, size_t dlen,
    uint32_t p, uint32_t p0i, uint32_t R2, uint32_t Rx) {
    uint32_t z;

    if (dlen == 0) {
        return 0;
    }
    z = zint_mod_small_unsigned(d, dlen, p, p0i, R2);
    /*
     * If d is negative (bit 30 of top word set, two's complement over
     * 31-bit words), subtract Rx = 2^(31*dlen) mod p to compensate.
     */
    z = modp_sub(z, Rx & -(d[dlen - 1] >> 30), p);
    return z;
}

/*
 * Add y*s to x.
x and y initially have length 'len' words; the new x
 * has length 'len+1' words. 's' must fit on 31 bits. x[] and y[] must
 * not overlap.
 */
static void
zint_add_mul_small(uint32_t *x,
    const uint32_t *y, size_t len, uint32_t s) {
    size_t u;
    uint32_t cc;

    cc = 0;
    for (u = 0; u < len; u ++) {
        uint32_t xw, yw;
        uint64_t z;

        xw = x[u];
        yw = y[u];
        /* 31x31 product + two 31-bit addends: no 64-bit overflow. */
        z = (uint64_t)yw * (uint64_t)s + (uint64_t)xw + (uint64_t)cc;
        x[u] = (uint32_t)z & 0x7FFFFFFF;
        cc = (uint32_t)(z >> 31);
    }
    /* Final carry becomes the extra (len+1)-th word. */
    x[len] = cc;
}

/*
 * Normalize a modular integer around 0: if x > p/2, then x is replaced
 * with x - p (signed encoding with two's complement); otherwise, x is
 * untouched. The two integers x and p are encoded over the same length.
 */
static void
zint_norm_zero(uint32_t *x, const uint32_t *p, size_t len) {
    size_t u;
    uint32_t r, bb;

    /*
     * Compare x with p/2. We use the shifted version of p, and p
     * is odd, so we really compare with (p-1)/2; we want to perform
     * the subtraction if and only if x > (p-1)/2.
     */
    r = 0;
    bb = 0;
    u = len;
    while (u -- > 0) {
        uint32_t wx, wp, cc;

        /*
         * Get the two words to compare in wx and wp (both over
         * 31 bits exactly). bb carries the bit of p shifted out
         * of the previous (higher) word.
         */
        wx = x[u];
        wp = (p[u] >> 1) | (bb << 30);
        bb = p[u] & 1;

        /*
         * We set cc to -1, 0 or 1, depending on whether wp is
         * lower than, equal to, or greater than wx.
         * (Branch-free signum of wp - wx.)
         */
        cc = wp - wx;
        cc = ((-cc) >> 31) | -(cc >> 31);

        /*
         * If r != 0 then it is either 1 or -1, and we keep its
         * value. Otherwise, if r = 0, then we replace it with cc.
         */
        r |= cc & ((r & 1) - 1);
    }

    /*
     * At this point, r = -1, 0 or 1, depending on whether (p-1)/2
     * is lower than, equal to, or greater than x. We thus want to
     * do the subtraction only if r = -1.
     */
    zint_sub(x, p, len, r >> 31);
}

/*
 * Rebuild integers from their RNS representation. There are 'num'
 * integers, and each consists in 'xlen' words.
'xx' points at that
 * first word of the first integer; subsequent integers are accessed
 * by adding 'xstride' repeatedly.
 *
 * The words of an integer are the RNS representation of that integer,
 * using the provided 'primes' are moduli. This function replaces
 * each integer with its multi-word value (little-endian order).
 *
 * If "normalize_signed" is non-zero, then the returned value is
 * normalized to the -m/2..m/2 interval (where m is the product of all
 * small prime moduli); two's complement is used for negative values.
 *
 * tmp[] must hold at least xlen words; on exit it contains the product
 * of all the prime moduli used.
 */
static void
zint_rebuild_CRT(uint32_t *xx, size_t xlen, size_t xstride,
    size_t num, const small_prime *primes, int normalize_signed,
    uint32_t *tmp) {
    size_t u;
    uint32_t *x;

    tmp[0] = primes[0].p;
    for (u = 1; u < xlen; u ++) {
        /*
         * At the entry of each loop iteration:
         *  - the first u words of each array have been
         *    reassembled;
         *  - the first u words of tmp[] contains the
         *    product of the prime moduli processed so far.
         *
         * We call 'q' the product of all previous primes.
         */
        uint32_t p, p0i, s, R2;
        size_t v;

        p = primes[u].p;
        s = primes[u].s;
        p0i = modp_ninv31(p);
        R2 = modp_R2(p, p0i);

        for (v = 0, x = xx; v < num; v ++, x += xstride) {
            uint32_t xp, xq, xr;
            /*
             * xp = the integer x modulo the prime p for this
             *      iteration
             * xq = (x mod q) mod p
             */
            xp = x[u];
            xq = zint_mod_small_unsigned(x, u, p, p0i, R2);

            /*
             * New value is (x mod q) + q * (s * (xp - xq) mod p)
             * (standard incremental Garner CRT step; 's' is the
             * precomputed 1/q mod p from the primes table).
             */
            xr = modp_montymul(s, modp_sub(xp, xq, p), p, p0i);
            zint_add_mul_small(x, tmp, u, xr);
        }

        /*
         * Update product of primes in tmp[].
         */
        tmp[u] = zint_mul_small(tmp, u, p);
    }

    /*
     * Normalize the reconstructed values around 0.
     */
    if (normalize_signed) {
        for (u = 0, x = xx; u < num; u ++, x += xstride) {
            zint_norm_zero(x, tmp, xlen);
        }
    }
}

/*
 * Negate a big integer conditionally: value a is replaced with -a if
 * and only if ctl = 1.
Control value ctl must be 0 or 1.
 */
static void
zint_negate(uint32_t *a, size_t len, uint32_t ctl) {
    size_t u;
    uint32_t cc, m;

    /*
     * If ctl = 1 then we flip the bits of a by XORing with
     * 0x7FFFFFFF, and we add 1 to the value. If ctl = 0 then we XOR
     * with 0 and add 0, which leaves the value unchanged.
     * (Two's complement negation over 31-bit words, branch-free.)
     */
    cc = ctl;
    m = -ctl >> 1;
    for (u = 0; u < len; u ++) {
        uint32_t aw;

        aw = a[u];
        aw = (aw ^ m) + cc;
        a[u] = aw & 0x7FFFFFFF;
        cc = aw >> 31;
    }
}

/*
 * Replace a with (a*xa+b*xb)/(2^31) and b with (a*ya+b*yb)/(2^31).
 * The low bits are dropped (the caller should compute the coefficients
 * such that these dropped bits are all zeros). If either or both
 * yields a negative value, then the value is negated.
 *
 * Returned value is:
 *  0  both values were positive
 *  1  new a had to be negated
 *  2  new b had to be negated
 *  3  both new a and new b had to be negated
 *
 * Coefficients xa, xb, ya and yb may use the full signed 32-bit range.
 */
static uint32_t
zint_co_reduce(uint32_t *a, uint32_t *b, size_t len,
    int64_t xa, int64_t xb, int64_t ya, int64_t yb) {
    size_t u;
    int64_t cca, ccb;
    uint32_t nega, negb;

    cca = 0;
    ccb = 0;
    for (u = 0; u < len; u ++) {
        uint32_t wa, wb;
        uint64_t za, zb;

        wa = a[u];
        wb = b[u];
        /* Wrap-around of uint64_t implements the signed arithmetic. */
        za = wa * (uint64_t)xa + wb * (uint64_t)xb + (uint64_t)cca;
        zb = wa * (uint64_t)ya + wb * (uint64_t)yb + (uint64_t)ccb;
        /* The division by 2^31 is realized by the one-word lag. */
        if (u > 0) {
            a[u - 1] = (uint32_t)za & 0x7FFFFFFF;
            b[u - 1] = (uint32_t)zb & 0x7FFFFFFF;
        }
        /*
         * Signed carry = arithmetic shift of the 64-bit value.
         * NOTE(review): the pointer cast aliases uint64_t as
         * int64_t (allowed: corresponding signed type) and relies
         * on '>>' of a negative int64_t being arithmetic, which is
         * implementation-defined in C but holds on all supported
         * compilers.
         */
        cca = *(int64_t *)&za >> 31;
        ccb = *(int64_t *)&zb >> 31;
    }
    a[len - 1] = (uint32_t)cca;
    b[len - 1] = (uint32_t)ccb;

    /* Sign bits of the final carries tell whether a/b went negative. */
    nega = (uint32_t)((uint64_t)cca >> 63);
    negb = (uint32_t)((uint64_t)ccb >> 63);
    zint_negate(a, len, nega);
    zint_negate(b, len, negb);
    return nega | (negb << 1);
}

/*
 * Finish modular reduction.
Rules on input parameters:
 *
 *   if neg = 1, then -m <= a < 0
 *   if neg = 0, then 0 <= a < 2*m
 *
 * If neg = 0, then the top word of a[] is allowed to use 32 bits.
 *
 * Modulus m must be odd.
 */
static void
zint_finish_mod(uint32_t *a, size_t len, const uint32_t *m, uint32_t neg) {
    size_t u;
    uint32_t cc, xm, ym;

    /*
     * First pass: compare a (assumed nonnegative) with m. Note that
     * if the top word uses 32 bits, subtracting m must yield a
     * value less than 2^31 since a < 2*m.
     * After the loop, cc = 1 iff a < m (final borrow).
     */
    cc = 0;
    for (u = 0; u < len; u ++) {
        cc = (a[u] - m[u] - cc) >> 31;
    }

    /*
     * If neg = 1 then we must add m (regardless of cc)
     * If neg = 0 and cc = 0 then we must subtract m
     * If neg = 0 and cc = 1 then we must do nothing
     *
     * In the loop below, we conditionally subtract either m or -m
     * from a. Word xm is a word of m (if neg = 0) or -m (if neg = 1);
     * but if neg = 0 and cc = 1, then ym = 0 and it forces mw to 0.
     */
    xm = -neg >> 1;
    ym = -(neg | (1 - cc));
    cc = neg;
    for (u = 0; u < len; u ++) {
        uint32_t aw, mw;

        aw = a[u];
        mw = (m[u] ^ xm) & ym;
        aw = aw - mw - cc;
        a[u] = aw & 0x7FFFFFFF;
        cc = aw >> 31;
    }
}

/*
 * Replace a with (a*xa+b*xb)/(2^31) mod m, and b with
 * (a*ya+b*yb)/(2^31) mod m. Modulus m must be odd; m0i = -1/m[0] mod 2^31.
 */
static void
zint_co_reduce_mod(uint32_t *a, uint32_t *b, const uint32_t *m, size_t len,
    uint32_t m0i, int64_t xa, int64_t xb, int64_t ya, int64_t yb) {
    size_t u;
    int64_t cca, ccb;
    uint32_t fa, fb;

    /*
     * These are actually four combined Montgomery multiplications.
     * fa/fb are chosen so that the low 31 bits of each combination
     * cancel, making the division by 2^31 exact.
     */
    cca = 0;
    ccb = 0;
    fa = ((a[0] * (uint32_t)xa + b[0] * (uint32_t)xb) * m0i) & 0x7FFFFFFF;
    fb = ((a[0] * (uint32_t)ya + b[0] * (uint32_t)yb) * m0i) & 0x7FFFFFFF;
    for (u = 0; u < len; u ++) {
        uint32_t wa, wb;
        uint64_t za, zb;

        wa = a[u];
        wb = b[u];
        za = wa * (uint64_t)xa + wb * (uint64_t)xb
            + m[u] * (uint64_t)fa + (uint64_t)cca;
        zb = wa * (uint64_t)ya + wb * (uint64_t)yb
            + m[u] * (uint64_t)fb + (uint64_t)ccb;
        if (u > 0) {
            a[u - 1] = (uint32_t)za & 0x7FFFFFFF;
            b[u - 1] = (uint32_t)zb & 0x7FFFFFFF;
        }
        /*
         * NOTE(review): same deliberate signed-aliasing/arithmetic-
         * shift idiom as in zint_co_reduce().
         */
        cca = *(int64_t *)&za >> 31;
        ccb = *(int64_t *)&zb >> 31;
    }
    a[len - 1] = (uint32_t)cca;
    b[len - 1] = (uint32_t)ccb;

    /*
     * At this point:
     *   -m <= a < 2*m
     *   -m <= b < 2*m
     * (this is a case of Montgomery reduction)
     * The top words of 'a' and 'b' may have a 32-th bit set.
     * We want to add or subtract the modulus, as required.
     */
    zint_finish_mod(a, len, m, (uint32_t)((uint64_t)cca >> 63));
    zint_finish_mod(b, len, m, (uint32_t)((uint64_t)ccb >> 63));
}

/*
 * Compute a GCD between two positive big integers x and y. The two
 * integers must be odd. Returned value is 1 if the GCD is 1, 0
 * otherwise. When 1 is returned, arrays u and v are filled with values
 * such that:
 *   0 <= u <= y
 *   0 <= v <= x
 *   x*u - y*v = 1
 * x[] and y[] are unmodified. Both input values must have the same
 * encoded length. Temporary array must be large enough to accommodate 4
 * extra values of that length. Arrays u, v and tmp may not overlap with
 * each other, or with either x or y.
 */
static int
zint_bezout(uint32_t *u, uint32_t *v,
    const uint32_t *x, const uint32_t *y,
    size_t len, uint32_t *tmp) {
    /*
     * Algorithm is an extended binary GCD. We maintain 6 values
     * a, b, u0, u1, v0 and v1 with the following invariants:
     *
     *  a = x*u0 - y*v0
     *  b = x*u1 - y*v1
     *  0 <= a <= x
     *  0 <= b <= y
     *  0 <= u0 < y
     *  0 <= v0 < x
     *  0 <= u1 <= y
     *  0 <= v1 < x
     *
     * Initial values are:
     *
     *  a = x   u0 = 1   v0 = 0
     *  b = y   u1 = y   v1 = x-1
     *
     * Each iteration reduces either a or b, and maintains the
     * invariants. Algorithm stops when a = b, at which point their
     * common value is GCD(a,b) and (u0,v0) (or (u1,v1)) contains
     * the values (u,v) we want to return.
     *
     * The formal definition of the algorithm is a sequence of steps:
     *
     *  - If a is even, then:
     *        a <- a/2
     *        u0 <- u0/2 mod y
     *        v0 <- v0/2 mod x
     *
     *  - Otherwise, if b is even, then:
     *        b <- b/2
     *        u1 <- u1/2 mod y
     *        v1 <- v1/2 mod x
     *
     *  - Otherwise, if a > b, then:
     *        a <- (a-b)/2
     *        u0 <- (u0-u1)/2 mod y
     *        v0 <- (v0-v1)/2 mod x
     *
     *  - Otherwise:
     *        b <- (b-a)/2
     *        u1 <- (u1-u0)/2 mod y
     *        v1 <- (v1-v0)/2 mod y
     *
     * We can show that the operations above preserve the invariants:
     *
     *  - If a is even, then u0 and v0 are either both even or both
     *    odd (since a = x*u0 - y*v0, and x and y are both odd).
     *    If u0 and v0 are both even, then (u0,v0) <- (u0/2,v0/2).
     *    Otherwise, (u0,v0) <- ((u0+y)/2,(v0+x)/2). Either way,
     *    the a = x*u0 - y*v0 invariant is preserved.
     *
     *  - The same holds for the case where b is even.
     *
     *  - If a and b are odd, and a > b, then:
     *
     *      a-b = x*(u0-u1) - y*(v0-v1)
     *
     *    In that situation, if u0 < u1, then x*(u0-u1) < 0, but
     *    a-b > 0; therefore, it must be that v0 < v1, and the
     *    first part of the update is: (u0,v0) <- (u0-u1+y,v0-v1+x),
     *    which preserves the invariants. Otherwise, if u0 > u1,
     *    then u0-u1 >= 1, thus x*(u0-u1) >= x. But a <= x and
     *    b >= 0, hence a-b <= x. It follows that, in that case,
     *    v0-v1 >= 0. The first part of the update is then:
     *    (u0,v0) <- (u0-u1,v0-v1), which again preserves the
     *    invariants.
     *
     *    Either way, once the subtraction is done, the new value of
     *    a, which is the difference of two odd values, is even,
     *    and the remaining of this step is a subcase of the
     *    first algorithm case (i.e. when a is even).
     *
     *  - If a and b are odd, and b > a, then the a similar
     *    argument holds.
     *
     * The values a and b start at x and y, respectively. Since x
     * and y are odd, their GCD is odd, and it is easily seen that
     * all steps conserve the GCD (GCD(a-b,b) = GCD(a, b);
     * GCD(a/2,b) = GCD(a,b) if GCD(a,b) is odd). Moreover, either a
     * or b is reduced by at least one bit at each iteration, so
     * the algorithm necessarily converges on the case a = b, at
     * which point the common value is the GCD.
     *
     * In the algorithm expressed above, when a = b, the fourth case
     * applies, and sets b = 0. Since a contains the GCD of x and y,
     * which are both odd, a must be odd, and subsequent iterations
     * (if any) will simply divide b by 2 repeatedly, which has no
     * consequence. Thus, the algorithm can run for more iterations
     * than necessary; the final GCD will be in a, and the (u,v)
     * coefficients will be (u0,v0).
     *
     *
     * The presentation above is bit-by-bit. It can be sped up by
     * noticing that all decisions are taken based on the low bits
     * and high bits of a and b. We can extract the two top words
     * and low word of each of a and b, and compute reduction
     * parameters pa, pb, qa and qb such that the new values for
     * a and b are:
     *    a' = (a*pa + b*pb) / (2^31)
     *    b' = (a*qa + b*qb) / (2^31)
     * the two divisions being exact. The coefficients are obtained
     * just from the extracted words, and may be slightly off, requiring
     * an optional correction: if a' < 0, then we replace pa with -pa
     * and pb with -pb. Each such step will reduce the total length
     * (sum of lengths of a and b) by at least 30 bits at each
     * iteration.
     */
    uint32_t *u0, *u1, *v0, *v1, *a, *b;
    uint32_t x0i, y0i;
    uint32_t num, rc;
    size_t j;

    if (len == 0) {
        return 0;
    }

    /*
     * u0 and v0 are the u and v result buffers; the four other
     * values (u1, v1, a and b) are taken from tmp[].
     */
    u0 = u;
    v0 = v;
    u1 = tmp;
    v1 = u1 + len;
    a = v1 + len;
    b = a + len;

    /*
     * We'll need the Montgomery reduction coefficients.
     */
    x0i = modp_ninv31(x[0]);
    y0i = modp_ninv31(y[0]);

    /*
     * Initialize a, b, u0, u1, v0 and v1.
     *  a = x   u0 = 1   v0 = 0
     *  b = y   u1 = y   v1 = x-1
     * Note that x is odd, so computing x-1 is easy.
     */
    memcpy(a, x, len * sizeof * x);
    memcpy(b, y, len * sizeof * y);
    u0[0] = 1;
    memset(u0 + 1, 0, (len - 1) * sizeof * u0);
    memset(v0, 0, len * sizeof * v0);
    memcpy(u1, y, len * sizeof * u1);
    memcpy(v1, x, len * sizeof * v1);
    v1[0] --;

    /*
     * Each input operand may be as large as 31*len bits, and we
     * reduce the total length by at least 30 bits at each iteration.
     */
    for (num = 62 * (uint32_t)len + 30; num >= 30; num -= 30) {
        uint32_t c0, c1;
        uint32_t a0, a1, b0, b1;
        uint64_t a_hi, b_hi;
        uint32_t a_lo, b_lo;
        int64_t pa, pb, qa, qb;
        int i;
        uint32_t r;

        /*
         * Extract the top words of a and b. If j is the highest
         * index >= 1 such that a[j] != 0 or b[j] != 0, then we
         * want (a[j] << 31) + a[j-1] and (b[j] << 31) + b[j-1].
         * If a and b are down to one word each, then we use
         * a[0] and b[0].
         * (Constant-time: the whole array is always scanned; c0/c1
         * are all-ones masks until a nonzero word has been seen.)
         */
        c0 = (uint32_t) -1;
        c1 = (uint32_t) -1;
        a0 = 0;
        a1 = 0;
        b0 = 0;
        b1 = 0;
        j = len;
        while (j -- > 0) {
            uint32_t aw, bw;

            aw = a[j];
            bw = b[j];
            a0 ^= (a0 ^ aw) & c0;
            a1 ^= (a1 ^ aw) & c1;
            b0 ^= (b0 ^ bw) & c0;
            b1 ^= (b1 ^ bw) & c1;
            c1 = c0;
            c0 &= (((aw | bw) + 0x7FFFFFFF) >> 31) - (uint32_t)1;
        }

        /*
         * If c1 = 0, then we grabbed two words for a and b.
         * If c1 != 0 but c0 = 0, then we grabbed one word. It
         * is not possible that c1 != 0 and c0 != 0, because that
         * would mean that both integers are zero.
         */
        a1 |= a0 & c1;
        a0 &= ~c1;
        b1 |= b0 & c1;
        b0 &= ~c1;
        a_hi = ((uint64_t)a0 << 31) + a1;
        b_hi = ((uint64_t)b0 << 31) + b1;
        a_lo = a[0];
        b_lo = b[0];

        /*
         * Compute reduction factors:
         *
         *   a' = a*pa + b*pb
         *   b' = a*qa + b*qb
         *
         * such that a' and b' are both multiple of 2^31, but are
         * only marginally larger than a and b.
         */
        pa = 1;
        pb = 0;
        qa = 0;
        qb = 1;
        for (i = 0; i < 31; i ++) {
            /*
             * At each iteration:
             *
             *   a <- (a-b)/2 if: a is odd, b is odd, a_hi > b_hi
             *   b <- (b-a)/2 if: a is odd, b is odd, a_hi <= b_hi
             *   a <- a/2 if: a is even
             *   b <- b/2 if: a is odd, b is even
             *
             * We multiply a_lo and b_lo by 2 at each
             * iteration, thus a division by 2 really is a
             * non-multiplication by 2.
             */
            uint32_t rt, oa, ob, cAB, cBA, cA;
            uint64_t rz;

            /*
             * rt = 1 if a_hi > b_hi, 0 otherwise.
             * (Branch-free unsigned comparison via the borrow
             * of b_hi - a_hi.)
             */
            rz = b_hi - a_hi;
            rt = (uint32_t)((rz ^ ((a_hi ^ b_hi)
                & (a_hi ^ rz))) >> 63);

            /*
             * cAB = 1 if b must be subtracted from a
             * cBA = 1 if a must be subtracted from b
             * cA = 1 if a must be divided by 2
             *
             * Rules:
             *
             *   cAB and cBA cannot both be 1.
             *   If a is not divided by 2, b is.
             */
            oa = (a_lo >> i) & 1;
            ob = (b_lo >> i) & 1;
            cAB = oa & ob & rt;
            cBA = oa & ob & ~rt;
            cA = cAB | (oa ^ 1);

            /*
             * Conditional subtractions.
             */
            a_lo -= b_lo & -cAB;
            a_hi -= b_hi & -(uint64_t)cAB;
            pa -= qa & -(int64_t)cAB;
            pb -= qb & -(int64_t)cAB;
            b_lo -= a_lo & -cBA;
            b_hi -= a_hi & -(uint64_t)cBA;
            qa -= pa & -(int64_t)cBA;
            qb -= pb & -(int64_t)cBA;

            /*
             * Shifting.
             */
            a_lo += a_lo & (cA - 1);
            pa += pa & ((int64_t)cA - 1);
            pb += pb & ((int64_t)cA - 1);
            a_hi ^= (a_hi ^ (a_hi >> 1)) & -(uint64_t)cA;
            b_lo += b_lo & -cA;
            qa += qa & -(int64_t)cA;
            qb += qb & -(int64_t)cA;
            b_hi ^= (b_hi ^ (b_hi >> 1)) & ((uint64_t)cA - 1);
        }

        /*
         * Apply the computed parameters to our values. We
         * may have to correct pa and pb depending on the
         * returned value of zint_co_reduce() (when a and/or b
         * had to be negated).
         */
        r = zint_co_reduce(a, b, len, pa, pb, qa, qb);
        pa -= (pa + pa) & -(int64_t)(r & 1);
        pb -= (pb + pb) & -(int64_t)(r & 1);
        qa -= (qa + qa) & -(int64_t)(r >> 1);
        qb -= (qb + qb) & -(int64_t)(r >> 1);
        zint_co_reduce_mod(u0, u1, y, len, y0i, pa, pb, qa, qb);
        zint_co_reduce_mod(v0, v1, x, len, x0i, pa, pb, qa, qb);
    }

    /*
     * At that point, array a[] should contain the GCD, and the
     * results (u,v) should already be set. We check that the GCD
     * is indeed 1. We also check that the two operands x and y
     * are odd.
     */
    rc = a[0] ^ 1;
    for (j = 1; j < len; j ++) {
        rc |= a[j];
    }
    return (int)((1 - ((rc | -rc) >> 31)) & x[0] & y[0]);
}

/*
 * Add k*y*2^sc to x. The result is assumed to fit in the array of
 * size xlen (truncation is applied if necessary).
 * Scale factor 'sc' is provided as sch and scl, such that:
 *   sch = sc / 31
 *   scl = sc % 31
 * xlen MUST NOT be lower than ylen.
 *
 * x[] and y[] are both signed integers, using two's complement for
 * negative values.
 */
static void
zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
    const uint32_t *y, size_t ylen, int32_t k,
    uint32_t sch, uint32_t scl) {
    size_t u;
    uint32_t ysign, tw;
    int32_t cc;

    if (ylen == 0) {
        return;
    }

    /* ysign = sign-extension word of y (all-ones if y < 0). */
    ysign = -(y[ylen - 1] >> 30) >> 1;
    tw = 0;
    cc = 0;
    for (u = sch; u < xlen; u ++) {
        size_t v;
        uint32_t wy, wys, ccu;
        uint64_t z;

        /*
         * Get the next word of y (scaled).
         */
        v = u - sch;
        if (v < ylen) {
            wy = y[v];
        } else {
            wy = ysign;
        }
        wys = ((wy << scl) & 0x7FFFFFFF) | tw;
        tw = wy >> (31 - scl);

        /*
         * The expression below does not overflow.
         */
        z = (uint64_t)((int64_t)wys * (int64_t)k + (int64_t)x[u] + cc);
        x[u] = (uint32_t)z & 0x7FFFFFFF;

        /*
         * Right-shifting the signed value z would yield
         * implementation-defined results (arithmetic shift is
         * not guaranteed). However, we can cast to unsigned,
         * and get the next carry as an unsigned word. We can
         * then convert it back to signed by using the guaranteed
         * fact that 'int32_t' uses two's complement with no
         * trap representation or padding bit, and with a layout
         * compatible with that of 'uint32_t'.
         */
        ccu = (uint32_t)(z >> 31);
        cc = *(int32_t *)&ccu;
    }
}

/*
 * Subtract y*2^sc from x. The result is assumed to fit in the array of
 * size xlen (truncation is applied if necessary).
 * Scale factor 'sc' is provided as sch and scl, such that:
 *   sch = sc / 31
 *   scl = sc % 31
 * xlen MUST NOT be lower than ylen.
 *
 * x[] and y[] are both signed integers, using two's complement for
 * negative values.
 */
static void
zint_sub_scaled(uint32_t *x, size_t xlen,
    const uint32_t *y, size_t ylen, uint32_t sch, uint32_t scl) {
    size_t u;
    uint32_t ysign, tw;
    uint32_t cc;

    if (ylen == 0) {
        return;
    }

    /* ysign = sign-extension word of y (all-ones if y < 0). */
    ysign = -(y[ylen - 1] >> 30) >> 1;
    tw = 0;
    cc = 0;
    for (u = sch; u < xlen; u ++) {
        size_t v;
        uint32_t w, wy, wys;

        /*
         * Get the next word of y (scaled); tw holds the bits
         * shifted out of the previous word.
         */
        v = u - sch;
        if (v < ylen) {
            wy = y[v];
        } else {
            wy = ysign;
        }
        wys = ((wy << scl) & 0x7FFFFFFF) | tw;
        tw = wy >> (31 - scl);

        w = x[u] - wys - cc;
        x[u] = w & 0x7FFFFFFF;
        cc = w >> 31;
    }
}

/*
 * Convert a one-word signed big integer into a signed value.
 */
static inline int32_t
zint_one_to_plain(const uint32_t *x) {
    uint32_t w;

    w = x[0];
    /* Sign-extend the 31-bit word into the full 32-bit value. */
    w |= (w & 0x40000000) << 1;
    return *(int32_t *)&w;
}

/* ==================================================================== */

/*
 * Convert a polynomial to floating-point values.
 *
 * Each coefficient has length flen words, and starts fstride words after
 * the previous.
 *
 * IEEE-754 binary64 values can represent values in a finite range,
 * roughly 2^(-1023) to 2^(+1023); thus, if coefficients are too large,
 * they should be "trimmed" by pointing not to the lowest word of each,
 * but upper.
 */
static void
poly_big_to_fp(fpr *d, const uint32_t *f, size_t flen, size_t fstride,
    unsigned logn) {
    size_t n, u;

    n = MKN(logn);
    if (flen == 0) {
        for (u = 0; u < n; u ++) {
            d[u] = fpr_zero;
        }
        return;
    }
    for (u = 0; u < n; u ++, f += fstride) {
        size_t v;
        uint32_t neg, cc, xm;
        fpr x, fsc;

        /*
         * Get sign of the integer; if it is negative, then we
         * will load its absolute value instead, and negate the
         * result.
         */
        neg = -(f[flen - 1] >> 30);
        xm = neg >> 1;
        cc = neg & 1;
        x = fpr_zero;
        fsc = fpr_one;
        /* fsc = 2^(31*v): per-word scale factor. */
        for (v = 0; v < flen; v ++, fsc = fpr_mul(fsc, fpr_ptwo31)) {
            uint32_t w;

            /* Two's-complement negation of the word stream if neg. */
            w = (f[v] ^ xm) + cc;
            cc = w >> 31;
            w &= 0x7FFFFFFF;
            /* Re-apply the sign to the (small) word value. */
            w -= (w << 1) & neg;
            x = fpr_add(x, fpr_mul(fpr_of(*(int32_t *)&w), fsc));
        }
        d[u] = x;
    }
}

/*
 * Convert a polynomial to small integers. Source values are supposed
 * to be one-word integers, signed over 31 bits. Returned value is 0
 * if any of the coefficients exceeds the provided limit (in absolute
 * value), or 1 on success.
 *
 * This is not constant-time; this is not a problem here, because on
 * any failure, the NTRU-solving process will be deemed to have failed
 * and the (f,g) polynomials will be discarded.
 */
static int
poly_big_to_small(int8_t *d, const uint32_t *s, int lim, unsigned logn) {
    size_t n, u;

    n = MKN(logn);
    for (u = 0; u < n; u ++) {
        int32_t z;

        z = zint_one_to_plain(s + u);
        if (z < -lim || z > lim) {
            return 0;
        }
        d[u] = (int8_t)z;
    }
    return 1;
}

/*
 * Subtract k*f from F, where F, f and k are polynomials modulo X^N+1.
 * Coefficients of polynomial k are small integers (signed values in the
 * -2^31..2^31 range) scaled by 2^sc. Value sc is provided as sch = sc / 31
 * and scl = sc % 31.
 *
 * This function implements the basic quadratic multiplication algorithm,
 * which is efficient in space (no extra buffer needed) but slow at
 * high degree.
 */
static void
poly_sub_scaled(uint32_t *F, size_t Flen, size_t Fstride,
    const uint32_t *f, size_t flen, size_t fstride,
    const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn) {
    size_t n, u;

    n = MKN(logn);
    for (u = 0; u < n; u ++) {
        int32_t kf;
        size_t v;
        uint32_t *x;
        const uint32_t *y;

        kf = -k[u];
        x = F + u * Fstride;
        y = f;
        for (v = 0; v < n; v ++) {
            zint_add_scaled_mul_small(
                x, Flen, y, flen, kf, sch, scl);
            /*
             * Reduction modulo X^N+1: when the product index
             * wraps past N-1, the coefficient sign flips.
             */
            if (u + v == n - 1) {
                x = F;
                kf = -kf;
            } else {
                x += Fstride;
            }
            y += fstride;
        }
    }
}

/*
 * Subtract k*f from F. Coefficients of polynomial k are small integers
 * (signed values in the -2^31..2^31 range) scaled by 2^sc. This function
 * assumes that the degree is large, and integers relatively small.
 * The value sc is provided as sch = sc / 31 and scl = sc % 31.
 */
static void
poly_sub_scaled_ntt(uint32_t *F, size_t Flen, size_t Fstride,
    const uint32_t *f, size_t flen, size_t fstride,
    const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn,
    uint32_t *tmp) {
    uint32_t *gm, *igm, *fk, *t1, *x;
    const uint32_t *y;
    size_t n, u, tlen;
    const small_prime *primes;

    n = MKN(logn);
    /* One extra word so that k*f (in RNS) cannot overflow. */
    tlen = flen + 1;
    /* Carve the temporary buffer: gm | igm | fk (n*tlen) | t1. */
    gm = tmp;
    igm = gm + MKN(logn);
    fk = igm + MKN(logn);
    t1 = fk + n * tlen;

    primes = PRIMES;

    /*
     * Compute k*f in fk[], in RNS notation: one NTT-domain
     * multiplication per small prime.
     */
    for (u = 0; u < tlen; u ++) {
        uint32_t p, p0i, R2, Rx;
        size_t v;

        p = primes[u].p;
        p0i = modp_ninv31(p);
        R2 = modp_R2(p, p0i);
        Rx = modp_Rx((unsigned)flen, p, p0i, R2);
        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);

        /* t1 = k mod p, then to NTT domain. */
        for (v = 0; v < n; v ++) {
            t1[v] = modp_set(k[v], p);
        }
        modp_NTT2(t1, gm, logn, p, p0i);
        /* Column u of fk = f mod p (signed reduction). */
        for (v = 0, y = f, x = fk + u;
            v < n; v ++, y += fstride, x += tlen) {
            *x = zint_mod_small_signed(y, flen, p, p0i, R2, Rx);
        }
        modp_NTT2_ext(fk + u, tlen, gm, logn, p, p0i);
        /* Pointwise product; extra R2 fixes Montgomery scaling. */
        for (v = 0, x = fk + u; v < n; v ++, x += tlen) {
            *x = modp_montymul(
                modp_montymul(t1[v], *x, p, p0i), R2, p, p0i);
        }
        modp_iNTT2_ext(fk + u, tlen, igm, logn, p, p0i);
    }

    /*
     * Rebuild k*f.
     */
    zint_rebuild_CRT(fk, tlen, tlen, n, primes, 1, t1);

    /*
     * Subtract k*f, scaled, from F.
     */
    for (u = 0, x = F, y = fk; u < n; u ++, x += Fstride, y += tlen) {
        zint_sub_scaled(x, Flen, y, tlen, sch, scl);
    }
}

/* ==================================================================== */


#define RNG_CONTEXT   inner_shake256_context

/*
 * Get a random 8-byte integer from a SHAKE-based RNG. This function
 * ensures consistent interpretation of the SHAKE output so that
 * the same values will be obtained over different platforms, in case
 * a known seed is used.
 */
static inline uint64_t
get_rng_u64(inner_shake256_context *rng) {
    /*
     * We enforce little-endian representation.
     */

    uint8_t tmp[8];

    inner_shake256_extract(rng, tmp, sizeof tmp);
    return (uint64_t)tmp[0]
        | ((uint64_t)tmp[1] << 8)
        | ((uint64_t)tmp[2] << 16)
        | ((uint64_t)tmp[3] << 24)
        | ((uint64_t)tmp[4] << 32)
        | ((uint64_t)tmp[5] << 40)
        | ((uint64_t)tmp[6] << 48)
        | ((uint64_t)tmp[7] << 56);
}

/*
 * Table below incarnates a discrete Gaussian distribution:
 *    D(x) = exp(-(x^2)/(2*sigma^2))
 * where sigma = 1.17*sqrt(q/(2*N)), q = 12289, and N = 1024.
 * Element 0 of the table is P(x = 0).
 * For k > 0, element k is P(x >= k+1 | x > 0).
 * Probabilities are scaled up by 2^63.
 */
static const uint64_t gauss_1024_12289[] = {
    1283868770400643928u,  6416574995475331444u,  4078260278032692663u,
    2353523259288686585u,  1227179971273316331u,   575931623374121527u,
     242543240509105209u,    91437049221049666u,    30799446349977173u,
       9255276791179340u,     2478152334826140u,      590642893610164u,
        125206034929641u,       23590435911403u,        3948334035941u,
           586753615614u,          77391054539u,           9056793210u,
              940121950u,             86539696u,              7062824u,
                 510971u,                32764u,                 1862u,
                     94u,                    4u,                    0u
};

/*
 * Generate a random value with a Gaussian distribution centered on 0.
 * The RNG must be ready for extraction (already flipped).
 *
 * Distribution has standard deviation 1.17*sqrt(q/(2*N)). The
 * precomputed table is for N = 1024. Since the sum of two independent
 * values of standard deviation sigma has standard deviation
 * sigma*sqrt(2), then we can just generate more values and add them
 * together for lower dimensions.
 */
static int
mkgauss(RNG_CONTEXT *rng, unsigned logn) {
    unsigned u, g;
    int val;

    /* g = number of samples to accumulate (1 for logn = 10). */
    g = 1U << (10 - logn);
    val = 0;
    for (u = 0; u < g; u ++) {
        /*
         * Each iteration generates one value with the
         * Gaussian distribution for N = 1024.
         *
         * We use two random 64-bit values. First value
         * decides on whether the generated value is 0, and,
         * if not, the sign of the value. Second random 64-bit
         * word is used to generate the non-zero value.
+         *
+         * For constant-time code we have to read the complete
+         * table. This has negligible cost, compared with the
+         * remainder of the keygen process (solving the NTRU
+         * equation).
+         */
+        uint64_t r;
+        uint32_t f, v, k, neg;
+
+        /*
+         * First value:
+         *  - flag 'neg' is randomly selected to be 0 or 1.
+         *  - flag 'f' is set to 1 if the generated value is zero,
+         *    or set to 0 otherwise.
+         */
+        r = get_rng_u64(rng);
+        neg = (uint32_t)(r >> 63);
+        r &= ~((uint64_t)1 << 63);
+        /* f = 1 iff r < gauss_1024_12289[0] (borrow goes to bit 63). */
+        f = (uint32_t)((r - gauss_1024_12289[0]) >> 63);
+
+        /*
+         * We produce a new random 63-bit integer r, and go over
+         * the array, starting at index 1. We store in v the
+         * index of the first array element which is not greater
+         * than r, unless the flag f was already 1.
+         */
+        v = 0;
+        r = get_rng_u64(rng);
+        r &= ~((uint64_t)1 << 63);
+        for (k = 1; k < (uint32_t)((sizeof gauss_1024_12289)
+                                   / (sizeof gauss_1024_12289[0])); k ++) {
+            uint32_t t;
+
+            t = (uint32_t)((r - gauss_1024_12289[k]) >> 63) ^ 1;
+            /* Record k only on the first transition (f still 0). */
+            v |= k & -(t & (f ^ 1));
+            f |= t;
+        }
+
+        /*
+         * We apply the sign ('neg' flag). If the value is zero,
+         * the sign has no effect.
+         * (Constant-time conditional negation: v = neg ? -v : v.)
+         */
+        v = (v ^ -neg) + neg;
+
+        /*
+         * Generated value is added to val.
+         * (The 32-bit pattern is reinterpreted as a signed value.)
+         */
+        val += *(int32_t *)&v;
+    }
+    return val;
+}
+
+/*
+ * The MAX_BL_SMALL[] and MAX_BL_LARGE[] contain the lengths, in 31-bit
+ * words, of intermediate values in the computation:
+ *
+ *   MAX_BL_SMALL[depth]: length for the input f and g at that depth
+ *   MAX_BL_LARGE[depth]: length for the unreduced F and G at that depth
+ *
+ * Rules:
+ *
+ *  - Within an array, values grow.
+ *
+ *  - The 'SMALL' array must have an entry for maximum depth, corresponding
+ *    to the size of values used in the binary GCD. There is no such value
+ *    for the 'LARGE' array (the binary GCD yields already reduced
+ *    coefficients).
+ *
+ *  - MAX_BL_LARGE[depth] >= MAX_BL_SMALL[depth + 1].
+ *
+ *  - Values must be large enough to handle the common cases, with some
+ *    margins.
+ * + * - Values must not be "too large" either because we will convert some + * integers into floating-point values by considering the top 10 words, + * i.e. 310 bits; hence, for values of length more than 10 words, we + * should take care to have the length centered on the expected size. + * + * The following average lengths, in bits, have been measured on thousands + * of random keys (fg = max length of the absolute value of coefficients + * of f and g at that depth; FG = idem for the unreduced F and G; for the + * maximum depth, F and G are the output of binary GCD, multiplied by q; + * for each value, the average and standard deviation are provided). + * + * Binary case: + * depth: 10 fg: 6307.52 (24.48) FG: 6319.66 (24.51) + * depth: 9 fg: 3138.35 (12.25) FG: 9403.29 (27.55) + * depth: 8 fg: 1576.87 ( 7.49) FG: 4703.30 (14.77) + * depth: 7 fg: 794.17 ( 4.98) FG: 2361.84 ( 9.31) + * depth: 6 fg: 400.67 ( 3.10) FG: 1188.68 ( 6.04) + * depth: 5 fg: 202.22 ( 1.87) FG: 599.81 ( 3.87) + * depth: 4 fg: 101.62 ( 1.02) FG: 303.49 ( 2.38) + * depth: 3 fg: 50.37 ( 0.53) FG: 153.65 ( 1.39) + * depth: 2 fg: 24.07 ( 0.25) FG: 78.20 ( 0.73) + * depth: 1 fg: 10.99 ( 0.08) FG: 39.82 ( 0.41) + * depth: 0 fg: 4.00 ( 0.00) FG: 19.61 ( 0.49) + * + * Integers are actually represented either in binary notation over + * 31-bit words (signed, using two's complement), or in RNS, modulo + * many small primes. These small primes are close to, but slightly + * lower than, 2^31. Use of RNS loses less than two bits, even for + * the largest values. + * + * IMPORTANT: if these values are modified, then the temporary buffer + * sizes (FALCON_KEYGEN_TEMP_*, in inner.h) must be recomputed + * accordingly. 
+ */
+
+static const size_t MAX_BL_SMALL[] = {
+    1, 1, 2, 2, 4, 7, 14, 27, 53, 106, 209
+};
+
+static const size_t MAX_BL_LARGE[] = {
+    2, 2, 5, 7, 12, 21, 40, 78, 157, 308
+};
+
+/*
+ * Average and standard deviation for the maximum size (in bits) of
+ * coefficients of (f,g), depending on depth. These values are used
+ * to compute bounds for Babai's reduction.
+ */
+static const struct {
+    int avg;
+    int std;
+} BITLENGTH[] = {
+    {    4,  0 },
+    {   11,  1 },
+    {   24,  1 },
+    {   50,  1 },
+    {  102,  1 },
+    {  202,  2 },
+    {  401,  4 },
+    {  794,  5 },
+    { 1577,  8 },
+    { 3138, 13 },
+    { 6308, 25 }
+};
+
+/*
+ * Minimal recursion depth at which we rebuild intermediate values
+ * when reconstructing f and g.
+ */
+#define DEPTH_INT_FG   4
+
+/*
+ * Compute squared norm of a short vector. Returned value is saturated to
+ * 2^32-1 if it is not lower than 2^31.
+ */
+static uint32_t
+poly_small_sqnorm(const int8_t *f, unsigned logn) {
+    size_t n, u;
+    uint32_t s, ng;
+
+    n = MKN(logn);
+    s = 0;
+    ng = 0;
+    for (u = 0; u < n; u ++) {
+        int32_t z;
+
+        z = f[u];
+        s += (uint32_t)(z * z);
+        /*
+         * ng accumulates the top bit of every partial sum; if any
+         * partial sum reached 2^31, the final OR below saturates
+         * the result to 2^32-1.
+         */
+        ng |= s;
+    }
+    return s | -(ng >> 31);
+}
+
+/*
+ * Align (upwards) the provided 'data' pointer with regards to 'base'
+ * so that the offset is a multiple of the size of 'fpr'.
+ */
+static fpr *
+align_fpr(void *base, void *data) {
+    uint8_t *cb, *cd;
+    size_t k, km;
+
+    cb = base;
+    cd = data;
+    k = (size_t)(cd - cb);
+    km = k % sizeof(fpr);
+    if (km) {
+        k += (sizeof(fpr)) - km;
+    }
+    return (fpr *)(cb + k);
+}
+
+/*
+ * Align (upwards) the provided 'data' pointer with regards to 'base'
+ * so that the offset is a multiple of the size of 'uint32_t'.
+ */
+static uint32_t *
+align_u32(void *base, void *data) {
+    uint8_t *cb, *cd;
+    size_t k, km;
+
+    cb = base;
+    cd = data;
+    k = (size_t)(cd - cb);
+    km = k % sizeof(uint32_t);
+    if (km) {
+        k += (sizeof(uint32_t)) - km;
+    }
+    return (uint32_t *)(cb + k);
+}
+
+/*
+ * Convert a small vector to floating point.
+ */
+static void
+poly_small_to_fp(fpr *x, const int8_t *f, unsigned logn) {
+    size_t n, u;
+
+    n = MKN(logn);
+    for (u = 0; u < n; u ++) {
+        x[u] = fpr_of(f[u]);
+    }
+}
+
+/*
+ * Input: f,g of degree N = 2^logn; 'depth' is used only to get their
+ * individual length.
+ *
+ * Output: f',g' of degree N/2, with the length for 'depth+1'.
+ *
+ * Values are in RNS; input and/or output may also be in NTT.
+ */
+static void
+make_fg_step(uint32_t *data, unsigned logn, unsigned depth,
+             int in_ntt, int out_ntt) {
+    size_t n, hn, u;
+    size_t slen, tlen;
+    uint32_t *fd, *gd, *fs, *gs, *gm, *igm, *t1;
+    const small_prime *primes;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    slen = MAX_BL_SMALL[depth];
+    tlen = MAX_BL_SMALL[depth + 1];
+    primes = PRIMES;
+
+    /*
+     * Prepare room for the result.
+     * Layout of data[]: fd (hn*tlen) | gd (hn*tlen) | fs (n*slen) |
+     * gs (n*slen) | gm (n) | igm (n) | t1.
+     * The input f,g (at the start of data[]) are moved to fs/gs first.
+     */
+    fd = data;
+    gd = fd + hn * tlen;
+    fs = gd + hn * tlen;
+    gs = fs + n * slen;
+    gm = gs + n * slen;
+    igm = gm + n;
+    t1 = igm + n;
+    memmove(fs, data, 2 * n * slen * sizeof * data);
+
+    /*
+     * First slen words: we use the input values directly, and apply
+     * inverse NTT as we go.
+     */
+    for (u = 0; u < slen; u ++) {
+        uint32_t p, p0i, R2;
+        size_t v;
+        uint32_t *x;
+
+        p = primes[u].p;
+        p0i = modp_ninv31(p);
+        R2 = modp_R2(p, p0i);
+        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+
+        /* Gather word u of each coefficient of f into t1[]. */
+        for (v = 0, x = fs + u; v < n; v ++, x += slen) {
+            t1[v] = *x;
+        }
+        if (!in_ntt) {
+            modp_NTT2(t1, gm, logn, p, p0i);
+        }
+        /*
+         * Fold pairs of NTT values: fd[v] = f(x)*f(-x) at the
+         * half-degree (the extra montymul by R2 fixes the
+         * Montgomery factor).
+         */
+        for (v = 0, x = fd + u; v < hn; v ++, x += tlen) {
+            uint32_t w0, w1;
+
+            w0 = t1[(v << 1) + 0];
+            w1 = t1[(v << 1) + 1];
+            *x = modp_montymul(
+                     modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+        }
+        if (in_ntt) {
+            modp_iNTT2_ext(fs + u, slen, igm, logn, p, p0i);
+        }
+
+        /* Same processing for g. */
+        for (v = 0, x = gs + u; v < n; v ++, x += slen) {
+            t1[v] = *x;
+        }
+        if (!in_ntt) {
+            modp_NTT2(t1, gm, logn, p, p0i);
+        }
+        for (v = 0, x = gd + u; v < hn; v ++, x += tlen) {
+            uint32_t w0, w1;
+
+            w0 = t1[(v << 1) + 0];
+            w1 = t1[(v << 1) + 1];
+            *x = modp_montymul(
+                     modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+        }
+        if (in_ntt) {
+            modp_iNTT2_ext(gs + u, slen, igm, logn, p, p0i);
+        }
+
+        if (!out_ntt) {
+            modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i);
+            modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i);
+        }
+    }
+
+    /*
+     * Since the fs and gs words have been de-NTTized, we can use the
+     * CRT to rebuild the values.
+     */
+    zint_rebuild_CRT(fs, slen, slen, n, primes, 1, gm);
+    zint_rebuild_CRT(gs, slen, slen, n, primes, 1, gm);
+
+    /*
+     * Remaining words: use modular reductions to extract the values.
+     */
+    for (u = slen; u < tlen; u ++) {
+        uint32_t p, p0i, R2, Rx;
+        size_t v;
+        uint32_t *x;
+
+        p = primes[u].p;
+        p0i = modp_ninv31(p);
+        R2 = modp_R2(p, p0i);
+        Rx = modp_Rx((unsigned)slen, p, p0i, R2);
+        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+        /* Words beyond slen: reduce the rebuilt integers mod p. */
+        for (v = 0, x = fs; v < n; v ++, x += slen) {
+            t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx);
+        }
+        modp_NTT2(t1, gm, logn, p, p0i);
+        for (v = 0, x = fd + u; v < hn; v ++, x += tlen) {
+            uint32_t w0, w1;
+
+            w0 = t1[(v << 1) + 0];
+            w1 = t1[(v << 1) + 1];
+            *x = modp_montymul(
+                     modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+        }
+        for (v = 0, x = gs; v < n; v ++, x += slen) {
+            t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx);
+        }
+        modp_NTT2(t1, gm, logn, p, p0i);
+        for (v = 0, x = gd + u; v < hn; v ++, x += tlen) {
+            uint32_t w0, w1;
+
+            w0 = t1[(v << 1) + 0];
+            w1 = t1[(v << 1) + 1];
+            *x = modp_montymul(
+                     modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+        }
+
+        if (!out_ntt) {
+            modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i);
+            modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i);
+        }
+    }
+}
+
+/*
+ * Compute f and g at a specific depth, in RNS notation.
+ *
+ * Returned values are stored in the data[] array, at slen words per integer.
+ *
+ * Conditions:
+ *   0 <= depth <= logn
+ *
+ * Space use in data[]: enough room for any two successive values (f', g',
+ * f and g).
+ */
+static void
+make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
+        unsigned logn, unsigned depth, int out_ntt) {
+    size_t n, u;
+    uint32_t *ft, *gt, p0;
+    unsigned d;
+    const small_prime *primes;
+
+    /* Load f and g modulo the first small prime (RNS, one word). */
+    n = MKN(logn);
+    ft = data;
+    gt = ft + n;
+    primes = PRIMES;
+    p0 = primes[0].p;
+    for (u = 0; u < n; u ++) {
+        ft[u] = modp_set(f[u], p0);
+        gt[u] = modp_set(g[u], p0);
+    }
+
+    /* Depth 0 with NTT output: just transform in place and return. */
+    if (depth == 0 && out_ntt) {
+        uint32_t *gm, *igm;
+        uint32_t p, p0i;
+
+        p = primes[0].p;
+        p0i = modp_ninv31(p);
+        gm = gt + n;
+        igm = gm + MKN(logn);
+        modp_mkgm2(gm, igm, logn, primes[0].g, p, p0i);
+        modp_NTT2(ft, gm, logn, p, p0i);
+        modp_NTT2(gt, gm, logn, p, p0i);
+        return;
+    }
+
+    if (depth == 0) {
+        return;
+    }
+    if (depth == 1) {
+        make_fg_step(data, logn, 0, 0, out_ntt);
+        return;
+    }
+    /*
+     * Descend one level at a time; intermediate steps keep values
+     * in NTT form so only the first and last steps convert.
+     */
+    make_fg_step(data, logn, 0, 0, 1);
+    for (d = 1; d + 1 < depth; d ++) {
+        make_fg_step(data, logn - d, d, 1, 1);
+    }
+    make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
+}
+
+/*
+ * Solving the NTRU equation, deepest level: compute the resultants of
+ * f and g with X^N+1, and use binary GCD. The F and G values are
+ * returned in tmp[].
+ *
+ * Returned value: 1 on success, 0 on error.
+ */
+static int
+solve_NTRU_deepest(unsigned logn_top,
+                   const int8_t *f, const int8_t *g, uint32_t *tmp) {
+    size_t len;
+    uint32_t *Fp, *Gp, *fp, *gp, *t1, q;
+    const small_prime *primes;
+
+    len = MAX_BL_SMALL[logn_top];
+    primes = PRIMES;
+
+    /* tmp[] layout: Fp | Gp | fp | gp | t1, each len words. */
+    Fp = tmp;
+    Gp = Fp + len;
+    fp = Gp + len;
+    gp = fp + len;
+    t1 = gp + len;
+
+    make_fg(fp, f, g, logn_top, logn_top, 0);
+
+    /*
+     * We use the CRT to rebuild the resultants as big integers.
+     * There are two such big integers. The resultants are always
+     * nonnegative.
+     */
+    zint_rebuild_CRT(fp, len, len, 2, primes, 0, t1);
+
+    /*
+     * Apply the binary GCD. The zint_bezout() function works only
+     * if both inputs are odd.
+     *
+     * We can test on the result and return 0 because that would
+     * imply failure of the NTRU solving equation, and the (f,g)
+     * values will be abandoned in that case.
+     */
+    if (!zint_bezout(Gp, Fp, fp, gp, len, t1)) {
+        return 0;
+    }
+
+    /*
+     * Multiply the two values by the target value q. Values must
+     * fit in the destination arrays.
+     * We can again test on the returned words: a non-zero output
+     * of zint_mul_small() means that we exceeded our array
+     * capacity, and that implies failure and rejection of (f,g).
+     */
+    q = 12289;
+    if (zint_mul_small(Fp, len, q) != 0
+            || zint_mul_small(Gp, len, q) != 0) {
+        return 0;
+    }
+
+    return 1;
+}
+
+/*
+ * Solving the NTRU equation, intermediate level. Upon entry, the F and G
+ * from the previous level should be in the tmp[] array.
+ * This function MAY be invoked for the top-level (in which case depth = 0).
+ *
+ * Returned value: 1 on success, 0 on error.
+ */
+static int
+solve_NTRU_intermediate(unsigned logn_top,
+                        const int8_t *f, const int8_t *g, unsigned depth, uint32_t *tmp) {
+    /*
+     * In this function, 'logn' is the log2 of the degree for
+     * this step. If N = 2^logn, then:
+     *  - the F and G values already in fk->tmp (from the deeper
+     *    levels) have degree N/2;
+     *  - this function should return F and G of degree N.
+     */
+    unsigned logn;
+    size_t n, hn, slen, dlen, llen, rlen, FGlen, u;
+    uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1;
+    fpr *rt1, *rt2, *rt3, *rt4, *rt5;
+    int scale_fg, minbl_fg, maxbl_fg, maxbl_FG, scale_k;
+    uint32_t *x, *y;
+    int32_t *k;
+    const small_prime *primes;
+
+    logn = logn_top - depth;
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+
+    /*
+     * slen = size for our input f and g; also size of the reduced
+     * F and G we return (degree N)
+     *
+     * dlen = size of the F and G obtained from the deeper level
+     * (degree N/2 or N/3)
+     *
+     * llen = size for intermediary F and G before reduction (degree N)
+     *
+     * We build our non-reduced F and G as two independent halves each,
+     * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1).
+     */
+    slen = MAX_BL_SMALL[depth];
+    dlen = MAX_BL_SMALL[depth + 1];
+    llen = MAX_BL_LARGE[depth];
+    primes = PRIMES;
+
+    /*
+     * Fd and Gd are the F and G from the deeper level.
+     */
+    Fd = tmp;
+    Gd = Fd + dlen * hn;
+
+    /*
+     * Compute the input f and g for this level. Note that we get f
+     * and g in RNS + NTT representation.
+     */
+    ft = Gd + dlen * hn;
+    make_fg(ft, f, g, logn_top, depth, 1);
+
+    /*
+     * Move the newly computed f and g to make room for our candidate
+     * F and G (unreduced).
+     */
+    Ft = tmp;
+    Gt = Ft + n * llen;
+    t1 = Gt + n * llen;
+    memmove(t1, ft, 2 * n * slen * sizeof * ft);
+    ft = t1;
+    gt = ft + slen * n;
+    t1 = gt + slen * n;
+
+    /*
+     * Move Fd and Gd _after_ f and g.
+     */
+    memmove(t1, Fd, 2 * hn * dlen * sizeof * Fd);
+    Fd = t1;
+    Gd = Fd + hn * dlen;
+
+    /*
+     * We reduce Fd and Gd modulo all the small primes we will need,
+     * and store the values in Ft and Gt (only n/2 values in each).
+     */
+    for (u = 0; u < llen; u ++) {
+        uint32_t p, p0i, R2, Rx;
+        size_t v;
+        uint32_t *xs, *ys, *xd, *yd;
+
+        p = primes[u].p;
+        p0i = modp_ninv31(p);
+        R2 = modp_R2(p, p0i);
+        Rx = modp_Rx((unsigned)dlen, p, p0i, R2);
+        for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u;
+                v < hn;
+                v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) {
+            *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx);
+            *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx);
+        }
+    }
+
+    /*
+     * We do not need Fd and Gd after that point.
+     */
+
+    /*
+     * Compute our F and G modulo sufficiently many small primes.
+     */
+    for (u = 0; u < llen; u ++) {
+        uint32_t p, p0i, R2;
+        uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp;
+        size_t v;
+
+        /*
+         * All computations are done modulo p.
+         */
+        p = primes[u].p;
+        p0i = modp_ninv31(p);
+        R2 = modp_R2(p, p0i);
+
+        /*
+         * If we processed slen words, then f and g have been
+         * de-NTTized, and are in RNS; we can rebuild them.
+         */
+        if (u == slen) {
+            zint_rebuild_CRT(ft, slen, slen, n, primes, 1, t1);
+            zint_rebuild_CRT(gt, slen, slen, n, primes, 1, t1);
+        }
+
+        gm = t1;
+        igm = gm + n;
+        fx = igm + n;
+        gx = fx + n;
+
+        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+
+        if (u < slen) {
+            /* f and g are still in NTT form for this prime. */
+            for (v = 0, x = ft + u, y = gt + u;
+                    v < n; v ++, x += slen, y += slen) {
+                fx[v] = *x;
+                gx[v] = *y;
+            }
+            modp_iNTT2_ext(ft + u, slen, igm, logn, p, p0i);
+            modp_iNTT2_ext(gt + u, slen, igm, logn, p, p0i);
+        } else {
+            uint32_t Rx;
+
+            /* Beyond slen: reduce the rebuilt integers mod p. */
+            Rx = modp_Rx((unsigned)slen, p, p0i, R2);
+            for (v = 0, x = ft, y = gt;
+                    v < n; v ++, x += slen, y += slen) {
+                fx[v] = zint_mod_small_signed(x, slen,
+                                              p, p0i, R2, Rx);
+                gx[v] = zint_mod_small_signed(y, slen,
+                                              p, p0i, R2, Rx);
+            }
+            modp_NTT2(fx, gm, logn, p, p0i);
+            modp_NTT2(gx, gm, logn, p, p0i);
+        }
+
+        /*
+         * Get F' and G' modulo p and in NTT representation
+         * (they have degree n/2). These values were computed in
+         * a previous step, and stored in Ft and Gt.
+         */
+        Fp = gx + n;
+        Gp = Fp + hn;
+        for (v = 0, x = Ft + u, y = Gt + u;
+                v < hn; v ++, x += llen, y += llen) {
+            Fp[v] = *x;
+            Gp[v] = *y;
+        }
+        modp_NTT2(Fp, gm, logn - 1, p, p0i);
+        modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+        /*
+         * Compute our F and G modulo p.
+         *
+         * General case:
+         *
+         *   we divide degree by d = 2 or 3
+         *   f'(x^d) = N(f)(x^d) = f * adj(f)
+         *   g'(x^d) = N(g)(x^d) = g * adj(g)
+         *   f'*G' - g'*F' = q
+         *   F = F'(x^d) * adj(g)
+         *   G = G'(x^d) * adj(f)
+         *
+         * We compute things in the NTT. We group roots of phi
+         * such that all roots x in a group share the same x^d.
+         * If the roots in a group are x_1, x_2... x_d, then:
+         *
+         *   N(f)(x_1^d) = f(x_1)*f(x_2)*...*f(x_d)
+         *
+         * Thus, we have:
+         *
+         *   G(x_1) = f(x_2)*f(x_3)*...*f(x_d)*G'(x_1^d)
+         *   G(x_2) = f(x_1)*f(x_3)*...*f(x_d)*G'(x_1^d)
+         *   ...
+         *   G(x_d) = f(x_1)*f(x_2)*...*f(x_{d-1})*G'(x_1^d)
+         *
+         * In all cases, we can thus compute F and G in NTT
+         * representation by a few simple multiplications.
+         * Moreover, in our chosen NTT representation, roots
+         * from the same group are consecutive in RAM.
+         */
+        for (v = 0, x = Ft + u, y = Gt + u; v < hn;
+                v ++, x += (llen << 1), y += (llen << 1)) {
+            uint32_t ftA, ftB, gtA, gtB;
+            uint32_t mFp, mGp;
+
+            ftA = fx[(v << 1) + 0];
+            ftB = fx[(v << 1) + 1];
+            gtA = gx[(v << 1) + 0];
+            gtB = gx[(v << 1) + 1];
+            mFp = modp_montymul(Fp[v], R2, p, p0i);
+            mGp = modp_montymul(Gp[v], R2, p, p0i);
+            x[0] = modp_montymul(gtB, mFp, p, p0i);
+            x[llen] = modp_montymul(gtA, mFp, p, p0i);
+            y[0] = modp_montymul(ftB, mGp, p, p0i);
+            y[llen] = modp_montymul(ftA, mGp, p, p0i);
+        }
+        modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i);
+        modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i);
+    }
+
+    /*
+     * Rebuild F and G with the CRT.
+     */
+    zint_rebuild_CRT(Ft, llen, llen, n, primes, 1, t1);
+    zint_rebuild_CRT(Gt, llen, llen, n, primes, 1, t1);
+
+    /*
+     * At that point, Ft, Gt, ft and gt are consecutive in RAM (in that
+     * order).
+     */
+
+    /*
+     * Apply Babai reduction to bring back F and G to size slen.
+     *
+     * We use the FFT to compute successive approximations of the
+     * reduction coefficient. We first isolate the top bits of
+     * the coefficients of f and g, and convert them to floating
+     * point; with the FFT, we compute adj(f), adj(g), and
+     * 1/(f*adj(f)+g*adj(g)).
+     *
+     * Then, we repeatedly apply the following:
+     *
+     *  - Get the top bits of the coefficients of F and G into
+     *    floating point, and use the FFT to compute:
+     *       (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g))
+     *
+     *  - Convert back that value into normal representation, and
+     *    round it to the nearest integers, yielding a polynomial k.
+     *    Proper scaling is applied to f, g, F and G so that the
+     *    coefficients fit on 32 bits (signed).
+     *
+     *  - Subtract k*f from F and k*g from G.
+     *
+     * Under normal conditions, this process reduces the size of F
+     * and G by some bits at each iteration. For constant-time
+     * operation, we do not want to measure the actual length of
+     * F and G; instead, we do the following:
+     *
+     *  - f and g are converted to floating-point, with some scaling
+     *    if necessary to keep values in the representable range.
+     *
+     *  - For each iteration, we _assume_ a maximum size for F and G,
+     *    and use the values at that size. If we overreach, then
+     *    we get zeros, which is harmless: the resulting coefficients
+     *    of k will be 0 and the value won't be reduced.
+     *
+     *  - We conservatively assume that F and G will be reduced by
+     *    at least 25 bits at each iteration.
+     *
+     * Even when reaching the bottom of the reduction, reduction
+     * coefficient will remain low. If it goes out-of-range, then
+     * something wrong occurred and the whole NTRU solving fails.
+     */
+
+    /*
+     * Memory layout:
+     *  - We need to compute and keep adj(f), adj(g), and
+     *    1/(f*adj(f)+g*adj(g)) (sizes N, N and N/2 fp numbers,
+     *    respectively).
+     *  - At each iteration we need two extra fp buffers (N fp values),
+     *    and produce a k (N 32-bit words). k will be shared with one
+     *    of the fp buffers.
+     *  - To compute k*f and k*g efficiently (with the NTT), we need
+     *    some extra room; we reuse the space of the temporary buffers.
+     *
+     * Arrays of 'fpr' are obtained from the temporary array itself.
+     * We ensure that the base is at a properly aligned offset (the
+     * source array tmp[] is supposed to be already aligned).
+     */
+
+    rt3 = align_fpr(tmp, t1);
+    rt4 = rt3 + n;
+    rt5 = rt4 + n;
+    rt1 = rt5 + (n >> 1);
+    k = (int32_t *)align_u32(tmp, rt1);
+    rt2 = align_fpr(tmp, k + n);
+    if (rt2 < (rt1 + n)) {
+        rt2 = rt1 + n;
+    }
+    /* t1: scratch area handed to poly_sub_scaled_ntt(), after k. */
+    t1 = (uint32_t *)k + n;
+
+    /*
+     * Get f and g into rt3 and rt4 as floating-point approximations.
+     *
+     * We need to "scale down" the floating-point representation of
+     * coefficients when they are too big. We want to keep the value
+     * below 2^310 or so. Thus, when values are larger than 10 words,
+     * we consider only the top 10 words. Array lengths have been
+     * computed so that average maximum length will fall in the
+     * middle or the upper half of these top 10 words.
+     */
+    rlen = slen;
+    if (rlen > 10) {
+        rlen = 10;
+    }
+    poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
+    poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
+
+    /*
+     * Values in rt3 and rt4 are downscaled by 2^(scale_fg).
+     */
+    scale_fg = 31 * (int)(slen - rlen);
+
+    /*
+     * Estimated boundaries for the maximum size (in bits) of the
+     * coefficients of (f,g). We use the measured average, and
+     * allow for a deviation of at most six times the standard
+     * deviation.
+     */
+    minbl_fg = BITLENGTH[depth].avg - 6 * BITLENGTH[depth].std;
+    maxbl_fg = BITLENGTH[depth].avg + 6 * BITLENGTH[depth].std;
+
+    /*
+     * Compute 1/(f*adj(f)+g*adj(g)) in rt5. We also keep adj(f)
+     * and adj(g) in rt3 and rt4, respectively.
+     */
+    PQCLEAN_FALCON512_AVX2_FFT(rt3, logn);
+    PQCLEAN_FALCON512_AVX2_FFT(rt4, logn);
+    PQCLEAN_FALCON512_AVX2_poly_invnorm2_fft(rt5, rt3, rt4, logn);
+    PQCLEAN_FALCON512_AVX2_poly_adj_fft(rt3, logn);
+    PQCLEAN_FALCON512_AVX2_poly_adj_fft(rt4, logn);
+
+    /*
+     * Reduce F and G repeatedly.
+     *
+     * The expected maximum bit length of coefficients of F and G
+     * is kept in maxbl_FG, with the corresponding word length in
+     * FGlen.
+     */
+    FGlen = llen;
+    maxbl_FG = 31 * (int)llen;
+
+    /*
+     * Each reduction operation computes the reduction polynomial
+     * "k". We need that polynomial to have coefficients that fit
+     * on 32-bit signed integers, with some scaling; thus, we use
+     * a descending sequence of scaling values, down to zero.
+     *
+     * The size of the coefficients of k is (roughly) the difference
+     * between the size of the coefficients of (F,G) and the size
+     * of the coefficients of (f,g). Thus, the maximum size of the
+     * coefficients of k is, at the start, maxbl_FG - minbl_fg;
+     * this is our starting scale value for k.
+     *
+     * We need to estimate the size of (F,G) during the execution of
+     * the algorithm; we are allowed some overestimation but not too
+     * much (poly_big_to_fp() uses a 310-bit window). Generally
+     * speaking, after applying a reduction with k scaled to
+     * scale_k, the size of (F,G) will be size(f,g) + scale_k + dd,
+     * where 'dd' is a few bits to account for the fact that the
+     * reduction is never perfect (intuitively, dd is on the order
+     * of sqrt(N), so at most 5 bits; we here allow for 10 extra
+     * bits).
+     *
+     * The size of (f,g) is not known exactly, but maxbl_fg is an
+     * upper bound.
+     */
+    scale_k = maxbl_FG - minbl_fg;
+
+    for (;;) {
+        int scale_FG, dc, new_maxbl_FG;
+        uint32_t scl, sch;
+        fpr pdc, pt;
+
+        /*
+         * Convert current F and G into floating-point. We apply
+         * scaling if the current length is more than 10 words.
+         */
+        rlen = FGlen;
+        if (rlen > 10) {
+            rlen = 10;
+        }
+        scale_FG = 31 * (int)(FGlen - rlen);
+        poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
+        poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
+
+        /*
+         * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) in rt2.
+         */
+        PQCLEAN_FALCON512_AVX2_FFT(rt1, logn);
+        PQCLEAN_FALCON512_AVX2_FFT(rt2, logn);
+        PQCLEAN_FALCON512_AVX2_poly_mul_fft(rt1, rt3, logn);
+        PQCLEAN_FALCON512_AVX2_poly_mul_fft(rt2, rt4, logn);
+        PQCLEAN_FALCON512_AVX2_poly_add(rt2, rt1, logn);
+        PQCLEAN_FALCON512_AVX2_poly_mul_autoadj_fft(rt2, rt5, logn);
+        PQCLEAN_FALCON512_AVX2_iFFT(rt2, logn);
+
+        /*
+         * (f,g) are scaled by 'scale_fg', meaning that the
+         * numbers in rt3/rt4 should be multiplied by 2^(scale_fg)
+         * to have their true mathematical value.
+         *
+         * (F,G) are similarly scaled by 'scale_FG'. Therefore,
+         * the value we computed in rt2 is scaled by
+         * 'scale_FG-scale_fg'.
+         *
+         * We want that value to be scaled by 'scale_k', hence we
+         * apply a corrective scaling. After scaling, the values
+         * should fit in -2^31-1..+2^31-1.
+         */
+        dc = scale_k - scale_FG + scale_fg;
+
+        /*
+         * We will need to multiply values by 2^(-dc). The value
+         * 'dc' is not secret, so we can compute 2^(-dc) with a
+         * non-constant-time process.
+         * (We could use ldexp(), but we prefer to avoid any
+         * dependency on libm. When using FP emulation, we could
+         * use our fpr_ldexp(), which is constant-time.)
+         */
+        if (dc < 0) {
+            dc = -dc;
+            pt = fpr_two;
+        } else {
+            pt = fpr_onehalf;
+        }
+        /* Square-and-multiply computation of pt^dc. */
+        pdc = fpr_one;
+        while (dc != 0) {
+            if ((dc & 1) != 0) {
+                pdc = fpr_mul(pdc, pt);
+            }
+            dc >>= 1;
+            pt = fpr_sqr(pt);
+        }
+
+        for (u = 0; u < n; u ++) {
+            fpr xv;
+
+            xv = fpr_mul(rt2[u], pdc);
+
+            /*
+             * Sometimes the values can be out-of-bounds if
+             * the algorithm fails; we must not call
+             * fpr_rint() (and cast to int32_t) if the value
+             * is not in-bounds. Note that the test does not
+             * break constant-time discipline, since any
+             * failure here implies that we discard the current
+             * secret key (f,g).
+             */
+            if (!fpr_lt(fpr_mtwo31m1, xv)
+                    || !fpr_lt(xv, fpr_ptwo31m1)) {
+                return 0;
+            }
+            k[u] = (int32_t)fpr_rint(xv);
+        }
+
+        /*
+         * Values in k[] are integers. They really are scaled
+         * down by maxbl_FG - minbl_fg bits.
+         *
+         * If we are at low depth, then we use the NTT to
+         * compute k*f and k*g.
+         */
+        sch = (uint32_t)(scale_k / 31);
+        scl = (uint32_t)(scale_k % 31);
+        if (depth <= DEPTH_INT_FG) {
+            poly_sub_scaled_ntt(Ft, FGlen, llen, ft, slen, slen,
+                                k, sch, scl, logn, t1);
+            poly_sub_scaled_ntt(Gt, FGlen, llen, gt, slen, slen,
+                                k, sch, scl, logn, t1);
+        } else {
+            poly_sub_scaled(Ft, FGlen, llen, ft, slen, slen,
+                            k, sch, scl, logn);
+            poly_sub_scaled(Gt, FGlen, llen, gt, slen, slen,
+                            k, sch, scl, logn);
+        }
+
+        /*
+         * We compute the new maximum size of (F,G), assuming that
+         * (f,g) has _maximal_ length (i.e. that reduction is
+         * "late" instead of "early". We also adjust FGlen
+         * accordingly.
+         */
+        new_maxbl_FG = scale_k + maxbl_fg + 10;
+        if (new_maxbl_FG < maxbl_FG) {
+            maxbl_FG = new_maxbl_FG;
+            if ((int)FGlen * 31 >= maxbl_FG + 31) {
+                FGlen --;
+            }
+        }
+
+        /*
+         * We suppose that scaling down achieves a reduction by
+         * at least 25 bits per iteration. We stop when we have
+         * done the loop with an unscaled k.
+         */
+        if (scale_k <= 0) {
+            break;
+        }
+        scale_k -= 25;
+        if (scale_k < 0) {
+            scale_k = 0;
+        }
+    }
+
+    /*
+     * If (F,G) length was lowered below 'slen', then we must take
+     * care to re-extend the sign.
+     */
+    if (FGlen < slen) {
+        for (u = 0; u < n; u ++, Ft += llen, Gt += llen) {
+            size_t v;
+            uint32_t sw;
+
+            /* Sign word: all-ones iff the top (31st) bit was set. */
+            sw = -(Ft[FGlen - 1] >> 30) >> 1;
+            for (v = FGlen; v < slen; v ++) {
+                Ft[v] = sw;
+            }
+            sw = -(Gt[FGlen - 1] >> 30) >> 1;
+            for (v = FGlen; v < slen; v ++) {
+                Gt[v] = sw;
+            }
+        }
+    }
+
+    /*
+     * Compress encoding of all values to 'slen' words (this is the
+     * expected output format).
+ */ + for (u = 0, x = tmp, y = tmp; + u < (n << 1); u ++, x += slen, y += llen) { + memmove(x, y, slen * sizeof * y); + } + return 1; +} + +/* + * Solving the NTRU equation, binary case, depth = 1. Upon entry, the + * F and G from the previous level should be in the tmp[] array. + * + * Returned value: 1 on success, 0 on error. + */ +static int +solve_NTRU_binary_depth1(unsigned logn_top, + const int8_t *f, const int8_t *g, uint32_t *tmp) { + /* + * The first half of this function is a copy of the corresponding + * part in solve_NTRU_intermediate(), for the reconstruction of + * the unreduced F and G. The second half (Babai reduction) is + * done differently, because the unreduced F and G fit in 53 bits + * of precision, allowing a much simpler process with lower RAM + * usage. + */ + unsigned depth, logn; + size_t n_top, n, hn, slen, dlen, llen, u; + uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1; + fpr *rt1, *rt2, *rt3, *rt4, *rt5, *rt6; + uint32_t *x, *y; + + depth = 1; + n_top = (size_t)1 << logn_top; + logn = logn_top - depth; + n = (size_t)1 << logn; + hn = n >> 1; + + /* + * Equations are: + * + * f' = f0^2 - X^2*f1^2 + * g' = g0^2 - X^2*g1^2 + * F' and G' are a solution to f'G' - g'F' = q (from deeper levels) + * F = F'*(g0 - X*g1) + * G = G'*(f0 - X*f1) + * + * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to + * degree N/2 (their odd-indexed coefficients are all zero). + */ + + /* + * slen = size for our input f and g; also size of the reduced + * F and G we return (degree N) + * + * dlen = size of the F and G obtained from the deeper level + * (degree N/2) + * + * llen = size for intermediary F and G before reduction (degree N) + * + * We build our non-reduced F and G as two independent halves each, + * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1). + */ + slen = MAX_BL_SMALL[depth]; + dlen = MAX_BL_SMALL[depth + 1]; + llen = MAX_BL_LARGE[depth]; + + /* + * Fd and Gd are the F and G from the deeper level. 
Ft and Gt + * are the destination arrays for the unreduced F and G. + */ + Fd = tmp; + Gd = Fd + dlen * hn; + Ft = Gd + dlen * hn; + Gt = Ft + llen * n; + + /* + * We reduce Fd and Gd modulo all the small primes we will need, + * and store the values in Ft and Gt. + */ + for (u = 0; u < llen; u ++) { + uint32_t p, p0i, R2, Rx; + size_t v; + uint32_t *xs, *ys, *xd, *yd; + + p = PRIMES[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + Rx = modp_Rx((unsigned)dlen, p, p0i, R2); + for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u; + v < hn; + v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) { + *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx); + *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx); + } + } + + /* + * Now Fd and Gd are not needed anymore; we can squeeze them out. + */ + memmove(tmp, Ft, llen * n * sizeof(uint32_t)); + Ft = tmp; + memmove(Ft + llen * n, Gt, llen * n * sizeof(uint32_t)); + Gt = Ft + llen * n; + ft = Gt + llen * n; + gt = ft + slen * n; + + t1 = gt + slen * n; + + /* + * Compute our F and G modulo sufficiently many small primes. + */ + for (u = 0; u < llen; u ++) { + uint32_t p, p0i, R2; + uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp; + unsigned e; + size_t v; + + /* + * All computations are done modulo p. + */ + p = PRIMES[u].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + + /* + * We recompute things from the source f and g, of full + * degree. However, we will need only the n first elements + * of the inverse NTT table (igm); the call to modp_mkgm() + * below will fill n_top elements in igm[] (thus overflowing + * into fx[]) but later code will overwrite these extra + * elements. + */ + gm = t1; + igm = gm + n_top; + fx = igm + n; + gx = fx + n_top; + modp_mkgm2(gm, igm, logn_top, PRIMES[u].g, p, p0i); + + /* + * Set ft and gt to f and g modulo p, respectively. + */ + for (v = 0; v < n_top; v ++) { + fx[v] = modp_set(f[v], p); + gx[v] = modp_set(g[v], p); + } + + /* + * Convert to NTT and compute our f and g. 
+ */ + modp_NTT2(fx, gm, logn_top, p, p0i); + modp_NTT2(gx, gm, logn_top, p, p0i); + for (e = logn_top; e > logn; e --) { + modp_poly_rec_res(fx, e, p, p0i, R2); + modp_poly_rec_res(gx, e, p, p0i, R2); + } + + /* + * From that point onward, we only need tables for + * degree n, so we can save some space. + */ + if (depth > 0) { /* always true */ + memmove(gm + n, igm, n * sizeof * igm); + igm = gm + n; + memmove(igm + n, fx, n * sizeof * ft); + fx = igm + n; + memmove(fx + n, gx, n * sizeof * gt); + gx = fx + n; + } + + /* + * Get F' and G' modulo p and in NTT representation + * (they have degree n/2). These values were computed + * in a previous step, and stored in Ft and Gt. + */ + Fp = gx + n; + Gp = Fp + hn; + for (v = 0, x = Ft + u, y = Gt + u; + v < hn; v ++, x += llen, y += llen) { + Fp[v] = *x; + Gp[v] = *y; + } + modp_NTT2(Fp, gm, logn - 1, p, p0i); + modp_NTT2(Gp, gm, logn - 1, p, p0i); + + /* + * Compute our F and G modulo p. + * + * Equations are: + * + * f'(x^2) = N(f)(x^2) = f * adj(f) + * g'(x^2) = N(g)(x^2) = g * adj(g) + * + * f'*G' - g'*F' = q + * + * F = F'(x^2) * adj(g) + * G = G'(x^2) * adj(f) + * + * The NTT representation of f is f(w) for all w which + * are roots of phi. In the binary case, as well as in + * the ternary case for all depth except the deepest, + * these roots can be grouped in pairs (w,-w), and we + * then have: + * + * f(w) = adj(f)(-w) + * f(-w) = adj(f)(w) + * + * and w^2 is then a root for phi at the half-degree. + * + * At the deepest level in the ternary case, this still + * holds, in the following sense: the roots of x^2-x+1 + * are (w,-w^2) (for w^3 = -1, and w != -1), and we + * have: + * + * f(w) = adj(f)(-w^2) + * f(-w^2) = adj(f)(w) + * + * In all case, we can thus compute F and G in NTT + * representation by a few simple multiplications. + * Moreover, the two roots for each pair are consecutive + * in our bit-reversal encoding. 
+ */ + for (v = 0, x = Ft + u, y = Gt + u; + v < hn; v ++, x += (llen << 1), y += (llen << 1)) { + uint32_t ftA, ftB, gtA, gtB; + uint32_t mFp, mGp; + + ftA = fx[(v << 1) + 0]; + ftB = fx[(v << 1) + 1]; + gtA = gx[(v << 1) + 0]; + gtB = gx[(v << 1) + 1]; + mFp = modp_montymul(Fp[v], R2, p, p0i); + mGp = modp_montymul(Gp[v], R2, p, p0i); + x[0] = modp_montymul(gtB, mFp, p, p0i); + x[llen] = modp_montymul(gtA, mFp, p, p0i); + y[0] = modp_montymul(ftB, mGp, p, p0i); + y[llen] = modp_montymul(ftA, mGp, p, p0i); + } + modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i); + modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i); + + /* + * Also save ft and gt (only up to size slen). + */ + if (u < slen) { + modp_iNTT2(fx, igm, logn, p, p0i); + modp_iNTT2(gx, igm, logn, p, p0i); + for (v = 0, x = ft + u, y = gt + u; + v < n; v ++, x += slen, y += slen) { + *x = fx[v]; + *y = gx[v]; + } + } + } + + /* + * Rebuild f, g, F and G with the CRT. Note that the elements of F + * and G are consecutive, and thus can be rebuilt in a single + * loop; similarly, the elements of f and g are consecutive. + */ + zint_rebuild_CRT(Ft, llen, llen, n << 1, PRIMES, 1, t1); + zint_rebuild_CRT(ft, slen, slen, n << 1, PRIMES, 1, t1); + + /* + * Here starts the Babai reduction, specialized for depth = 1. + * + * Candidates F and G (from Ft and Gt), and base f and g (ft and gt), + * are converted to floating point. There is no scaling, and a + * single pass is sufficient. + */ + + /* + * Convert F and G into floating point (rt1 and rt2). + */ + rt1 = align_fpr(tmp, gt + slen * n); + rt2 = rt1 + n; + poly_big_to_fp(rt1, Ft, llen, llen, logn); + poly_big_to_fp(rt2, Gt, llen, llen, logn); + + /* + * Integer representation of F and G is no longer needed, we + * can remove it. 
+ */ + memmove(tmp, ft, 2 * slen * n * sizeof * ft); + ft = tmp; + gt = ft + slen * n; + rt3 = align_fpr(tmp, gt + slen * n); + memmove(rt3, rt1, 2 * n * sizeof * rt1); + rt1 = rt3; + rt2 = rt1 + n; + rt3 = rt2 + n; + rt4 = rt3 + n; + + /* + * Convert f and g into floating point (rt3 and rt4). + */ + poly_big_to_fp(rt3, ft, slen, slen, logn); + poly_big_to_fp(rt4, gt, slen, slen, logn); + + /* + * Remove unneeded ft and gt. + */ + memmove(tmp, rt1, 4 * n * sizeof * rt1); + rt1 = (fpr *)tmp; + rt2 = rt1 + n; + rt3 = rt2 + n; + rt4 = rt3 + n; + + /* + * We now have: + * rt1 = F + * rt2 = G + * rt3 = f + * rt4 = g + * in that order in RAM. We convert all of them to FFT. + */ + PQCLEAN_FALCON512_AVX2_FFT(rt1, logn); + PQCLEAN_FALCON512_AVX2_FFT(rt2, logn); + PQCLEAN_FALCON512_AVX2_FFT(rt3, logn); + PQCLEAN_FALCON512_AVX2_FFT(rt4, logn); + + /* + * Compute: + * rt5 = F*adj(f) + G*adj(g) + * rt6 = 1 / (f*adj(f) + g*adj(g)) + * (Note that rt6 is half-length.) + */ + rt5 = rt4 + n; + rt6 = rt5 + n; + PQCLEAN_FALCON512_AVX2_poly_add_muladj_fft(rt5, rt1, rt2, rt3, rt4, logn); + PQCLEAN_FALCON512_AVX2_poly_invnorm2_fft(rt6, rt3, rt4, logn); + + /* + * Compute: + * rt5 = (F*adj(f)+G*adj(g)) / (f*adj(f)+g*adj(g)) + */ + PQCLEAN_FALCON512_AVX2_poly_mul_autoadj_fft(rt5, rt6, logn); + + /* + * Compute k as the rounded version of rt5. Check that none of + * the values is larger than 2^63-1 (in absolute value) + * because that would make the fpr_rint() do something undefined; + * note that any out-of-bounds value here implies a failure and + * (f,g) will be discarded, so we can make a simple test. + */ + PQCLEAN_FALCON512_AVX2_iFFT(rt5, logn); + for (u = 0; u < n; u ++) { + fpr z; + + z = rt5[u]; + if (!fpr_lt(z, fpr_ptwo63m1) || !fpr_lt(fpr_mtwo63m1, z)) { + return 0; + } + rt5[u] = fpr_of(fpr_rint(z)); + } + PQCLEAN_FALCON512_AVX2_FFT(rt5, logn); + + /* + * Subtract k*f from F, and k*g from G. 
+ */ + PQCLEAN_FALCON512_AVX2_poly_mul_fft(rt3, rt5, logn); + PQCLEAN_FALCON512_AVX2_poly_mul_fft(rt4, rt5, logn); + PQCLEAN_FALCON512_AVX2_poly_sub(rt1, rt3, logn); + PQCLEAN_FALCON512_AVX2_poly_sub(rt2, rt4, logn); + PQCLEAN_FALCON512_AVX2_iFFT(rt1, logn); + PQCLEAN_FALCON512_AVX2_iFFT(rt2, logn); + + /* + * Convert back F and G to integers, and return. + */ + Ft = tmp; + Gt = Ft + n; + rt3 = align_fpr(tmp, Gt + n); + memmove(rt3, rt1, 2 * n * sizeof * rt1); + rt1 = rt3; + rt2 = rt1 + n; + for (u = 0; u < n; u ++) { + Ft[u] = (uint32_t)fpr_rint(rt1[u]); + Gt[u] = (uint32_t)fpr_rint(rt2[u]); + } + + return 1; +} + +/* + * Solving the NTRU equation, top level. Upon entry, the F and G + * from the previous level should be in the tmp[] array. + * + * Returned value: 1 on success, 0 on error. + */ +static int +solve_NTRU_binary_depth0(unsigned logn, + const int8_t *f, const int8_t *g, uint32_t *tmp) { + size_t n, hn, u; + uint32_t p, p0i, R2; + uint32_t *Fp, *Gp, *t1, *t2, *t3, *t4, *t5; + uint32_t *gm, *igm, *ft, *gt; + fpr *rt2, *rt3; + + n = (size_t)1 << logn; + hn = n >> 1; + + /* + * Equations are: + * + * f' = f0^2 - X^2*f1^2 + * g' = g0^2 - X^2*g1^2 + * F' and G' are a solution to f'G' - g'F' = q (from deeper levels) + * F = F'*(g0 - X*g1) + * G = G'*(f0 - X*f1) + * + * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to + * degree N/2 (their odd-indexed coefficients are all zero). + * + * Everything should fit in 31-bit integers, hence we can just use + * the first small prime p = 2147473409. + */ + p = PRIMES[0].p; + p0i = modp_ninv31(p); + R2 = modp_R2(p, p0i); + + Fp = tmp; + Gp = Fp + hn; + ft = Gp + hn; + gt = ft + n; + gm = gt + n; + igm = gm + n; + + modp_mkgm2(gm, igm, logn, PRIMES[0].g, p, p0i); + + /* + * Convert F' and G' in NTT representation. 
+ */ + for (u = 0; u < hn; u ++) { + Fp[u] = modp_set(zint_one_to_plain(Fp + u), p); + Gp[u] = modp_set(zint_one_to_plain(Gp + u), p); + } + modp_NTT2(Fp, gm, logn - 1, p, p0i); + modp_NTT2(Gp, gm, logn - 1, p, p0i); + + /* + * Load f and g and convert them to NTT representation. + */ + for (u = 0; u < n; u ++) { + ft[u] = modp_set(f[u], p); + gt[u] = modp_set(g[u], p); + } + modp_NTT2(ft, gm, logn, p, p0i); + modp_NTT2(gt, gm, logn, p, p0i); + + /* + * Build the unreduced F,G in ft and gt. + */ + for (u = 0; u < n; u += 2) { + uint32_t ftA, ftB, gtA, gtB; + uint32_t mFp, mGp; + + ftA = ft[u + 0]; + ftB = ft[u + 1]; + gtA = gt[u + 0]; + gtB = gt[u + 1]; + mFp = modp_montymul(Fp[u >> 1], R2, p, p0i); + mGp = modp_montymul(Gp[u >> 1], R2, p, p0i); + ft[u + 0] = modp_montymul(gtB, mFp, p, p0i); + ft[u + 1] = modp_montymul(gtA, mFp, p, p0i); + gt[u + 0] = modp_montymul(ftB, mGp, p, p0i); + gt[u + 1] = modp_montymul(ftA, mGp, p, p0i); + } + modp_iNTT2(ft, igm, logn, p, p0i); + modp_iNTT2(gt, igm, logn, p, p0i); + + Gp = Fp + n; + t1 = Gp + n; + memmove(Fp, ft, 2 * n * sizeof * ft); + + /* + * We now need to apply the Babai reduction. At that point, + * we have F and G in two n-word arrays. + * + * We can compute F*adj(f)+G*adj(g) and f*adj(f)+g*adj(g) + * modulo p, using the NTT. We still move memory around in + * order to save RAM. + */ + t2 = t1 + n; + t3 = t2 + n; + t4 = t3 + n; + t5 = t4 + n; + + /* + * Compute the NTT tables in t1 and t2. We do not keep t2 + * (we'll recompute it later on). + */ + modp_mkgm2(t1, t2, logn, PRIMES[0].g, p, p0i); + + /* + * Convert F and G to NTT. + */ + modp_NTT2(Fp, t1, logn, p, p0i); + modp_NTT2(Gp, t1, logn, p, p0i); + + /* + * Load f and adj(f) in t4 and t5, and convert them to NTT + * representation. 
+ */ + t4[0] = t5[0] = modp_set(f[0], p); + for (u = 1; u < n; u ++) { + t4[u] = modp_set(f[u], p); + t5[n - u] = modp_set(-f[u], p); + } + modp_NTT2(t4, t1, logn, p, p0i); + modp_NTT2(t5, t1, logn, p, p0i); + + /* + * Compute F*adj(f) in t2, and f*adj(f) in t3. + */ + for (u = 0; u < n; u ++) { + uint32_t w; + + w = modp_montymul(t5[u], R2, p, p0i); + t2[u] = modp_montymul(w, Fp[u], p, p0i); + t3[u] = modp_montymul(w, t4[u], p, p0i); + } + + /* + * Load g and adj(g) in t4 and t5, and convert them to NTT + * representation. + */ + t4[0] = t5[0] = modp_set(g[0], p); + for (u = 1; u < n; u ++) { + t4[u] = modp_set(g[u], p); + t5[n - u] = modp_set(-g[u], p); + } + modp_NTT2(t4, t1, logn, p, p0i); + modp_NTT2(t5, t1, logn, p, p0i); + + /* + * Add G*adj(g) to t2, and g*adj(g) to t3. + */ + for (u = 0; u < n; u ++) { + uint32_t w; + + w = modp_montymul(t5[u], R2, p, p0i); + t2[u] = modp_add(t2[u], + modp_montymul(w, Gp[u], p, p0i), p); + t3[u] = modp_add(t3[u], + modp_montymul(w, t4[u], p, p0i), p); + } + + /* + * Convert back t2 and t3 to normal representation (normalized + * around 0), and then + * move them to t1 and t2. We first need to recompute the + * inverse table for NTT. + */ + modp_mkgm2(t1, t4, logn, PRIMES[0].g, p, p0i); + modp_iNTT2(t2, t4, logn, p, p0i); + modp_iNTT2(t3, t4, logn, p, p0i); + for (u = 0; u < n; u ++) { + t1[u] = (uint32_t)modp_norm(t2[u], p); + t2[u] = (uint32_t)modp_norm(t3[u], p); + } + + /* + * At that point, array contents are: + * + * F (NTT representation) (Fp) + * G (NTT representation) (Gp) + * F*adj(f)+G*adj(g) (t1) + * f*adj(f)+g*adj(g) (t2) + * + * We want to divide t1 by t2. The result is not integral; it + * must be rounded. We thus need to use the FFT. + */ + + /* + * Get f*adj(f)+g*adj(g) in FFT representation. Since this + * polynomial is auto-adjoint, all its coordinates in FFT + * representation are actually real, so we can truncate off + * the imaginary parts. 
+ */ + rt3 = align_fpr(tmp, t3); + for (u = 0; u < n; u ++) { + rt3[u] = fpr_of(((int32_t *)t2)[u]); + } + PQCLEAN_FALCON512_AVX2_FFT(rt3, logn); + rt2 = align_fpr(tmp, t2); + memmove(rt2, rt3, hn * sizeof * rt3); + + /* + * Convert F*adj(f)+G*adj(g) in FFT representation. + */ + rt3 = rt2 + hn; + for (u = 0; u < n; u ++) { + rt3[u] = fpr_of(((int32_t *)t1)[u]); + } + PQCLEAN_FALCON512_AVX2_FFT(rt3, logn); + + /* + * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) and get + * its rounded normal representation in t1. + */ + PQCLEAN_FALCON512_AVX2_poly_div_autoadj_fft(rt3, rt2, logn); + PQCLEAN_FALCON512_AVX2_iFFT(rt3, logn); + for (u = 0; u < n; u ++) { + t1[u] = modp_set((int32_t)fpr_rint(rt3[u]), p); + } + + /* + * RAM contents are now: + * + * F (NTT representation) (Fp) + * G (NTT representation) (Gp) + * k (t1) + * + * We want to compute F-k*f, and G-k*g. + */ + t2 = t1 + n; + t3 = t2 + n; + t4 = t3 + n; + t5 = t4 + n; + modp_mkgm2(t2, t3, logn, PRIMES[0].g, p, p0i); + for (u = 0; u < n; u ++) { + t4[u] = modp_set(f[u], p); + t5[u] = modp_set(g[u], p); + } + modp_NTT2(t1, t2, logn, p, p0i); + modp_NTT2(t4, t2, logn, p, p0i); + modp_NTT2(t5, t2, logn, p, p0i); + for (u = 0; u < n; u ++) { + uint32_t kw; + + kw = modp_montymul(t1[u], R2, p, p0i); + Fp[u] = modp_sub(Fp[u], + modp_montymul(kw, t4[u], p, p0i), p); + Gp[u] = modp_sub(Gp[u], + modp_montymul(kw, t5[u], p, p0i), p); + } + modp_iNTT2(Fp, t3, logn, p, p0i); + modp_iNTT2(Gp, t3, logn, p, p0i); + for (u = 0; u < n; u ++) { + Fp[u] = (uint32_t)modp_norm(Fp[u], p); + Gp[u] = (uint32_t)modp_norm(Gp[u], p); + } + + return 1; +} + +/* + * Solve the NTRU equation. Returned value is 1 on success, 0 on error. + * G can be NULL, in which case that value is computed but not returned. + * If any of the coefficients of F and G exceeds lim (in absolute value), + * then 0 is returned. 
+ */ +static int +solve_NTRU(unsigned logn, int8_t *F, int8_t *G, + const int8_t *f, const int8_t *g, int lim, uint32_t *tmp) { + size_t n, u; + uint32_t *ft, *gt, *Ft, *Gt, *gm; + uint32_t p, p0i, r; + const small_prime *primes; + + n = MKN(logn); + + if (!solve_NTRU_deepest(logn, f, g, tmp)) { + return 0; + } + + /* + * For logn <= 2, we need to use solve_NTRU_intermediate() + * directly, because coefficients are a bit too large and + * do not fit the hypotheses in solve_NTRU_binary_depth0(). + */ + if (logn <= 2) { + unsigned depth; + + depth = logn; + while (depth -- > 0) { + if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) { + return 0; + } + } + } else { + unsigned depth; + + depth = logn; + while (depth -- > 2) { + if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) { + return 0; + } + } + if (!solve_NTRU_binary_depth1(logn, f, g, tmp)) { + return 0; + } + if (!solve_NTRU_binary_depth0(logn, f, g, tmp)) { + return 0; + } + } + + /* + * If no buffer has been provided for G, use a temporary one. + */ + if (G == NULL) { + G = (int8_t *)(tmp + 2 * n); + } + + /* + * Final F and G are in fk->tmp, one word per coefficient + * (signed value over 31 bits). + */ + if (!poly_big_to_small(F, tmp, lim, logn) + || !poly_big_to_small(G, tmp + n, lim, logn)) { + return 0; + } + + /* + * Verify that the NTRU equation is fulfilled. Since all elements + * have short lengths, verifying modulo a small prime p works, and + * allows using the NTT. + * + * We put Gt[] first in tmp[], and process it first, so that it does + * not overlap with G[] in case we allocated it ourselves. 
+ */ + Gt = tmp; + ft = Gt + n; + gt = ft + n; + Ft = gt + n; + gm = Ft + n; + + primes = PRIMES; + p = primes[0].p; + p0i = modp_ninv31(p); + modp_mkgm2(gm, tmp, logn, primes[0].g, p, p0i); + for (u = 0; u < n; u ++) { + Gt[u] = modp_set(G[u], p); + } + for (u = 0; u < n; u ++) { + ft[u] = modp_set(f[u], p); + gt[u] = modp_set(g[u], p); + Ft[u] = modp_set(F[u], p); + } + modp_NTT2(ft, gm, logn, p, p0i); + modp_NTT2(gt, gm, logn, p, p0i); + modp_NTT2(Ft, gm, logn, p, p0i); + modp_NTT2(Gt, gm, logn, p, p0i); + r = modp_montymul(12289, 1, p, p0i); + for (u = 0; u < n; u ++) { + uint32_t z; + + z = modp_sub(modp_montymul(ft[u], Gt[u], p, p0i), + modp_montymul(gt[u], Ft[u], p, p0i), p); + if (z != r) { + return 0; + } + } + + return 1; +} + +/* + * Generate a random polynomial with a Gaussian distribution. This function + * also makes sure that the resultant of the polynomial with phi is odd. + */ +static void +poly_small_mkgauss(RNG_CONTEXT *rng, int8_t *f, unsigned logn) { + size_t n, u; + unsigned mod2; + + n = MKN(logn); + mod2 = 0; + for (u = 0; u < n; u ++) { + int s; + +restart: + s = mkgauss(rng, logn); + + /* + * We need the coefficient to fit within -127..+127; + * realistically, this is always the case except for + * the very low degrees (N = 2 or 4), for which there + * is no real security anyway. + */ + if (s < -127 || s > 127) { + goto restart; + } + + /* + * We need the sum of all coefficients to be 1; otherwise, + * the resultant of the polynomial with X^N+1 will be even, + * and the binary GCD will fail. + */ + if (u == n - 1) { + if ((mod2 ^ (unsigned)(s & 1)) == 0) { + goto restart; + } + } else { + mod2 ^= (unsigned)(s & 1); + } + f[u] = (int8_t)s; + } +} + +/* see falcon.h */ +void +PQCLEAN_FALCON512_AVX2_keygen(inner_shake256_context *rng, + int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h, + unsigned logn, uint8_t *tmp) { + /* + * Algorithm is the following: + * + * - Generate f and g with the Gaussian distribution. 
+ * + * - If either Res(f,phi) or Res(g,phi) is even, try again. + * + * - If ||(f,g)|| is too large, try again. + * + * - If ||B~_{f,g}|| is too large, try again. + * + * - If f is not invertible mod phi mod q, try again. + * + * - Compute h = g/f mod phi mod q. + * + * - Solve the NTRU equation fG - gF = q; if the solving fails, + * try again. Usual failure condition is when Res(f,phi) + * and Res(g,phi) are not prime to each other. + */ + size_t n, u; + uint16_t *h2, *tmp2; + RNG_CONTEXT *rc; + + n = MKN(logn); + rc = rng; + + /* + * We need to generate f and g randomly, until we find values + * such that the norm of (g,-f), and of the orthogonalized + * vector, are satisfying. The orthogonalized vector is: + * (q*adj(f)/(f*adj(f)+g*adj(g)), q*adj(g)/(f*adj(f)+g*adj(g))) + * (it is actually the (N+1)-th row of the Gram-Schmidt basis). + * + * In the binary case, coefficients of f and g are generated + * independently of each other, with a discrete Gaussian + * distribution of standard deviation 1.17*sqrt(q/(2*N)). Then, + * the two vectors have expected norm 1.17*sqrt(q), which is + * also our acceptance bound: we require both vectors to be no + * larger than that (this will be satisfied about 1/4th of the + * time, thus we expect sampling new (f,g) about 4 times for that + * step). + * + * We require that Res(f,phi) and Res(g,phi) are both odd (the + * NTRU equation solver requires it). + */ + for (;;) { + fpr *rt1, *rt2, *rt3; + fpr bnorm; + uint32_t normf, normg, norm; + int lim; + + /* + * The poly_small_mkgauss() function makes sure + * that the sum of coefficients is 1 modulo 2 + * (i.e. the resultant of the polynomial with phi + * will be odd). + */ + poly_small_mkgauss(rc, f, logn); + poly_small_mkgauss(rc, g, logn); + + /* + * Verify that all coefficients are within the bounds + * defined in max_fg_bits. This is the case with + * overwhelming probability; this guarantees that the + * key will be encodable with FALCON_COMP_TRIM. 
+ */ + lim = 1 << (PQCLEAN_FALCON512_AVX2_max_fg_bits[logn] - 1); + for (u = 0; u < n; u ++) { + /* + * We can use non-CT tests since on any failure + * we will discard f and g. + */ + if (f[u] >= lim || f[u] <= -lim + || g[u] >= lim || g[u] <= -lim) { + lim = -1; + break; + } + } + if (lim < 0) { + continue; + } + + /* + * Bound is 1.17*sqrt(q). We compute the squared + * norms. With q = 12289, the squared bound is: + * (1.17^2)* 12289 = 16822.4121 + * Since f and g are integral, the squared norm + * of (g,-f) is an integer. + */ + normf = poly_small_sqnorm(f, logn); + normg = poly_small_sqnorm(g, logn); + norm = (normf + normg) | -((normf | normg) >> 31); + if (norm >= 16823) { + continue; + } + + /* + * We compute the orthogonalized vector norm. + */ + rt1 = (fpr *)tmp; + rt2 = rt1 + n; + rt3 = rt2 + n; + poly_small_to_fp(rt1, f, logn); + poly_small_to_fp(rt2, g, logn); + PQCLEAN_FALCON512_AVX2_FFT(rt1, logn); + PQCLEAN_FALCON512_AVX2_FFT(rt2, logn); + PQCLEAN_FALCON512_AVX2_poly_invnorm2_fft(rt3, rt1, rt2, logn); + PQCLEAN_FALCON512_AVX2_poly_adj_fft(rt1, logn); + PQCLEAN_FALCON512_AVX2_poly_adj_fft(rt2, logn); + PQCLEAN_FALCON512_AVX2_poly_mulconst(rt1, fpr_q, logn); + PQCLEAN_FALCON512_AVX2_poly_mulconst(rt2, fpr_q, logn); + PQCLEAN_FALCON512_AVX2_poly_mul_autoadj_fft(rt1, rt3, logn); + PQCLEAN_FALCON512_AVX2_poly_mul_autoadj_fft(rt2, rt3, logn); + PQCLEAN_FALCON512_AVX2_iFFT(rt1, logn); + PQCLEAN_FALCON512_AVX2_iFFT(rt2, logn); + bnorm = fpr_zero; + for (u = 0; u < n; u ++) { + bnorm = fpr_add(bnorm, fpr_sqr(rt1[u])); + bnorm = fpr_add(bnorm, fpr_sqr(rt2[u])); + } + if (!fpr_lt(bnorm, fpr_bnorm_max)) { + continue; + } + + /* + * Compute public key h = g/f mod X^N+1 mod q. If this + * fails, we must restart. 
+ */ + if (h == NULL) { + h2 = (uint16_t *)tmp; + tmp2 = h2 + n; + } else { + h2 = h; + tmp2 = (uint16_t *)tmp; + } + if (!PQCLEAN_FALCON512_AVX2_compute_public(h2, f, g, logn, (uint8_t *)tmp2)) { + continue; + } + + /* + * Solve the NTRU equation to get F and G. + */ + lim = (1 << (PQCLEAN_FALCON512_AVX2_max_FG_bits[logn] - 1)) - 1; + if (!solve_NTRU(logn, F, G, f, g, lim, (uint32_t *)tmp)) { + continue; + } + + /* + * Key pair is generated. + */ + break; + } +} diff --git a/crypto_sign/falcon-512/avx2/pqclean.c b/crypto_sign/falcon-512/avx2/pqclean.c new file mode 100644 index 00000000..7777acbc --- /dev/null +++ b/crypto_sign/falcon-512/avx2/pqclean.c @@ -0,0 +1,384 @@ +#include "api.h" +#include "inner.h" +#include "randombytes.h" +#include +#include +/* + * Wrapper for implementing the PQClean API. + */ + + + +#define NONCELEN 40 +#define SEEDLEN 48 + +/* + * Encoding formats (nnnn = log of degree, 9 for Falcon-512, 10 for Falcon-1024) + * + * private key: + * header byte: 0101nnnn + * private f (6 or 5 bits by element, depending on degree) + * private g (6 or 5 bits by element, depending on degree) + * private F (8 bits by element) + * + * public key: + * header byte: 0000nnnn + * public h (14 bits by element) + * + * signature: + * header byte: 0011nnnn + * nonce 40 bytes + * value (12 bits by element) + * + * message + signature: + * signature length (2 bytes, big-endian) + * nonce 40 bytes + * message + * header byte: 0010nnnn + * value (12 bits by element) + * (signature length is 1+len(value), not counting the nonce) + */ + +/* see api.h */ +int +PQCLEAN_FALCON512_AVX2_crypto_sign_keypair(unsigned char *pk, unsigned char *sk) { + union { + uint8_t b[FALCON_KEYGEN_TEMP_9]; + uint64_t dummy_u64; + fpr dummy_fpr; + } tmp; + int8_t f[512], g[512], F[512]; + uint16_t h[512]; + unsigned char seed[SEEDLEN]; + inner_shake256_context rng; + size_t u, v; + + /* + * Generate key pair. 
+ */ + randombytes(seed, sizeof seed); + inner_shake256_init(&rng); + inner_shake256_inject(&rng, seed, sizeof seed); + inner_shake256_flip(&rng); + PQCLEAN_FALCON512_AVX2_keygen(&rng, f, g, F, NULL, h, 9, tmp.b); + inner_shake256_ctx_release(&rng); + + /* + * Encode private key. + */ + sk[0] = 0x50 + 9; + u = 1; + v = PQCLEAN_FALCON512_AVX2_trim_i8_encode( + sk + u, PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES - u, + f, 9, PQCLEAN_FALCON512_AVX2_max_fg_bits[9]); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON512_AVX2_trim_i8_encode( + sk + u, PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES - u, + g, 9, PQCLEAN_FALCON512_AVX2_max_fg_bits[9]); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON512_AVX2_trim_i8_encode( + sk + u, PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES - u, + F, 9, PQCLEAN_FALCON512_AVX2_max_FG_bits[9]); + if (v == 0) { + return -1; + } + u += v; + if (u != PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES) { + return -1; + } + + /* + * Encode public key. + */ + pk[0] = 0x00 + 9; + v = PQCLEAN_FALCON512_AVX2_modq_encode( + pk + 1, PQCLEAN_FALCON512_AVX2_CRYPTO_PUBLICKEYBYTES - 1, + h, 9); + if (v != PQCLEAN_FALCON512_AVX2_CRYPTO_PUBLICKEYBYTES - 1) { + return -1; + } + + return 0; +} + +/* + * Compute the signature. nonce[] receives the nonce and must have length + * NONCELEN bytes. sigbuf[] receives the signature value (without nonce + * or header byte), with *sigbuflen providing the maximum value length and + * receiving the actual value length. + * + * If a signature could be computed but not encoded because it would + * exceed the output buffer size, then a new signature is computed. If + * the provided buffer size is too low, this could loop indefinitely, so + * the caller must provide a size that can accommodate signatures with a + * large enough probability. + * + * Return value: 0 on success, -1 on error. 
+ */ +static int +do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen, + const uint8_t *m, size_t mlen, const uint8_t *sk) { + union { + uint8_t b[72 * 512]; + uint64_t dummy_u64; + fpr dummy_fpr; + } tmp; + int8_t f[512], g[512], F[512], G[512]; + union { + int16_t sig[512]; + uint16_t hm[512]; + } r; + unsigned char seed[SEEDLEN]; + inner_shake256_context sc; + size_t u, v; + + /* + * Decode the private key. + */ + if (sk[0] != 0x50 + 9) { + return -1; + } + u = 1; + v = PQCLEAN_FALCON512_AVX2_trim_i8_decode( + f, 9, PQCLEAN_FALCON512_AVX2_max_fg_bits[9], + sk + u, PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES - u); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON512_AVX2_trim_i8_decode( + g, 9, PQCLEAN_FALCON512_AVX2_max_fg_bits[9], + sk + u, PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES - u); + if (v == 0) { + return -1; + } + u += v; + v = PQCLEAN_FALCON512_AVX2_trim_i8_decode( + F, 9, PQCLEAN_FALCON512_AVX2_max_FG_bits[9], + sk + u, PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES - u); + if (v == 0) { + return -1; + } + u += v; + if (u != PQCLEAN_FALCON512_AVX2_CRYPTO_SECRETKEYBYTES) { + return -1; + } + if (!PQCLEAN_FALCON512_AVX2_complete_private(G, f, g, F, 9, tmp.b)) { + return -1; + } + + /* + * Create a random nonce (40 bytes). + */ + randombytes(nonce, NONCELEN); + + /* + * Hash message nonce + message into a vector. + */ + inner_shake256_init(&sc); + inner_shake256_inject(&sc, nonce, NONCELEN); + inner_shake256_inject(&sc, m, mlen); + inner_shake256_flip(&sc); + PQCLEAN_FALCON512_AVX2_hash_to_point_ct(&sc, r.hm, 9, tmp.b); + inner_shake256_ctx_release(&sc); + + /* + * Initialize a RNG. + */ + randombytes(seed, sizeof seed); + inner_shake256_init(&sc); + inner_shake256_inject(&sc, seed, sizeof seed); + inner_shake256_flip(&sc); + + /* + * Compute and return the signature. This loops until a signature + * value is found that fits in the provided buffer. 
+ */ + for (;;) { + PQCLEAN_FALCON512_AVX2_sign_dyn(r.sig, &sc, f, g, F, G, r.hm, 9, tmp.b); + v = PQCLEAN_FALCON512_AVX2_comp_encode(sigbuf, *sigbuflen, r.sig, 9); + if (v != 0) { + inner_shake256_ctx_release(&sc); + *sigbuflen = v; + return 0; + } + } +} + +/* + * Verify a signature. The nonce has size NONCELEN bytes. sigbuf[] + * (of size sigbuflen) contains the signature value, not including the + * header byte or nonce. Return value is 0 on success, -1 on error. + */ +static int +do_verify( + const uint8_t *nonce, const uint8_t *sigbuf, size_t sigbuflen, + const uint8_t *m, size_t mlen, const uint8_t *pk) { + union { + uint8_t b[2 * 512]; + uint64_t dummy_u64; + fpr dummy_fpr; + } tmp; + uint16_t h[512], hm[512]; + int16_t sig[512]; + inner_shake256_context sc; + + /* + * Decode public key. + */ + if (pk[0] != 0x00 + 9) { + return -1; + } + if (PQCLEAN_FALCON512_AVX2_modq_decode(h, 9, + pk + 1, PQCLEAN_FALCON512_AVX2_CRYPTO_PUBLICKEYBYTES - 1) + != PQCLEAN_FALCON512_AVX2_CRYPTO_PUBLICKEYBYTES - 1) { + return -1; + } + PQCLEAN_FALCON512_AVX2_to_ntt_monty(h, 9); + + /* + * Decode signature. + */ + if (sigbuflen == 0) { + return -1; + } + if (PQCLEAN_FALCON512_AVX2_comp_decode(sig, 9, sigbuf, sigbuflen) != sigbuflen) { + return -1; + } + + /* + * Hash nonce + message into a vector. + */ + inner_shake256_init(&sc); + inner_shake256_inject(&sc, nonce, NONCELEN); + inner_shake256_inject(&sc, m, mlen); + inner_shake256_flip(&sc); + PQCLEAN_FALCON512_AVX2_hash_to_point_ct(&sc, hm, 9, tmp.b); + inner_shake256_ctx_release(&sc); + + /* + * Verify signature. 
+ */ + if (!PQCLEAN_FALCON512_AVX2_verify_raw(hm, sig, h, 9, tmp.b)) { + return -1; + } + return 0; +} + +/* see api.h */ +int +PQCLEAN_FALCON512_AVX2_crypto_sign_signature( + uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk) { + /* + * The PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES constant is used for + * the signed message object (as produced by PQCLEAN_FALCON512_AVX2_crypto_sign()) + * and includes a two-byte length value, so we take care here + * to only generate signatures that are two bytes shorter than + * the maximum. This is done to ensure that PQCLEAN_FALCON512_AVX2_crypto_sign() + * and PQCLEAN_FALCON512_AVX2_crypto_sign_signature() produce the exact same signature + * value, if used on the same message, with the same private key, + * and using the same output from randombytes() (this is for + * reproducibility of tests). + */ + size_t vlen; + + vlen = PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES - NONCELEN - 3; + if (do_sign(sig + 1, sig + 1 + NONCELEN, &vlen, m, mlen, sk) < 0) { + return -1; + } + sig[0] = 0x30 + 9; + *siglen = 1 + NONCELEN + vlen; + return 0; +} + +/* see api.h */ +int +PQCLEAN_FALCON512_AVX2_crypto_sign_verify( + const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk) { + if (siglen < 1 + NONCELEN) { + return -1; + } + if (sig[0] != 0x30 + 9) { + return -1; + } + return do_verify(sig + 1, + sig + 1 + NONCELEN, siglen - 1 - NONCELEN, m, mlen, pk); +} + +/* see api.h */ +int +PQCLEAN_FALCON512_AVX2_crypto_sign( + uint8_t *sm, size_t *smlen, + const uint8_t *m, size_t mlen, const uint8_t *sk) { + uint8_t *pm, *sigbuf; + size_t sigbuflen; + + /* + * Move the message to its final location; this is a memmove() so + * it handles overlaps properly. 
+ */ + memmove(sm + 2 + NONCELEN, m, mlen); + pm = sm + 2 + NONCELEN; + sigbuf = pm + 1 + mlen; + sigbuflen = PQCLEAN_FALCON512_AVX2_CRYPTO_BYTES - NONCELEN - 3; + if (do_sign(sm + 2, sigbuf, &sigbuflen, pm, mlen, sk) < 0) { + return -1; + } + pm[mlen] = 0x20 + 9; + sigbuflen ++; + sm[0] = (uint8_t)(sigbuflen >> 8); + sm[1] = (uint8_t)sigbuflen; + *smlen = mlen + 2 + NONCELEN + sigbuflen; + return 0; +} + +/* see api.h */ +int +PQCLEAN_FALCON512_AVX2_crypto_sign_open( + uint8_t *m, size_t *mlen, + const uint8_t *sm, size_t smlen, const uint8_t *pk) { + const uint8_t *sigbuf; + size_t pmlen, sigbuflen; + + if (smlen < 3 + NONCELEN) { + return -1; + } + sigbuflen = ((size_t)sm[0] << 8) | (size_t)sm[1]; + if (sigbuflen < 2 || sigbuflen > (smlen - NONCELEN - 2)) { + return -1; + } + sigbuflen --; + pmlen = smlen - NONCELEN - 3 - sigbuflen; + if (sm[2 + NONCELEN + pmlen] != 0x20 + 9) { + return -1; + } + sigbuf = sm + 2 + NONCELEN + pmlen + 1; + + /* + * The 2-byte length header and the one-byte signature header + * have been verified. Nonce is at sm+2, followed by the message + * itself. Message length is in pmlen. sigbuf/sigbuflen point to + * the signature value (excluding the header byte). + */ + if (do_verify(sm + 2, sigbuf, sigbuflen, + sm + 2 + NONCELEN, pmlen, pk) < 0) { + return -1; + } + + /* + * Signature is correct, we just have to copy/move the message + * to its final destination. The memmove() properly handles + * overlaps. + */ + memmove(m, sm + 2 + NONCELEN, pmlen); + *mlen = pmlen; + return 0; +} diff --git a/crypto_sign/falcon-512/avx2/rng.c b/crypto_sign/falcon-512/avx2/rng.c new file mode 100644 index 00000000..68c6dc7e --- /dev/null +++ b/crypto_sign/falcon-512/avx2/rng.c @@ -0,0 +1,195 @@ +#include "inner.h" +#include +/* + * PRNG and interface to the system RNG. 
+ * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + + +/* + * Include relevant system header files. For Win32, this will also need + * linking with advapi32.dll, which we trigger with an appropriate #pragma. + */ + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_get_seed(void *seed, size_t len) { + (void)seed; + if (len == 0) { + return 1; + } + return 0; +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_prng_init(prng *p, inner_shake256_context *src) { + inner_shake256_extract(src, p->state.d, 56); + PQCLEAN_FALCON512_AVX2_prng_refill(p); +} + +/* + * PRNG based on ChaCha20. + * + * State consists in key (32 bytes) then IV (16 bytes) and block counter + * (8 bytes). 
 * Normally, we should not care about local endianness (this
 * is for a PRNG), but for the NIST competition we need reproducible KAT
 * vectors that work across architectures, so we enforce little-endian
 * interpretation where applicable. Moreover, output words are "spread
 * out" over the output buffer with the interleaving pattern that is
 * naturally obtained from the AVX2 implementation that runs eight
 * ChaCha20 instances in parallel.
 *
 * The block counter is XORed into the first 8 bytes of the IV.
 */

/*
 * Refill the PRNG output buffer p->buf.d by running eight ChaCha20
 * instances in parallel (one per 32-bit lane of the AVX2 registers),
 * then advance the 64-bit block counter by 8.
 */
void
PQCLEAN_FALCON512_AVX2_prng_refill(prng *p) {

    /* ChaCha20 constant words ("expand 32-byte k"). */
    static const uint32_t CW[] = {
        0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
    };

    uint64_t cc;
    size_t u;
    int i;
    uint32_t *sw;
    union {
        uint32_t w[16];
        __m256i y[2];    /* for alignment */
    } t;
    __m256i state[16], init[16];

    sw = (uint32_t *)p->state.d;

    /*
     * XOR next counter values into state: lane j of the eight
     * parallel instances gets counter cc+j (low word in t.w[j],
     * high word in t.w[j+8]).
     */
    cc = *(uint64_t *)(p->state.d + 48);
    for (u = 0; u < 8; u ++) {
        t.w[u] = (uint32_t)(cc + u);
        t.w[u + 8] = (uint32_t)((cc + u) >> 32);
    }
    *(uint64_t *)(p->state.d + 48) = cc + 8;

    /*
     * Load state: constants in words 0-3, key+IV words broadcast to
     * all eight lanes; words 14-15 additionally get the per-lane
     * counter XORed in (see header comment).
     */
    for (u = 0; u < 4; u ++) {
        state[u] = init[u] =
            _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)CW[u]));
    }
    for (u = 0; u < 10; u ++) {
        state[u + 4] = init[u + 4] =
            _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)sw[u]));
    }
    state[14] = init[14] = _mm256_xor_si256(
                               _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)sw[10])),
                               _mm256_loadu_si256((__m256i *)&t.w[0]));
    state[15] = init[15] = _mm256_xor_si256(
                               _mm256_broadcastd_epi32(_mm_cvtsi32_si128((int32_t)sw[11])),
                               _mm256_loadu_si256((__m256i *)&t.w[8]));

    /*
     * Do all rounds (10 double-rounds = 20 ChaCha20 rounds).
     */
    for (i = 0; i < 10; i ++) {

        /*
         * ChaCha20 quarter-round on all eight lanes at once; the
         * 16/12/8/7-bit rotations are emulated with shift+or since
         * AVX2 has no 32-bit rotate.
         */
#define QROUND(a, b, c, d)   do { \
        state[a] = _mm256_add_epi32(state[a], state[b]); \
        state[d] = _mm256_xor_si256(state[d], state[a]); \
        state[d] = _mm256_or_si256( \
                   _mm256_slli_epi32(state[d], 16), \
                   _mm256_srli_epi32(state[d], 16)); \
        state[c] = _mm256_add_epi32(state[c], state[d]); \
        state[b] = _mm256_xor_si256(state[b], state[c]); \
        state[b] = _mm256_or_si256( \
                   _mm256_slli_epi32(state[b], 12), \
                   _mm256_srli_epi32(state[b], 20)); \
        state[a] = _mm256_add_epi32(state[a], state[b]); \
        state[d] = _mm256_xor_si256(state[d], state[a]); \
        state[d] = _mm256_or_si256( \
                   _mm256_slli_epi32(state[d], 8), \
                   _mm256_srli_epi32(state[d], 24)); \
        state[c] = _mm256_add_epi32(state[c], state[d]); \
        state[b] = _mm256_xor_si256(state[b], state[c]); \
        state[b] = _mm256_or_si256( \
                   _mm256_slli_epi32(state[b], 7), \
                   _mm256_srli_epi32(state[b], 25)); \
    } while (0)

        /* Column rounds, then diagonal rounds. */
        QROUND( 0,  4,  8, 12);
        QROUND( 1,  5,  9, 13);
        QROUND( 2,  6, 10, 14);
        QROUND( 3,  7, 11, 15);
        QROUND( 0,  5, 10, 15);
        QROUND( 1,  6, 11, 12);
        QROUND( 2,  7,  8, 13);
        QROUND( 3,  4,  9, 14);

#undef QROUND

    }

    /*
     * Add initial state back and encode the result in the destination
     * buffer. We can dump the AVX2 values "as is" because the non-AVX2
     * code uses a compatible order of values.
     */
    for (u = 0; u < 16; u ++) {
        _mm256_storeu_si256((__m256i *)&p->buf.d[u << 5],
                            _mm256_add_epi32(state[u], init[u]));
    }


    p->ptr = 0;
}

/*
 * see inner.h
 *
 * Copy len pseudorandom bytes into dst, refilling the internal buffer
 * whenever it is exhausted.
 */
void
PQCLEAN_FALCON512_AVX2_prng_get_bytes(prng *p, void *dst, size_t len) {
    uint8_t *buf;

    buf = dst;
    while (len > 0) {
        size_t clen;

        /* Serve from what remains of the current buffer. */
        clen = (sizeof p->buf.d) - p->ptr;
        if (clen > len) {
            clen = len;
        }
        memcpy(buf, p->buf.d, clen);
        buf += clen;
        len -= clen;
        p->ptr += clen;
        if (p->ptr == sizeof p->buf.d) {
            PQCLEAN_FALCON512_AVX2_prng_refill(p);
        }
    }
}

/* ==================================================================== */
/* Below this point: crypto_sign/falcon-512/avx2/sign.c (new file)      */
/* ==================================================================== */

#include "inner.h"

/*
 * Falcon signature generation.
 *
 * ==========================(LICENSE BEGIN)============================
 *
 * Copyright (c) 2017-2019  Falcon Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * ===========================(LICENSE END)=============================
 *
 * @author   Thomas Pornin
 */


/* =================================================================== */

/*
 * Compute degree N from logarithm 'logn'.
 */
#define MKN(logn)   ((size_t)1 << (logn))

/* =================================================================== */
/*
 * Binary case:
 *   N = 2^logn
 *   phi = X^N+1
 */

/*
 * Get the size of the LDL tree for an input with polynomials of size
 * 2^logn. The size is expressed in the number of elements.
 */
static inline unsigned
ffLDL_treesize(unsigned logn) {
    /*
     * For logn = 0 (polynomials are constant), the "tree" is a
     * single element. Otherwise, the tree node has size 2^logn, and
     * has two child trees for size logn-1 each. Thus, treesize s()
     * must fulfill these two relations:
     *
     *   s(0) = 1
     *   s(logn) = (2^logn) + 2*s(logn-1)
     *
     * (logn + 1) << logn satisfies both.
     */
    return (logn + 1) << logn;
}

/*
 * Inner function for ffLDL_fft(). It expects the matrix to be both
 * auto-adjoint and quasicyclic; also, it uses the source operands
 * as modifiable temporaries.
 *
 * tmp[] must have room for at least one polynomial.
 */
static void
ffLDL_fft_inner(fpr *tree,
                fpr *g0, fpr *g1, unsigned logn, fpr *tmp) {
    size_t n, hn;

    n = MKN(logn);
    if (n == 1) {
        /* Leaf: store the single diagonal value. */
        tree[0] = g0[0];
        return;
    }
    hn = n >> 1;

    /*
     * The LDL decomposition yields L (which is written in the tree)
     * and the diagonal of D. Since d00 = g0, we just write d11
     * into tmp.
     */
    PQCLEAN_FALCON512_AVX2_poly_LDLmv_fft(tmp, tree, g0, g1, g0, logn);

    /*
     * Split d00 (currently in g0) and d11 (currently in tmp). We
     * reuse g0 and g1 as temporary storage spaces:
     *   d00 splits into g1, g1+hn
     *   d11 splits into g0, g0+hn
     */
    PQCLEAN_FALCON512_AVX2_poly_split_fft(g1, g1 + hn, g0, logn);
    PQCLEAN_FALCON512_AVX2_poly_split_fft(g0, g0 + hn, tmp, logn);

    /*
     * Each split result is the first row of a new auto-adjoint
     * quasicyclic matrix for the next recursive step.
     */
    ffLDL_fft_inner(tree + n,
                    g1, g1 + hn, logn - 1, tmp);
    ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1),
                    g0, g0 + hn, logn - 1, tmp);
}

/*
 * Compute the ffLDL tree of an auto-adjoint matrix G. The matrix
 * is provided as three polynomials (FFT representation).
 *
 * The "tree" array is filled with the computed tree, of size
 * (logn+1)*(2^logn) elements (see ffLDL_treesize()).
 *
 * Input arrays MUST NOT overlap, except possibly the three unmodified
 * arrays g00, g01 and g11. tmp[] should have room for at least three
 * polynomials of 2^logn elements each.
 */
static void
ffLDL_fft(fpr *tree, const fpr *g00,
          const fpr *g01, const fpr *g11,
          unsigned logn, fpr *tmp) {
    size_t n, hn;
    fpr *d00, *d11;

    n = MKN(logn);
    if (n == 1) {
        tree[0] = g00[0];
        return;
    }
    hn = n >> 1;
    d00 = tmp;
    d11 = tmp + n;
    tmp += n << 1;

    /* d00 = g00; LDL writes L into the tree and d11 into d11. */
    memcpy(d00, g00, n * sizeof * g00);
    PQCLEAN_FALCON512_AVX2_poly_LDLmv_fft(d11, tree, g00, g01, g11, logn);

    /* Split d00 and d11, then recurse on the two half-size matrices. */
    PQCLEAN_FALCON512_AVX2_poly_split_fft(tmp, tmp + hn, d00, logn);
    PQCLEAN_FALCON512_AVX2_poly_split_fft(d00, d00 + hn, d11, logn);
    memcpy(d11, tmp, n * sizeof * tmp);
    ffLDL_fft_inner(tree + n,
                    d11, d11 + hn, logn - 1, tmp);
    ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1),
                    d00, d00 + hn, logn - 1, tmp);
}

/*
 * Normalize an ffLDL tree: each leaf of value x is replaced with
 * sigma / sqrt(x).
 */
static void
ffLDL_binary_normalize(fpr *tree, unsigned logn) {
    /*
     * TODO: make an iterative version.
     */
    size_t n;

    n = MKN(logn);
    if (n == 1) {
        /*
         * We actually store in the tree leaf the inverse of
         * the value mandated by the specification: this
         * saves a division both here and in the sampler.
         */
        tree[0] = fpr_mul(fpr_sqrt(tree[0]), fpr_inv_sigma);
    } else {
        ffLDL_binary_normalize(tree + n, logn - 1);
        ffLDL_binary_normalize(tree + n + ffLDL_treesize(logn - 1),
                               logn - 1);
    }
}

/* =================================================================== */

/*
 * Convert an integer polynomial (with small values) into the
 * representation with complex numbers.
 */
static void
smallints_to_fpr(fpr *r, const int8_t *t, unsigned logn) {
    size_t n, u;

    n = MKN(logn);
    for (u = 0; u < n; u ++) {
        r[u] = fpr_of(t[u]);
    }
}

/*
 * The expanded private key contains:
 *  - The B0 matrix (four elements)
 *  - The ffLDL tree
 *
 * The skoff_*() helpers below give the offset (in fpr elements) of
 * each component inside the expanded key buffer.
 */

static inline size_t
skoff_b00(unsigned logn) {
    (void)logn;
    return 0;
}

static inline size_t
skoff_b01(unsigned logn) {
    return MKN(logn);
}

static inline size_t
skoff_b10(unsigned logn) {
    return 2 * MKN(logn);
}

static inline size_t
skoff_b11(unsigned logn) {
    return 3 * MKN(logn);
}

static inline size_t
skoff_tree(unsigned logn) {
    return 4 * MKN(logn);
}

/*
 * see inner.h
 *
 * Expand the private key (f, g, F, G) into the B0 matrix (in FFT
 * representation) followed by the normalized ffLDL tree, written into
 * expanded_key[]. tmp[] is scratch space for the Gram matrix
 * computation (four polynomials of fpr, see gxx below).
 */
void
PQCLEAN_FALCON512_AVX2_expand_privkey(fpr *expanded_key,
                                      const int8_t *f, const int8_t *g,
                                      const int8_t *F, const int8_t *G,
                                      unsigned logn, uint8_t *tmp) {
    size_t n;
    fpr *rf, *rg, *rF, *rG;
    fpr *b00, *b01, *b10, *b11;
    fpr *g00, *g01, *g11, *gxx;
    fpr *tree;

    n = MKN(logn);
    b00 = expanded_key + skoff_b00(logn);
    b01 = expanded_key + skoff_b01(logn);
    b10 = expanded_key + skoff_b10(logn);
    b11 = expanded_key + skoff_b11(logn);
    tree = expanded_key + skoff_tree(logn);

    /*
     * We load the private key elements directly into the B0 matrix,
     * since B0 = [[g, -f], [G, -F]].
     */
    rf = b01;
    rg = b00;
    rF = b11;
    rG = b10;

    smallints_to_fpr(rf, f, logn);
    smallints_to_fpr(rg, g, logn);
    smallints_to_fpr(rF, F, logn);
    smallints_to_fpr(rG, G, logn);

    /*
     * Compute the FFT for the key elements, and negate f and F.
     */
    PQCLEAN_FALCON512_AVX2_FFT(rf, logn);
    PQCLEAN_FALCON512_AVX2_FFT(rg, logn);
    PQCLEAN_FALCON512_AVX2_FFT(rF, logn);
    PQCLEAN_FALCON512_AVX2_FFT(rG, logn);
    PQCLEAN_FALCON512_AVX2_poly_neg(rf, logn);
    PQCLEAN_FALCON512_AVX2_poly_neg(rF, logn);

    /*
     * The Gram matrix is G = B·B*. Formulas are:
     *   g00 = b00*adj(b00) + b01*adj(b01)
     *   g01 = b00*adj(b10) + b01*adj(b11)
     *   g10 = b10*adj(b00) + b11*adj(b01)
     *   g11 = b10*adj(b10) + b11*adj(b11)
     *
     * For historical reasons, this implementation uses
     * g00, g01 and g11 (upper triangle). gxx is a scratch
     * polynomial for the second term of each sum.
     */
    g00 = (fpr *)tmp;
    g01 = g00 + n;
    g11 = g01 + n;
    gxx = g11 + n;

    memcpy(g00, b00, n * sizeof * b00);
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(g00, logn);
    memcpy(gxx, b01, n * sizeof * b01);
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(gxx, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(g00, gxx, logn);

    memcpy(g01, b00, n * sizeof * b00);
    PQCLEAN_FALCON512_AVX2_poly_muladj_fft(g01, b10, logn);
    memcpy(gxx, b01, n * sizeof * b01);
    PQCLEAN_FALCON512_AVX2_poly_muladj_fft(gxx, b11, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(g01, gxx, logn);

    memcpy(g11, b10, n * sizeof * b10);
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(g11, logn);
    memcpy(gxx, b11, n * sizeof * b11);
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(gxx, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(g11, gxx, logn);

    /*
     * Compute the Falcon tree.
     */
    ffLDL_fft(tree, g00, g01, g11, logn, gxx);

    /*
     * Normalize tree.
     */
    ffLDL_binary_normalize(tree, logn);
}

/*
 * Signature of an integer Gaussian sampler: sample around center mu
 * with standard deviation sigma, using the opaque context ctx.
 */
typedef int (*samplerZ)(void *ctx, fpr mu, fpr sigma);

/*
 * Perform Fast Fourier Sampling for target vector t. The Gram matrix
 * is provided (G = [[g00, g01], [adj(g01), g11]]).
 * The sampled vector
 * is written over (t0,t1). The Gram matrix is modified as well. The
 * tmp[] buffer must have room for four polynomials.
 */
static void
ffSampling_fft_dyntree(samplerZ samp, void *samp_ctx,
                       fpr *t0, fpr *t1,
                       fpr *g00, fpr *g01, fpr *g11,
                       unsigned logn, fpr *tmp) {
    size_t n, hn;
    fpr *z0, *z1;

    /*
     * Deepest level: the LDL tree leaf value is just g00 (the
     * array has length only 1 at this point); we normalize it
     * with regard to sigma, then use it for sampling.
     */
    if (logn == 0) {
        fpr leaf;

        leaf = g00[0];
        leaf = fpr_mul(fpr_sqrt(leaf), fpr_inv_sigma);
        t0[0] = fpr_of(samp(samp_ctx, t0[0], leaf));
        t1[0] = fpr_of(samp(samp_ctx, t1[0], leaf));
        return;
    }

    n = (size_t)1 << logn;
    hn = n >> 1;

    /*
     * Decompose G into LDL. We only need d00 (identical to g00),
     * d11, and l10; we do that in place.
     */
    PQCLEAN_FALCON512_AVX2_poly_LDL_fft(g00, g01, g11, logn);

    /*
     * Split d00 and d11 and expand them into half-size quasi-cyclic
     * Gram matrices. We also save l10 in tmp[].
     */
    PQCLEAN_FALCON512_AVX2_poly_split_fft(tmp, tmp + hn, g00, logn);
    memcpy(g00, tmp, n * sizeof * tmp);
    PQCLEAN_FALCON512_AVX2_poly_split_fft(tmp, tmp + hn, g11, logn);
    memcpy(g11, tmp, n * sizeof * tmp);
    memcpy(tmp, g01, n * sizeof * g01);
    memcpy(g01, g00, hn * sizeof * g00);
    memcpy(g01 + hn, g11, hn * sizeof * g00);

    /*
     * The half-size Gram matrices for the recursive LDL tree
     * building are now:
     *   - left sub-tree:  g00, g00+hn, g01
     *   - right sub-tree: g11, g11+hn, g01+hn
     * l10 is in tmp[].
     */

    /*
     * We split t1 and use the first recursive call on the two
     * halves, using the right sub-tree. The result is merged
     * back into tmp + 2*n.
     */
    z1 = tmp + n;
    PQCLEAN_FALCON512_AVX2_poly_split_fft(z1, z1 + hn, t1, logn);
    ffSampling_fft_dyntree(samp, samp_ctx, z1, z1 + hn,
                           g11, g11 + hn, g01 + hn, logn - 1, z1 + n);
    PQCLEAN_FALCON512_AVX2_poly_merge_fft(tmp + (n << 1), z1, z1 + hn, logn);

    /*
     * Compute tb0 = t0 + (t1 - z1) * l10.
     * At that point, l10 is in tmp, t1 is unmodified, and z1 is
     * in tmp + (n << 1). The buffer in z1 is free.
     *
     * In the end, z1 is written over t1, and tb0 is in t0.
     */
    memcpy(z1, t1, n * sizeof * t1);
    PQCLEAN_FALCON512_AVX2_poly_sub(z1, tmp + (n << 1), logn);
    memcpy(t1, tmp + (n << 1), n * sizeof * tmp);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(tmp, z1, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(t0, tmp, logn);

    /*
     * Second recursive invocation, on the split tb0 (currently in t0)
     * and the left sub-tree.
     */
    z0 = tmp;
    PQCLEAN_FALCON512_AVX2_poly_split_fft(z0, z0 + hn, t0, logn);
    ffSampling_fft_dyntree(samp, samp_ctx, z0, z0 + hn,
                           g00, g00 + hn, g01, logn - 1, z0 + n);
    PQCLEAN_FALCON512_AVX2_poly_merge_fft(t0, z0, z0 + hn, logn);
}

/*
 * Perform Fast Fourier Sampling for target vector t and LDL tree T.
 * tmp[] must have size for at least two polynomials of size 2^logn.
 */
static void
ffSampling_fft(samplerZ samp, void *samp_ctx,
               fpr *z0, fpr *z1,
               const fpr *tree,
               const fpr *t0, const fpr *t1, unsigned logn,
               fpr *tmp) {
    size_t n, hn;
    const fpr *tree0, *tree1;

    /*
     * When logn == 2, we inline the last two recursion levels,
     * using SSE2 registers (two fpr values per __m128d).
     */
    if (logn == 2) {
        fpr w0, w1, w2, w3, sigma;
        __m128d ww0, ww1, wa, wb, wc, wd;
        __m128d wy0, wy1, wz0, wz1;
        __m128d half, invsqrt8, invsqrt2, neghi, neglo;
        int si0, si1, si2, si3;

        tree0 = tree + 4;
        tree1 = tree + 8;

        /* Constants: 1/2, 1/sqrt(8), 1/sqrt(2), and sign masks
           for negating the high / low lane of a __m128d. */
        half = _mm_set1_pd(0.5);
        invsqrt8 = _mm_set1_pd(0.353553390593273762200422181052);
        invsqrt2 = _mm_set1_pd(0.707106781186547524400844362105);
        neghi = _mm_set_pd(-0.0, 0.0);
        neglo = _mm_set_pd(0.0, -0.0);

        /*
         * We split t1 into w*, then do the recursive invocation,
         * with output in w*. We finally merge back into z1.
         */
        ww0 = _mm_loadu_pd(&t1[0].v);
        ww1 = _mm_loadu_pd(&t1[2].v);
        wa = _mm_unpacklo_pd(ww0, ww1);
        wb = _mm_unpackhi_pd(ww0, ww1);
        wc = _mm_add_pd(wa, wb);
        ww0 = _mm_mul_pd(wc, half);
        wc = _mm_sub_pd(wa, wb);
        wd = _mm_xor_pd(_mm_permute_pd(wc, 1), neghi);
        ww1 = _mm_mul_pd(_mm_add_pd(wc, wd), invsqrt8);

        w2.v = _mm_cvtsd_f64(ww1);
        w3.v = _mm_cvtsd_f64(_mm_permute_pd(ww1, 1));
        wa = ww1;
        sigma = tree1[3];
        si2 = samp(samp_ctx, w2, sigma);
        si3 = samp(samp_ctx, w3, sigma);
        ww1 = _mm_set_pd((double)si3, (double)si2);
        wa = _mm_sub_pd(wa, ww1);
        wb = _mm_loadu_pd(&tree1[0].v);
        wc = _mm_mul_pd(wa, wb);
        wd = _mm_mul_pd(wa, _mm_permute_pd(wb, 1));
        wa = _mm_unpacklo_pd(wc, wd);
        wb = _mm_unpackhi_pd(wc, wd);
        ww0 = _mm_add_pd(ww0, _mm_add_pd(wa, _mm_xor_pd(wb, neglo)));
        w0.v = _mm_cvtsd_f64(ww0);
        w1.v = _mm_cvtsd_f64(_mm_permute_pd(ww0, 1));
        sigma = tree1[2];
        si0 = samp(samp_ctx, w0, sigma);
        si1 = samp(samp_ctx, w1, sigma);
        ww0 = _mm_set_pd((double)si1, (double)si0);

        /* Merge the four sampled integers back into z1 (FFT order). */
        wc = _mm_mul_pd(
                 _mm_set_pd((double)(si2 + si3), (double)(si2 - si3)),
                 invsqrt2);
        wa = _mm_add_pd(ww0, wc);
        wb = _mm_sub_pd(ww0, wc);
        ww0 = _mm_unpacklo_pd(wa, wb);
        ww1 = _mm_unpackhi_pd(wa, wb);
        _mm_storeu_pd(&z1[0].v, ww0);
        _mm_storeu_pd(&z1[2].v, ww1);

        /*
         * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in w*.
         */
        wy0 = _mm_sub_pd(_mm_loadu_pd(&t1[0].v), ww0);
        wy1 = _mm_sub_pd(_mm_loadu_pd(&t1[2].v), ww1);
        wz0 = _mm_loadu_pd(&tree[0].v);
        wz1 = _mm_loadu_pd(&tree[2].v);
        ww0 = _mm_sub_pd(_mm_mul_pd(wy0, wz0), _mm_mul_pd(wy1, wz1));
        ww1 = _mm_add_pd(_mm_mul_pd(wy0, wz1), _mm_mul_pd(wy1, wz0));
        ww0 = _mm_add_pd(ww0, _mm_loadu_pd(&t0[0].v));
        ww1 = _mm_add_pd(ww1, _mm_loadu_pd(&t0[2].v));

        /*
         * Second recursive invocation (same inlined pattern, with
         * the left sub-tree tree0, output into z0).
         */
        wa = _mm_unpacklo_pd(ww0, ww1);
        wb = _mm_unpackhi_pd(ww0, ww1);
        wc = _mm_add_pd(wa, wb);
        ww0 = _mm_mul_pd(wc, half);
        wc = _mm_sub_pd(wa, wb);
        wd = _mm_xor_pd(_mm_permute_pd(wc, 1), neghi);
        ww1 = _mm_mul_pd(_mm_add_pd(wc, wd), invsqrt8);

        w2.v = _mm_cvtsd_f64(ww1);
        w3.v = _mm_cvtsd_f64(_mm_permute_pd(ww1, 1));
        wa = ww1;
        sigma = tree0[3];
        si2 = samp(samp_ctx, w2, sigma);
        si3 = samp(samp_ctx, w3, sigma);
        ww1 = _mm_set_pd((double)si3, (double)si2);
        wa = _mm_sub_pd(wa, ww1);
        wb = _mm_loadu_pd(&tree0[0].v);
        wc = _mm_mul_pd(wa, wb);
        wd = _mm_mul_pd(wa, _mm_permute_pd(wb, 1));
        wa = _mm_unpacklo_pd(wc, wd);
        wb = _mm_unpackhi_pd(wc, wd);
        ww0 = _mm_add_pd(ww0, _mm_add_pd(wa, _mm_xor_pd(wb, neglo)));
        w0.v = _mm_cvtsd_f64(ww0);
        w1.v = _mm_cvtsd_f64(_mm_permute_pd(ww0, 1));
        sigma = tree0[2];
        si0 = samp(samp_ctx, w0, sigma);
        si1 = samp(samp_ctx, w1, sigma);
        ww0 = _mm_set_pd((double)si1, (double)si0);

        wc = _mm_mul_pd(
                 _mm_set_pd((double)(si2 + si3), (double)(si2 - si3)),
                 invsqrt2);
        wa = _mm_add_pd(ww0, wc);
        wb = _mm_sub_pd(ww0, wc);
        ww0 = _mm_unpacklo_pd(wa, wb);
        ww1 = _mm_unpackhi_pd(wa, wb);
        _mm_storeu_pd(&z0[0].v, ww0);
        _mm_storeu_pd(&z0[2].v, ww1);

        return;
    }

    /*
     * Case logn == 1 is reachable only when using Falcon-2 (the
     * smallest size for which Falcon is mathematically defined, but
     * of course way too insecure to be of any use).
     */
    if (logn == 1) {
        fpr x0, x1, y0, y1, sigma;
        fpr a_re, a_im, b_re, b_im, c_re, c_im;

        x0 = t1[0];
        x1 = t1[1];
        sigma = tree[3];
        z1[0] = y0 = fpr_of(samp(samp_ctx, x0, sigma));
        z1[1] = y1 = fpr_of(samp(samp_ctx, x1, sigma));
        /* tb0 = t0 + (t1 - z1) * L, complex multiply by tree[0..1]. */
        a_re = fpr_sub(x0, y0);
        a_im = fpr_sub(x1, y1);
        b_re = tree[0];
        b_im = tree[1];
        c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
        c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
        x0 = fpr_add(c_re, t0[0]);
        x1 = fpr_add(c_im, t0[1]);
        sigma = tree[2];
        z0[0] = fpr_of(samp(samp_ctx, x0, sigma));
        z0[1] = fpr_of(samp(samp_ctx, x1, sigma));

        return;
    }

    /*
     * Normal end of recursion is for logn == 0. Since the last
     * steps of the recursions were inlined in the blocks above
     * (when logn == 1 or 2), this case is not reachable, and is
     * retained here only for documentation purposes.

    if (logn == 0) {
        fpr x0, x1, sigma;

        x0 = t0[0];
        x1 = t1[0];
        sigma = tree[0];
        z0[0] = fpr_of(samp(samp_ctx, x0, sigma));
        z1[0] = fpr_of(samp(samp_ctx, x1, sigma));
        return;
    }

     */

    /*
     * General recursive case (logn >= 3).
     */

    n = (size_t)1 << logn;
    hn = n >> 1;
    tree0 = tree + n;
    tree1 = tree + n + ffLDL_treesize(logn - 1);

    /*
     * We split t1 into z1 (reused as temporary storage), then do
     * the recursive invocation, with output in tmp. We finally
     * merge back into z1.
     */
    PQCLEAN_FALCON512_AVX2_poly_split_fft(z1, z1 + hn, t1, logn);
    ffSampling_fft(samp, samp_ctx, tmp, tmp + hn,
                   tree1, z1, z1 + hn, logn - 1, tmp + n);
    PQCLEAN_FALCON512_AVX2_poly_merge_fft(z1, tmp, tmp + hn, logn);

    /*
     * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in tmp[].
     */
    memcpy(tmp, t1, n * sizeof * t1);
    PQCLEAN_FALCON512_AVX2_poly_sub(tmp, z1, logn);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(tmp, tree, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(tmp, t0, logn);

    /*
     * Second recursive invocation.
     */
    PQCLEAN_FALCON512_AVX2_poly_split_fft(z0, z0 + hn, tmp, logn);
    ffSampling_fft(samp, samp_ctx, tmp, tmp + hn,
                   tree0, z0, z0 + hn, logn - 1, tmp + n);
    PQCLEAN_FALCON512_AVX2_poly_merge_fft(z0, tmp, tmp + hn, logn);
}

/*
 * Compute a signature: the signature contains two vectors, s1 and s2.
 * The s1 vector is not returned. The squared norm of (s1,s2) is
 * computed, and if it is short enough, then s2 is returned into the
 * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is
 * returned; the caller should then try again. This function uses an
 * expanded key.
 *
 * tmp[] must have room for at least six polynomials.
 */
static int
do_sign_tree(samplerZ samp, void *samp_ctx, int16_t *s2,
             const fpr *expanded_key,
             const uint16_t *hm,
             unsigned logn, fpr *tmp) {
    size_t n, u;
    fpr *t0, *t1, *tx, *ty;
    const fpr *b00, *b01, *b10, *b11, *tree;
    fpr ni;
    uint32_t sqn, ng;
    int16_t *s1tmp, *s2tmp;

    n = MKN(logn);
    t0 = tmp;
    t1 = t0 + n;
    b00 = expanded_key + skoff_b00(logn);
    b01 = expanded_key + skoff_b01(logn);
    b10 = expanded_key + skoff_b10(logn);
    b11 = expanded_key + skoff_b11(logn);
    tree = expanded_key + skoff_tree(logn);

    /*
     * Set the target vector to [hm, 0] (hm is the hashed message).
     */
    for (u = 0; u < n; u ++) {
        t0[u] = fpr_of(hm[u]);
        /* This is implicit.
        t1[u] = fpr_zero;
        */
    }

    /*
     * Apply the lattice basis to obtain the real target
     * vector (after normalization with regard to the modulus).
     */
    PQCLEAN_FALCON512_AVX2_FFT(t0, logn);
    ni = fpr_inverse_of_q;
    memcpy(t1, t0, n * sizeof * t0);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(t1, b01, logn);
    PQCLEAN_FALCON512_AVX2_poly_mulconst(t1, fpr_neg(ni), logn);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(t0, b11, logn);
    PQCLEAN_FALCON512_AVX2_poly_mulconst(t0, ni, logn);

    tx = t1 + n;
    ty = tx + n;

    /*
     * Apply sampling. Output is written back in [tx, ty].
     */
    ffSampling_fft(samp, samp_ctx, tx, ty, tree, t0, t1, logn, ty + n);

    /*
     * Get the lattice point corresponding to that tiny vector.
     */
    memcpy(t0, tx, n * sizeof * tx);
    memcpy(t1, ty, n * sizeof * ty);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(tx, b00, logn);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(ty, b10, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(tx, ty, logn);
    memcpy(ty, t0, n * sizeof * t0);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(ty, b01, logn);

    memcpy(t0, tx, n * sizeof * tx);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(t1, b11, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(t1, ty, logn);

    PQCLEAN_FALCON512_AVX2_iFFT(t0, logn);
    PQCLEAN_FALCON512_AVX2_iFFT(t1, logn);

    /*
     * Compute the signature: s1 = hm - round(t0), accumulating the
     * squared norm in sqn; ng tracks overflow (its sign bit is set
     * if any addition wrapped, in which case sqn saturates below).
     */
    s1tmp = (int16_t *)tx;
    sqn = 0;
    ng = 0;
    for (u = 0; u < n; u ++) {
        int32_t z;

        z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]);
        sqn += (uint32_t)(z * z);
        ng |= sqn;
        s1tmp[u] = (int16_t)z;
    }
    sqn |= -(ng >> 31);

    /*
     * With "normal" degrees (e.g. 512 or 1024), it is very
     * improbable that the computed vector is not short enough;
     * however, it may happen in practice for the very reduced
     * versions (e.g. degree 16 or below). In that case, the caller
     * will loop, and we must not write anything into s2[] because
     * s2[] may overlap with the hashed message hm[] and we need
     * hm[] for the next iteration.
     */
    s2tmp = (int16_t *)tmp;
    for (u = 0; u < n; u ++) {
        s2tmp[u] = (int16_t) - fpr_rint(t1[u]);
    }
    if (PQCLEAN_FALCON512_AVX2_is_short_half(sqn, s2tmp, logn)) {
        memcpy(s2, s2tmp, n * sizeof * s2);
        memcpy(tmp, s1tmp, n * sizeof * s1tmp);
        return 1;
    }
    return 0;
}

/*
 * Compute a signature: the signature contains two vectors, s1 and s2.
 * The s1 vector is not returned. The squared norm of (s1,s2) is
 * computed, and if it is short enough, then s2 is returned into the
 * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is
 * returned; the caller should then try again.
 *
 * tmp[] must have room for at least nine polynomials.
 */
static int
do_sign_dyn(samplerZ samp, void *samp_ctx, int16_t *s2,
            const int8_t *f, const int8_t *g,
            const int8_t *F, const int8_t *G,
            const uint16_t *hm, unsigned logn, fpr *tmp) {
    size_t n, u;
    fpr *t0, *t1, *tx, *ty;
    fpr *b00, *b01, *b10, *b11, *g00, *g01, *g11;
    fpr ni;
    uint32_t sqn, ng;
    int16_t *s1tmp, *s2tmp;

    n = MKN(logn);

    /*
     * Lattice basis is B = [[g, -f], [G, -F]]. We convert it to FFT.
     */
    b00 = tmp;
    b01 = b00 + n;
    b10 = b01 + n;
    b11 = b10 + n;
    smallints_to_fpr(b01, f, logn);
    smallints_to_fpr(b00, g, logn);
    smallints_to_fpr(b11, F, logn);
    smallints_to_fpr(b10, G, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b01, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b00, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b11, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b10, logn);
    PQCLEAN_FALCON512_AVX2_poly_neg(b01, logn);
    PQCLEAN_FALCON512_AVX2_poly_neg(b11, logn);

    /*
     * Compute the Gram matrix G = B·B*. Formulas are:
     *   g00 = b00*adj(b00) + b01*adj(b01)
     *   g01 = b00*adj(b10) + b01*adj(b11)
     *   g10 = b10*adj(b00) + b11*adj(b01)
     *   g11 = b10*adj(b10) + b11*adj(b11)
     *
     * For historical reasons, this implementation uses
     * g00, g01 and g11 (upper triangle). g10 is not kept
     * since it is equal to adj(g01).
     *
     * We _replace_ the matrix B with the Gram matrix, but we
     * must keep b01 and b11 for computing the target vector.
     */
    t0 = b11 + n;
    t1 = t0 + n;

    memcpy(t0, b01, n * sizeof * b01);
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(t0, logn);    // t0 <- b01*adj(b01)

    memcpy(t1, b00, n * sizeof * b00);
    PQCLEAN_FALCON512_AVX2_poly_muladj_fft(t1, b10, logn);   // t1 <- b00*adj(b10)
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(b00, logn);   // b00 <- b00*adj(b00)
    PQCLEAN_FALCON512_AVX2_poly_add(b00, t0, logn);      // b00 <- g00
    memcpy(t0, b01, n * sizeof * b01);
    PQCLEAN_FALCON512_AVX2_poly_muladj_fft(b01, b11, logn);  // b01 <- b01*adj(b11)
    PQCLEAN_FALCON512_AVX2_poly_add(b01, t1, logn);      // b01 <- g01

    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(b10, logn);   // b10 <- b10*adj(b10)
    memcpy(t1, b11, n * sizeof * b11);
    PQCLEAN_FALCON512_AVX2_poly_mulselfadj_fft(t1, logn);    // t1 <- b11*adj(b11)
    PQCLEAN_FALCON512_AVX2_poly_add(b10, t1, logn);      // b10 <- g11

    /*
     * We rename variables to make things clearer. The three elements
     * of the Gram matrix use the first 3*n slots of tmp[], followed
     * by b11 and b01 (in that order). Note that b01 now points at the
     * copy of the original b01 saved in t0 above.
     */
    g00 = b00;
    g01 = b01;
    g11 = b10;
    b01 = t0;
    t0 = b01 + n;
    t1 = t0 + n;

    /*
     * Memory layout at that point:
     *   g00 g01 g11 b11 b01 t0 t1
     */

    /*
     * Set the target vector to [hm, 0] (hm is the hashed message).
     */
    for (u = 0; u < n; u ++) {
        t0[u] = fpr_of(hm[u]);
        /* This is implicit.
        t1[u] = fpr_zero;
        */
    }

    /*
     * Apply the lattice basis to obtain the real target
     * vector (after normalization with regard to the modulus).
     */
    PQCLEAN_FALCON512_AVX2_FFT(t0, logn);
    ni = fpr_inverse_of_q;
    memcpy(t1, t0, n * sizeof * t0);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(t1, b01, logn);
    PQCLEAN_FALCON512_AVX2_poly_mulconst(t1, fpr_neg(ni), logn);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(t0, b11, logn);
    PQCLEAN_FALCON512_AVX2_poly_mulconst(t0, ni, logn);

    /*
     * b01 and b11 can be discarded, so we move back (t0,t1).
     * Memory layout is now:
     *   g00 g01 g11 t0 t1
     */
    memcpy(b11, t0, n * 2 * sizeof * t0);
    t0 = g11 + n;
    t1 = t0 + n;

    /*
     * Apply sampling; result is written over (t0,t1).
     */
    ffSampling_fft_dyntree(samp, samp_ctx,
                           t0, t1, g00, g01, g11, logn, t1 + n);

    /*
     * We arrange the layout back to:
     *   b00 b01 b10 b11 t0 t1
     *
     * We did not preserve the matrix basis, so we must recompute
     * it now.
     */
    b00 = tmp;
    b01 = b00 + n;
    b10 = b01 + n;
    b11 = b10 + n;
    memmove(b11 + n, t0, n * 2 * sizeof * t0);
    t0 = b11 + n;
    t1 = t0 + n;
    smallints_to_fpr(b01, f, logn);
    smallints_to_fpr(b00, g, logn);
    smallints_to_fpr(b11, F, logn);
    smallints_to_fpr(b10, G, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b01, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b00, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b11, logn);
    PQCLEAN_FALCON512_AVX2_FFT(b10, logn);
    PQCLEAN_FALCON512_AVX2_poly_neg(b01, logn);
    PQCLEAN_FALCON512_AVX2_poly_neg(b11, logn);
    tx = t1 + n;
    ty = tx + n;

    /*
     * Get the lattice point corresponding to that tiny vector.
     */
    memcpy(tx, t0, n * sizeof * t0);
    memcpy(ty, t1, n * sizeof * t1);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(tx, b00, logn);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(ty, b10, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(tx, ty, logn);
    memcpy(ty, t0, n * sizeof * t0);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(ty, b01, logn);

    memcpy(t0, tx, n * sizeof * tx);
    PQCLEAN_FALCON512_AVX2_poly_mul_fft(t1, b11, logn);
    PQCLEAN_FALCON512_AVX2_poly_add(t1, ty, logn);
    PQCLEAN_FALCON512_AVX2_iFFT(t0, logn);
    PQCLEAN_FALCON512_AVX2_iFFT(t1, logn);

    /*
     * s1 = hm - round(t0); accumulate the squared norm in sqn, with
     * ng tracking overflow (sqn saturates to 0xFFFFFFFF below if any
     * addition wrapped).
     */
    s1tmp = (int16_t *)tx;
    sqn = 0;
    ng = 0;
    for (u = 0; u < n; u ++) {
        int32_t z;

        z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]);
        sqn += (uint32_t)(z * z);
        ng |= sqn;
        s1tmp[u] = (int16_t)z;
    }
    sqn |= -(ng >> 31);

    /*
     * With "normal" degrees (e.g. 512 or 1024), it is very
     * improbable that the computed vector is not short enough;
     * however, it may happen in practice for the very reduced
     * versions (e.g. degree 16 or below). In that case, the caller
     * will loop, and we must not write anything into s2[] because
     * s2[] may overlap with the hashed message hm[] and we need
     * hm[] for the next iteration.
     */
    s2tmp = (int16_t *)tmp;
    for (u = 0; u < n; u ++) {
        s2tmp[u] = (int16_t) - fpr_rint(t1[u]);
    }
    if (PQCLEAN_FALCON512_AVX2_is_short_half(sqn, s2tmp, logn)) {
        memcpy(s2, s2tmp, n * sizeof * s2);
        memcpy(tmp, s1tmp, n * sizeof * s1tmp);
        return 1;
    }
    return 0;
}

/*
 * Sample an integer value along a half-gaussian distribution centered
 * on zero and standard deviation 1.8205, with a precision of 72 bits.
 */
int
PQCLEAN_FALCON512_AVX2_gaussian0_sampler(prng *p) {

    /*
     * High words (top 15 bits) of the 19 cumulative distribution
     * entries; the remaining lanes are zero padding.
     */
    static const union {
        uint16_t u16[16];
        __m256i ymm[1];
    } rhi15 = {
        {
            0x51FB, 0x2A69, 0x113E, 0x0568,
            0x014A, 0x003B, 0x0008, 0x0000,
            0x0000, 0x0000, 0x0000, 0x0000,
            0x0000, 0x0000, 0x0000, 0x0000
        }
    };

    /*
     * Low words (57 bits) of the 19 entries (last one is zero
     * padding to a multiple of four).
     */
    static const union {
        uint64_t u64[20];
        __m256i ymm[5];
    } rlo57 = {
        {
            0x1F42ED3AC391802, 0x12B181F3F7DDB82,
            0x1CDD0934829C1FF, 0x1754377C7994AE4,
            0x1846CAEF33F1F6F, 0x14AC754ED74BD5F,
            0x024DD542B776AE4, 0x1A1FFDC65AD63DA,
            0x01F80D88A7B6428, 0x001C3FDB2040C69,
            0x00012CF24D031FB, 0x00000949F8B091F,
            0x0000003665DA998, 0x00000000EBF6EBB,
            0x0000000002F5D7E, 0x000000000007098,
            0x0000000000000C6, 0x000000000000001,
            0x000000000000000, 0x000000000000000
        }
    };

    uint64_t lo;
    unsigned hi;
    __m256i xhi, rhi, gthi, eqhi, eqm;
    __m256i xlo, gtlo0, gtlo1, gtlo2, gtlo3, gtlo4;
    __m128i t, zt;
    int r;

    /*
     * Get a 72-bit random value and split it into a low part
     * (57 bits) and a high part (15 bits)
     */
    lo = prng_get_u64(p);
    hi = prng_get_u8(p);
    hi = (hi << 7) | (unsigned)(lo >> 57);
    lo &= 0x1FFFFFFFFFFFFFF;

    /*
     * Broadcast the high part and compare it with the relevant
     * values. We need both a "greater than" and an "equal"
     * comparisons.
     */
    xhi = _mm256_broadcastw_epi16(_mm_cvtsi32_si128((int32_t)hi));
    rhi = _mm256_loadu_si256(&rhi15.ymm[0]);
    gthi = _mm256_cmpgt_epi16(rhi, xhi);
    eqhi = _mm256_cmpeq_epi16(rhi, xhi);

    /*
     * The result is the number of 72-bit values (among the list of 19)
     * which are greater than the 72-bit random value. We first count
     * all non-zero 16-bit elements in the first eight of gthi. Such
     * elements have value -1 or 0, so we first negate them (the
     * logical shift by 15 maps -1 to 1 and 0 to 0).
     */
    t = _mm_srli_epi16(_mm256_castsi256_si128(gthi), 15);
    zt = _mm_setzero_si128();
    t = _mm_hadd_epi16(t, zt);
    t = _mm_hadd_epi16(t, zt);
    t = _mm_hadd_epi16(t, zt);
    r = _mm_cvtsi128_si32(t);

    /*
     * We must look at the low bits for all values for which the
     * high bits are an "equal" match; values 8-18 all have the
     * same high bits (0).
     * On 32-bit systems, 'lo' really is two registers, requiring
     * some extra code.
     */
    xlo = _mm256_broadcastq_epi64(_mm_cvtsi64_si128(*(int64_t *)&lo));
    gtlo0 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[0]), xlo);
    gtlo1 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[1]), xlo);
    gtlo2 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[2]), xlo);
    gtlo3 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[3]), xlo);
    gtlo4 = _mm256_cmpgt_epi64(_mm256_loadu_si256(&rlo57.ymm[4]), xlo);

    /*
     * Keep only comparison results that correspond to the non-zero
     * elements in eqhi.
     */
    gtlo0 = _mm256_and_si256(gtlo0, _mm256_cvtepi16_epi64(
                                 _mm256_castsi256_si128(eqhi)));
    gtlo1 = _mm256_and_si256(gtlo1, _mm256_cvtepi16_epi64(
                                 _mm256_castsi256_si128(_mm256_bsrli_epi128(eqhi, 8))));
    eqm = _mm256_permute4x64_epi64(eqhi, 0xFF);
    gtlo2 = _mm256_and_si256(gtlo2, eqm);
    gtlo3 = _mm256_and_si256(gtlo3, eqm);
    gtlo4 = _mm256_and_si256(gtlo4, eqm);

    /*
     * Add all values to count the total number of "-1" elements.
     * Since the first eight "high" words are all different, only
     * one element (at most) in gtlo0:gtlo1 can be non-zero; however,
     * if the high word of the random value is zero, then many
     * elements of gtlo2:gtlo3:gtlo4 can be non-zero.
     */
    gtlo0 = _mm256_or_si256(gtlo0, gtlo1);
    gtlo0 = _mm256_add_epi64(
                _mm256_add_epi64(gtlo0, gtlo2),
                _mm256_add_epi64(gtlo3, gtlo4));
    t = _mm_add_epi64(
            _mm256_castsi256_si128(gtlo0),
            _mm256_extracti128_si256(gtlo0, 1));
    t = _mm_add_epi64(t, _mm_srli_si128(t, 8));
    r -= _mm_cvtsi128_si32(t);

    return r;

}

/*
 * Sample a bit with probability exp(-x) for some x >= 0.
 */
static int
BerExp(prng *p, fpr x, fpr ccs) {
    int s, i;
    fpr r;
    uint32_t sw, w;
    uint64_t z;

    /*
     * Reduce x modulo log(2): x = s*log(2) + r, with s an integer,
     * and 0 <= r < log(2). Since x >= 0, we can use fpr_trunc().
     */
    s = (int)fpr_trunc(fpr_mul(x, fpr_inv_log2));
    r = fpr_sub(x, fpr_mul(fpr_of(s), fpr_log2));

    /*
     * It may happen (quite rarely) that s >= 64; if sigma = 1.2
     * (the minimum value for sigma), r = 0 and b = 1, then we get
     * s >= 64 if the half-Gaussian produced a z >= 13, which happens
     * with probability about 0.000000000230383991, which is
     * approximately equal to 2^(-32). In any case, if s >= 64,
     * then BerExp will be non-zero with probability less than
     * 2^(-64), so we can simply saturate s at 63.
     * (The expression below is a branchless constant-time min.)
     */
    sw = (uint32_t)s;
    sw ^= (sw ^ 63) & -((63 - sw) >> 31);
    s = (int)sw;

    /*
     * Compute exp(-r); we know that 0 <= r < log(2) at this point, so
     * we can use fpr_expm_p63(), which yields a result scaled to 2^63.
     * We scale it up to 2^64, then right-shift it by s bits because
     * we really want exp(-x) = 2^(-s)*exp(-r).
     *
     * The "-1" operation makes sure that the value fits on 64 bits
     * (i.e. if r = 0, we may get 2^64, and we prefer 2^64-1 in that
     * case). The bias is negligible since fpr_expm_p63() only computes
     * with 51 bits of precision or so.
     */
    z = ((fpr_expm_p63(r, ccs) << 1) - 1) >> s;

    /*
     * Sample a bit with probability exp(-x). Since x = s*log(2) + r,
     * exp(-x) = 2^-s * exp(-r), we compare lazily exp(-x) with the
     * PRNG output to limit its consumption, the sign of the difference
     * yields the expected result.
     */
    i = 64;
    do {
        i -= 8;
        w = prng_get_u8(p) - ((uint32_t)(z >> i) & 0xFF);
    } while (!w && i > 0);
    return (int)(w >> 31);
}

/*
 * The sampler produces a random integer that follows a discrete Gaussian
 * distribution, centered on mu, and with standard deviation sigma. The
 * provided parameter isigma is equal to 1/sigma.
 *
 * The value of sigma MUST lie between 1 and 2 (i.e. isigma lies between
 * 0.5 and 1); in Falcon, sigma should always be between 1.2 and 1.9.
 */
int
PQCLEAN_FALCON512_AVX2_sampler(void *ctx, fpr mu, fpr isigma) {
    sampler_context *spc;
    int s, z0, z, b;
    fpr r, dss, ccs, x;

    spc = ctx;

    /*
     * Center is mu. We compute mu = s + r where s is an integer
     * and 0 <= r < 1.
     */
    s = (int)fpr_floor(mu);
    r = fpr_sub(mu, fpr_of(s));

    /*
     * dss = 1/(2*sigma^2) = 0.5*(isigma^2).
     */
    dss = fpr_half(fpr_sqr(isigma));

    /*
     * ccs = sigma_min / sigma = sigma_min * isigma.
     */
    ccs = fpr_mul(isigma, spc->sigma_min);

    /*
     * We now need to sample on center r.
     * (NOTE: this function continues past this chunk.)
     */
    for (;;) {
        /*
         * Sample z for a Gaussian distribution. Then get a
         * random bit b to turn the sampling into a bimodal
         * distribution: if b = 1, we use z+1, otherwise we
         * use -z. We thus have two situations:
         *
         *  - b = 1: z >= 1 and sampled against a Gaussian
         *    centered on 1.
         *  - b = 0: z <= 0 and sampled against a Gaussian
         *    centered on 0.
         */
        z0 = PQCLEAN_FALCON512_AVX2_gaussian0_sampler(&spc->p);
        b = (int)prng_get_u8(&spc->p) & 1;
        z = b + ((b << 1) - 1) * z0;

        /*
         * Rejection sampling. We want a Gaussian centered on r;
         * but we sampled against a Gaussian centered on b (0 or
         * 1).
But we know that z is always in the range where + * our sampling distribution is greater than the Gaussian + * distribution, so rejection works. + * + * We got z with distribution: + * G(z) = exp(-((z-b)^2)/(2*sigma0^2)) + * We target distribution: + * S(z) = exp(-((z-r)^2)/(2*sigma^2)) + * Rejection sampling works by keeping the value z with + * probability S(z)/G(z), and starting again otherwise. + * This requires S(z) <= G(z), which is the case here. + * Thus, we simply need to keep our z with probability: + * P = exp(-x) + * where: + * x = ((z-r)^2)/(2*sigma^2) - ((z-b)^2)/(2*sigma0^2) + * + * Here, we scale up the Bernouilli distribution, which + * makes rejection more probable, but makes rejection + * rate sufficiently decorrelated from the Gaussian + * center and standard deviation that the whole sampler + * can be said to be constant-time. + */ + x = fpr_mul(fpr_sqr(fpr_sub(fpr_of(z), r)), dss); + x = fpr_sub(x, fpr_mul(fpr_of(z0 * z0), fpr_inv_2sqrsigma0)); + if (BerExp(&spc->p, x, ccs)) { + /* + * Rejection sampling was centered on r, but the + * actual center is mu = s + r. + */ + return s + z; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_sign_tree(int16_t *sig, inner_shake256_context *rng, + const fpr *expanded_key, + const uint16_t *hm, unsigned logn, uint8_t *tmp) { + fpr *ftmp; + + ftmp = (fpr *)tmp; + for (;;) { + /* + * Signature produces short vectors s1 and s2. The + * signature is acceptable only if the aggregate vector + * s1,s2 is short; we must use the same bound as the + * verifier. + * + * If the signature is acceptable, then we return only s2 + * (the verifier recomputes s1 from s2, the hashed message, + * and the public key). + */ + sampler_context spc; + samplerZ samp; + void *samp_ctx; + + /* + * Normal sampling. We use a fast PRNG seeded from our + * SHAKE context ('rng'). 
+ */ + if (logn == 10) { + spc.sigma_min = fpr_sigma_min_10; + } else { + spc.sigma_min = fpr_sigma_min_9; + } + PQCLEAN_FALCON512_AVX2_prng_init(&spc.p, rng); + samp = PQCLEAN_FALCON512_AVX2_sampler; + samp_ctx = &spc; + + /* + * Do the actual signature. + */ + if (do_sign_tree(samp, samp_ctx, sig, + expanded_key, hm, logn, ftmp)) { + break; + } + } +} + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_sign_dyn(int16_t *sig, inner_shake256_context *rng, + const int8_t *f, const int8_t *g, + const int8_t *F, const int8_t *G, + const uint16_t *hm, unsigned logn, uint8_t *tmp) { + fpr *ftmp; + + ftmp = (fpr *)tmp; + for (;;) { + /* + * Signature produces short vectors s1 and s2. The + * signature is acceptable only if the aggregate vector + * s1,s2 is short; we must use the same bound as the + * verifier. + * + * If the signature is acceptable, then we return only s2 + * (the verifier recomputes s1 from s2, the hashed message, + * and the public key). + */ + sampler_context spc; + samplerZ samp; + void *samp_ctx; + + /* + * Normal sampling. We use a fast PRNG seeded from our + * SHAKE context ('rng'). + */ + if (logn == 10) { + spc.sigma_min = fpr_sigma_min_10; + } else { + spc.sigma_min = fpr_sigma_min_9; + } + PQCLEAN_FALCON512_AVX2_prng_init(&spc.p, rng); + samp = PQCLEAN_FALCON512_AVX2_sampler; + samp_ctx = &spc; + + /* + * Do the actual signature. + */ + if (do_sign_dyn(samp, samp_ctx, sig, + f, g, F, G, hm, logn, ftmp)) { + break; + } + } +} diff --git a/crypto_sign/falcon-512/avx2/vrfy.c b/crypto_sign/falcon-512/avx2/vrfy.c new file mode 100644 index 00000000..16fa9576 --- /dev/null +++ b/crypto_sign/falcon-512/avx2/vrfy.c @@ -0,0 +1,853 @@ +#include "inner.h" + +/* + * Falcon signature verification. 
+ * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2017-2019 Falcon Project + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + + +/* ===================================================================== */ +/* + * Constants for NTT. + * + * n = 2^logn (2 <= n <= 1024) + * phi = X^n + 1 + * q = 12289 + * q0i = -1/q mod 2^16 + * R = 2^16 mod q + * R2 = 2^32 mod q + */ + +#define Q 12289 +#define Q0I 12287 +#define R 4091 +#define R2 10952 + +/* + * Table for NTT, binary case: + * GMb[x] = R*(g^rev(x)) mod q + * where g = 7 (it is a 2048-th primitive root of 1 modulo q) + * and rev() is the bit-reversal function over 10 bits. 
+ */ +static const uint16_t GMb[] = { + 4091, 7888, 11060, 11208, 6960, 4342, 6275, 9759, + 1591, 6399, 9477, 5266, 586, 5825, 7538, 9710, + 1134, 6407, 1711, 965, 7099, 7674, 3743, 6442, + 10414, 8100, 1885, 1688, 1364, 10329, 10164, 9180, + 12210, 6240, 997, 117, 4783, 4407, 1549, 7072, + 2829, 6458, 4431, 8877, 7144, 2564, 5664, 4042, + 12189, 432, 10751, 1237, 7610, 1534, 3983, 7863, + 2181, 6308, 8720, 6570, 4843, 1690, 14, 3872, + 5569, 9368, 12163, 2019, 7543, 2315, 4673, 7340, + 1553, 1156, 8401, 11389, 1020, 2967, 10772, 7045, + 3316, 11236, 5285, 11578, 10637, 10086, 9493, 6180, + 9277, 6130, 3323, 883, 10469, 489, 1502, 2851, + 11061, 9729, 2742, 12241, 4970, 10481, 10078, 1195, + 730, 1762, 3854, 2030, 5892, 10922, 9020, 5274, + 9179, 3604, 3782, 10206, 3180, 3467, 4668, 2446, + 7613, 9386, 834, 7703, 6836, 3403, 5351, 12276, + 3580, 1739, 10820, 9787, 10209, 4070, 12250, 8525, + 10401, 2749, 7338, 10574, 6040, 943, 9330, 1477, + 6865, 9668, 3585, 6633, 12145, 4063, 3684, 7680, + 8188, 6902, 3533, 9807, 6090, 727, 10099, 7003, + 6945, 1949, 9731, 10559, 6057, 378, 7871, 8763, + 8901, 9229, 8846, 4551, 9589, 11664, 7630, 8821, + 5680, 4956, 6251, 8388, 10156, 8723, 2341, 3159, + 1467, 5460, 8553, 7783, 2649, 2320, 9036, 6188, + 737, 3698, 4699, 5753, 9046, 3687, 16, 914, + 5186, 10531, 4552, 1964, 3509, 8436, 7516, 5381, + 10733, 3281, 7037, 1060, 2895, 7156, 8887, 5357, + 6409, 8197, 2962, 6375, 5064, 6634, 5625, 278, + 932, 10229, 8927, 7642, 351, 9298, 237, 5858, + 7692, 3146, 12126, 7586, 2053, 11285, 3802, 5204, + 4602, 1748, 11300, 340, 3711, 4614, 300, 10993, + 5070, 10049, 11616, 12247, 7421, 10707, 5746, 5654, + 3835, 5553, 1224, 8476, 9237, 3845, 250, 11209, + 4225, 6326, 9680, 12254, 4136, 2778, 692, 8808, + 6410, 6718, 10105, 10418, 3759, 7356, 11361, 8433, + 6437, 3652, 6342, 8978, 5391, 2272, 6476, 7416, + 8418, 10824, 11986, 5733, 876, 7030, 2167, 2436, + 3442, 9217, 8206, 4858, 5964, 2746, 7178, 1434, + 7389, 8879, 10661, 11457, 4220, 
1432, 10832, 4328, + 8557, 1867, 9454, 2416, 3816, 9076, 686, 5393, + 2523, 4339, 6115, 619, 937, 2834, 7775, 3279, + 2363, 7488, 6112, 5056, 824, 10204, 11690, 1113, + 2727, 9848, 896, 2028, 5075, 2654, 10464, 7884, + 12169, 5434, 3070, 6400, 9132, 11672, 12153, 4520, + 1273, 9739, 11468, 9937, 10039, 9720, 2262, 9399, + 11192, 315, 4511, 1158, 6061, 6751, 11865, 357, + 7367, 4550, 983, 8534, 8352, 10126, 7530, 9253, + 4367, 5221, 3999, 8777, 3161, 6990, 4130, 11652, + 3374, 11477, 1753, 292, 8681, 2806, 10378, 12188, + 5800, 11811, 3181, 1988, 1024, 9340, 2477, 10928, + 4582, 6750, 3619, 5503, 5233, 2463, 8470, 7650, + 7964, 6395, 1071, 1272, 3474, 11045, 3291, 11344, + 8502, 9478, 9837, 1253, 1857, 6233, 4720, 11561, + 6034, 9817, 3339, 1797, 2879, 6242, 5200, 2114, + 7962, 9353, 11363, 5475, 6084, 9601, 4108, 7323, + 10438, 9471, 1271, 408, 6911, 3079, 360, 8276, + 11535, 9156, 9049, 11539, 850, 8617, 784, 7919, + 8334, 12170, 1846, 10213, 12184, 7827, 11903, 5600, + 9779, 1012, 721, 2784, 6676, 6552, 5348, 4424, + 6816, 8405, 9959, 5150, 2356, 5552, 5267, 1333, + 8801, 9661, 7308, 5788, 4910, 909, 11613, 4395, + 8238, 6686, 4302, 3044, 2285, 12249, 1963, 9216, + 4296, 11918, 695, 4371, 9793, 4884, 2411, 10230, + 2650, 841, 3890, 10231, 7248, 8505, 11196, 6688, + 4059, 6060, 3686, 4722, 11853, 5816, 7058, 6868, + 11137, 7926, 4894, 12284, 4102, 3908, 3610, 6525, + 7938, 7982, 11977, 6755, 537, 4562, 1623, 8227, + 11453, 7544, 906, 11816, 9548, 10858, 9703, 2815, + 11736, 6813, 6979, 819, 8903, 6271, 10843, 348, + 7514, 8339, 6439, 694, 852, 5659, 2781, 3716, + 11589, 3024, 1523, 8659, 4114, 10738, 3303, 5885, + 2978, 7289, 11884, 9123, 9323, 11830, 98, 2526, + 2116, 4131, 11407, 1844, 3645, 3916, 8133, 2224, + 10871, 8092, 9651, 5989, 7140, 8480, 1670, 159, + 10923, 4918, 128, 7312, 725, 9157, 5006, 6393, + 3494, 6043, 10972, 6181, 11838, 3423, 10514, 7668, + 3693, 6658, 6905, 11953, 10212, 11922, 9101, 8365, + 5110, 45, 2400, 1921, 4377, 2720, 1695, 51, + 
2808, 650, 1896, 9997, 9971, 11980, 8098, 4833, + 4135, 4257, 5838, 4765, 10985, 11532, 590, 12198, + 482, 12173, 2006, 7064, 10018, 3912, 12016, 10519, + 11362, 6954, 2210, 284, 5413, 6601, 3865, 10339, + 11188, 6231, 517, 9564, 11281, 3863, 1210, 4604, + 8160, 11447, 153, 7204, 5763, 5089, 9248, 12154, + 11748, 1354, 6672, 179, 5532, 2646, 5941, 12185, + 862, 3158, 477, 7279, 5678, 7914, 4254, 302, + 2893, 10114, 6890, 9560, 9647, 11905, 4098, 9824, + 10269, 1353, 10715, 5325, 6254, 3951, 1807, 6449, + 5159, 1308, 8315, 3404, 1877, 1231, 112, 6398, + 11724, 12272, 7286, 1459, 12274, 9896, 3456, 800, + 1397, 10678, 103, 7420, 7976, 936, 764, 632, + 7996, 8223, 8445, 7758, 10870, 9571, 2508, 1946, + 6524, 10158, 1044, 4338, 2457, 3641, 1659, 4139, + 4688, 9733, 11148, 3946, 2082, 5261, 2036, 11850, + 7636, 12236, 5366, 2380, 1399, 7720, 2100, 3217, + 10912, 8898, 7578, 11995, 2791, 1215, 3355, 2711, + 2267, 2004, 8568, 10176, 3214, 2337, 1750, 4729, + 4997, 7415, 6315, 12044, 4374, 7157, 4844, 211, + 8003, 10159, 9290, 11481, 1735, 2336, 5793, 9875, + 8192, 986, 7527, 1401, 870, 3615, 8465, 2756, + 9770, 2034, 10168, 3264, 6132, 54, 2880, 4763, + 11805, 3074, 8286, 9428, 4881, 6933, 1090, 10038, + 2567, 708, 893, 6465, 4962, 10024, 2090, 5718, + 10743, 780, 4733, 4623, 2134, 2087, 4802, 884, + 5372, 5795, 5938, 4333, 6559, 7549, 5269, 10664, + 4252, 3260, 5917, 10814, 5768, 9983, 8096, 7791, + 6800, 7491, 6272, 1907, 10947, 6289, 11803, 6032, + 11449, 1171, 9201, 7933, 2479, 7970, 11337, 7062, + 8911, 6728, 6542, 8114, 8828, 6595, 3545, 4348, + 4610, 2205, 6999, 8106, 5560, 10390, 9321, 2499, + 2413, 7272, 6881, 10582, 9308, 9437, 3554, 3326, + 5991, 11969, 3415, 12283, 9838, 12063, 4332, 7830, + 11329, 6605, 12271, 2044, 11611, 7353, 11201, 11582, + 3733, 8943, 9978, 1627, 7168, 3935, 5050, 2762, + 7496, 10383, 755, 1654, 12053, 4952, 10134, 4394, + 6592, 7898, 7497, 8904, 12029, 3581, 10748, 5674, + 10358, 4901, 7414, 8771, 710, 6764, 8462, 7193, + 5371, 7274, 
11084, 290, 7864, 6827, 11822, 2509, + 6578, 4026, 5807, 1458, 5721, 5762, 4178, 2105, + 11621, 4852, 8897, 2856, 11510, 9264, 2520, 8776, + 7011, 2647, 1898, 7039, 5950, 11163, 5488, 6277, + 9182, 11456, 633, 10046, 11554, 5633, 9587, 2333, + 7008, 7084, 5047, 7199, 9865, 8997, 569, 6390, + 10845, 9679, 8268, 11472, 4203, 1997, 2, 9331, + 162, 6182, 2000, 3649, 9792, 6363, 7557, 6187, + 8510, 9935, 5536, 9019, 3706, 12009, 1452, 3067, + 5494, 9692, 4865, 6019, 7106, 9610, 4588, 10165, + 6261, 5887, 2652, 10172, 1580, 10379, 4638, 9949 +}; + +/* + * Table for inverse NTT, binary case: + * iGMb[x] = R*((1/g)^rev(x)) mod q + * Since g = 7, 1/g = 8778 mod 12289. + */ +static const uint16_t iGMb[] = { + 4091, 4401, 1081, 1229, 2530, 6014, 7947, 5329, + 2579, 4751, 6464, 11703, 7023, 2812, 5890, 10698, + 3109, 2125, 1960, 10925, 10601, 10404, 4189, 1875, + 5847, 8546, 4615, 5190, 11324, 10578, 5882, 11155, + 8417, 12275, 10599, 7446, 5719, 3569, 5981, 10108, + 4426, 8306, 10755, 4679, 11052, 1538, 11857, 100, + 8247, 6625, 9725, 5145, 3412, 7858, 5831, 9460, + 5217, 10740, 7882, 7506, 12172, 11292, 6049, 79, + 13, 6938, 8886, 5453, 4586, 11455, 2903, 4676, + 9843, 7621, 8822, 9109, 2083, 8507, 8685, 3110, + 7015, 3269, 1367, 6397, 10259, 8435, 10527, 11559, + 11094, 2211, 1808, 7319, 48, 9547, 2560, 1228, + 9438, 10787, 11800, 1820, 11406, 8966, 6159, 3012, + 6109, 2796, 2203, 1652, 711, 7004, 1053, 8973, + 5244, 1517, 9322, 11269, 900, 3888, 11133, 10736, + 4949, 7616, 9974, 4746, 10270, 126, 2921, 6720, + 6635, 6543, 1582, 4868, 42, 673, 2240, 7219, + 1296, 11989, 7675, 8578, 11949, 989, 10541, 7687, + 7085, 8487, 1004, 10236, 4703, 163, 9143, 4597, + 6431, 12052, 2991, 11938, 4647, 3362, 2060, 11357, + 12011, 6664, 5655, 7225, 5914, 9327, 4092, 5880, + 6932, 3402, 5133, 9394, 11229, 5252, 9008, 1556, + 6908, 4773, 3853, 8780, 10325, 7737, 1758, 7103, + 11375, 12273, 8602, 3243, 6536, 7590, 8591, 11552, + 6101, 3253, 9969, 9640, 4506, 3736, 6829, 10822, + 9130, 9948, 
3566, 2133, 3901, 6038, 7333, 6609, + 3468, 4659, 625, 2700, 7738, 3443, 3060, 3388, + 3526, 4418, 11911, 6232, 1730, 2558, 10340, 5344, + 5286, 2190, 11562, 6199, 2482, 8756, 5387, 4101, + 4609, 8605, 8226, 144, 5656, 8704, 2621, 5424, + 10812, 2959, 11346, 6249, 1715, 4951, 9540, 1888, + 3764, 39, 8219, 2080, 2502, 1469, 10550, 8709, + 5601, 1093, 3784, 5041, 2058, 8399, 11448, 9639, + 2059, 9878, 7405, 2496, 7918, 11594, 371, 7993, + 3073, 10326, 40, 10004, 9245, 7987, 5603, 4051, + 7894, 676, 11380, 7379, 6501, 4981, 2628, 3488, + 10956, 7022, 6737, 9933, 7139, 2330, 3884, 5473, + 7865, 6941, 5737, 5613, 9505, 11568, 11277, 2510, + 6689, 386, 4462, 105, 2076, 10443, 119, 3955, + 4370, 11505, 3672, 11439, 750, 3240, 3133, 754, + 4013, 11929, 9210, 5378, 11881, 11018, 2818, 1851, + 4966, 8181, 2688, 6205, 6814, 926, 2936, 4327, + 10175, 7089, 6047, 9410, 10492, 8950, 2472, 6255, + 728, 7569, 6056, 10432, 11036, 2452, 2811, 3787, + 945, 8998, 1244, 8815, 11017, 11218, 5894, 4325, + 4639, 3819, 9826, 7056, 6786, 8670, 5539, 7707, + 1361, 9812, 2949, 11265, 10301, 9108, 478, 6489, + 101, 1911, 9483, 3608, 11997, 10536, 812, 8915, + 637, 8159, 5299, 9128, 3512, 8290, 7068, 7922, + 3036, 4759, 2163, 3937, 3755, 11306, 7739, 4922, + 11932, 424, 5538, 6228, 11131, 7778, 11974, 1097, + 2890, 10027, 2569, 2250, 2352, 821, 2550, 11016, + 7769, 136, 617, 3157, 5889, 9219, 6855, 120, + 4405, 1825, 9635, 7214, 10261, 11393, 2441, 9562, + 11176, 599, 2085, 11465, 7233, 6177, 4801, 9926, + 9010, 4514, 9455, 11352, 11670, 6174, 7950, 9766, + 6896, 11603, 3213, 8473, 9873, 2835, 10422, 3732, + 7961, 1457, 10857, 8069, 832, 1628, 3410, 4900, + 10855, 5111, 9543, 6325, 7431, 4083, 3072, 8847, + 9853, 10122, 5259, 11413, 6556, 303, 1465, 3871, + 4873, 5813, 10017, 6898, 3311, 5947, 8637, 5852, + 3856, 928, 4933, 8530, 1871, 2184, 5571, 5879, + 3481, 11597, 9511, 8153, 35, 2609, 5963, 8064, + 1080, 12039, 8444, 3052, 3813, 11065, 6736, 8454, + 2340, 7651, 1910, 10709, 2117, 9637, 
6402, 6028, + 2124, 7701, 2679, 5183, 6270, 7424, 2597, 6795, + 9222, 10837, 280, 8583, 3270, 6753, 2354, 3779, + 6102, 4732, 5926, 2497, 8640, 10289, 6107, 12127, + 2958, 12287, 10292, 8086, 817, 4021, 2610, 1444, + 5899, 11720, 3292, 2424, 5090, 7242, 5205, 5281, + 9956, 2702, 6656, 735, 2243, 11656, 833, 3107, + 6012, 6801, 1126, 6339, 5250, 10391, 9642, 5278, + 3513, 9769, 3025, 779, 9433, 3392, 7437, 668, + 10184, 8111, 6527, 6568, 10831, 6482, 8263, 5711, + 9780, 467, 5462, 4425, 11999, 1205, 5015, 6918, + 5096, 3827, 5525, 11579, 3518, 4875, 7388, 1931, + 6615, 1541, 8708, 260, 3385, 4792, 4391, 5697, + 7895, 2155, 7337, 236, 10635, 11534, 1906, 4793, + 9527, 7239, 8354, 5121, 10662, 2311, 3346, 8556, + 707, 1088, 4936, 678, 10245, 18, 5684, 960, + 4459, 7957, 226, 2451, 6, 8874, 320, 6298, + 8963, 8735, 2852, 2981, 1707, 5408, 5017, 9876, + 9790, 2968, 1899, 6729, 4183, 5290, 10084, 7679, + 7941, 8744, 5694, 3461, 4175, 5747, 5561, 3378, + 5227, 952, 4319, 9810, 4356, 3088, 11118, 840, + 6257, 486, 6000, 1342, 10382, 6017, 4798, 5489, + 4498, 4193, 2306, 6521, 1475, 6372, 9029, 8037, + 1625, 7020, 4740, 5730, 7956, 6351, 6494, 6917, + 11405, 7487, 10202, 10155, 7666, 7556, 11509, 1546, + 6571, 10199, 2265, 7327, 5824, 11396, 11581, 9722, + 2251, 11199, 5356, 7408, 2861, 4003, 9215, 484, + 7526, 9409, 12235, 6157, 9025, 2121, 10255, 2519, + 9533, 3824, 8674, 11419, 10888, 4762, 11303, 4097, + 2414, 6496, 9953, 10554, 808, 2999, 2130, 4286, + 12078, 7445, 5132, 7915, 245, 5974, 4874, 7292, + 7560, 10539, 9952, 9075, 2113, 3721, 10285, 10022, + 9578, 8934, 11074, 9498, 294, 4711, 3391, 1377, + 9072, 10189, 4569, 10890, 9909, 6923, 53, 4653, + 439, 10253, 7028, 10207, 8343, 1141, 2556, 7601, + 8150, 10630, 8648, 9832, 7951, 11245, 2131, 5765, + 10343, 9781, 2718, 1419, 4531, 3844, 4066, 4293, + 11657, 11525, 11353, 4313, 4869, 12186, 1611, 10892, + 11489, 8833, 2393, 15, 10830, 5003, 17, 565, + 5891, 12177, 11058, 10412, 8885, 3974, 10981, 7130, + 5840, 10482, 
8338, 6035, 6964, 1574, 10936, 2020, + 2465, 8191, 384, 2642, 2729, 5399, 2175, 9396, + 11987, 8035, 4375, 6611, 5010, 11812, 9131, 11427, + 104, 6348, 9643, 6757, 12110, 5617, 10935, 541, + 135, 3041, 7200, 6526, 5085, 12136, 842, 4129, + 7685, 11079, 8426, 1008, 2725, 11772, 6058, 1101, + 1950, 8424, 5688, 6876, 12005, 10079, 5335, 927, + 1770, 273, 8377, 2271, 5225, 10283, 116, 11807, + 91, 11699, 757, 1304, 7524, 6451, 8032, 8154, + 7456, 4191, 309, 2318, 2292, 10393, 11639, 9481, + 12238, 10594, 9569, 7912, 10368, 9889, 12244, 7179, + 3924, 3188, 367, 2077, 336, 5384, 5631, 8596, + 4621, 1775, 8866, 451, 6108, 1317, 6246, 8795, + 5896, 7283, 3132, 11564, 4977, 12161, 7371, 1366, + 12130, 10619, 3809, 5149, 6300, 2638, 4197, 1418, + 10065, 4156, 8373, 8644, 10445, 882, 8158, 10173, + 9763, 12191, 459, 2966, 3166, 405, 5000, 9311, + 6404, 8986, 1551, 8175, 3630, 10766, 9265, 700, + 8573, 9508, 6630, 11437, 11595, 5850, 3950, 4775, + 11941, 1446, 6018, 3386, 11470, 5310, 5476, 553, + 9474, 2586, 1431, 2741, 473, 11383, 4745, 836, + 4062, 10666, 7727, 11752, 5534, 312, 4307, 4351, + 5764, 8679, 8381, 8187, 5, 7395, 4363, 1152, + 5421, 5231, 6473, 436, 7567, 8603, 6229, 8230 +}; + +/* + * Reduce a small signed integer modulo q. The source integer MUST + * be between -q/2 and +q/2. + */ +static inline uint32_t +mq_conv_small(int x) { + /* + * If x < 0, the cast to uint32_t will set the high bit to 1. + */ + uint32_t y; + + y = (uint32_t)x; + y += Q & -(y >> 31); + return y; +} + +/* + * Addition modulo q. Operands must be in the 0..q-1 range. + */ +static inline uint32_t +mq_add(uint32_t x, uint32_t y) { + /* + * We compute x + y - q. If the result is negative, then the + * high bit will be set, and 'd >> 31' will be equal to 1; + * thus '-(d >> 31)' will be an all-one pattern. Otherwise, + * it will be an all-zero pattern. In other words, this + * implements a conditional addition of q. 
+ */ + uint32_t d; + + d = x + y - Q; + d += Q & -(d >> 31); + return d; +} + +/* + * Subtraction modulo q. Operands must be in the 0..q-1 range. + */ +static inline uint32_t +mq_sub(uint32_t x, uint32_t y) { + /* + * As in mq_add(), we use a conditional addition to ensure the + * result is in the 0..q-1 range. + */ + uint32_t d; + + d = x - y; + d += Q & -(d >> 31); + return d; +} + +/* + * Division by 2 modulo q. Operand must be in the 0..q-1 range. + */ +static inline uint32_t +mq_rshift1(uint32_t x) { + x += Q & -(x & 1); + return (x >> 1); +} + +/* + * Montgomery multiplication modulo q. If we set R = 2^16 mod q, then + * this function computes: x * y / R mod q + * Operands must be in the 0..q-1 range. + */ +static inline uint32_t +mq_montymul(uint32_t x, uint32_t y) { + uint32_t z, w; + + /* + * We compute x*y + k*q with a value of k chosen so that the 16 + * low bits of the result are 0. We can then shift the value. + * After the shift, result may still be larger than q, but it + * will be lower than 2*q, so a conditional subtraction works. + */ + + z = x * y; + w = ((z * Q0I) & 0xFFFF) * Q; + + /* + * When adding z and w, the result will have its low 16 bits + * equal to 0. Since x, y and z are lower than q, the sum will + * be no more than (2^15 - 1) * q + (q - 1)^2, which will + * fit on 29 bits. + */ + z = (z + w) >> 16; + + /* + * After the shift, analysis shows that the value will be less + * than 2q. We do a subtraction then conditional subtraction to + * ensure the result is in the expected range. + */ + z -= Q; + z += Q & -(z >> 31); + return z; +} + +/* + * Montgomery squaring (computes (x^2)/R). + */ +static inline uint32_t +mq_montysqr(uint32_t x) { + return mq_montymul(x, x); +} + +/* + * Divide x by y modulo q = 12289. + */ +static inline uint32_t +mq_div_12289(uint32_t x, uint32_t y) { + /* + * We invert y by computing y^(q-2) mod q. 
+ * + * We use the following addition chain for exponent e = 12287: + * + * e0 = 1 + * e1 = 2 * e0 = 2 + * e2 = e1 + e0 = 3 + * e3 = e2 + e1 = 5 + * e4 = 2 * e3 = 10 + * e5 = 2 * e4 = 20 + * e6 = 2 * e5 = 40 + * e7 = 2 * e6 = 80 + * e8 = 2 * e7 = 160 + * e9 = e8 + e2 = 163 + * e10 = e9 + e8 = 323 + * e11 = 2 * e10 = 646 + * e12 = 2 * e11 = 1292 + * e13 = e12 + e9 = 1455 + * e14 = 2 * e13 = 2910 + * e15 = 2 * e14 = 5820 + * e16 = e15 + e10 = 6143 + * e17 = 2 * e16 = 12286 + * e18 = e17 + e0 = 12287 + * + * Additions on exponents are converted to Montgomery + * multiplications. We define all intermediate results as so + * many local variables, and let the C compiler work out which + * must be kept around. + */ + uint32_t y0, y1, y2, y3, y4, y5, y6, y7, y8, y9; + uint32_t y10, y11, y12, y13, y14, y15, y16, y17, y18; + + y0 = mq_montymul(y, R2); + y1 = mq_montysqr(y0); + y2 = mq_montymul(y1, y0); + y3 = mq_montymul(y2, y1); + y4 = mq_montysqr(y3); + y5 = mq_montysqr(y4); + y6 = mq_montysqr(y5); + y7 = mq_montysqr(y6); + y8 = mq_montysqr(y7); + y9 = mq_montymul(y8, y2); + y10 = mq_montymul(y9, y8); + y11 = mq_montysqr(y10); + y12 = mq_montysqr(y11); + y13 = mq_montymul(y12, y9); + y14 = mq_montysqr(y13); + y15 = mq_montysqr(y14); + y16 = mq_montymul(y15, y10); + y17 = mq_montysqr(y16); + y18 = mq_montymul(y17, y0); + + /* + * Final multiplication with x, which is not in Montgomery + * representation, computes the correct division result. + */ + return mq_montymul(y18, x); +} + +/* + * Compute NTT on a ring element. 
+ */ +static void +mq_NTT(uint16_t *a, unsigned logn) { + size_t n, t, m; + + n = (size_t)1 << logn; + t = n; + for (m = 1; m < n; m <<= 1) { + size_t ht, i, j1; + + ht = t >> 1; + for (i = 0, j1 = 0; i < m; i ++, j1 += t) { + size_t j, j2; + uint32_t s; + + s = GMb[m + i]; + j2 = j1 + ht; + for (j = j1; j < j2; j ++) { + uint32_t u, v; + + u = a[j]; + v = mq_montymul(a[j + ht], s); + a[j] = (uint16_t)mq_add(u, v); + a[j + ht] = (uint16_t)mq_sub(u, v); + } + } + t = ht; + } +} + +/* + * Compute the inverse NTT on a ring element, binary case. + */ +static void +mq_iNTT(uint16_t *a, unsigned logn) { + size_t n, t, m; + uint32_t ni; + + n = (size_t)1 << logn; + t = 1; + m = n; + while (m > 1) { + size_t hm, dt, i, j1; + + hm = m >> 1; + dt = t << 1; + for (i = 0, j1 = 0; i < hm; i ++, j1 += dt) { + size_t j, j2; + uint32_t s; + + j2 = j1 + t; + s = iGMb[hm + i]; + for (j = j1; j < j2; j ++) { + uint32_t u, v, w; + + u = a[j]; + v = a[j + t]; + a[j] = (uint16_t)mq_add(u, v); + w = mq_sub(u, v); + a[j + t] = (uint16_t) + mq_montymul(w, s); + } + } + t = dt; + m = hm; + } + + /* + * To complete the inverse NTT, we must now divide all values by + * n (the vector size). We thus need the inverse of n, i.e. we + * need to divide 1 by 2 logn times. But we also want it in + * Montgomery representation, i.e. we also want to multiply it + * by R = 2^16. In the common case, this should be a simple right + * shift. The loop below is generic and works also in corner cases; + * its computation time is negligible. + */ + ni = R; + for (m = n; m > 1; m >>= 1) { + ni = mq_rshift1(ni); + } + for (m = 0; m < n; m ++) { + a[m] = (uint16_t)mq_montymul(a[m], ni); + } +} + +/* + * Convert a polynomial (mod q) to Montgomery representation. 
+ */ +static void +mq_poly_tomonty(uint16_t *f, unsigned logn) { + size_t u, n; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + f[u] = (uint16_t)mq_montymul(f[u], R2); + } +} + +/* + * Multiply two polynomials together (NTT representation, and using + * a Montgomery multiplication). Result f*g is written over f. + */ +static void +mq_poly_montymul_ntt(uint16_t *f, const uint16_t *g, unsigned logn) { + size_t u, n; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + f[u] = (uint16_t)mq_montymul(f[u], g[u]); + } +} + +/* + * Subtract polynomial g from polynomial f. + */ +static void +mq_poly_sub(uint16_t *f, const uint16_t *g, unsigned logn) { + size_t u, n; + + n = (size_t)1 << logn; + for (u = 0; u < n; u ++) { + f[u] = (uint16_t)mq_sub(f[u], g[u]); + } +} + +/* ===================================================================== */ + +/* see inner.h */ +void +PQCLEAN_FALCON512_AVX2_to_ntt_monty(uint16_t *h, unsigned logn) { + mq_NTT(h, logn); + mq_poly_tomonty(h, logn); +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_verify_raw(const uint16_t *c0, const int16_t *s2, + const uint16_t *h, unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + + n = (size_t)1 << logn; + tt = (uint16_t *)tmp; + + /* + * Reduce s2 elements modulo q ([0..q-1] range). + */ + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u]; + w += Q & -(w >> 31); + tt[u] = (uint16_t)w; + } + + /* + * Compute -s1 = s2*h - c0 mod phi mod q (in tt[]). + */ + mq_NTT(tt, logn); + mq_poly_montymul_ntt(tt, h, logn); + mq_iNTT(tt, logn); + mq_poly_sub(tt, c0, logn); + + /* + * Normalize -s1 elements into the [-q/2..q/2] range. + */ + for (u = 0; u < n; u ++) { + int32_t w; + + w = (int32_t)tt[u]; + w -= (int32_t)(Q & -(((Q >> 1) - (uint32_t)w) >> 31)); + ((int16_t *)tt)[u] = (int16_t)w; + } + + /* + * Signature is valid if and only if the aggregate (-s1,s2) vector + * is short enough. 
+ */ + return PQCLEAN_FALCON512_AVX2_is_short((int16_t *)tt, s2, logn); +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_compute_public(uint16_t *h, + const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + + n = (size_t)1 << logn; + tt = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + tt[u] = (uint16_t)mq_conv_small(f[u]); + h[u] = (uint16_t)mq_conv_small(g[u]); + } + mq_NTT(h, logn); + mq_NTT(tt, logn); + for (u = 0; u < n; u ++) { + if (tt[u] == 0) { + return 0; + } + h[u] = (uint16_t)mq_div_12289(h[u], tt[u]); + } + mq_iNTT(h, logn); + return 1; +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_complete_private(int8_t *G, + const int8_t *f, const int8_t *g, const int8_t *F, + unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *t1, *t2; + + n = (size_t)1 << logn; + t1 = (uint16_t *)tmp; + t2 = t1 + n; + for (u = 0; u < n; u ++) { + t1[u] = (uint16_t)mq_conv_small(g[u]); + t2[u] = (uint16_t)mq_conv_small(F[u]); + } + mq_NTT(t1, logn); + mq_NTT(t2, logn); + mq_poly_tomonty(t1, logn); + mq_poly_montymul_ntt(t1, t2, logn); + for (u = 0; u < n; u ++) { + t2[u] = (uint16_t)mq_conv_small(f[u]); + } + mq_NTT(t2, logn); + for (u = 0; u < n; u ++) { + if (t2[u] == 0) { + return 0; + } + t1[u] = (uint16_t)mq_div_12289(t1[u], t2[u]); + } + mq_iNTT(t1, logn); + for (u = 0; u < n; u ++) { + uint32_t w; + int32_t gi; + + w = t1[u]; + w -= (Q & ~ -((w - (Q >> 1)) >> 31)); + gi = *(int32_t *)&w; + if (gi < -127 || gi > +127) { + return 0; + } + G[u] = (int8_t)gi; + } + return 1; +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_is_invertible( + const int16_t *s2, unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + uint32_t r; + + n = (size_t)1 << logn; + tt = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u]; + w += Q & -(w >> 31); + tt[u] = (uint16_t)w; + } + mq_NTT(tt, logn); + r = 0; + for (u = 0; u < n; u ++) { + r |= (uint32_t)(tt[u] - 1); + } + return (int)(1u - (r >> 
31)); +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_verify_recover(uint16_t *h, + const uint16_t *c0, const int16_t *s1, const int16_t *s2, + unsigned logn, uint8_t *tmp) { + size_t u, n; + uint16_t *tt; + uint32_t r; + + n = (size_t)1 << logn; + + /* + * Reduce elements of s1 and s2 modulo q; then write s2 into tt[] + * and c0 - s1 into h[]. + */ + tt = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u]; + w += Q & -(w >> 31); + tt[u] = (uint16_t)w; + + w = (uint32_t)s1[u]; + w += Q & -(w >> 31); + w = mq_sub(c0[u], w); + h[u] = (uint16_t)w; + } + + /* + * Compute h = (c0 - s1) / s2. If one of the coefficients of s2 + * is zero (in NTT representation) then the operation fails. We + * keep that information into a flag so that we do not deviate + * from strict constant-time processing; if all coefficients of + * s2 are non-zero, then the high bit of r will be zero. + */ + mq_NTT(tt, logn); + mq_NTT(h, logn); + r = 0; + for (u = 0; u < n; u ++) { + r |= (uint32_t)(tt[u] - 1); + h[u] = (uint16_t)mq_div_12289(h[u], tt[u]); + } + mq_iNTT(h, logn); + + /* + * Signature is acceptable if and only if it is short enough, + * and s2 was invertible mod phi mod q. The caller must still + * check that the rebuilt public key matches the expected + * value (e.g. through a hash). 
+ */ + r = ~r & (uint32_t) - PQCLEAN_FALCON512_AVX2_is_short(s1, s2, logn); + return (int)(r >> 31); +} + +/* see inner.h */ +int +PQCLEAN_FALCON512_AVX2_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp) { + uint16_t *s2; + size_t u, n; + uint32_t r; + + n = (size_t)1 << logn; + s2 = (uint16_t *)tmp; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)sig[u]; + w += Q & -(w >> 31); + s2[u] = (uint16_t)w; + } + mq_NTT(s2, logn); + r = 0; + for (u = 0; u < n; u ++) { + uint32_t w; + + w = (uint32_t)s2[u] - 1u; + r += (w >> 31); + } + return (int)r; +} diff --git a/crypto_sign/falcon-512/clean/LICENSE b/crypto_sign/falcon-512/clean/LICENSE index bf2aeb7d..12c7b56c 100644 --- a/crypto_sign/falcon-512/clean/LICENSE +++ b/crypto_sign/falcon-512/clean/LICENSE @@ -1,3 +1,4 @@ +\ MIT License Copyright (c) 2017-2019 Falcon Project @@ -20,3 +21,4 @@ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/crypto_sign/falcon-512/clean/Makefile b/crypto_sign/falcon-512/clean/Makefile index e4ea7b13..21031452 100644 --- a/crypto_sign/falcon-512/clean/Makefile +++ b/crypto_sign/falcon-512/clean/Makefile @@ -1,10 +1,10 @@ # This Makefile can be used with GNU Make or BSD Make -LIB=libfalcon-512_clean.a +LIB=libfalcon512_clean.a -SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c -OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o -HEADERS = api.h fpr.h inner.h +SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c +OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o +HEADERS = api.h fpr.h inner.h CFLAGS=-O3 -Wall -Wconversion -Wextra -Wpedantic -Wvla -Werror -Wmissing-prototypes -Wredundant-decls -std=c99 -I../../../common $(EXTRAFLAGS) diff --git a/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake b/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake index 7cdbcf98..bdfdf53d 100644 --- a/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake +++ b/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake @@ -1,8 +1,8 @@ # This Makefile can be used with Microsoft Visual Studio's nmake using the command: # nmake /f Makefile.Microsoft_nmake -LIBRARY=libfalcon-512_clean.lib -OBJECTS=codec.obj common.obj fft.obj fpr.obj keygen.obj pqclean.obj rng.obj sign.obj vrfy.obj +LIBRARY=libfalcon512_clean.lib +OBJECTS=codec.obj common.obj fft.obj fpr.obj keygen.obj pqclean.obj rng.obj sign.obj vrfy.obj # Warning C4146 is raised when a unary minus operator is applied to an # unsigned type; this has nonetheless been standard and portable for as @@ -16,7 +16,7 @@ all: $(LIBRARY) $(OBJECTS): *.h $(LIBRARY): $(OBJECTS) - LIB.EXE /NOLOGO /WX /OUT:$@ $** + LIB.EXE /NOLOGO /WX /OUT:$@ $** clean: -DEL $(OBJECTS) diff --git a/crypto_sign/falcon-512/clean/codec.c b/crypto_sign/falcon-512/clean/codec.c index fe88f022..76709bc9 100644 --- a/crypto_sign/falcon-512/clean/codec.c +++ 
b/crypto_sign/falcon-512/clean/codec.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Encoding/decoding of keys and signatures. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* see inner.h */ size_t diff --git a/crypto_sign/falcon-512/clean/common.c b/crypto_sign/falcon-512/clean/common.c index dcea0c1a..dea433f6 100644 --- a/crypto_sign/falcon-512/clean/common.c +++ b/crypto_sign/falcon-512/clean/common.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Support functions for signatures (hash-to-point, norm). * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* see inner.h */ void diff --git a/crypto_sign/falcon-512/clean/fft.c b/crypto_sign/falcon-512/clean/fft.c index 4b3c1a81..a7d9bdad 100644 --- a/crypto_sign/falcon-512/clean/fft.c +++ b/crypto_sign/falcon-512/clean/fft.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * FFT code. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* * Rules for complex number macros: diff --git a/crypto_sign/falcon-512/clean/fpr.c b/crypto_sign/falcon-512/clean/fpr.c index ff3eda4a..091462a7 100644 --- a/crypto_sign/falcon-512/clean/fpr.c +++ b/crypto_sign/falcon-512/clean/fpr.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Floating-point operations. * @@ -32,7 +34,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* @@ -1631,4 +1632,3 @@ const fpr fpr_p2_tab[] = { 4571153621781053440U, 4566650022153682944U }; - diff --git a/crypto_sign/falcon-512/clean/fpr.h b/crypto_sign/falcon-512/clean/fpr.h index b662a52b..f88595e2 100644 --- a/crypto_sign/falcon-512/clean/fpr.h +++ b/crypto_sign/falcon-512/clean/fpr.h @@ -1,3 +1,6 @@ +#ifndef PQCLEAN_FALCON512_CLEAN_FPR_H +#define PQCLEAN_FALCON512_CLEAN_FPR_H + /* * Floating-point operations. 
* @@ -467,4 +470,4 @@ extern const fpr fpr_gm_tab[]; extern const fpr fpr_p2_tab[]; /* ====================================================================== */ - +#endif diff --git a/crypto_sign/falcon-512/clean/inner.h b/crypto_sign/falcon-512/clean/inner.h index f4fefe15..b81197f1 100644 --- a/crypto_sign/falcon-512/clean/inner.h +++ b/crypto_sign/falcon-512/clean/inner.h @@ -1,5 +1,6 @@ -#ifndef FALCON_INNER_H__ -#define FALCON_INNER_H__ +#ifndef PQCLEAN_FALCON512_CLEAN_INNER_H +#define PQCLEAN_FALCON512_CLEAN_INNER_H + /* * Internal functions for Falcon. This is not the API intended to be @@ -72,8 +73,8 @@ * proper, or integer-based emulation is used, the set_fpu_cw() * function does nothing, so it can be called systematically. */ - - +#include "fips202.h" +#include "fpr.h" #include #include #include @@ -115,7 +116,6 @@ set_fpu_cw(unsigned x) { */ -#include "fips202.h" #define inner_shake256_context shake256incctx #define inner_shake256_init(sc) shake256_inc_init(sc) @@ -438,7 +438,6 @@ int PQCLEAN_FALCON512_CLEAN_verify_recover(uint16_t *h, * fpr fpr_mtwo63m1 -(2^63-1) * fpr fpr_ptwo63 2^63 */ -#include "fpr.h" /* ==================================================================== */ /* @@ -514,10 +513,6 @@ prng_get_u64(prng *p) { } p->ptr = u + 8; - /* - * On systems that use little-endian encoding and allow - * unaligned accesses, we can simply read the data where it is. - */ return (uint64_t)p->buf.d[u + 0] | ((uint64_t)p->buf.d[u + 1] << 8) | ((uint64_t)p->buf.d[u + 2] << 16) diff --git a/crypto_sign/falcon-512/clean/keygen.c b/crypto_sign/falcon-512/clean/keygen.c index 6fe3ec2e..f72ecd99 100644 --- a/crypto_sign/falcon-512/clean/keygen.c +++ b/crypto_sign/falcon-512/clean/keygen.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Falcon key pair generation. 
* @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" #define MKN(logn) ((size_t)1 << (logn)) @@ -2207,7 +2208,6 @@ get_rng_u64(inner_shake256_context *rng) { | ((uint64_t)tmp[7] << 56); } - /* * Table below incarnates a discrete Gaussian distribution: * D(x) = exp(-(x^2)/(2*sigma^2)) diff --git a/crypto_sign/falcon-512/clean/pqclean.c b/crypto_sign/falcon-512/clean/pqclean.c index 6589999c..3abf6814 100644 --- a/crypto_sign/falcon-512/clean/pqclean.c +++ b/crypto_sign/falcon-512/clean/pqclean.c @@ -1,16 +1,16 @@ +#include "api.h" +#include "inner.h" +#include "randombytes.h" +#include +#include /* * Wrapper for implementing the PQClean API. */ -#include -#include -#include "api.h" -#include "inner.h" #define NONCELEN 40 - -#include "randombytes.h" +#define SEEDLEN 48 /* * Encoding formats (nnnn = log of degree, 9 for Falcon-512, 10 for Falcon-1024) @@ -41,8 +41,7 @@ /* see api.h */ int -PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair( - uint8_t *pk, uint8_t *sk) { +PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair(unsigned char *pk, unsigned char *sk) { union { uint8_t b[FALCON_KEYGEN_TEMP_9]; uint64_t dummy_u64; @@ -50,7 +49,7 @@ PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair( } tmp; int8_t f[512], g[512], F[512]; uint16_t h[512]; - unsigned char seed[48]; + unsigned char seed[SEEDLEN]; inner_shake256_context rng; size_t u, v; @@ -135,7 +134,7 @@ do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen, int16_t sig[512]; uint16_t hm[512]; } r; - unsigned char seed[48]; + unsigned char seed[SEEDLEN]; inner_shake256_context sc; size_t u, v; @@ -279,11 +278,11 @@ PQCLEAN_FALCON512_CLEAN_crypto_sign_signature( const uint8_t *m, size_t mlen, const uint8_t *sk) { /* * The PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES constant is used for - * the signed message object (as produced by crypto_sign()) + * the signed message object (as produced by PQCLEAN_FALCON512_CLEAN_crypto_sign()) * and includes a two-byte length value, so we take care here * to only generate signatures that 
are two bytes shorter than - * the maximum. This is done to ensure that crypto_sign() - * and crypto_sign_signature() produce the exact same signature + * the maximum. This is done to ensure that PQCLEAN_FALCON512_CLEAN_crypto_sign() + * and PQCLEAN_FALCON512_CLEAN_crypto_sign_signature() produce the exact same signature * value, if used on the same message, with the same private key, * and using the same output from randombytes() (this is for * reproducibility of tests). diff --git a/crypto_sign/falcon-512/clean/rng.c b/crypto_sign/falcon-512/clean/rng.c index 41d90761..266db757 100644 --- a/crypto_sign/falcon-512/clean/rng.c +++ b/crypto_sign/falcon-512/clean/rng.c @@ -1,3 +1,5 @@ +#include "inner.h" +#include /* * PRNG and interface to the system RNG. * @@ -29,10 +31,22 @@ * @author Thomas Pornin */ -#include -#include "inner.h" +/* + * Include relevant system header files. For Win32, this will also need + * linking with advapi32.dll, which we trigger with an appropriate #pragma. + */ + +/* see inner.h */ +int +PQCLEAN_FALCON512_CLEAN_get_seed(void *seed, size_t len) { + (void)seed; + if (len == 0) { + return 1; + } + return 0; +} /* see inner.h */ void @@ -46,9 +60,6 @@ PQCLEAN_FALCON512_CLEAN_prng_init(prng *p, inner_shake256_context *src) { uint64_t th, tl; int i; - uint32_t *d32 = (uint32_t *) p->state.d; - uint64_t *d64 = (uint64_t *) p->state.d; - inner_shake256_extract(src, tmp, 56); for (i = 0; i < 14; i ++) { uint32_t w; @@ -57,11 +68,11 @@ PQCLEAN_FALCON512_CLEAN_prng_init(prng *p, inner_shake256_context *src) { | ((uint32_t)tmp[(i << 2) + 1] << 8) | ((uint32_t)tmp[(i << 2) + 2] << 16) | ((uint32_t)tmp[(i << 2) + 3] << 24); - d32[i] = w; + *(uint32_t *)(p->state.d + (i << 2)) = w; } - tl = d32[48 / sizeof(uint32_t)]; - th = d32[52 / sizeof(uint32_t)]; - d64[48 / sizeof(uint64_t)] = tl + (th << 32); + tl = *(uint32_t *)(p->state.d + 48); + th = *(uint32_t *)(p->state.d + 52); + *(uint64_t *)(p->state.d + 48) = tl + (th << 32); 
PQCLEAN_FALCON512_CLEAN_prng_refill(p); } @@ -88,14 +99,12 @@ PQCLEAN_FALCON512_CLEAN_prng_refill(prng *p) { uint64_t cc; size_t u; - uint32_t *d32 = (uint32_t *) p->state.d; - uint64_t *d64 = (uint64_t *) p->state.d; /* * State uses local endianness. Only the output bytes must be * converted to little endian (if used on a big-endian machine). */ - cc = d64[48 / sizeof(uint64_t)]; + cc = *(uint64_t *)(p->state.d + 48); for (u = 0; u < 8; u ++) { uint32_t state[16]; size_t v; @@ -139,10 +148,12 @@ PQCLEAN_FALCON512_CLEAN_prng_refill(prng *p) { state[v] += CW[v]; } for (v = 4; v < 14; v ++) { - state[v] += d32[v - 4]; + state[v] += ((uint32_t *)p->state.d)[v - 4]; } - state[14] += d32[10] ^ (uint32_t)cc; - state[15] += d32[11] ^ (uint32_t)(cc >> 32); + state[14] += ((uint32_t *)p->state.d)[10] + ^ (uint32_t)cc; + state[15] += ((uint32_t *)p->state.d)[11] + ^ (uint32_t)(cc >> 32); cc ++; /* @@ -160,7 +171,7 @@ PQCLEAN_FALCON512_CLEAN_prng_refill(prng *p) { (uint8_t)(state[v] >> 24); } } - d64[48 / sizeof(uint64_t)] = cc; + *(uint64_t *)(p->state.d + 48) = cc; p->ptr = 0; diff --git a/crypto_sign/falcon-512/clean/sign.c b/crypto_sign/falcon-512/clean/sign.c index 65cd8322..87566d98 100644 --- a/crypto_sign/falcon-512/clean/sign.c +++ b/crypto_sign/falcon-512/clean/sign.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Falcon signature generation. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* =================================================================== */ @@ -1081,8 +1082,8 @@ BerExp(prng *p, fpr x, fpr ccs) { int PQCLEAN_FALCON512_CLEAN_sampler(void *ctx, fpr mu, fpr isigma) { sampler_context *spc; - int s; - fpr r, dss, ccs; + int s, z0, z, b; + fpr r, dss, ccs, x; spc = ctx; @@ -1107,9 +1108,6 @@ PQCLEAN_FALCON512_CLEAN_sampler(void *ctx, fpr mu, fpr isigma) { * We now need to sample on center r. */ for (;;) { - int z0, z, b; - fpr x; - /* * Sample z for a Gaussian distribution. 
Then get a * random bit b to turn the sampling into a bimodal diff --git a/crypto_sign/falcon-512/clean/vrfy.c b/crypto_sign/falcon-512/clean/vrfy.c index 779bd2c8..cf89f69f 100644 --- a/crypto_sign/falcon-512/clean/vrfy.c +++ b/crypto_sign/falcon-512/clean/vrfy.c @@ -1,3 +1,5 @@ +#include "inner.h" + /* * Falcon signature verification. * @@ -29,7 +31,6 @@ * @author Thomas Pornin */ -#include "inner.h" /* ===================================================================== */ /* diff --git a/test/duplicate_consistency/falcon-1024_avx2.yml b/test/duplicate_consistency/falcon-1024_avx2.yml new file mode 100644 index 00000000..0a0fc809 --- /dev/null +++ b/test/duplicate_consistency/falcon-1024_avx2.yml @@ -0,0 +1,33 @@ +consistency_checks: + - source: + scheme: falcon-512 + implementation: clean + files: + - codec.c + - common.c + - keygen.c + - vrfy.c + - source: + scheme: falcon-512 + implementation: avx2 + files: + - fpr.h + - inner.h + - codec.c + - common.c + - fft.c + - fpr.c + - keygen.c + - rng.c + - sign.c + - vrfy.c + - source: + scheme: falcon-1024 + implementation: clean + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon-1024_clean.yml b/test/duplicate_consistency/falcon-1024_clean.yml new file mode 100644 index 00000000..99f3d6bd --- /dev/null +++ b/test/duplicate_consistency/falcon-1024_clean.yml @@ -0,0 +1,32 @@ +consistency_checks: + - source: + scheme: falcon-512 + implementation: clean + files: + - fpr.h + - codec.c + - common.c + - fft.c + - fpr.c + - keygen.c + - rng.c + - sign.c + - vrfy.c + - source: + scheme: falcon-512 + implementation: avx2 + files: + - codec.c + - common.c + - keygen.c + - vrfy.c + - source: + scheme: falcon-1024 + implementation: avx2 + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon-512_avx2.yml b/test/duplicate_consistency/falcon-512_avx2.yml new file mode 100644 
index 00000000..8f8320fc --- /dev/null +++ b/test/duplicate_consistency/falcon-512_avx2.yml @@ -0,0 +1,33 @@ +consistency_checks: + - source: + scheme: falcon-512 + implementation: clean + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c + - source: + scheme: falcon-1024 + implementation: clean + files: + - codec.c + - common.c + - keygen.c + - vrfy.c + - source: + scheme: falcon-1024 + implementation: avx2 + files: + - fpr.h + - inner.h + - codec.c + - common.c + - fft.c + - fpr.c + - keygen.c + - rng.c + - sign.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon-512_clean.yml b/test/duplicate_consistency/falcon-512_clean.yml new file mode 100644 index 00000000..29971ae2 --- /dev/null +++ b/test/duplicate_consistency/falcon-512_clean.yml @@ -0,0 +1,32 @@ +consistency_checks: + - source: + scheme: falcon-512 + implementation: avx2 + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c + - source: + scheme: falcon-1024 + implementation: clean + files: + - fpr.h + - codec.c + - common.c + - fft.c + - fpr.c + - keygen.c + - rng.c + - sign.c + - vrfy.c + - source: + scheme: falcon-1024 + implementation: avx2 + files: + - codec.c + - common.c + - keygen.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon1024_avx2.yml b/test/duplicate_consistency/falcon1024_avx2.yml new file mode 100644 index 00000000..83395b51 --- /dev/null +++ b/test/duplicate_consistency/falcon1024_avx2.yml @@ -0,0 +1,11 @@ +consistency_checks: + - source: + scheme: falcon1024 + implementation: clean + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon1024_clean.yml b/test/duplicate_consistency/falcon1024_clean.yml new file mode 100644 index 00000000..5ca85bff --- /dev/null +++ b/test/duplicate_consistency/falcon1024_clean.yml @@ -0,0 +1,11 @@ +consistency_checks: + - source: + scheme: falcon1024 + implementation: avx2 + files: + - api.h + - codec.c + - 
common.c + - keygen.c + - pqclean.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon512_avx2.yml b/test/duplicate_consistency/falcon512_avx2.yml new file mode 100644 index 00000000..60b3681d --- /dev/null +++ b/test/duplicate_consistency/falcon512_avx2.yml @@ -0,0 +1,33 @@ +consistency_checks: + - source: + scheme: falcon512 + implementation: clean + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c + - source: + scheme: falcon1024 + implementation: clean + files: + - codec.c + - common.c + - keygen.c + - vrfy.c + - source: + scheme: falcon1024 + implementation: avx2 + files: + - fpr.h + - inner.h + - codec.c + - common.c + - fft.c + - fpr.c + - keygen.c + - rng.c + - sign.c + - vrfy.c diff --git a/test/duplicate_consistency/falcon512_clean.yml b/test/duplicate_consistency/falcon512_clean.yml new file mode 100644 index 00000000..b2274f54 --- /dev/null +++ b/test/duplicate_consistency/falcon512_clean.yml @@ -0,0 +1,32 @@ +consistency_checks: + - source: + scheme: falcon512 + implementation: avx2 + files: + - api.h + - codec.c + - common.c + - keygen.c + - pqclean.c + - vrfy.c + - source: + scheme: falcon1024 + implementation: clean + files: + - fpr.h + - codec.c + - common.c + - fft.c + - fpr.c + - keygen.c + - rng.c + - sign.c + - vrfy.c + - source: + scheme: falcon1024 + implementation: avx2 + files: + - codec.c + - common.c + - keygen.c + - vrfy.c From d0cea52677c5ebbcd934059842d4f5226764193c Mon Sep 17 00:00:00 2001 From: "John M. 
Schanck" Date: Wed, 21 Oct 2020 17:18:02 -0400 Subject: [PATCH 2/3] falcon: use hyphen in library name --- crypto_sign/falcon-1024/META.yml | 4 ++-- crypto_sign/falcon-1024/avx2/Makefile | 2 +- crypto_sign/falcon-1024/clean/Makefile | 2 +- crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake | 2 +- crypto_sign/falcon-512/META.yml | 4 ++-- crypto_sign/falcon-512/avx2/Makefile | 2 +- crypto_sign/falcon-512/clean/Makefile | 2 +- crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crypto_sign/falcon-1024/META.yml b/crypto_sign/falcon-1024/META.yml index 3cfb0b8c..fb088b45 100644 --- a/crypto_sign/falcon-1024/META.yml +++ b/crypto_sign/falcon-1024/META.yml @@ -20,9 +20,9 @@ auxiliary-submitters: - Zhenfei Zhang implementations: - name: clean - version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/78831f03/falcon - name: avx2 - version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/78831f03/falcon supported_platforms: - architecture: x86_64 operating_systems: diff --git a/crypto_sign/falcon-1024/avx2/Makefile b/crypto_sign/falcon-1024/avx2/Makefile index 3ea67daa..495374f6 100644 --- a/crypto_sign/falcon-1024/avx2/Makefile +++ b/crypto_sign/falcon-1024/avx2/Makefile @@ -1,6 +1,6 @@ # This Makefile can be used with GNU Make or BSD Make -LIB=libfalcon1024_avx2.a +LIB=libfalcon-1024_avx2.a SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o diff --git a/crypto_sign/falcon-1024/clean/Makefile b/crypto_sign/falcon-1024/clean/Makefile index d958ea0e..2e20ece4 100644 --- a/crypto_sign/falcon-1024/clean/Makefile +++ b/crypto_sign/falcon-1024/clean/Makefile @@ 
-1,6 +1,6 @@ # This Makefile can be used with GNU Make or BSD Make -LIB=libfalcon1024_clean.a +LIB=libfalcon-1024_clean.a SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o diff --git a/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake b/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake index 5bf6b36f..15a74498 100644 --- a/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake +++ b/crypto_sign/falcon-1024/clean/Makefile.Microsoft_nmake @@ -1,7 +1,7 @@ # This Makefile can be used with Microsoft Visual Studio's nmake using the command: # nmake /f Makefile.Microsoft_nmake -LIBRARY=libfalcon1024_clean.lib +LIBRARY=libfalcon-1024_clean.lib OBJECTS=codec.obj common.obj fft.obj fpr.obj keygen.obj pqclean.obj rng.obj sign.obj vrfy.obj # Warning C4146 is raised when a unary minus operator is applied to an diff --git a/crypto_sign/falcon-512/META.yml b/crypto_sign/falcon-512/META.yml index fa45b552..eee16206 100644 --- a/crypto_sign/falcon-512/META.yml +++ b/crypto_sign/falcon-512/META.yml @@ -20,9 +20,9 @@ auxiliary-submitters: - Zhenfei Zhang implementations: - name: clean - version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/78831f03/falcon - name: avx2 - version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/6f6f4227/falcon + version: supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/78831f03/falcon supported_platforms: - architecture: x86_64 operating_systems: diff --git a/crypto_sign/falcon-512/avx2/Makefile b/crypto_sign/falcon-512/avx2/Makefile index 9f3ca4a7..d0875be4 100644 --- a/crypto_sign/falcon-512/avx2/Makefile +++ b/crypto_sign/falcon-512/avx2/Makefile @@ -1,6 +1,6 @@ # This Makefile can be used with GNU Make or BSD Make -LIB=libfalcon512_avx2.a +LIB=libfalcon-512_avx2.a 
SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o diff --git a/crypto_sign/falcon-512/clean/Makefile b/crypto_sign/falcon-512/clean/Makefile index 21031452..0ccb3760 100644 --- a/crypto_sign/falcon-512/clean/Makefile +++ b/crypto_sign/falcon-512/clean/Makefile @@ -1,6 +1,6 @@ # This Makefile can be used with GNU Make or BSD Make -LIB=libfalcon512_clean.a +LIB=libfalcon-512_clean.a SOURCES = codec.c common.c fft.c fpr.c keygen.c pqclean.c rng.c sign.c vrfy.c OBJECTS = codec.o common.o fft.o fpr.o keygen.o pqclean.o rng.o sign.o vrfy.o diff --git a/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake b/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake index bdfdf53d..af9621a5 100644 --- a/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake +++ b/crypto_sign/falcon-512/clean/Makefile.Microsoft_nmake @@ -1,7 +1,7 @@ # This Makefile can be used with Microsoft Visual Studio's nmake using the command: # nmake /f Makefile.Microsoft_nmake -LIBRARY=libfalcon512_clean.lib +LIBRARY=libfalcon-512_clean.lib OBJECTS=codec.obj common.obj fft.obj fpr.obj keygen.obj pqclean.obj rng.obj sign.obj vrfy.obj # Warning C4146 is raised when a unary minus operator is applied to an From 2aab49b09ea496307413c44c7a32f3a7f3e8c952 Mon Sep 17 00:00:00 2001 From: "John M. 
Schanck" Date: Fri, 23 Oct 2020 10:00:50 -0400 Subject: [PATCH 3/3] duplicate_consistency: strip spaces before comparison --- test/test_duplicate_consistency.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/test_duplicate_consistency.py b/test/test_duplicate_consistency.py index 2876569b..a5967151 100644 --- a/test/test_duplicate_consistency.py +++ b/test/test_duplicate_consistency.py @@ -62,17 +62,17 @@ def test_duplicate_consistency(implementation, source, files): for file in files: target_path = os.path.join(source.path(), file) this_path = os.path.join(implementation.path(), file) - target_src = file_get_contents(target_path) - this_src = file_get_contents(this_path) - this_transformed_src = this_src.replace( - implementation.namespace_prefix(), '') - target_transformed_src = target_src.replace( - source.namespace_prefix(), '') + target_src = file_get_contents(target_path)\ + .replace(source.namespace_prefix(), '')\ + .replace(' ', '') + this_src = file_get_contents(this_path)\ + .replace(implementation.namespace_prefix(), '')\ + .replace(' ', '') - if not this_transformed_src == target_transformed_src: + if not this_src == target_src: diff = difflib.unified_diff( - this_transformed_src.splitlines(keepends=True), - target_transformed_src.splitlines(keepends=True), + this_src.splitlines(keepends=True), + target_src.splitlines(keepends=True), fromfile=this_path, tofile=target_path) messages.append("{} differed:\n{}".format(file, ''.join(diff)))