diff --git a/crypto_kem/frodokem1344aes/clean/util.c b/crypto_kem/frodokem1344aes/clean/util.c
index be4e2d3e..ab565fca 100644
--- a/crypto_kem/frodokem1344aes/clean/util.c
+++ b/crypto_kem/frodokem1344aes/clean/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM1344AES_CLEAN_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem1344aes/opt/util.c b/crypto_kem/frodokem1344aes/opt/util.c
index d2dbb565..9e617aa3 100644
--- a/crypto_kem/frodokem1344aes/opt/util.c
+++ b/crypto_kem/frodokem1344aes/opt/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM1344AES_OPT_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem1344shake/clean/util.c b/crypto_kem/frodokem1344shake/clean/util.c
index 189ea752..c8b24c85 100644
--- a/crypto_kem/frodokem1344shake/clean/util.c
+++ b/crypto_kem/frodokem1344shake/clean/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM1344SHAKE_CLEAN_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem1344shake/opt/util.c b/crypto_kem/frodokem1344shake/opt/util.c
index 220bfae5..6f6e8d66 100644
--- a/crypto_kem/frodokem1344shake/opt/util.c
+++ b/crypto_kem/frodokem1344shake/opt/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM1344SHAKE_OPT_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem640aes/clean/util.c b/crypto_kem/frodokem640aes/clean/util.c
index d0218113..bdba92e0 100644
--- a/crypto_kem/frodokem640aes/clean/util.c
+++ b/crypto_kem/frodokem640aes/clean/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM640AES_CLEAN_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
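The `min` rewrite above repeats verbatim for the remaining FrodoKEM parameter sets and implementations below. The motivation is the new test_boolean.py check added at the end of this patch: the result of a comparison may no longer flow into an assignment or a call argument, which the macro's ternary did. A minimal sketch of the distinction (names are illustrative, not from the repository); both forms compute the same value, only the shape of the expression changes:

#include <stdint.h>

/* Flagged by the new checker: the '<' result feeds an assignment
 * through the ternary operator. */
#define MIN_MACRO(x, y) (((x) < (y)) ? (x) : (y))

/* Accepted: the comparison appears only as an if-condition, never as
 * a value that is assigned or passed to a function. */
static inline uint8_t min_fn(uint8_t x, uint8_t y) {
    if (x < y) {
        return x;
    }
    return y;
}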
diff --git a/crypto_kem/frodokem640aes/opt/util.c b/crypto_kem/frodokem640aes/opt/util.c
index b43d9f84..5cdd6ca3 100644
--- a/crypto_kem/frodokem640aes/opt/util.c
+++ b/crypto_kem/frodokem640aes/opt/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM640AES_OPT_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem640shake/clean/util.c b/crypto_kem/frodokem640shake/clean/util.c
index 31e1b155..ace911bd 100644
--- a/crypto_kem/frodokem640shake/clean/util.c
+++ b/crypto_kem/frodokem640shake/clean/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM640SHAKE_CLEAN_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem640shake/opt/util.c b/crypto_kem/frodokem640shake/opt/util.c
index 6cbe46b0..1b3a5825 100644
--- a/crypto_kem/frodokem640shake/opt/util.c
+++ b/crypto_kem/frodokem640shake/opt/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM640SHAKE_OPT_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem976aes/clean/util.c b/crypto_kem/frodokem976aes/clean/util.c
index dda97621..c8a76b81 100644
--- a/crypto_kem/frodokem976aes/clean/util.c
+++ b/crypto_kem/frodokem976aes/clean/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM976AES_CLEAN_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem976aes/opt/util.c b/crypto_kem/frodokem976aes/opt/util.c
index 67019878..cac6b449 100644
--- a/crypto_kem/frodokem976aes/opt/util.c
+++ b/crypto_kem/frodokem976aes/opt/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM976AES_OPT_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem976shake/clean/util.c b/crypto_kem/frodokem976shake/clean/util.c
index b8246b87..8360be6d 100644
--- a/crypto_kem/frodokem976shake/clean/util.c
+++ b/crypto_kem/frodokem976shake/clean/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM976SHAKE_CLEAN_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/frodokem976shake/opt/util.c b/crypto_kem/frodokem976shake/opt/util.c
index 0ae983c1..83677f28 100644
--- a/crypto_kem/frodokem976shake/opt/util.c
+++ b/crypto_kem/frodokem976shake/opt/util.c
@@ -11,7 +11,12 @@
 #include "common.h"
 #include "params.h"
 
-#define min(x, y) (((x) < (y)) ? (x) : (y))
+static inline uint8_t min(uint8_t x, uint8_t y) {
+    if (x < y) {
+        return x;
+    }
+    return y;
+}
 
 uint16_t PQCLEAN_FRODOKEM976SHAKE_OPT_LE_TO_UINT16(uint16_t n) {
     return (((uint8_t *) &n)[0] | (((uint8_t *) &n)[1] << 8));
diff --git a/crypto_kem/mceliece348864/avx/int32_sort.c b/crypto_kem/mceliece348864/avx/int32_sort.c
index daf93da4..f984819f 100644
--- a/crypto_kem/mceliece348864/avx/int32_sort.c
+++ b/crypto_kem/mceliece348864/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece348864f/avx/int32_sort.c b/crypto_kem/mceliece348864f/avx/int32_sort.c
index 4284a598..3d00867d 100644
--- a/crypto_kem/mceliece348864f/avx/int32_sort.c
+++ b/crypto_kem/mceliece348864f/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece460896/avx/int32_sort.c b/crypto_kem/mceliece460896/avx/int32_sort.c
index d9577389..aae917bd 100644
--- a/crypto_kem/mceliece460896/avx/int32_sort.c
+++ b/crypto_kem/mceliece460896/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece460896f/avx/int32_sort.c b/crypto_kem/mceliece460896f/avx/int32_sort.c
index f88bfc74..d8c2b1c8 100644
--- a/crypto_kem/mceliece460896f/avx/int32_sort.c
+++ b/crypto_kem/mceliece460896f/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece6688128/avx/int32_sort.c b/crypto_kem/mceliece6688128/avx/int32_sort.c
index 4421fdf1..d55525e7 100644
--- a/crypto_kem/mceliece6688128/avx/int32_sort.c
+++ b/crypto_kem/mceliece6688128/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece6688128f/avx/int32_sort.c b/crypto_kem/mceliece6688128f/avx/int32_sort.c
index 4be2b525..f70f51dd 100644
--- a/crypto_kem/mceliece6688128f/avx/int32_sort.c
+++ b/crypto_kem/mceliece6688128f/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
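The int32_sort changes here and in the remaining sort diffs are value-preserving: `flip` was already a 0-or-1 integer, so `1 - flip` equals `!flip`, and the `if` reproduces the old comparison assignment. A standalone sketch of the equivalence (not from the repository):

#include <assert.h>
#include <stddef.h>

static void flip_rewrite_check(size_t p, size_t q) {
    int flip_old, flipflip_old, flip_new, flipflip_new;

    flip_old = (p << 1 == q);    /* old: boolean assigned directly */
    flipflip_old = !flip_old;

    flip_new = 0;                /* new: comparison stays in the if */
    if (p << 1 == q) {
        flip_new = 1;
    }
    flipflip_new = 1 - flip_new;

    assert(flip_old == flip_new);
    assert(flipflip_old == flipflip_new);
}

int main(void) {
    flip_rewrite_check(4, 8);
    flip_rewrite_check(4, 16);
    return 0;
}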
diff --git a/crypto_kem/mceliece6960119/avx/int32_sort.c b/crypto_kem/mceliece6960119/avx/int32_sort.c
index 39785008..02087a3e 100644
--- a/crypto_kem/mceliece6960119/avx/int32_sort.c
+++ b/crypto_kem/mceliece6960119/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece6960119f/avx/int32_sort.c b/crypto_kem/mceliece6960119f/avx/int32_sort.c
index 9c38802a..73bf75a2 100644
--- a/crypto_kem/mceliece6960119f/avx/int32_sort.c
+++ b/crypto_kem/mceliece6960119f/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece8192128/avx/int32_sort.c b/crypto_kem/mceliece8192128/avx/int32_sort.c
index a7eca2ba..0e11f1c4 100644
--- a/crypto_kem/mceliece8192128/avx/int32_sort.c
+++ b/crypto_kem/mceliece8192128/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/mceliece8192128f/avx/int32_sort.c b/crypto_kem/mceliece8192128f/avx/int32_sort.c
index d18b3a73..a35e886e 100644
--- a/crypto_kem/mceliece8192128f/avx/int32_sort.c
+++ b/crypto_kem/mceliece8192128f/avx/int32_sort.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.c b/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.c
index 12c5ea15..cef509b6 100644
--- a/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.c
+++ b/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.c
@@ -1,8 +1,8 @@
+#include "crypto_sort_int32.h"
+#include <immintrin.h>
 // Based on supercop-20200820/crypto_sort/int32/avx2
 
-#include "crypto_sort_int32.h"
-#include <immintrin.h>
 
 #define int32 int32_t
 
 typedef __m256i int32x8;
@@ -469,8 +469,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.h b/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.h
index 5a121e16..fdc31858 100644
--- a/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.h
+++ b/crypto_kem/ntruhps2048509/avx2/crypto_sort_int32.h
@@ -1,11 +1,10 @@
 #ifndef CRYPTO_SORT
 #define CRYPTO_SORT
-
 #include "params.h"
-
 #include <stddef.h>
 #include <stdint.h>
 
+
 void PQCLEAN_NTRUHPS2048509_AVX2_crypto_sort_int32(int32_t *x, size_t n);
 
 #endif
diff --git a/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.c b/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.c
index e9a81a7f..874b4cd7 100644
--- a/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.c
+++ b/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.c
@@ -1,8 +1,8 @@
+#include "crypto_sort_int32.h"
+#include <immintrin.h>
 // Based on supercop-20200820/crypto_sort/int32/avx2
 
-#include "crypto_sort_int32.h"
-#include <immintrin.h>
 
 #define int32 int32_t
 
 typedef __m256i int32x8;
@@ -469,8 +469,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.h b/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.h
index 12f221b0..84d40c07 100644
--- a/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.h
+++ b/crypto_kem/ntruhps2048677/avx2/crypto_sort_int32.h
@@ -1,11 +1,10 @@
 #ifndef CRYPTO_SORT
 #define CRYPTO_SORT
-
 #include "params.h"
-
 #include <stddef.h>
 #include <stdint.h>
 
+
 void PQCLEAN_NTRUHPS2048677_AVX2_crypto_sort_int32(int32_t *x, size_t n);
 
 #endif
diff --git a/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.c b/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.c
index 47b06efd..d4c16d25 100644
--- a/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.c
+++ b/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.c
@@ -1,8 +1,8 @@
+#include "crypto_sort_int32.h"
+#include <immintrin.h>
 // Based on supercop-20200820/crypto_sort/int32/avx2
 
-#include "crypto_sort_int32.h"
-#include <immintrin.h>
 
 #define int32 int32_t
 
 typedef __m256i int32x8;
@@ -469,8 +469,11 @@ static void int32_sort_2power(int32 *x, size_t n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.h b/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.h
index 63d91ade..209e2188 100644
--- a/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.h
+++ b/crypto_kem/ntruhps4096821/avx2/crypto_sort_int32.h
@@ -1,11 +1,10 @@
 #ifndef CRYPTO_SORT
 #define CRYPTO_SORT
-
 #include "params.h"
-
 #include <stddef.h>
 #include <stdint.h>
 
+
 void PQCLEAN_NTRUHPS4096821_AVX2_crypto_sort_int32(int32_t *x, size_t n);
 
 #endif
diff --git a/crypto_kem/ntrulpr653/avx2/crypto_sort_int32.c b/crypto_kem/ntrulpr653/avx2/crypto_sort_int32.c
index 9a3a3bf6..c116c03d 100644
--- a/crypto_kem/ntrulpr653/avx2/crypto_sort_int32.c
+++ b/crypto_kem/ntrulpr653/avx2/crypto_sort_int32.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, long long n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/ntrulpr761/avx2/crypto_sort_int32.c b/crypto_kem/ntrulpr761/avx2/crypto_sort_int32.c
index 4b4e018c..9340fb5b 100644
--- a/crypto_kem/ntrulpr761/avx2/crypto_sort_int32.c
+++ b/crypto_kem/ntrulpr761/avx2/crypto_sort_int32.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, long long n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/ntrulpr857/avx2/crypto_sort_int32.c b/crypto_kem/ntrulpr857/avx2/crypto_sort_int32.c
index 3f87a657..0b82a5d1 100644
--- a/crypto_kem/ntrulpr857/avx2/crypto_sort_int32.c
+++ b/crypto_kem/ntrulpr857/avx2/crypto_sort_int32.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, long long n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/sntrup653/avx2/crypto_sort_int32.c b/crypto_kem/sntrup653/avx2/crypto_sort_int32.c
index c0927747..289bc6b0 100644
--- a/crypto_kem/sntrup653/avx2/crypto_sort_int32.c
+++ b/crypto_kem/sntrup653/avx2/crypto_sort_int32.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, long long n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/sntrup761/avx2/crypto_sort_int32.c b/crypto_kem/sntrup761/avx2/crypto_sort_int32.c
index ffe126ea..a3268aef 100644
--- a/crypto_kem/sntrup761/avx2/crypto_sort_int32.c
+++ b/crypto_kem/sntrup761/avx2/crypto_sort_int32.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, long long n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_kem/sntrup857/avx2/crypto_sort_int32.c b/crypto_kem/sntrup857/avx2/crypto_sort_int32.c
index a086fc93..bd96995a 100644
--- a/crypto_kem/sntrup857/avx2/crypto_sort_int32.c
+++ b/crypto_kem/sntrup857/avx2/crypto_sort_int32.c
@@ -462,8 +462,11 @@ static void int32_sort_2power(int32 *x, long long n, int flagdown) {
     }
     q = n >> 3;
 
-    flip = (p << 1 == q);
-    flipflip = !flip;
+    flip = 0;
+    if (p << 1 == q) {
+        flip = 1;
+    }
+    flipflip = 1 - flip;
     for (j = 0; j < q; j += p + p) {
         for (k = j; k < j + p + p; k += p) {
             for (i = k; i < k + p; i += 8) {
diff --git a/crypto_sign/falcon-1024/clean/codec.c b/crypto_sign/falcon-1024/clean/codec.c
index d9f5de74..70856aff 100644
--- a/crypto_sign/falcon-1024/clean/codec.c
+++ b/crypto_sign/falcon-1024/clean/codec.c
@@ -443,7 +443,10 @@ PQCLEAN_FALCON1024_CLEAN_comp_decode(
                 return 0;
             }
         }
-        x[u] = (int16_t)(s ? -(int)m : (int)m);
+        x[u] = (int16_t) m;
+        if (s) {
+            x[u] = (int16_t) - x[u];
+        }
     }
     return v;
 }
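In comp_decode above, `m` holds the decoded magnitude and `s` the sign bit, so assigning the magnitude first and negating under `if (s)` produces exactly the old ternary's value. A compact sketch (function name is illustrative, not from the repository):

#include <stdint.h>

/* 's' is the sign flag, 'm' the non-negative decoded magnitude. */
int16_t apply_sign(uint32_t s, uint32_t m) {
    int16_t r = (int16_t) m;
    if (s) {
        r = (int16_t) - r;
    }
    return r; /* same value as (int16_t)(s ? -(int)m : (int)m) */
}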
diff --git a/crypto_sign/falcon-1024/clean/fpr.h b/crypto_sign/falcon-1024/clean/fpr.h
index c3103dc1..004bc0df 100644
--- a/crypto_sign/falcon-1024/clean/fpr.h
+++ b/crypto_sign/falcon-1024/clean/fpr.h
@@ -424,20 +424,32 @@ fpr fpr_sqrt(fpr x);
 
 static inline int
 fpr_lt(fpr x, fpr y) {
     /*
-     * If x >= 0 or y >= 0, a signed comparison yields the proper
-     * result:
+     * If both x and y are positive, then a signed comparison yields
+     * the proper result:
      *   - For positive values, the order is preserved.
      *   - The sign bit is at the same place as in integers, so
      *     sign is preserved.
+     * Moreover, we can compute [x < y] as sgn(x-y) and the computation
+     * of x-y will not overflow.
+     *
+     * If the signs differ, then sgn(x) gives the proper result.
      *
      * If both x and y are negative, then the order is reversed.
-     * We cannot simply invert the comparison result in that case
-     * because it would not handle the edge case x = y properly.
+     * Hence [x < y] = sgn(y-x). We must compute this separately from
+     * sgn(x-y); simply inverting sgn(x-y) would not handle the edge
+     * case x = y properly.
      */
     int cc0, cc1;
+    int64_t sx;
+    int64_t sy;
+
+    sx = *(int64_t *)&x;
+    sy = *(int64_t *)&y;
+    sy &= ~((sx ^ sy) >> 63); /* set sy=0 if signs differ */
+
+    cc0 = (int)((sx - sy) >> 63) & 1; /* Neither subtraction overflows when */
+    cc1 = (int)((sy - sx) >> 63) & 1; /* the signs are the same. */
 
-    cc0 = *(int64_t *)&x < *(int64_t *)&y;
-    cc1 = *(int64_t *)&x > *(int64_t *)&y;
     return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
 }
diff --git a/crypto_sign/falcon-1024/clean/keygen.c b/crypto_sign/falcon-1024/clean/keygen.c
index a7246b78..e987b3a5 100644
--- a/crypto_sign/falcon-1024/clean/keygen.c
+++ b/crypto_sign/falcon-1024/clean/keygen.c
@@ -1902,7 +1902,11 @@ zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
          * Get the next word of y (scaled).
          */
         v = u - sch;
-        wy = v < ylen ? y[v] : ysign;
+        if (v < ylen) {
+            wy = y[v];
+        } else {
+            wy = ysign;
+        }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
 
@@ -1960,7 +1964,11 @@ zint_sub_scaled(uint32_t *x, size_t xlen,
          * Get the next word of y (scaled).
          */
         v = u - sch;
-        wy = v < ylen ? y[v] : ysign;
+        if (v < ylen) {
+            wy = y[v];
+        } else {
+            wy = ysign;
+        }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
 
@@ -2648,10 +2656,18 @@ make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
         return;
     }
 
-    for (d = 0; d < depth; d ++) {
-        make_fg_step(data, logn - d, d,
-                     d != 0, (d + 1) < depth || out_ntt);
+    if (depth == 0) {
+        return;
     }
+    if (depth == 1) {
+        make_fg_step(data, logn, 0, 0, out_ntt);
+        return;
+    }
+    make_fg_step(data, logn, 0, 0, 1);
+    for (d = 1; d + 1 < depth; d ++) {
+        make_fg_step(data, logn - d, d, 1, 1);
+    }
+    make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
 }
 
 /*
@@ -3028,7 +3044,10 @@ solve_NTRU_intermediate(unsigned logn_top,
      * computed so that average maximum length will fall in the
      * middle or the upper half of these top 10 words.
      */
-    rlen = (slen > 10) ? 10 : slen;
+    rlen = slen;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
     poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
 
@@ -3102,7 +3121,10 @@
      * Convert current F and G into floating-point. We apply
      * scaling if the current length is more than 10 words.
      */
-    rlen = (FGlen > 10) ? 10 : FGlen;
+    rlen = FGlen;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     scale_FG = 31 * (int)(FGlen - rlen);
     poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
     poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
diff --git a/crypto_sign/falcon-1024/clean/sign.c b/crypto_sign/falcon-1024/clean/sign.c
index 10101a68..56518bf5 100644
--- a/crypto_sign/falcon-1024/clean/sign.c
+++ b/crypto_sign/falcon-1024/clean/sign.c
@@ -1189,9 +1189,11 @@ PQCLEAN_FALCON1024_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
      * Normal sampling. We use a fast PRNG seeded from our
      * SHAKE context ('rng').
      */
-    spc.sigma_min = (logn == 10)
-                    ? fpr_sigma_min_10
-                    : fpr_sigma_min_9;
+    if (logn == 10) {
+        spc.sigma_min = fpr_sigma_min_10;
+    } else {
+        spc.sigma_min = fpr_sigma_min_9;
+    }
     PQCLEAN_FALCON1024_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON1024_CLEAN_sampler;
     samp_ctx = &spc;
@@ -1234,9 +1236,11 @@ PQCLEAN_FALCON1024_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
      * Normal sampling. We use a fast PRNG seeded from our
      * SHAKE context ('rng').
      */
-    spc.sigma_min = (logn == 10)
-                    ? fpr_sigma_min_10
-                    : fpr_sigma_min_9;
+    if (logn == 10) {
+        spc.sigma_min = fpr_sigma_min_10;
+    } else {
+        spc.sigma_min = fpr_sigma_min_9;
+    }
     PQCLEAN_FALCON1024_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON1024_CLEAN_sampler;
     samp_ctx = &spc;
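The fpr_lt rewrite above replaces the `<` and `>` operators with sign-bit arithmetic: cc0 and cc1 become the sign bits of sx-sy and sy-sx, with sy forced to zero when the signs differ so that the subtraction that actually matters cannot overflow. A small harness (not part of the patch; it treats fpr as a plain uint64_t bit pattern and relies, as the patch does, on arithmetic right shift of negative values) that checks the old and new formulas agree on a few finite IEEE-754 encodings:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static int fpr_lt_old(uint64_t x, uint64_t y) {
    int64_t sx, sy;
    int cc0, cc1;
    memcpy(&sx, &x, sizeof sx);
    memcpy(&sy, &y, sizeof sy);
    cc0 = sx < sy;
    cc1 = sx > sy;
    return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
}

static int fpr_lt_new(uint64_t x, uint64_t y) {
    int64_t sx, sy;
    int cc0, cc1;
    memcpy(&sx, &x, sizeof sx);
    memcpy(&sy, &y, sizeof sy);
    sy &= ~((sx ^ sy) >> 63);         /* zero sy when the signs differ */
    cc0 = (int)((sx - sy) >> 63) & 1; /* [x < y] for same-sign operands */
    cc1 = (int)((sy - sx) >> 63) & 1; /* [x > y] for same-sign operands */
    return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
}

int main(void) {
    /* Bit patterns for +0.0, 1.0, 2.0, -1.0, -2.0, -4.0. */
    static const uint64_t s[6] = {
        0x0000000000000000u, 0x3FF0000000000000u, 0x4000000000000000u,
        0xBFF0000000000000u, 0xC000000000000000u, 0xC010000000000000u
    };
    int i, j;
    for (i = 0; i < 6; i++) {
        for (j = 0; j < 6; j++) {
            assert(fpr_lt_old(s[i], s[j]) == fpr_lt_new(s[i], s[j]));
        }
    }
    return 0;
}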
diff --git a/crypto_sign/falcon-512/clean/codec.c b/crypto_sign/falcon-512/clean/codec.c
index dda9c975..fe88f022 100644
--- a/crypto_sign/falcon-512/clean/codec.c
+++ b/crypto_sign/falcon-512/clean/codec.c
@@ -443,7 +443,10 @@ PQCLEAN_FALCON512_CLEAN_comp_decode(
                 return 0;
             }
         }
-        x[u] = (int16_t)(s ? -(int)m : (int)m);
+        x[u] = (int16_t) m;
+        if (s) {
+            x[u] = (int16_t) - x[u];
+        }
     }
     return v;
 }
diff --git a/crypto_sign/falcon-512/clean/fpr.h b/crypto_sign/falcon-512/clean/fpr.h
index f29e55f3..b662a52b 100644
--- a/crypto_sign/falcon-512/clean/fpr.h
+++ b/crypto_sign/falcon-512/clean/fpr.h
@@ -424,20 +424,32 @@ fpr fpr_sqrt(fpr x);
 
 static inline int
 fpr_lt(fpr x, fpr y) {
     /*
-     * If x >= 0 or y >= 0, a signed comparison yields the proper
-     * result:
+     * If both x and y are positive, then a signed comparison yields
+     * the proper result:
      *   - For positive values, the order is preserved.
      *   - The sign bit is at the same place as in integers, so
      *     sign is preserved.
+     * Moreover, we can compute [x < y] as sgn(x-y) and the computation
+     * of x-y will not overflow.
+     *
+     * If the signs differ, then sgn(x) gives the proper result.
      *
      * If both x and y are negative, then the order is reversed.
-     * We cannot simply invert the comparison result in that case
-     * because it would not handle the edge case x = y properly.
+     * Hence [x < y] = sgn(y-x). We must compute this separately from
+     * sgn(x-y); simply inverting sgn(x-y) would not handle the edge
+     * case x = y properly.
      */
     int cc0, cc1;
+    int64_t sx;
+    int64_t sy;
+
+    sx = *(int64_t *)&x;
+    sy = *(int64_t *)&y;
+    sy &= ~((sx ^ sy) >> 63); /* set sy=0 if signs differ */
+
+    cc0 = (int)((sx - sy) >> 63) & 1; /* Neither subtraction overflows when */
+    cc1 = (int)((sy - sx) >> 63) & 1; /* the signs are the same. */
 
-    cc0 = *(int64_t *)&x < *(int64_t *)&y;
-    cc1 = *(int64_t *)&x > *(int64_t *)&y;
     return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
 }
diff --git a/crypto_sign/falcon-512/clean/keygen.c b/crypto_sign/falcon-512/clean/keygen.c
index 8ee73151..6fe3ec2e 100644
--- a/crypto_sign/falcon-512/clean/keygen.c
+++ b/crypto_sign/falcon-512/clean/keygen.c
@@ -1902,7 +1902,11 @@ zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
          * Get the next word of y (scaled).
          */
         v = u - sch;
-        wy = v < ylen ? y[v] : ysign;
+        if (v < ylen) {
+            wy = y[v];
+        } else {
+            wy = ysign;
+        }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
 
@@ -1960,7 +1964,11 @@ zint_sub_scaled(uint32_t *x, size_t xlen,
          * Get the next word of y (scaled).
          */
         v = u - sch;
-        wy = v < ylen ? y[v] : ysign;
+        if (v < ylen) {
+            wy = y[v];
+        } else {
+            wy = ysign;
+        }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
 
@@ -2648,10 +2656,18 @@ make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
         return;
     }
 
-    for (d = 0; d < depth; d ++) {
-        make_fg_step(data, logn - d, d,
-                     d != 0, (d + 1) < depth || out_ntt);
+    if (depth == 0) {
+        return;
     }
+    if (depth == 1) {
+        make_fg_step(data, logn, 0, 0, out_ntt);
+        return;
+    }
+    make_fg_step(data, logn, 0, 0, 1);
+    for (d = 1; d + 1 < depth; d ++) {
+        make_fg_step(data, logn - d, d, 1, 1);
+    }
+    make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
 }
 
 /*
@@ -3028,7 +3044,10 @@ solve_NTRU_intermediate(unsigned logn_top,
      * computed so that average maximum length will fall in the
      * middle or the upper half of these top 10 words.
      */
-    rlen = (slen > 10) ? 10 : slen;
+    rlen = slen;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
     poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
 
@@ -3102,7 +3121,10 @@
      * Convert current F and G into floating-point. We apply
      * scaling if the current length is more than 10 words.
      */
-    rlen = (FGlen > 10) ? 10 : FGlen;
+    rlen = FGlen;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     scale_FG = 31 * (int)(FGlen - rlen);
     poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
     poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
diff --git a/crypto_sign/falcon-512/clean/sign.c b/crypto_sign/falcon-512/clean/sign.c
index f96ddfe2..65cd8322 100644
--- a/crypto_sign/falcon-512/clean/sign.c
+++ b/crypto_sign/falcon-512/clean/sign.c
@@ -1189,9 +1189,11 @@ PQCLEAN_FALCON512_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
      * Normal sampling. We use a fast PRNG seeded from our
      * SHAKE context ('rng').
      */
-    spc.sigma_min = (logn == 10)
-                    ? fpr_sigma_min_10
-                    : fpr_sigma_min_9;
+    if (logn == 10) {
+        spc.sigma_min = fpr_sigma_min_10;
+    } else {
+        spc.sigma_min = fpr_sigma_min_9;
+    }
     PQCLEAN_FALCON512_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON512_CLEAN_sampler;
     samp_ctx = &spc;
@@ -1234,9 +1236,11 @@ PQCLEAN_FALCON512_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
      * Normal sampling. We use a fast PRNG seeded from our
      * SHAKE context ('rng').
      */
-    spc.sigma_min = (logn == 10)
-                    ? fpr_sigma_min_10
-                    : fpr_sigma_min_9;
+    if (logn == 10) {
+        spc.sigma_min = fpr_sigma_min_10;
+    } else {
+        spc.sigma_min = fpr_sigma_min_9;
+    }
     PQCLEAN_FALCON512_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON512_CLEAN_sampler;
     samp_ctx = &spc;
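In the Rainbow diffs that follow, `!..._is_nonzero(...)` becomes `1 ^ ..._is_nonzero(...)`. This works because the helper produces exactly 0 or 1 (it is the 0/1 predicate for the masked row addition), and for such values XOR with 1 is logical negation. A minimal sketch with a stand-in predicate (is_nonzero here is illustrative, not the PQClean helper; it assumes 32-bit unsigned int):

#include <assert.h>
#include <stdint.h>

/* Returns 1 if a != 0, else 0, without boolean operators. */
static uint8_t is_nonzero(uint8_t a) {
    return (uint8_t) ((0u - (unsigned) a) >> 31);
}

int main(void) {
    int v;
    for (v = 0; v < 256; v++) {
        uint8_t b = is_nonzero((uint8_t) v);
        assert((1 ^ b) == !b); /* XOR-with-1 negates a 0/1 value */
    }
    return 0;
}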
diff --git a/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c b/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c
index ad2b31ff..82686971 100644
--- a/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, !PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c b/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c
index 83fbfc61..64feed34 100644
--- a/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c
+++ b/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, !PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c b/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c
index 14457694..5666c879 100644
--- a/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, !PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowIa-classic/clean/blas_comm.c b/crypto_sign/rainbowIa-classic/clean/blas_comm.c
index b2193b63..10a6f3f6 100644
--- a/crypto_sign/rainbowIa-classic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIa-classic/clean/blas_comm.c
@@ -74,7 +74,7 @@ static unsigned int gf16mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsigne
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add(ai + offset_byte, !PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add(ai + offset_byte, 1 ^ PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
         uint8_t pivot = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i);
         r8 &= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(pivot);
diff --git a/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c b/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c
index 0bbc845e..4016d4d5 100644
--- a/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c
+++ b/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c
@@ -74,7 +74,7 @@ static unsigned int gf16mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsigne
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + offset_byte, !PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + offset_byte, 1 ^ PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
         uint8_t pivot = PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16v_get_ele(ai, i);
         r8 &= PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16_is_nonzero(pivot);
diff --git a/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c b/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c
index 49a6f630..b8469e1a 100644
--- a/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c
@@ -74,7 +74,7 @@ static unsigned int gf16mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsigne
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf256v_predicated_add(ai + offset_byte, !PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf256v_predicated_add(ai + offset_byte, 1 ^ PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
         uint8_t pivot = PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16v_get_ele(ai, i);
         r8 &= PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16_is_nonzero(pivot);
diff --git a/crypto_sign/rainbowVc-classic/clean/blas_comm.c b/crypto_sign/rainbowVc-classic/clean/blas_comm.c
index 2242e16b..25ba0604 100644
--- a/crypto_sign/rainbowVc-classic/clean/blas_comm.c
+++ b/crypto_sign/rainbowVc-classic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, !PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c b/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c
index 0d3fd44b..606c2fc5 100644
--- a/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c
+++ b/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, !PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c b/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c
index 1dd6e4ea..481b7699 100644
--- a/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c
+++ b/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
            uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, !PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c
index 9c1fd6c5..4b689e8b 100644
--- a/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256128FROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
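The SPHINCS+ hunks (this one and the eleven that follow) clamp `bytes_to_copy` with an `if` and hoist the declaration out of the loop so the rewrite stays a plain assignment. The clamp is the usual min-with-block-size pattern; a single-stream sketch (absorb_blocks is illustrative; block processing is elided):

#include <string.h>

static void absorb_blocks(unsigned char *msgblock,
                          const unsigned char *d, unsigned long long len) {
    unsigned long long i = 0;
    unsigned long long bytes_to_copy;

    while (i < len) {
        bytes_to_copy = len - i;   /* bytes still to absorb */
        if (bytes_to_copy > 64) {  /* at most one 64-byte block */
            bytes_to_copy = 64;
        }
        memcpy(msgblock, d + i, bytes_to_copy);
        /* ... process the block ... */
        i += bytes_to_copy;
    }
}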
diff --git a/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c
index 1e34b5f2..d68e58f1 100644
--- a/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256128FSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c
index 4d43b57a..215e13b0 100644
--- a/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256128SROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c
index 7fe73516..e3c286c2 100644
--- a/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256128SSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c
index 76fc9014..23ea1fab 100644
--- a/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256192FROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c
index aedfd893..6c112147 100644
--- a/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256192FSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c
index a4807e94..36c99db5 100644
--- a/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256192SROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c
index 201346dc..fc1926b2 100644
--- a/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256192SSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c
index df93c559..83fe79e1 100644
--- a/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256256FROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c
index da2ad9a4..7444a9e1 100644
--- a/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256256FSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c
index c02ce1e1..35b4c496 100644
--- a/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256256SROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c
index 7f0972bc..dfdc77de 100644
--- a/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c
@@ -63,9 +63,13 @@ void PQCLEAN_SPHINCSSHA256256SSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
        const unsigned char *d7,
        unsigned long long len) {
    unsigned long long i = 0;
+    unsigned long long bytes_to_copy;
 
    while (i < len) {
-        unsigned long long bytes_to_copy = (len - i) > 64 ? 64 : (len - i);
+        bytes_to_copy = len - i;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
        memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
        memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/test/test_boolean.py b/test/test_boolean.py
new file mode 100644
index 00000000..deb07703
--- /dev/null
+++ b/test/test_boolean.py
@@ -0,0 +1,109 @@
+
+"""
+Checks that the implementation does not make use of boolean operations (==, <=, !, etc)
+in assignments or function calls.
+"""
+
+import os
+
+import pytest
+
+import helpers
+import pqclean
+import pycparser
+
+
+def setup_module():
+    if not os.path.exists(os.path.join('pycparser', '.git')):
+        print("Please run `git submodule update --init`")
+
+
+class ForbiddenLineVisitor(pycparser.c_ast.NodeVisitor):
+    def __init__(self):
+        self.errors = []
+
+    def visit_Assignment(self, node):
+        v = ForbiddenOpVisitor()
+        v.visit(node.rvalue)
+        self.errors.extend(v.errors)
+
+    def visit_Decl(self, node):
+        if node.init:
+            v = ForbiddenOpVisitor()
+            v.visit(node.init)
+            self.errors.extend(v.errors)
+
+    def visit_FuncCall(self, node):
+        if node.args:
+            v = ForbiddenOpVisitor()
+            v.visit(node.args)
+            self.errors.extend(v.errors)
+
+
+class ForbiddenOpVisitor(pycparser.c_ast.NodeVisitor):
+    def __init__(self):
+        self.errors = []
+
+    def visit_BinaryOp(self, node):
+        v = ForbiddenOpVisitor()
+        v.visit(node.left)
+        self.errors.extend(v.errors)
+        if node.op in ['<', '<=', '>', '>=', '==', '!=', '&&', '||']:
+            err = "\n {} at {c.file}:{c.line}:{c.column}".format(node.op, c=node.coord)
+            self.errors.append(err)
+        v = ForbiddenOpVisitor()
+        v.visit(node.right)
+        self.errors.extend(v.errors)
+
+    def visit_UnaryOp(self, node):
+        if node.op == '!':
+            err = "\n {} at {c.file}:{c.line}:{c.column}".format(node.op, c=node.coord)
+            self.errors.append(err)
+        v = ForbiddenOpVisitor()
+        v.visit(node.expr)
+        self.errors.extend(v.errors)
+
+    def visit_TernaryOp(self, node):
+        err = "\n ternary operator at {c.file}:{c.line}:{c.column}".format(c=node.coord)
+        self.errors.append(err)
+
+
+@pytest.mark.parametrize(
+    'implementation',
+    pqclean.Scheme.all_implementations(),
+    ids=str,
+)
+@helpers.skip_windows()
+@helpers.filtered_test
+def test_boolean(implementation):
+    errors = []
+    for fname in os.listdir(implementation.path()):
+        if not fname.endswith(".c"):
+            continue
+        tdir, _ = os.path.split(os.path.realpath(__file__))
+        ast = pycparser.parse_file(
+            os.path.join(implementation.path(), fname),
+            use_cpp=True,
+            cpp_path='cc',  # not all platforms link cpp correctly; cc -E works
+            cpp_args=[
+                '-E',
+                '-std=c99',
+                '-nostdinc',  # pycparser cannot deal with e.g. __attribute__
+                '-I{}'.format(os.path.join(tdir, "../common")),
+                # necessary to mock e.g. <stdlib.h>
+                '-I{}'.format(
+                    os.path.join(tdir, 'pycparser/utils/fake_libc_include')),
+            ]
+        )
+        v = ForbiddenLineVisitor()
+        v.visit(ast)
+        errors.extend(v.errors)
+    if errors:
+        raise AssertionError(
+            "Prohibited use of boolean operations in assignment or function call" +
+            "".join(errors)
+        )
+
+
+if __name__ == "__main__":
+    import sys
+    pytest.main(sys.argv)
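For a sense of what the checker accepts and rejects: with the visitors above, each statement in `flagged` below yields an error entry, while `clean` passes because the comparisons appear only as if-conditions, which ForbiddenLineVisitor never descends into. (Illustrative C, not from the repository.)

extern void consume(int x);

void flagged(int a, int b) {
    int r = (a < b);    /* '<' in a declaration initializer */
    r = !r;             /* '!' in an assignment */
    consume(a == b);    /* '==' in a call argument */
    r = a > b ? a : b;  /* ternary in an assignment */
    consume(r);
}

void clean(int a, int b) {
    int r = 0;
    if (a < b) {        /* comparisons may still guard control flow */
        r = 1;
    }
    r = 1 - r;          /* negation of a 0/1 value without '!' */
    consume(r);
}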