From ff16fec4353e0484cd267bd829f36efd6e15aac6 Mon Sep 17 00:00:00 2001
From: "John M. Schanck"
Date: Thu, 17 Sep 2020 11:54:58 -0400
Subject: [PATCH] astyle

---
 crypto_sign/falcon-1024/clean/codec.c | 2 +-
 crypto_sign/falcon-1024/clean/fpr.h | 4 +--
 crypto_sign/falcon-1024/clean/keygen.c | 28 +++++++++++--------
 crypto_sign/falcon-1024/clean/sign.c | 8 +++---
 crypto_sign/falcon-512/clean/codec.c | 2 +-
 crypto_sign/falcon-512/clean/fpr.h | 4 +--
 crypto_sign/falcon-512/clean/keygen.c | 28 +++++++++++--------
 crypto_sign/falcon-512/clean/sign.c | 8 +++---
 .../rainbowIIIc-classic/clean/blas_comm.c | 2 +-
 .../clean/blas_comm.c | 2 +-
 .../rainbowIIIc-cyclic/clean/blas_comm.c | 2 +-
 .../rainbowIa-classic/clean/blas_comm.c | 2 +-
 .../clean/blas_comm.c | 2 +-
 .../rainbowIa-cyclic/clean/blas_comm.c | 2 +-
 .../rainbowVc-classic/clean/blas_comm.c | 2 +-
 .../clean/blas_comm.c | 2 +-
 .../rainbowVc-cyclic/clean/blas_comm.c | 2 +-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 .../avx2/sha256avx.c | 4 ++-
 29 files changed, 93 insertions(+), 57 deletions(-)

diff --git a/crypto_sign/falcon-1024/clean/codec.c b/crypto_sign/falcon-1024/clean/codec.c
index a096f7b2..70856aff 100644
--- a/crypto_sign/falcon-1024/clean/codec.c
+++ b/crypto_sign/falcon-1024/clean/codec.c
@@ -445,7 +445,7 @@ PQCLEAN_FALCON1024_CLEAN_comp_decode(
         }
         x[u] = (int16_t) m;
         if (s) {
-            x[u] = (int16_t) -x[u];
+            x[u] = (int16_t) - x[u];
         }
     }
     return v;
diff --git a/crypto_sign/falcon-1024/clean/fpr.h b/crypto_sign/falcon-1024/clean/fpr.h
index 5678471e..795a5b49 100644
--- a/crypto_sign/falcon-1024/clean/fpr.h
+++ b/crypto_sign/falcon-1024/clean/fpr.h
@@ -436,8 +436,8 @@ fpr_lt(fpr x, fpr y) {
      */
     int cc0, cc1;
 
-    cc0 = (int)((*(int64_t *)&x - *(int64_t *)&y) >> 63) & 1;
-    cc1 = (int)((*(int64_t *)&y - *(int64_t *)&x) >> 63) & 1;
+    cc0 = (int)((*(int64_t *)&x - * (int64_t *)&y) >> 63) & 1;
+    cc1 = (int)((*(int64_t *)&y - * (int64_t *)&x) >> 63) & 1;
 
     return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
 }
diff --git a/crypto_sign/falcon-1024/clean/keygen.c b/crypto_sign/falcon-1024/clean/keygen.c
index 00a9be7e..e987b3a5 100644
--- a/crypto_sign/falcon-1024/clean/keygen.c
+++ b/crypto_sign/falcon-1024/clean/keygen.c
@@ -1903,9 +1903,9 @@ zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
          */
         v = u - sch;
         if (v < ylen) {
-            wy = y[v];
+            wy = y[v];
         } else {
-            wy = ysign;
+            wy = ysign;
         }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
@@ -1965,9 +1965,9 @@ zint_sub_scaled(uint32_t *x, size_t xlen,
          */
         v = u - sch;
         if (v < ylen) {
-            wy = y[v];
+            wy = y[v];
         } else {
-            wy = ysign;
+            wy = ysign;
         }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
@@ -2656,16 +2656,18 @@ make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
         return;
     }
 
-    if (depth == 0) return;
+    if (depth == 0) {
+        return;
+    }
     if (depth == 1) {
-        make_fg_step(data, logn, 0, 0, out_ntt);
-        return;
+        make_fg_step(data, logn, 0, 0, out_ntt);
+        return;
     }
     make_fg_step(data, logn, 0, 0, 1);
-    for (d = 1; d+1 < depth; d ++) {
+    for (d = 1; d + 1 < depth; d ++) {
         make_fg_step(data, logn - d, d, 1, 1);
     }
-    make_fg_step(data, logn-depth+1, depth-1, 1, out_ntt);
+    make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
 }
 
 /*
@@ -3043,7 +3045,9 @@ solve_NTRU_intermediate(unsigned logn_top,
      * middle or the upper half of these top 10 words.
      */
     rlen = slen;
-    if (rlen > 10) rlen = 10;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
     poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
 
@@ -3118,7 +3122,9 @@ solve_NTRU_intermediate(unsigned logn_top,
      * scaling if the current length is more than 10 words.
      */
     rlen = FGlen;
-    if (rlen > 10) rlen = 10;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     scale_FG = 31 * (int)(FGlen - rlen);
     poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
     poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
diff --git a/crypto_sign/falcon-1024/clean/sign.c b/crypto_sign/falcon-1024/clean/sign.c
index 8baec84e..56518bf5 100644
--- a/crypto_sign/falcon-1024/clean/sign.c
+++ b/crypto_sign/falcon-1024/clean/sign.c
@@ -1190,9 +1190,9 @@ PQCLEAN_FALCON1024_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
      * SHAKE context ('rng').
      */
     if (logn == 10) {
-        spc.sigma_min = fpr_sigma_min_10;
+        spc.sigma_min = fpr_sigma_min_10;
     } else {
-        spc.sigma_min = fpr_sigma_min_9;
+        spc.sigma_min = fpr_sigma_min_9;
     }
     PQCLEAN_FALCON1024_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON1024_CLEAN_sampler;
@@ -1237,9 +1237,9 @@ PQCLEAN_FALCON1024_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
      * SHAKE context ('rng').
      */
     if (logn == 10) {
-        spc.sigma_min = fpr_sigma_min_10;
+        spc.sigma_min = fpr_sigma_min_10;
     } else {
-        spc.sigma_min = fpr_sigma_min_9;
+        spc.sigma_min = fpr_sigma_min_9;
     }
     PQCLEAN_FALCON1024_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON1024_CLEAN_sampler;
diff --git a/crypto_sign/falcon-512/clean/codec.c b/crypto_sign/falcon-512/clean/codec.c
index febfe4c3..fe88f022 100644
--- a/crypto_sign/falcon-512/clean/codec.c
+++ b/crypto_sign/falcon-512/clean/codec.c
@@ -445,7 +445,7 @@ PQCLEAN_FALCON512_CLEAN_comp_decode(
         }
         x[u] = (int16_t) m;
         if (s) {
-            x[u] = (int16_t) -x[u];
+            x[u] = (int16_t) - x[u];
         }
     }
     return v;
diff --git a/crypto_sign/falcon-512/clean/fpr.h b/crypto_sign/falcon-512/clean/fpr.h
index 57dc24e1..65ce5db4 100644
--- a/crypto_sign/falcon-512/clean/fpr.h
+++ b/crypto_sign/falcon-512/clean/fpr.h
@@ -436,8 +436,8 @@ fpr_lt(fpr x, fpr y) {
      */
     int cc0, cc1;
 
-    cc0 = (int)((*(int64_t *)&x - *(int64_t *)&y) >> 63) & 1;
-    cc1 = (int)((*(int64_t *)&y - *(int64_t *)&x) >> 63) & 1;
+    cc0 = (int)((*(int64_t *)&x - * (int64_t *)&y) >> 63) & 1;
+    cc1 = (int)((*(int64_t *)&y - * (int64_t *)&x) >> 63) & 1;
 
     return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
 }
diff --git a/crypto_sign/falcon-512/clean/keygen.c b/crypto_sign/falcon-512/clean/keygen.c
index cfe59119..6fe3ec2e 100644
--- a/crypto_sign/falcon-512/clean/keygen.c
+++ b/crypto_sign/falcon-512/clean/keygen.c
@@ -1903,9 +1903,9 @@ zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
          */
         v = u - sch;
         if (v < ylen) {
-            wy = y[v];
+            wy = y[v];
         } else {
-            wy = ysign;
+            wy = ysign;
         }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
@@ -1965,9 +1965,9 @@ zint_sub_scaled(uint32_t *x, size_t xlen,
          */
         v = u - sch;
         if (v < ylen) {
-            wy = y[v];
+            wy = y[v];
         } else {
-            wy = ysign;
+            wy = ysign;
         }
         wys = ((wy << scl) & 0x7FFFFFFF) | tw;
         tw = wy >> (31 - scl);
@@ -2656,16 +2656,18 @@ make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
         return;
     }
 
-    if (depth == 0) return;
+    if (depth == 0) {
+        return;
+    }
     if (depth == 1) {
-        make_fg_step(data, logn, 0, 0, out_ntt);
-        return;
+        make_fg_step(data, logn, 0, 0, out_ntt);
+        return;
     }
     make_fg_step(data, logn, 0, 0, 1);
-    for (d = 1; d+1 < depth; d ++) {
+    for (d = 1; d + 1 < depth; d ++) {
         make_fg_step(data, logn - d, d, 1, 1);
     }
-    make_fg_step(data, logn-depth+1, depth-1, 1, out_ntt);
+    make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
 }
 
 /*
@@ -3043,7 +3045,9 @@ solve_NTRU_intermediate(unsigned logn_top,
      * middle or the upper half of these top 10 words.
      */
     rlen = slen;
-    if (rlen > 10) rlen = 10;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
     poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
 
@@ -3118,7 +3122,9 @@ solve_NTRU_intermediate(unsigned logn_top,
      * scaling if the current length is more than 10 words.
      */
     rlen = FGlen;
-    if (rlen > 10) rlen = 10;
+    if (rlen > 10) {
+        rlen = 10;
+    }
     scale_FG = 31 * (int)(FGlen - rlen);
     poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
     poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
diff --git a/crypto_sign/falcon-512/clean/sign.c b/crypto_sign/falcon-512/clean/sign.c
index ef0adea4..65cd8322 100644
--- a/crypto_sign/falcon-512/clean/sign.c
+++ b/crypto_sign/falcon-512/clean/sign.c
@@ -1190,9 +1190,9 @@ PQCLEAN_FALCON512_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
      * SHAKE context ('rng').
      */
     if (logn == 10) {
-        spc.sigma_min = fpr_sigma_min_10;
+        spc.sigma_min = fpr_sigma_min_10;
     } else {
-        spc.sigma_min = fpr_sigma_min_9;
+        spc.sigma_min = fpr_sigma_min_9;
     }
     PQCLEAN_FALCON512_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON512_CLEAN_sampler;
@@ -1237,9 +1237,9 @@ PQCLEAN_FALCON512_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
      * SHAKE context ('rng').
      */
     if (logn == 10) {
-        spc.sigma_min = fpr_sigma_min_10;
+        spc.sigma_min = fpr_sigma_min_10;
     } else {
-        spc.sigma_min = fpr_sigma_min_9;
+        spc.sigma_min = fpr_sigma_min_9;
     }
     PQCLEAN_FALCON512_CLEAN_prng_init(&spc.p, rng);
     samp = PQCLEAN_FALCON512_CLEAN_sampler;
diff --git a/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c b/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c
index 6f7727b9..82686971 100644
--- a/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIIIc-classic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1^PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWIIICCLASSIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c b/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c
index 5701b090..64feed34 100644
--- a/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c
+++ b/crypto_sign/rainbowIIIc-cyclic-compressed/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1^PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWIIICCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c b/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c
index 4ad7a4b7..5666c879 100644
--- a/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIIIc-cyclic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1^PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWIIICCYCLIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowIa-classic/clean/blas_comm.c b/crypto_sign/rainbowIa-classic/clean/blas_comm.c
index a58d87d1..10a6f3f6 100644
--- a/crypto_sign/rainbowIa-classic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIa-classic/clean/blas_comm.c
@@ -74,7 +74,7 @@ static unsigned int gf16mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsigne
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add(ai + offset_byte, 1^PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf256v_predicated_add(ai + offset_byte, 1 ^ PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
         uint8_t pivot = PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16v_get_ele(ai, i);
         r8 &= PQCLEAN_RAINBOWIACLASSIC_CLEAN_gf16_is_nonzero(pivot);
diff --git a/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c b/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c
index 4c452af7..4016d4d5 100644
--- a/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c
+++ b/crypto_sign/rainbowIa-cyclic-compressed/clean/blas_comm.c
@@ -74,7 +74,7 @@ static unsigned int gf16mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsigne
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + offset_byte, 1^PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + offset_byte, 1 ^ PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
         uint8_t pivot = PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16v_get_ele(ai, i);
         r8 &= PQCLEAN_RAINBOWIACYCLICCOMPRESSED_CLEAN_gf16_is_nonzero(pivot);
diff --git a/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c b/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c
index 65fb6c33..b8469e1a 100644
--- a/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c
+++ b/crypto_sign/rainbowIa-cyclic/clean/blas_comm.c
@@ -74,7 +74,7 @@ static unsigned int gf16mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsigne
         uint8_t *ai = mat + n_w_byte * i;
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + n_w_byte * j;
-            PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf256v_predicated_add(ai + offset_byte, 1^PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
+            PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf256v_predicated_add(ai + offset_byte, 1 ^ PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16_is_nonzero(PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16v_get_ele(ai, i)), aj + offset_byte, n_w_byte - offset_byte);
         }
         uint8_t pivot = PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16v_get_ele(ai, i);
         r8 &= PQCLEAN_RAINBOWIACYCLIC_CLEAN_gf16_is_nonzero(pivot);
diff --git a/crypto_sign/rainbowVc-classic/clean/blas_comm.c b/crypto_sign/rainbowVc-classic/clean/blas_comm.c
index bfd6a6ea..25ba0604 100644
--- a/crypto_sign/rainbowVc-classic/clean/blas_comm.c
+++ b/crypto_sign/rainbowVc-classic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1^PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWVCCLASSIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c b/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c
index 8ec4a06a..606c2fc5 100644
--- a/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c
+++ b/crypto_sign/rainbowVc-cyclic-compressed/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1^PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWVCCYCLICCOMPRESSED_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c b/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c
index a1f0a574..481b7699 100644
--- a/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c
+++ b/crypto_sign/rainbowVc-cyclic/clean/blas_comm.c
@@ -72,7 +72,7 @@ static unsigned int gf256mat_gauss_elim_ref(uint8_t *mat, unsigned int h, unsign
 
         for (unsigned int j = i + 1; j < h; j++) {
             uint8_t *aj = mat + w * j;
-            PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1^PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
+            PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256v_predicated_add(ai + skip_len_align4, 1 ^ PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256_is_nonzero(ai[i]), aj + skip_len_align4, w - skip_len_align4);
         }
         r8 &= PQCLEAN_RAINBOWVCCYCLIC_CLEAN_gf256_is_nonzero(ai[i]);
         uint8_t pivot = ai[i];
diff --git a/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c
index 9fb6aa53..4b689e8b 100644
--- a/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128f-robust/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256128FROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c
index d8af492c..d68e58f1 100644
--- a/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128f-simple/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256128FSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c
index 31a22283..215e13b0 100644
--- a/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128s-robust/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256128SROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c
index 1f1500c7..e3c286c2 100644
--- a/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-128s-simple/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256128SSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c
index 50e42a56..23ea1fab 100644
--- a/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192f-robust/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256192FROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c
index 97e8f442..6c112147 100644
--- a/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192f-simple/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256192FSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c
index 2235dc7b..36c99db5 100644
--- a/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192s-robust/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256192SROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c
index 237e8aeb..fc1926b2 100644
--- a/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-192s-simple/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256192SSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c
index 13aad2d8..83fe79e1 100644
--- a/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256f-robust/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256256FROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c
index ef988608..7444a9e1 100644
--- a/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256f-simple/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256256FSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c
index fbdce7e8..35b4c496 100644
--- a/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256s-robust/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256256SROBUST_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);
diff --git a/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c b/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c
index 73eb3471..dfdc77de 100644
--- a/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c
+++ b/crypto_sign/sphincs-sha256-256s-simple/avx2/sha256avx.c
@@ -67,7 +67,9 @@ void PQCLEAN_SPHINCSSHA256256SSIMPLE_AVX2_sha256_update8x(sha256ctxx8 *ctx,
 
     while (i < len) {
         bytes_to_copy = len - i;
-        if (bytes_to_copy > 64) bytes_to_copy = 64;
+        if (bytes_to_copy > 64) {
+            bytes_to_copy = 64;
+        }
         memcpy(&ctx->msgblocks[64 * 0], d0 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 1], d1 + i, bytes_to_copy);
         memcpy(&ctx->msgblocks[64 * 2], d2 + i, bytes_to_copy);