Simplify the calculation of the Montgomery constants in |BN_MONT_CTX_set|,
making the inversion constant-time. It should also be faster by avoiding any
use of the |BIGNUM| API in favor of using only 64-bit arithmetic. Now it's
obvious how it works. /s

Change-Id: I59a1e1c3631f426fbeabd0c752e0de44bcb5fd75
Reviewed-on: https://boringssl-review.googlesource.com/9031
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: Adam Langley <agl@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
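The constant in question is n0 == -1/n (mod 2^64) on 64-bit targets. As an
independent cross-check of what the new |bn_mont_n0| computes, the following
standalone sketch derives the same constant by Newton/Hensel lifting, a
multiplication-based method that is not the constant-time algorithm this
change introduces; the modulus limb is an arbitrary example:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint64_t n = 0xffffffffffffffc5u; /* arbitrary odd modulus limb */
      /* Newton/Hensel lifting: if x*n == 1 (mod 2^k), then x*(2 - x*n) is an
       * inverse of n mod 2^(2k). x = n is correct mod 2^3 for odd n, so five
       * doublings reach 2^96 >= 2^64. All arithmetic wraps mod 2^64. */
      uint64_t x = n;
      for (int i = 0; i < 5; i++) {
        x *= 2 - x * n;
      }
      uint64_t n0 = 0u - x;         /* negate: n0 == -1/n (mod 2^64) */
      assert(n0 * n == UINT64_MAX); /* i.e. n0*n == -1 (mod 2^64) */
      return 0;
    }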
@@ -57,6 +57,7 @@ add_library(
   gcd.c
   kronecker.c
   montgomery.c
+  montgomery_inv.c
   mul.c
   prime.c
   random.c
@@ -156,6 +156,7 @@ BIGNUM *bn_expand(BIGNUM *bn, size_t bits);
 #define BN_MASK2l (0xffffffffUL)
 #define BN_MASK2h (0xffffffff00000000UL)
 #define BN_MASK2h1 (0xffffffff80000000UL)
+#define BN_MONT_CTX_N0_LIMBS 1
 #define BN_TBIT (0x8000000000000000UL)
 #define BN_DEC_CONV (10000000000000000000UL)
 #define BN_DEC_NUM 19
@@ -171,6 +172,12 @@ BIGNUM *bn_expand(BIGNUM *bn, size_t bits);
 #define BN_MASK2l (0xffffUL)
 #define BN_MASK2h1 (0xffff8000UL)
 #define BN_MASK2h (0xffff0000UL)
+/* On some 32-bit platforms, Montgomery multiplication is done using 64-bit
+ * arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0|
+ * needs to be two words long. Only certain 32-bit platforms actually make use
+ * of n0[1], and a shorter R value would suffice for the others. However,
+ * currently only the assembly files know which is which. */
+#define BN_MONT_CTX_N0_LIMBS 2
 #define BN_TBIT (0x80000000UL)
 #define BN_DEC_CONV (1000000000UL)
 #define BN_DEC_NUM 9
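For illustration, this is how a 64-bit |n0| value is split across two 32-bit
limbs when BN_MONT_CTX_N0_LIMBS is 2, low word first; the value below is an
arbitrary example, and the actual packing appears in |BN_MONT_CTX_set|:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint64_t n0 = 0x0123456789abcdefu; /* arbitrary example value */
      /* Split into two 32-bit limbs, least significant first. */
      uint32_t limbs[2] = {(uint32_t)n0, (uint32_t)(n0 >> 32)};
      /* Reassembling the limbs recovers the original constant. */
      assert((((uint64_t)limbs[1] << 32) | limbs[0]) == n0);
      return 0;
    }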
@@ -192,7 +199,6 @@ BIGNUM *bn_expand(BIGNUM *bn, size_t bits);
 #define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2)
 #endif
 
 /* bn_set_words sets |bn| to the value encoded in the |num| words in |words|,
  * least significant word first. */
 int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num);
@@ -221,6 +227,8 @@ int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl);
 int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                 const BN_ULONG *np, const BN_ULONG *n0, int num);
 
+uint64_t bn_mont_n0(const BIGNUM *n);
+
 #if defined(OPENSSL_X86_64) && defined(_MSC_VER)
 #define BN_UMULT_LOHI(low, high, a, b) ((low) = _umul128((a), (b), &(high)))
 #endif
@@ -162,131 +162,61 @@ BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, const BN_MONT_CTX *from) {
   return to;
 }
 
-int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) {
-  int ret = 0;
-  BIGNUM *Ri, *R;
-  BIGNUM tmod;
-  BN_ULONG buf[2];
+OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
+                       BN_MONT_CTX_N0_LIMBS_VALUE_INVALID);
+OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) * BN_MONT_CTX_N0_LIMBS ==
+                       sizeof(uint64_t), BN_MONT_CTX_set_64_bit_mismatch);
 
+int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) {
   if (BN_is_zero(mod)) {
     OPENSSL_PUT_ERROR(BN, BN_R_DIV_BY_ZERO);
     return 0;
   }
 
-  BN_CTX_start(ctx);
-  Ri = BN_CTX_get(ctx);
-  if (Ri == NULL) {
-    goto err;
-  }
-  R = &mont->RR; /* grab RR as a temp */
-  if (!BN_copy(&mont->N, mod)) {
-    goto err; /* Set N */
-  }
-  mont->N.neg = 0;
-  BN_init(&tmod);
-  tmod.d = buf;
-  tmod.dmax = 2;
-  tmod.neg = 0;
-
-#if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2 <= 32)
-  /* Only certain BN_BITS2<=32 platforms actually make use of
-   * n0[1], and we could use the #else case (with a shorter R
-   * value) for the others. However, currently only the assembler
-   * files do know which is which. */
-  BN_zero(R);
-  if (!BN_set_bit(R, 2 * BN_BITS2)) {
-    goto err;
-  }
-  tmod.top = 0;
-  if ((buf[0] = mod->d[0])) {
-    tmod.top = 1;
+  if (!BN_is_odd(mod)) {
+    OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
+    return 0;
   }
-  if ((buf[1] = mod->top > 1 ? mod->d[1] : 0)) {
-    tmod.top = 2;
+  if (BN_is_negative(mod)) {
+    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
+    return 0;
   }
-  if (BN_mod_inverse(Ri, R, &tmod, ctx) == NULL) {
-    goto err;
-  }
-  if (!BN_lshift(Ri, Ri, 2 * BN_BITS2)) {
-    goto err; /* R*Ri */
+
+  /* Save the modulus. */
+  if (!BN_copy(&mont->N, mod)) {
+    OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
+    return 0;
   }
-  if (!BN_is_zero(Ri)) {
-    if (!BN_sub_word(Ri, 1)) {
-      goto err;
-    }
-  } else {
-    /* if N mod word size == 1 */
-    if (bn_expand(Ri, (int)sizeof(BN_ULONG) * 2) == NULL) {
-      goto err;
-    }
-    /* Ri-- (mod double word size) */
-    Ri->neg = 0;
-    Ri->d[0] = BN_MASK2;
-    Ri->d[1] = BN_MASK2;
-    Ri->top = 2;
+  if (BN_get_flags(mod, BN_FLG_CONSTTIME)) {
+    BN_set_flags(&mont->N, BN_FLG_CONSTTIME);
   }
-  if (!BN_div(Ri, NULL, Ri, &tmod, ctx)) {
-    goto err;
-  }
-  /* Ni = (R*Ri-1)/N,
-   * keep only couple of least significant words: */
-  mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
-  mont->n0[1] = (Ri->top > 1) ? Ri->d[1] : 0;
+
+  /* Find n0 such that n0 * N == -1 (mod r).
+   *
+   * Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the
+   * others, we could use a shorter R value and use faster |BN_ULONG|-based
+   * math instead of |uint64_t|-based math, which would be double-precision.
+   * However, currently only the assembler files know which is which. */
+  uint64_t n0 = bn_mont_n0(mod);
+  mont->n0[0] = (BN_ULONG)n0;
+#if BN_MONT_CTX_N0_LIMBS == 2
+  mont->n0[1] = (BN_ULONG)(n0 >> BN_BITS2);
 #else
-  BN_zero(R);
-  if (!BN_set_bit(R, BN_BITS2)) {
-    goto err; /* R */
-  }
-  buf[0] = mod->d[0]; /* tmod = N mod word size */
-  buf[1] = 0;
-  tmod.top = buf[0] != 0 ? 1 : 0;
-  /* Ri = R^-1 mod N*/
-  if (BN_mod_inverse(Ri, R, &tmod, ctx) == NULL) {
-    goto err;
-  }
-  if (!BN_lshift(Ri, Ri, BN_BITS2)) {
-    goto err; /* R*Ri */
-  }
-  if (!BN_is_zero(Ri)) {
-    if (!BN_sub_word(Ri, 1)) {
-      goto err;
-    }
-  } else {
-    /* if N mod word size == 1 */
-    if (!BN_set_word(Ri, BN_MASK2)) {
-      goto err; /* Ri-- (mod word size) */
-    }
-  }
-  if (!BN_div(Ri, NULL, Ri, &tmod, ctx)) {
-    goto err;
-  }
-  /* Ni = (R*Ri-1)/N,
-   * keep only least significant word: */
-  mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
   mont->n0[1] = 0;
 #endif
 
-  /* RR = (2^ri)^2 == 2^(ri*2) == 1 << (ri*2), which has its (ri*2)th bit set. */
-  int ri = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
-  BN_zero(&(mont->RR));
-  if (!BN_set_bit(&(mont->RR), ri * 2)) {
-    goto err;
-  }
-  if (!BN_mod(&(mont->RR), &(mont->RR), &(mont->N), ctx)) {
-    goto err;
+  /* Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS2 such that
+   * R > mod. Even though the assembly on some 32-bit platforms works with
+   * 64-bit values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS *
+   * BN_BITS2|, is correct because R^2 will still be a multiple of the latter
+   * as |BN_MONT_CTX_N0_LIMBS| is either one or two. */
+  unsigned lgBigR = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
+  BN_zero(&mont->RR);
+  if (!BN_set_bit(&mont->RR, lgBigR * 2) ||
+      !BN_mod(&mont->RR, &mont->RR, &mont->N, ctx)) {
+    return 0;
   }
 
-  ret = 1;
-
-err:
-  BN_CTX_end(ctx);
-  return ret;
+  return 1;
 }
 
 int BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_MUTEX *lock,
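For the RR computation, lgBigR rounds the modulus bit-length up to a whole
number of words. A standalone arithmetic check, where |word_bits| stands in
for BN_BITS2 and the bit count is an arbitrary example value:

    #include <assert.h>

    int main(void) {
      /* For a 1025-bit modulus on a 64-bit target, R must be 2^1088
       * (17 words), the smallest power of 2^64 exceeding the modulus. */
      unsigned word_bits = 64; /* stands in for BN_BITS2 */
      unsigned mod_bits = 1025; /* stands in for BN_num_bits(mod) */
      unsigned lgBigR = (mod_bits + (word_bits - 1)) / word_bits * word_bits;
      assert(lgBigR == 1088);
      return 0;
    }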
@@ -0,0 +1,158 @@
+/* Copyright 2016 Brian Smith.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#include <openssl/bn.h>
+
+#include <assert.h>
+
+#include "internal.h"
+#include "../internal.h"
+
+static uint64_t bn_neg_inv_mod_r_u64(uint64_t n);
+
+OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
+                       BN_MONT_CTX_N0_LIMBS_VALUE_INVALID);
+OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) ==
+                           BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG),
+                       BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T);
+
+/* LG_LITTLE_R is log_2(r). */
+#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)
+
+uint64_t bn_mont_n0(const BIGNUM *n) {
+  /* These conditions are checked by the caller, |BN_MONT_CTX_set|. */
+  assert(!BN_is_zero(n));
+  assert(!BN_is_negative(n));
+  assert(BN_is_odd(n));
+
+  /* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
+   * ensures that we can do integer division by |r| by simply ignoring
+   * |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
+   * |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
+   * what makes Montgomery multiplication efficient.
+   *
+   * As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
+   * with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
+   * multi-limb Montgomery multiplication of |a * b (mod n)|, given the
+   * unreduced product |t == a * b|, we repeatedly calculate:
+   *
+   *    t1 := t % r       |t1| is |t|'s lowest limb (see previous paragraph).
+   *    t2 := t1*n0*n
+   *    t3 := t + t2
+   *    t  := t3 / r      copy all limbs of |t3| except the lowest to |t|.
+   *
+   * In the last step, it would only make sense to ignore the lowest limb of
+   * |t3| if it were zero. The middle steps ensure that this is the case:
+   *
+   *             t3 ==  0   (mod r)
+   *         t + t2 ==  0   (mod r)
+   *    t + t1*n0*n ==  0   (mod r)
+   *        t1*n0*n == -t   (mod r)
+   *         t*n0*n == -t   (mod r)
+   *           n0*n == -1   (mod r)
+   *             n0 == -1/n (mod r)
+   *
+   * Thus, in each iteration of the loop, we multiply by the constant factor
+   * |n0|, the negative inverse of n (mod r). */
+
+  /* n_mod_r = n % r. As explained above, this is done by taking the lowest
+   * |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */
+  uint64_t n_mod_r = n->d[0];
+#if BN_MONT_CTX_N0_LIMBS == 2
+  if (n->top > 1) {
+    n_mod_r |= (uint64_t)n->d[1] << BN_BITS2;
+  }
+#endif
+
+  return bn_neg_inv_mod_r_u64(n_mod_r);
+}
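The derivation above can be checked concretely at single-limb scale. This
standalone sketch (illustrative only, not part of the change; the limb value
and inputs are arbitrary, and |n0| is obtained here by multiplication-based
Hensel lifting rather than the constant-time inversion below) performs one
reduction step with r = 2^32 and confirms that t + t1*n0*n is divisible by r:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const uint32_t n = 0x89abcdefu; /* arbitrary odd modulus limb */

      /* n0 = -1/n (mod 2^32) via four Hensel-lifting steps; the 3 correct
       * low bits of x = n double each step: 3 -> 6 -> 12 -> 24 -> 48 >= 32. */
      uint32_t x = n;
      for (int i = 0; i < 4; i++) {
        x *= 2 - x * n;
      }
      const uint32_t n0 = 0u - x;

      /* One reduction step on an unreduced product t. */
      const uint64_t t = (uint64_t)0x12345678u * 0x0fedcba9u;
      uint32_t t1 = (uint32_t)t; /* t1 = t % r: just the lowest limb */
      uint32_t m = t1 * n0;      /* (t1*n0) % r */
      /* t + m*n == t + t1*n0*n == 0 (mod r), so the lowest limb of the sum
       * is zero and dividing by r is simply dropping that limb. The low 32
       * bits of the 64-bit sum are exact even if the addition carries out
       * the top. */
      assert((uint32_t)(t + (uint64_t)m * n) == 0);
      return 0;
    }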
+/* bn_neg_inv_mod_r_u64 calculates -1/n mod r; i.e. it calculates |v| such
+ * that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n| must
+ * be odd.
+ *
+ * This is derived from |xbinGCD| in the "Montgomery Multiplication" chapter of
+ * "Hacker's Delight" by Henry S. Warren, Jr.:
+ * http://www.hackersdelight.org/MontgomeryMultiplication.pdf.
+ *
+ * This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
+ * (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
+ * constant-time with respect to |n|. We assume uint64_t additions,
+ * subtractions, shifts, and bitwise operations are all constant time, which
+ * may be a large leap of faith on 32-bit targets. We avoid division and
+ * multiplication, which tend to be the most problematic in terms of timing
+ * leaks.
+ *
+ * Most GCD implementations return values such that |u*r + v*n == 1|, so the
+ * caller would have to negate the resultant |v| for the purpose of Montgomery
+ * multiplication. This implementation does the negation implicitly by doing
+ * the computations as a difference instead of a sum. */
+static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
+  assert(n % 2 == 1);
+
+  /* alpha == 2**(lg r - 1) == r / 2. */
+  static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1);
+
+  const uint64_t beta = n;
+
+  uint64_t u = 1;
+  uint64_t v = 0;
+
+  /* The invariant maintained from here on is:
+   *    2**(lg r - i) == u*2*alpha - v*beta. */
+  for (size_t i = 0; i < LG_LITTLE_R; ++i) {
+#if BN_BITS2 == 64 && defined(BN_ULLONG)
+    assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) ==
+           ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
+#endif
+
+    /* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
+     * |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */
+    uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */
+
+    /* The addition can overflow, so use Dietz's method for it.
+     *
+     * Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
+     * (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
+     * (embedded in 64 bits so that overflow can be ignored):
+     *
+     *    (declare-fun x () (_ BitVec 64))
+     *    (declare-fun y () (_ BitVec 64))
+     *    (assert (let (
+     *      (one (_ bv1 64))
+     *      (thirtyTwo (_ bv32 64)))
+     *      (and
+     *        (bvult x (bvshl one thirtyTwo))
+     *        (bvult y (bvshl one thirtyTwo))
+     *        (not (=
+     *          (bvadd (bvlshr (bvxor x y) one) (bvand x y))
+     *          (bvlshr (bvadd x y) one)))
+     *    )))
+     *    (check-sat) */
+    uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */
+    u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd);
+
+    uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */
+    v = (v >> 1) + alpha_if_u_is_odd;
+  }
+
+  /* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */
+#if BN_BITS2 == 64 && defined(BN_ULLONG)
+  assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
+#endif
+
+  return v;
+}
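Nothing about the loop is specific to 64 bits, so its logic can be verified
exhaustively at byte width. A standalone 8-bit analogue (written for
illustration; |neg_inv_mod_256| is a hypothetical name, not part of the
change) that checks the loop invariant each iteration, exercises the Dietz
averaging step, and confirms v == -1/n (mod 2^8) for every odd byte:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t neg_inv_mod_256(uint8_t n) { /* n must be odd */
      const uint8_t alpha = 1u << 7; /* r/2 with r = 2^8 */
      const uint8_t beta = n;
      uint8_t u = 1, v = 0;
      for (int i = 0; i < 8; i++) {
        /* Loop invariant, checked in wider arithmetic:
         * 2^(8-i) == u*2*alpha - v*beta. */
        assert((1u << (8 - i)) == 2u * u * alpha - (unsigned)v * beta);
        uint8_t u_is_odd = (uint8_t)(0u - (u & 1)); /* 0xff or 0x00 */
        uint8_t beta_if_u_is_odd = beta & u_is_odd;
        /* Dietz's overflow-safe average: (u + beta)/2 without carrying out. */
        u = (uint8_t)(((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd));
        v = (uint8_t)((v >> 1) + (alpha & u_is_odd));
      }
      return v;
    }

    int main(void) {
      for (unsigned n = 1; n < 256; n += 2) {
        uint8_t v = neg_inv_mod_256((uint8_t)n);
        assert((uint8_t)(v * n) == 0xffu); /* v*n == -1 (mod 2^8) */
      }
      return 0;
    }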
@@ -681,8 +681,7 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) {
   BIGNUM local_p, local_q;
   BIGNUM *p = NULL, *q = NULL;
 
-  /* Make sure BN_mod_inverse in Montgomery intialization uses the
-   * BN_FLG_CONSTTIME flag. */
+  /* Make sure BN_mod in Montgomery initialization uses BN_FLG_CONSTTIME. */
   BN_init(&local_p);
   p = &local_p;
   BN_with_flags(p, rsa->p, BN_FLG_CONSTTIME);