You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

1121 lines
29 KiB

  1. /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
  2. * All rights reserved.
  3. *
  4. * This package is an SSL implementation written
  5. * by Eric Young (eay@cryptsoft.com).
  6. * The implementation was written so as to conform with Netscapes SSL.
  7. *
  8. * This library is free for commercial and non-commercial use as long as
  9. * the following conditions are aheared to. The following conditions
  10. * apply to all code found in this distribution, be it the RC4, RSA,
  11. * lhash, DES, etc., code; not just the SSL code. The SSL documentation
  12. * included with this distribution is covered by the same copyright terms
  13. * except that the holder is Tim Hudson (tjh@cryptsoft.com).
  14. *
  15. * Copyright remains Eric Young's, and as such any Copyright notices in
  16. * the code are not to be removed.
  17. * If this package is used in a product, Eric Young should be given attribution
  18. * as the author of the parts of the library used.
  19. * This can be in the form of a textual message at program startup or
  20. * in documentation (online or textual) provided with the package.
  21. *
  22. * Redistribution and use in source and binary forms, with or without
  23. * modification, are permitted provided that the following conditions
  24. * are met:
  25. * 1. Redistributions of source code must retain the copyright
  26. * notice, this list of conditions and the following disclaimer.
  27. * 2. Redistributions in binary form must reproduce the above copyright
  28. * notice, this list of conditions and the following disclaimer in the
  29. * documentation and/or other materials provided with the distribution.
  30. * 3. All advertising materials mentioning features or use of this software
  31. * must display the following acknowledgement:
  32. * "This product includes cryptographic software written by
  33. * Eric Young (eay@cryptsoft.com)"
  34. * The word 'cryptographic' can be left out if the rouines from the library
  35. * being used are not cryptographic related :-).
  36. * 4. If you include any Windows specific code (or a derivative thereof) from
  37. * the apps directory (application code) you must include an acknowledgement:
  38. * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
  39. *
  40. * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
  41. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  42. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  43. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  44. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  45. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  46. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  47. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  48. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  49. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  50. * SUCH DAMAGE.
  51. *
  52. * The licence and distribution terms for any publically available version or
  53. * derivative of this code cannot be changed. i.e. this code cannot simply be
  54. * copied and put under another distribution licence
  55. * [including the GNU Public Licence.] */
  56. #include <openssl/bn.h>
  57. #include <assert.h>
  58. #include "internal.h"
  59. /* Generic implementations of most operations are needed for:
  60. * - Configurations without inline assembly.
  61. * - Architectures other than x86 or x86_64.
* - Windows x86_64; x86_64-gcc.c does not build on MSVC. */
  63. #if defined(OPENSSL_NO_ASM) || \
  64. (!defined(OPENSSL_X86_64) && !defined(OPENSSL_X86)) || \
  65. (defined(OPENSSL_X86_64) && defined(OPENSSL_WINDOWS))
  66. #if defined(OPENSSL_WINDOWS)
  67. #define alloca _alloca
  68. #else
  69. #include <alloca.h>
  70. #endif
#ifdef BN_LLONG
/* Double-width (BN_ULLONG) variants: the full product is formed in a type
 * twice as wide as BN_ULONG and split into words with Lw()/Hw(). */

/* mul_add(r, a, w, c): (c:r) = w * a + r + c.
 * NOTE(review): |w| is expanded unparenthesized; callers pass simple
 * variables, but parenthesizing would be safer macro hygiene. */
#define mul_add(r, a, w, c) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)w * (a) + (r) + (c); \
    (r) = Lw(t); \
    (c) = Hw(t); \
  }

/* mul(r, a, w, c): (c:r) = w * a + c. */
#define mul(r, a, w, c) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)w * (a) + (c); \
    (r) = Lw(t); \
    (c) = Hw(t); \
  }

/* sqr(r0, r1, a): (r1:r0) = a * a. */
#define sqr(r0, r1, a) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)(a) * (a); \
    (r0) = Lw(t); \
    (r1) = Hw(t); \
  }

#elif defined(BN_UMULT_LOHI)
/* BN_UMULT_LOHI(low, high, a, b) yields the double-word product (high:low)
 * of two words; the carries into |c| are then propagated by comparison. */

/* mul_add(r, a, w, c): (c:r) = w * a + r + c. */
#define mul_add(r, a, w, c) \
  { \
    BN_ULONG high, low, ret, tmp = (a); \
    ret = (r); \
    BN_UMULT_LOHI(low, high, w, tmp); \
    ret += (c); \
    (c) = (ret < (c)) ? 1 : 0; \
    (c) += high; \
    ret += low; \
    (c) += (ret < low) ? 1 : 0; \
    (r) = ret; \
  }

/* mul(r, a, w, c): (c:r) = w * a + c. */
#define mul(r, a, w, c) \
  { \
    BN_ULONG high, low, ret, ta = (a); \
    BN_UMULT_LOHI(low, high, w, ta); \
    ret = low + (c); \
    (c) = high; \
    (c) += (ret < low) ? 1 : 0; \
    (r) = ret; \
  }

/* sqr(r0, r1, a): (r1:r0) = a * a. */
#define sqr(r0, r1, a) \
  { \
    BN_ULONG tmp = (a); \
    BN_UMULT_LOHI(r0, r1, tmp, tmp); \
  }

#else
/*************************************************************
 * No double-width integer type is available: every word is split into two
 * BN_BITS4-bit halves and the long multiplication is done by hand.
 */

/* Low/high halves of a single word, and a shift into the high-half slot. */
#define LBITS(a) ((a) & BN_MASK2l)
#define HBITS(a) (((a) >> BN_BITS4) & BN_MASK2l)
#define L2HBITS(a) (((a) << BN_BITS4) & BN_MASK2)

/* Same operations on BN_ULLONG-sized values. */
#define LLBITS(a) ((a) & BN_MASKl)
#define LHBITS(a) (((a) >> BN_BITS2) & BN_MASKl)
#define LL2HBITS(a) ((BN_ULLONG)((a) & BN_MASKl) << BN_BITS2)

/* mul64(l, h, bl, bh): on entry |l| and |h| hold the low and high halves of
 * a word a; on exit (h:l) is the double-word product a * b, where b has
 * halves (bh:bl). Cross terms are accumulated with explicit carry checks. */
#define mul64(l, h, bl, bh) \
  { \
    BN_ULONG m, m1, lt, ht; \
    \
    lt = l; \
    ht = h; \
    m = (bh) * (lt); \
    lt = (bl) * (lt); \
    m1 = (bl) * (ht); \
    ht = (bh) * (ht); \
    m = (m + m1) & BN_MASK2; \
    if (m < m1) \
      ht += L2HBITS((BN_ULONG)1); \
    ht += HBITS(m); \
    m1 = L2HBITS(m); \
    lt = (lt + m1) & BN_MASK2; \
    if (lt < m1) \
      ht++; \
    (l) = lt; \
    (h) = ht; \
  }

/* sqr64(lo, ho, in): (ho:lo) = in * in, via the half-word decomposition;
 * the middle term m appears twice, hence the extra shift by one bit. */
#define sqr64(lo, ho, in) \
  { \
    BN_ULONG l, h, m; \
    \
    h = (in); \
    l = LBITS(h); \
    h = HBITS(h); \
    m = (l) * (h); \
    l *= l; \
    h *= h; \
    h += (m & BN_MASK2h1) >> (BN_BITS4 - 1); \
    m = (m & BN_MASK2l) << (BN_BITS4 + 1); \
    l = (l + m) & BN_MASK2; \
    if (l < m) \
      h++; \
    (lo) = l; \
    (ho) = h; \
  }

/* mul_add(r, a, bl, bh, c): (c:r) = (bh:bl) * a + r + c. */
#define mul_add(r, a, bl, bh, c) \
  { \
    BN_ULONG l, h; \
    \
    h = (a); \
    l = LBITS(h); \
    h = HBITS(h); \
    mul64(l, h, (bl), (bh)); \
    \
    /* non-multiply part */ \
    l = (l + (c)) & BN_MASK2; \
    if (l < (c)) \
      h++; \
    (c) = (r); \
    l = (l + (c)) & BN_MASK2; \
    if (l < (c)) \
      h++; \
    (c) = h & BN_MASK2; \
    (r) = l; \
  }

/* mul(r, a, bl, bh, c): (c:r) = (bh:bl) * a + c. */
#define mul(r, a, bl, bh, c) \
  { \
    BN_ULONG l, h; \
    \
    h = (a); \
    l = LBITS(h); \
    h = HBITS(h); \
    mul64(l, h, (bl), (bh)); \
    \
    /* non-multiply part */ \
    l += (c); \
    if ((l & BN_MASK2) < (c)) \
      h++; \
    (c) = h & BN_MASK2; \
    (r) = l & BN_MASK2; \
  }
#endif /* !BN_LLONG */
  206. #if defined(BN_LLONG) || defined(BN_UMULT_HIGH)
/* bn_mul_add_words adds ap[i] * w into rp[i] for 0 <= i < num, propagating
 * the carry between words, and returns the final carry word. */
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w) {
  BN_ULONG c1 = 0;

  assert(num >= 0);
  if (num <= 0) {
    return c1;
  }

  /* num & ~3 is non-zero exactly while num >= 4: four words per iteration. */
  while (num & ~3) {
    mul_add(rp[0], ap[0], w, c1);
    mul_add(rp[1], ap[1], w, c1);
    mul_add(rp[2], ap[2], w, c1);
    mul_add(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  /* Remaining 0-3 words. */
  while (num) {
    mul_add(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }

  return c1;
}
/* bn_mul_words computes rp[i] = ap[i] * w + carry for 0 <= i < num,
 * propagating the carry between words, and returns the final carry word. */
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) {
  BN_ULONG c1 = 0;

  assert(num >= 0);
  if (num <= 0) {
    return c1;
  }

  /* Four words per iteration while num >= 4. */
  while (num & ~3) {
    mul(rp[0], ap[0], w, c1);
    mul(rp[1], ap[1], w, c1);
    mul(rp[2], ap[2], w, c1);
    mul(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  /* Remaining 0-3 words. */
  while (num) {
    mul(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }

  return c1;
}
/* bn_sqr_words squares each of the n words of |a|, writing the double-word
 * result of a[i]^2 into r[2*i] (low) and r[2*i + 1] (high). |r| must have
 * room for 2*n words. */
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) {
  assert(n >= 0);
  if (n <= 0) {
    return;
  }

  /* Four input words (eight output words) per iteration while n >= 4. */
  while (n & ~3) {
    sqr(r[0], r[1], a[0]);
    sqr(r[2], r[3], a[1]);
    sqr(r[4], r[5], a[2]);
    sqr(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }

  while (n) {
    sqr(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}
  275. #else /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */
/* bn_mul_add_words adds ap[i] * w into rp[i] for 0 <= i < num, propagating
 * carries, and returns the final carry word. This variant has no
 * double-width type, so |w| is split once into BN_BITS4-bit halves
 * (bh:bl) for the half-word mul_add macro. */
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w) {
  BN_ULONG c = 0;
  BN_ULONG bl, bh;

  assert(num >= 0);
  if (num <= 0) {
    return (BN_ULONG)0;
  }

  bl = LBITS(w);
  bh = HBITS(w);

  /* Four words per iteration while num >= 4. */
  while (num & ~3) {
    mul_add(rp[0], ap[0], bl, bh, c);
    mul_add(rp[1], ap[1], bl, bh, c);
    mul_add(rp[2], ap[2], bl, bh, c);
    mul_add(rp[3], ap[3], bl, bh, c);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  /* Remaining 0-3 words. */
  while (num) {
    mul_add(rp[0], ap[0], bl, bh, c);
    ap++;
    rp++;
    num--;
  }

  return c;
}
/* bn_mul_words computes rp[i] = ap[i] * w + carry for 0 <= i < num and
 * returns the final carry word. Half-word variant: |w| is split once into
 * (bh:bl) for the 5-argument mul macro. */
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) {
  BN_ULONG carry = 0;
  BN_ULONG bl, bh;

  assert(num >= 0);
  if (num <= 0) {
    return (BN_ULONG)0;
  }

  bl = LBITS(w);
  bh = HBITS(w);

  /* Four words per iteration while num >= 4. */
  while (num & ~3) {
    mul(rp[0], ap[0], bl, bh, carry);
    mul(rp[1], ap[1], bl, bh, carry);
    mul(rp[2], ap[2], bl, bh, carry);
    mul(rp[3], ap[3], bl, bh, carry);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  /* Remaining 0-3 words. */
  while (num) {
    mul(rp[0], ap[0], bl, bh, carry);
    ap++;
    rp++;
    num--;
  }

  return carry;
}
/* bn_sqr_words squares each of the n words of |a|, writing a[i]^2 into
 * r[2*i] (low) and r[2*i + 1] (high). Half-word variant using sqr64.
 * |r| must have room for 2*n words. */
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) {
  assert(n >= 0);
  if (n <= 0) {
    return;
  }

  /* Four input words (eight output words) per iteration while n >= 4. */
  while (n & ~3) {
    sqr64(r[0], r[1], a[0]);
    sqr64(r[2], r[3], a[1]);
    sqr64(r[4], r[5], a[2]);
    sqr64(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }

  while (n) {
    sqr64(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}
  350. #endif /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */
  351. #if defined(BN_LLONG)
  352. BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  353. return (BN_ULONG)(((((BN_ULLONG)h) << BN_BITS2) | l) / (BN_ULLONG)d);
  354. }
  355. #else
  356. /* Divide h,l by d and return the result. */
/* Divide h,l by d and return the result. */
/* bn_div_words computes the quotient of the double-word value (h:l) by |d|
 * without a double-width type, working in BN_BITS4-bit half-words.
 * Returns BN_MASK2 (all ones) if |d| is zero. The quotient is produced in
 * two rounds: high half-word first, then low half-word. */
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  BN_ULONG dh, dl, q, ret = 0, th, tl, t;
  int i, count = 2;

  if (d == 0) {
    return BN_MASK2;
  }

  i = BN_num_bits_word(d);
  /* Presumably this requires h < d (quotient fits in one word) —
   * the assert below checks a related bound. */
  assert((i == BN_BITS2) || (h <= (BN_ULONG)1 << i));

  /* Normalize: shift d (and h:l with it) left so d's top bit is set,
   * which keeps the q = h / dh estimate accurate. */
  i = BN_BITS2 - i;
  if (h >= d) {
    h -= d;
  }

  if (i) {
    d <<= i;
    h = (h << i) | (l >> (BN_BITS2 - i));
    l <<= i;
  }

  /* Half-words of the normalized divisor. */
  dh = (d & BN_MASK2h) >> BN_BITS4;
  dl = (d & BN_MASK2l);

  for (;;) {
    /* Estimate the next quotient half-word, clamping at the maximum. */
    if ((h >> BN_BITS4) == dh) {
      q = BN_MASK2l;
    } else {
      q = h / dh;
    }

    th = q * dh;
    tl = dl * q;

    /* Decrement q until q * d no longer exceeds the current remainder. */
    for (;;) {
      t = h - th;
      if ((t & BN_MASK2h) ||
          ((tl) <= ((t << BN_BITS4) | ((l & BN_MASK2h) >> BN_BITS4)))) {
        break;
      }
      q--;
      th -= dh;
      tl -= dl;
    }

    /* Subtract q * d from (h:l), aligning tl's halves with the borrow. */
    t = (tl >> BN_BITS4);
    tl = (tl << BN_BITS4) & BN_MASK2h;
    th += t;

    if (l < tl) {
      th++;
    }
    l -= tl;
    if (h < th) {
      h += d;
      q--;
    }
    h -= th;

    if (--count == 0) {
      break;
    }

    /* First round done: save the high half of the quotient and shift the
     * remainder up one half-word for the second round. */
    ret = q << BN_BITS4;
    h = ((h << BN_BITS4) | (l >> BN_BITS4)) & BN_MASK2;
    l = (l & BN_MASK2l) << BN_BITS4;
  }

  ret |= q;
  return ret;
}
  416. #endif /* !defined(BN_LLONG) */
  417. #ifdef BN_LLONG
/* bn_add_words computes r[i] = a[i] + b[i] + carry for 0 <= i < n and
 * returns the final carry (0 or 1). The BN_ULLONG accumulator |ll| holds
 * the running sum; after each word is stored, shifting right by BN_BITS2
 * leaves only the carry in |ll|. */
BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULLONG ll = 0;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  /* Four words per iteration while n >= 4. */
  while (n & ~3) {
    ll += (BN_ULLONG)a[0] + b[0];
    r[0] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;

    ll += (BN_ULLONG)a[1] + b[1];
    r[1] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;

    ll += (BN_ULLONG)a[2] + b[2];
    r[2] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;

    ll += (BN_ULLONG)a[3] + b[3];
    r[3] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;

    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }

  /* Remaining 0-3 words. */
  while (n) {
    ll += (BN_ULLONG)a[0] + b[0];
    r[0] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    a++;
    b++;
    r++;
    n--;
  }

  return (BN_ULONG)ll;
}
  454. #else /* !BN_LLONG */
/* bn_add_words computes r[i] = a[i] + b[i] + carry for 0 <= i < n and
 * returns the final carry. No double-width type: the carry is detected by
 * unsigned-wraparound comparisons. For each word, t = a + carry_in (wrap
 * implies t < carry_in), then l = t + b (wrap implies l < t); the two
 * wraps cannot both occur, so c stays 0 or 1. */
BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULONG c, l, t;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  c = 0;
  /* Four words per iteration while n >= 4. */
  while (n & ~3) {
    t = a[0];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[0]) & BN_MASK2;
    c += (l < t);
    r[0] = l;

    t = a[1];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[1]) & BN_MASK2;
    c += (l < t);
    r[1] = l;

    t = a[2];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[2]) & BN_MASK2;
    c += (l < t);
    r[2] = l;

    t = a[3];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[3]) & BN_MASK2;
    c += (l < t);
    r[3] = l;

    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }

  /* Remaining 0-3 words. */
  while (n) {
    t = a[0];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[0]) & BN_MASK2;
    c += (l < t);
    r[0] = l;
    a++;
    b++;
    r++;
    n--;
  }

  return (BN_ULONG)c;
}
  507. #endif /* !BN_LLONG */
  508. BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
  509. int n) {
  510. BN_ULONG t1, t2;
  511. int c = 0;
  512. assert(n >= 0);
  513. if (n <= 0) {
  514. return (BN_ULONG)0;
  515. }
  516. while (n & ~3) {
  517. t1 = a[0];
  518. t2 = b[0];
  519. r[0] = (t1 - t2 - c) & BN_MASK2;
  520. if (t1 != t2)
  521. c = (t1 < t2);
  522. t1 = a[1];
  523. t2 = b[1];
  524. r[1] = (t1 - t2 - c) & BN_MASK2;
  525. if (t1 != t2)
  526. c = (t1 < t2);
  527. t1 = a[2];
  528. t2 = b[2];
  529. r[2] = (t1 - t2 - c) & BN_MASK2;
  530. if (t1 != t2)
  531. c = (t1 < t2);
  532. t1 = a[3];
  533. t2 = b[3];
  534. r[3] = (t1 - t2 - c) & BN_MASK2;
  535. if (t1 != t2)
  536. c = (t1 < t2);
  537. a += 4;
  538. b += 4;
  539. r += 4;
  540. n -= 4;
  541. }
  542. while (n) {
  543. t1 = a[0];
  544. t2 = b[0];
  545. r[0] = (t1 - t2 - c) & BN_MASK2;
  546. if (t1 != t2)
  547. c = (t1 < t2);
  548. a++;
  549. b++;
  550. r++;
  551. n--;
  552. }
  553. return c;
  554. }
  555. /* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
  556. /* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
  557. /* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
  558. /* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
#ifdef BN_LLONG
/* Keep in mind that additions to multiplication result can not overflow,
 * because its high half cannot be all-ones. */

/* mul_add_c(a, b, c0, c1, c2): (c2,c1,c0) += a * b. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

/* mul_add_c2(a, b, c0, c1, c2): (c2,c1,c0) += 2 * a * b, added as two
 * separate a*b accumulations to keep carry handling simple. */
#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    BN_ULLONG tt = t + c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(tt); \
    hi = (BN_ULONG)Hw(tt); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

/* sqr_add_c(a, i, c0, c1, c2): (c2,c1,c0) += a[i]^2. */
#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)a[i] * a[i]; \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

/* sqr_add_c2(a, i, j, c0, c1, c2): (c2,c1,c0) += 2 * a[i] * a[j]. */
#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#elif defined(BN_UMULT_LOHI)
/* Keep in mind that additions to hi can not overflow, because the high word of
 * a multiplication result cannot be all-ones. */

/* mul_add_c(a, b, c0, c1, c2): (c2,c1,c0) += a * b. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a), tb = (b); \
    BN_ULONG lo, hi; \
    BN_UMULT_LOHI(lo, hi, ta, tb); \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

/* mul_add_c2(a, b, c0, c1, c2): (c2,c1,c0) += 2 * a * b, as two
 * accumulations of the single product. */
#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a), tb = (b); \
    BN_ULONG lo, hi, tt; \
    BN_UMULT_LOHI(lo, hi, ta, tb); \
    c0 += lo; \
    tt = hi + ((c0 < lo) ? 1 : 0); \
    c1 += tt; \
    c2 += (c1 < tt) ? 1 : 0; \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

/* sqr_add_c(a, i, c0, c1, c2): (c2,c1,c0) += a[i]^2. */
#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a)[i]; \
    BN_ULONG lo, hi; \
    BN_UMULT_LOHI(lo, hi, ta, ta); \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

/* sqr_add_c2(a, i, j, c0, c1, c2): (c2,c1,c0) += 2 * a[i] * a[j]. */
#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#else /* !BN_LLONG */
/* Keep in mind that additions to hi can not overflow, because
 * the high word of a multiplication result cannot be all-ones. */

/* mul_add_c(a, b, c0, c1, c2): (c2,c1,c0) += a * b, via half-word mul64. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG lo = LBITS(a), hi = HBITS(a); \
    BN_ULONG bl = LBITS(b), bh = HBITS(b); \
    mul64(lo, hi, bl, bh); \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

/* mul_add_c2(a, b, c0, c1, c2): (c2,c1,c0) += 2 * a * b, as two
 * accumulations of the single mul64 product. */
#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG tt; \
    BN_ULONG lo = LBITS(a), hi = HBITS(a); \
    BN_ULONG bl = LBITS(b), bh = HBITS(b); \
    mul64(lo, hi, bl, bh); \
    tt = hi; \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      tt++; \
    c1 = (c1 + tt) & BN_MASK2; \
    if (c1 < tt) \
      c2++; \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

/* sqr_add_c(a, i, c0, c1, c2): (c2,c1,c0) += a[i]^2, via sqr64. */
#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG lo, hi; \
    sqr64(lo, hi, (a)[i]); \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

/* sqr_add_c2(a, i, j, c0, c1, c2): (c2,c1,c0) += 2 * a[i] * a[j]. */
#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)
#endif /* !BN_LLONG */
/* bn_mul_comba8 computes the 16-word product r = a * b of two 8-word
 * inputs by the comba (column-wise) method: result word r[k] accumulates
 * every partial product a[i] * b[j] with i + j == k. The three carry words
 * rotate roles as the (c0, c1, c2) accumulator for each column, and the
 * word two columns ahead is zeroed after each store. */
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[4], b[0], c2, c3, c1);
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  mul_add_c(a[0], b[4], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[0], b[5], c3, c1, c2);
  mul_add_c(a[1], b[4], c3, c1, c2);
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  mul_add_c(a[4], b[1], c3, c1, c2);
  mul_add_c(a[5], b[0], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[6], b[0], c1, c2, c3);
  mul_add_c(a[5], b[1], c1, c2, c3);
  mul_add_c(a[4], b[2], c1, c2, c3);
  mul_add_c(a[3], b[3], c1, c2, c3);
  mul_add_c(a[2], b[4], c1, c2, c3);
  mul_add_c(a[1], b[5], c1, c2, c3);
  mul_add_c(a[0], b[6], c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  mul_add_c(a[0], b[7], c2, c3, c1);
  mul_add_c(a[1], b[6], c2, c3, c1);
  mul_add_c(a[2], b[5], c2, c3, c1);
  mul_add_c(a[3], b[4], c2, c3, c1);
  mul_add_c(a[4], b[3], c2, c3, c1);
  mul_add_c(a[5], b[2], c2, c3, c1);
  mul_add_c(a[6], b[1], c2, c3, c1);
  mul_add_c(a[7], b[0], c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  mul_add_c(a[7], b[1], c3, c1, c2);
  mul_add_c(a[6], b[2], c3, c1, c2);
  mul_add_c(a[5], b[3], c3, c1, c2);
  mul_add_c(a[4], b[4], c3, c1, c2);
  mul_add_c(a[3], b[5], c3, c1, c2);
  mul_add_c(a[2], b[6], c3, c1, c2);
  mul_add_c(a[1], b[7], c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  mul_add_c(a[2], b[7], c1, c2, c3);
  mul_add_c(a[3], b[6], c1, c2, c3);
  mul_add_c(a[4], b[5], c1, c2, c3);
  mul_add_c(a[5], b[4], c1, c2, c3);
  mul_add_c(a[6], b[3], c1, c2, c3);
  mul_add_c(a[7], b[2], c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  mul_add_c(a[7], b[3], c2, c3, c1);
  mul_add_c(a[6], b[4], c2, c3, c1);
  mul_add_c(a[5], b[5], c2, c3, c1);
  mul_add_c(a[4], b[6], c2, c3, c1);
  mul_add_c(a[3], b[7], c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  mul_add_c(a[4], b[7], c3, c1, c2);
  mul_add_c(a[5], b[6], c3, c1, c2);
  mul_add_c(a[6], b[5], c3, c1, c2);
  mul_add_c(a[7], b[4], c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  mul_add_c(a[7], b[5], c1, c2, c3);
  mul_add_c(a[6], b[6], c1, c2, c3);
  mul_add_c(a[5], b[7], c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  mul_add_c(a[6], b[7], c2, c3, c1);
  mul_add_c(a[7], b[6], c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  mul_add_c(a[7], b[7], c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}
/* bn_mul_comba4 computes the 8-word product r = a * b of two 4-word inputs
 * by the comba method: r[k] accumulates all a[i] * b[j] with i + j == k,
 * with (c1, c2, c3) rotating as the three-word column accumulator. */
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[3], b[3], c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}
/* bn_sqr_comba8 computes the 16-word square r = a^2 of an 8-word input by
 * the comba method. Diagonal terms a[i]^2 use sqr_add_c; each off-diagonal
 * pair a[i]*a[j] (i != j) appears twice in the square, so sqr_add_c2 adds
 * 2 * a[i] * a[j] in one step. (c1, c2, c3) rotate as the column
 * accumulator. */
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  sqr_add_c2(a, 4, 0, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 5, 0, c3, c1, c2);
  sqr_add_c2(a, 4, 1, c3, c1, c2);
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  sqr_add_c2(a, 4, 2, c1, c2, c3);
  sqr_add_c2(a, 5, 1, c1, c2, c3);
  sqr_add_c2(a, 6, 0, c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 0, c2, c3, c1);
  sqr_add_c2(a, 6, 1, c2, c3, c1);
  sqr_add_c2(a, 5, 2, c2, c3, c1);
  sqr_add_c2(a, 4, 3, c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  sqr_add_c(a, 4, c3, c1, c2);
  sqr_add_c2(a, 5, 3, c3, c1, c2);
  sqr_add_c2(a, 6, 2, c3, c1, c2);
  sqr_add_c2(a, 7, 1, c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  sqr_add_c2(a, 7, 2, c1, c2, c3);
  sqr_add_c2(a, 6, 3, c1, c2, c3);
  sqr_add_c2(a, 5, 4, c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  sqr_add_c(a, 5, c2, c3, c1);
  sqr_add_c2(a, 6, 4, c2, c3, c1);
  sqr_add_c2(a, 7, 3, c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  sqr_add_c2(a, 7, 4, c3, c1, c2);
  sqr_add_c2(a, 6, 5, c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  sqr_add_c(a, 6, c1, c2, c3);
  sqr_add_c2(a, 7, 5, c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 6, c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  sqr_add_c(a, 7, c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}
/* bn_sqr_comba4 computes the 8-word square r = a^2 of a 4-word input by
 * the comba method: diagonal terms via sqr_add_c, doubled off-diagonal
 * terms via sqr_add_c2, with (c1, c2, c3) rotating as the accumulator. */
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}
  926. #if defined(OPENSSL_NO_ASM) || (!defined(OPENSSL_ARM) && !defined(OPENSSL_X86_64))
  927. /* This is essentially reference implementation, which may or may not
  928. * result in performance improvement. E.g. on IA-32 this routine was
  929. * observed to give 40% faster rsa1024 private key operations and 10%
  930. * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
  931. * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
  932. * reference implementation, one to be used as starting point for
  933. * platform-specific assembler. Mentioned numbers apply to compiler
  934. * generated code compiled with and without -DOPENSSL_BN_ASM_MONT and
  935. * can vary not only from platform to platform, but even for compiler
  936. * versions. Assembler vs. assembler improvement coefficients can
  937. * [and are known to] differ and are to be documented elsewhere. */
/* bn_mul_mont performs a word-by-word Montgomery multiplication of the
 * num-word values |ap| and |bp| modulo |np|, writing the result to |rp|.
 * |n0p| points to the precomputed reduction constant n0. Always returns 1.
 * See the comment above: this is the portable reference implementation used
 * when no platform-specific assembly version is available. */
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                const BN_ULONG *np, const BN_ULONG *n0p, int num) {
  BN_ULONG c0, c1, ml, *tp, n0;
#ifdef mul64
  BN_ULONG mh;
#endif
  volatile BN_ULONG *vp;
  int i = 0, j;

#if 0 /* template for platform-specific implementation */
  if (ap==bp) return bn_sqr_mont(rp,ap,np,n0p,num);
#endif

  /* num + 2 scratch words on the stack. |vp| is a volatile alias of the
   * scratch buffer so the zeroing stores below cannot be optimized away. */
  vp = tp = alloca((num + 2) * sizeof(BN_ULONG));

  n0 = *n0p;

  /* Peeled first iteration: tp = ap * bp[0]. When only half-word
   * multiplication is available (mul64 defined), bp[i] is pre-split into
   * its (mh:ml) halves for the 5-argument mul/mul_add macros. */
  c0 = 0;
  ml = bp[0];
#ifdef mul64
  mh = HBITS(ml);
  ml = LBITS(ml);
  for (j = 0; j < num; ++j)
    mul(tp[j], ap[j], ml, mh, c0);
#else
  for (j = 0; j < num; ++j)
    mul(tp[j], ap[j], ml, c0);
#endif

  tp[num] = c0;
  tp[num + 1] = 0;
  /* tp already holds ap * bp[0], so skip the multiply-accumulate step of
   * the first loop iteration and go straight to the reduction. */
  goto enter;

  for (i = 0; i < num; i++) {
    /* tp += ap * bp[i], recording the carry out in tp[num]/tp[num + 1]. */
    c0 = 0;
    ml = bp[i];
#ifdef mul64
    mh = HBITS(ml);
    ml = LBITS(ml);
    for (j = 0; j < num; ++j)
      mul_add(tp[j], ap[j], ml, mh, c0);
#else
    for (j = 0; j < num; ++j)
      mul_add(tp[j], ap[j], ml, c0);
#endif

    c1 = (tp[num] + c0) & BN_MASK2;
    tp[num] = c1;
    tp[num + 1] = (c1 < c0 ? 1 : 0);
  enter:
    /* Montgomery reduction step: ml = tp[0] * n0 mod 2^BN_BITS2, then
     * tp += ml * np, which makes the low word zero; the stores to
     * tp[j - 1] shift tp down one word in the same pass. */
    c1 = tp[0];
    ml = (c1 * n0) & BN_MASK2;
    c0 = 0;
#ifdef mul64
    mh = HBITS(ml);
    ml = LBITS(ml);
    mul_add(c1, np[0], ml, mh, c0);
#else
    mul_add(c1, ml, np[0], c0);
#endif
    for (j = 1; j < num; j++) {
      c1 = tp[j];
#ifdef mul64
      mul_add(c1, np[j], ml, mh, c0);
#else
      mul_add(c1, ml, np[j], c0);
#endif
      tp[j - 1] = c1 & BN_MASK2;
    }
    c1 = (tp[num] + c0) & BN_MASK2;
    tp[num - 1] = c1;
    tp[num] = tp[num + 1] + (c1 < c0 ? 1 : 0);
  }

  /* Final conditional subtraction: if the carry word or the top-word
   * comparison suggests tp >= np, try rp = tp - np. If that subtraction
   * borrows and there was no carry word, tp was already reduced and is
   * copied instead. Either way the scratch buffer is zeroed through the
   * volatile alias before returning. */
  if (tp[num] != 0 || tp[num - 1] >= np[num - 1]) {
    c0 = bn_sub_words(rp, tp, np, num);
    if (tp[num] != 0 || c0 == 0) {
      for (i = 0; i < num + 2; i++)
        vp[i] = 0;
      return 1;
    }
  }
  for (i = 0; i < num; i++)
    rp[i] = tp[i], vp[i] = 0;
  vp[num] = 0;
  vp[num + 1] = 0;

  return 1;
}
  1018. #endif
  1019. #endif