/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to. The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code. The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed. i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.] */
#include <openssl/bn.h>

#include <assert.h>

#include "internal.h"


/* Generic implementations of most operations are needed for:
 * - Configurations without inline assembly.
 * - Architectures other than x86 or x86_64.
 * - Windows x86_64; x86_64-gcc.c does not build on MSVC. */
#if defined(OPENSSL_NO_ASM) || \
    (!defined(OPENSSL_X86_64) && !defined(OPENSSL_X86)) || \
    (defined(OPENSSL_X86_64) && defined(OPENSSL_WINDOWS))
#ifdef BN_ULLONG
#define mul_add(r, a, w, c) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)w * (a) + (r) + (c); \
    (r) = Lw(t); \
    (c) = Hw(t); \
  }

#define mul(r, a, w, c) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)w * (a) + (c); \
    (r) = Lw(t); \
    (c) = Hw(t); \
  }

#define sqr(r0, r1, a) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)(a) * (a); \
    (r0) = Lw(t); \
    (r1) = Hw(t); \
  }
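
/* A worked bound for the BN_ULLONG variants above, assuming BN_ULLONG is a
 * type twice the width of a W-bit BN_ULONG: the largest value the expression
 * in mul_add can take is
 *
 *   (2^W - 1) * (2^W - 1) + (2^W - 1) + (2^W - 1) = 2^(2W) - 1,
 *
 * i.e. exactly the largest 2W-bit value, so the double-word accumulation
 * never wraps. */
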
#elif defined(BN_UMULT_LOHI)
#define mul_add(r, a, w, c) \
  { \
    BN_ULONG high, low, ret, tmp = (a); \
    ret = (r); \
    BN_UMULT_LOHI(low, high, w, tmp); \
    ret += (c); \
    (c) = (ret < (c)) ? 1 : 0; \
    (c) += high; \
    ret += low; \
    (c) += (ret < low) ? 1 : 0; \
    (r) = ret; \
  }

#define mul(r, a, w, c) \
  { \
    BN_ULONG high, low, ret, ta = (a); \
    BN_UMULT_LOHI(low, high, w, ta); \
    ret = low + (c); \
    (c) = high; \
    (c) += (ret < low) ? 1 : 0; \
    (r) = ret; \
  }

#define sqr(r0, r1, a) \
  { \
    BN_ULONG tmp = (a); \
    BN_UMULT_LOHI(r0, r1, tmp, tmp); \
  }
#else

/*************************************************************
 * No long long type
 */

#define LBITS(a) ((a) & BN_MASK2l)
#define HBITS(a) (((a) >> BN_BITS4) & BN_MASK2l)
#define L2HBITS(a) (((a) << BN_BITS4) & BN_MASK2)

#define LLBITS(a) ((a) & BN_MASKl)
#define LHBITS(a) (((a) >> BN_BITS2) & BN_MASKl)
#define LL2HBITS(a) ((BN_ULLONG)((a) & BN_MASKl) << BN_BITS2)

#define mul64(l, h, bl, bh) \
  { \
    BN_ULONG m, m1, lt, ht; \
    \
    lt = l; \
    ht = h; \
    m = (bh) * (lt); \
    lt = (bl) * (lt); \
    m1 = (bl) * (ht); \
    ht = (bh) * (ht); \
    m = (m + m1) & BN_MASK2; \
    if (m < m1) \
      ht += L2HBITS((BN_ULONG)1); \
    ht += HBITS(m); \
    m1 = L2HBITS(m); \
    lt = (lt + m1) & BN_MASK2; \
    if (lt < m1) \
      ht++; \
    (l) = lt; \
    (h) = ht; \
  }

#define sqr64(lo, ho, in) \
  { \
    BN_ULONG l, h, m; \
    \
    h = (in); \
    l = LBITS(h); \
    h = HBITS(h); \
    m = (l) * (h); \
    l *= l; \
    h *= h; \
    h += (m & BN_MASK2h1) >> (BN_BITS4 - 1); \
    m = (m & BN_MASK2l) << (BN_BITS4 + 1); \
    l = (l + m) & BN_MASK2; \
    if (l < m) \
      h++; \
    (lo) = l; \
    (ho) = h; \
  }

#define mul_add(r, a, bl, bh, c) \
  { \
    BN_ULONG l, h; \
    \
    h = (a); \
    l = LBITS(h); \
    h = HBITS(h); \
    mul64(l, h, (bl), (bh)); \
    \
    /* non-multiply part */ \
    l = (l + (c)) & BN_MASK2; \
    if (l < (c)) \
      h++; \
    (c) = (r); \
    l = (l + (c)) & BN_MASK2; \
    if (l < (c)) \
      h++; \
    (c) = h & BN_MASK2; \
    (r) = l; \
  }

#define mul(r, a, bl, bh, c) \
  { \
    BN_ULONG l, h; \
    \
    h = (a); \
    l = LBITS(h); \
    h = HBITS(h); \
    mul64(l, h, (bl), (bh)); \
    \
    /* non-multiply part */ \
    l += (c); \
    if ((l & BN_MASK2) < (c)) \
      h++; \
    (c) = h & BN_MASK2; \
    (r) = l & BN_MASK2; \
  }
#endif /* !BN_ULLONG */

#if defined(BN_ULLONG) || defined(BN_UMULT_HIGH)

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w) {
  BN_ULONG c1 = 0;

  assert(num >= 0);
  if (num <= 0) {
    return c1;
  }

  while (num & ~3) {
    mul_add(rp[0], ap[0], w, c1);
    mul_add(rp[1], ap[1], w, c1);
    mul_add(rp[2], ap[2], w, c1);
    mul_add(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  while (num) {
    mul_add(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }

  return c1;
}
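
/* Conceptually, bn_mul_add_words behaves like the following sketch
 * (illustrative only; the real routine is 4-way unrolled and relies on the
 * mul_add macro for the double-word arithmetic):
 *
 *   carry = 0;
 *   for (i = 0; i < num; i++) {
 *     t = (double-word)ap[i] * w + rp[i] + carry;
 *     rp[i] = low_word(t);
 *     carry = high_word(t);
 *   }
 *   return carry;
 *
 * That is, treating ap and rp as little-endian big integers of |num| words,
 * it computes r += a * w and returns the word that carries out of the top. */
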
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) {
  BN_ULONG c1 = 0;

  assert(num >= 0);
  if (num <= 0) {
    return c1;
  }

  while (num & ~3) {
    mul(rp[0], ap[0], w, c1);
    mul(rp[1], ap[1], w, c1);
    mul(rp[2], ap[2], w, c1);
    mul(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  while (num) {
    mul(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }

  return c1;
}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) {
  assert(n >= 0);
  if (n <= 0) {
    return;
  }

  while (n & ~3) {
    sqr(r[0], r[1], a[0]);
    sqr(r[2], r[3], a[1]);
    sqr(r[4], r[5], a[2]);
    sqr(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }

  while (n) {
    sqr(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}
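
/* Note that bn_sqr_words squares each word independently: for every input
 * word a[i] it writes the double-word square a[i]*a[i] to r[2*i] (low half)
 * and r[2*i + 1] (high half). It does not square the full big integer; the
 * cross terms are handled elsewhere by the callers. */
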
#else /* !(defined(BN_ULLONG) || defined(BN_UMULT_HIGH)) */

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w) {
  BN_ULONG c = 0;
  BN_ULONG bl, bh;

  assert(num >= 0);
  if (num <= 0) {
    return (BN_ULONG)0;
  }

  bl = LBITS(w);
  bh = HBITS(w);

  while (num & ~3) {
    mul_add(rp[0], ap[0], bl, bh, c);
    mul_add(rp[1], ap[1], bl, bh, c);
    mul_add(rp[2], ap[2], bl, bh, c);
    mul_add(rp[3], ap[3], bl, bh, c);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  while (num) {
    mul_add(rp[0], ap[0], bl, bh, c);
    ap++;
    rp++;
    num--;
  }

  return c;
}

BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) {
  BN_ULONG carry = 0;
  BN_ULONG bl, bh;

  assert(num >= 0);
  if (num <= 0) {
    return (BN_ULONG)0;
  }

  bl = LBITS(w);
  bh = HBITS(w);

  while (num & ~3) {
    mul(rp[0], ap[0], bl, bh, carry);
    mul(rp[1], ap[1], bl, bh, carry);
    mul(rp[2], ap[2], bl, bh, carry);
    mul(rp[3], ap[3], bl, bh, carry);
    ap += 4;
    rp += 4;
    num -= 4;
  }

  while (num) {
    mul(rp[0], ap[0], bl, bh, carry);
    ap++;
    rp++;
    num--;
  }

  return carry;
}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) {
  assert(n >= 0);
  if (n <= 0) {
    return;
  }

  while (n & ~3) {
    sqr64(r[0], r[1], a[0]);
    sqr64(r[2], r[3], a[1]);
    sqr64(r[4], r[5], a[2]);
    sqr64(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }

  while (n) {
    sqr64(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}

#endif /* !(defined(BN_ULLONG) || defined(BN_UMULT_HIGH)) */

#if defined(BN_ULLONG)

BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  return (BN_ULONG)(((((BN_ULLONG)h) << BN_BITS2) | l) / (BN_ULLONG)d);
}
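
/* With a double-word type available, bn_div_words is simply the truncating
 * division of the two-word value (h << BN_BITS2) | l by d. Callers are
 * expected to keep h below d so that the quotient fits in a single word
 * before the cast back to BN_ULONG. */
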
#else

/* Divide h,l by d and return the result. */
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  BN_ULONG dh, dl, q, ret = 0, th, tl, t;
  int i, count = 2;

  if (d == 0) {
    return BN_MASK2;
  }

  i = BN_num_bits_word(d);
  assert((i == BN_BITS2) || (h <= (BN_ULONG)1 << i));

  i = BN_BITS2 - i;
  if (h >= d) {
    h -= d;
  }

  if (i) {
    d <<= i;
    h = (h << i) | (l >> (BN_BITS2 - i));
    l <<= i;
  }
  dh = (d & BN_MASK2h) >> BN_BITS4;
  dl = (d & BN_MASK2l);
  for (;;) {
    if ((h >> BN_BITS4) == dh) {
      q = BN_MASK2l;
    } else {
      q = h / dh;
    }

    th = q * dh;
    tl = dl * q;
    for (;;) {
      t = h - th;
      if ((t & BN_MASK2h) ||
          ((tl) <= ((t << BN_BITS4) | ((l & BN_MASK2h) >> BN_BITS4)))) {
        break;
      }
      q--;
      th -= dh;
      tl -= dl;
    }

    t = (tl >> BN_BITS4);
    tl = (tl << BN_BITS4) & BN_MASK2h;
    th += t;

    if (l < tl) {
      th++;
    }
    l -= tl;
    if (h < th) {
      h += d;
      q--;
    }
    h -= th;

    if (--count == 0) {
      break;
    }

    ret = q << BN_BITS4;
    h = ((h << BN_BITS4) | (l >> BN_BITS4)) & BN_MASK2;
    l = (l & BN_MASK2l) << BN_BITS4;
  }

  ret |= q;
  return ret;
}
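
/* The loop above is, in effect, schoolbook long division in base 2^BN_BITS4:
 * d is first normalised so its top bit is set, then two half-word quotient
 * digits are estimated from the top half-words (q = h / dh), corrected
 * downwards until the partial remainder fits, and finally combined into the
 * full BN_BITS2-bit quotient. */
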
#endif /* !defined(BN_ULLONG) */

#ifdef BN_ULLONG

BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULLONG ll = 0;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  while (n & ~3) {
    ll += (BN_ULLONG)a[0] + b[0];
    r[0] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    ll += (BN_ULLONG)a[1] + b[1];
    r[1] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    ll += (BN_ULLONG)a[2] + b[2];
    r[2] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    ll += (BN_ULLONG)a[3] + b[3];
    r[3] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }

  while (n) {
    ll += (BN_ULLONG)a[0] + b[0];
    r[0] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    a++;
    b++;
    r++;
    n--;
  }

  return (BN_ULONG)ll;
}

#else /* !BN_ULLONG */

BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULONG c, l, t;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  c = 0;
  while (n & ~3) {
    t = a[0];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[0]) & BN_MASK2;
    c += (l < t);
    r[0] = l;
    t = a[1];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[1]) & BN_MASK2;
    c += (l < t);
    r[1] = l;
    t = a[2];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[2]) & BN_MASK2;
    c += (l < t);
    r[2] = l;
    t = a[3];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[3]) & BN_MASK2;
    c += (l < t);
    r[3] = l;
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }

  while (n) {
    t = a[0];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[0]) & BN_MASK2;
    c += (l < t);
    r[0] = l;
    a++;
    b++;
    r++;
    n--;
  }

  return (BN_ULONG)c;
}
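
/* In the fallback above the per-word carry c always stays 0 or 1: (t < c)
 * can only be true when the addition of c wrapped t to 0, and in that case
 * the following (l < t) comparison can never be true, so at most one of the
 * two carry increments fires for any given word. */
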
#endif /* !BN_ULLONG */

BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULONG t1, t2;
  int c = 0;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  while (n & ~3) {
    t1 = a[0];
    t2 = b[0];
    r[0] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    t1 = a[1];
    t2 = b[1];
    r[1] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    t1 = a[2];
    t2 = b[2];
    r[2] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    t1 = a[3];
    t2 = b[3];
    r[3] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }

  while (n) {
    t1 = a[0];
    t2 = b[0];
    r[0] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    a++;
    b++;
    r++;
    n--;
  }

  return c;
}
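
/* The borrow handling in bn_sub_words relies on the fact that when t1 == t2
 * the difference t1 - t2 - c is either 0 or all-ones, so the incoming borrow
 * c simply propagates unchanged; only when t1 != t2 does the new borrow
 * become (t1 < t2). */
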
/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
 * c=(c2,c1,c0) */
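
/* In the comments above, a "three word number c=(c2,c1,c0)" is the value
 *   c = c2 * 2^(2*BN_BITS2) + c1 * 2^BN_BITS2 + c0,
 * i.e. a little-endian three-word accumulator into which word products are
 * summed. */
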
#ifdef BN_ULLONG

/* Keep in mind that additions to multiplication result can not overflow,
 * because its high half cannot be all-ones. */
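/* Concretely: for W-bit words the product (a)*(b) is at most
 *   (2^W - 1)^2 = 2^(2W) - 2^(W+1) + 1,
 * whose high word is at most 2^W - 2, so adding the W-bit value c0 to the
 * product cannot carry out of the 2W-bit type. */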
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    BN_ULLONG tt = t + c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(tt); \
    hi = (BN_ULONG)Hw(tt); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)a[i] * a[i]; \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#elif defined(BN_UMULT_LOHI)

/* Keep in mind that additions to hi can not overflow, because the high word
 * of a multiplication result cannot be all-ones. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a), tb = (b); \
    BN_ULONG lo, hi; \
    BN_UMULT_LOHI(lo, hi, ta, tb); \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a), tb = (b); \
    BN_ULONG lo, hi, tt; \
    BN_UMULT_LOHI(lo, hi, ta, tb); \
    c0 += lo; \
    tt = hi + ((c0 < lo) ? 1 : 0); \
    c1 += tt; \
    c2 += (c1 < tt) ? 1 : 0; \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a)[i]; \
    BN_ULONG lo, hi; \
    BN_UMULT_LOHI(lo, hi, ta, ta); \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#else /* !BN_ULLONG */

/* Keep in mind that additions to hi can not overflow, because
 * the high word of a multiplication result cannot be all-ones. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG lo = LBITS(a), hi = HBITS(a); \
    BN_ULONG bl = LBITS(b), bh = HBITS(b); \
    mul64(lo, hi, bl, bh); \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG tt; \
    BN_ULONG lo = LBITS(a), hi = HBITS(a); \
    BN_ULONG bl = LBITS(b), bh = HBITS(b); \
    mul64(lo, hi, bl, bh); \
    tt = hi; \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      tt++; \
    c1 = (c1 + tt) & BN_MASK2; \
    if (c1 < tt) \
      c2++; \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG lo, hi; \
    sqr64(lo, hi, (a)[i]); \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#endif /* !BN_ULLONG */
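
/* The bn_mul_comba* and bn_sqr_comba* functions below use Comba
 * (column-wise) multiplication: all word products a[i]*b[j] with i + j == k
 * are accumulated into the three-word accumulator for column k, the low
 * accumulator word is written to r[k], and the accumulator then shifts down
 * by one word for the next column. A rough sketch of the same computation
 * (illustrative only, ignoring the unrolling and the splitting of the
 * accumulator into c1/c2/c3):
 *
 *   acc = 0;                              // three-word accumulator
 *   for (k = 0; k < 2 * n - 1; k++) {
 *     for (each i, j with i + j == k)
 *       acc += (double-word)a[i] * b[j];
 *     r[k] = low_word(acc);
 *     acc >>= BN_BITS2;
 *   }
 *   r[2 * n - 1] = low_word(acc);
 *
 * The squaring variants use sqr_add_c2 to add each cross product a[i]*a[j]
 * (i > j) twice, so only about half the products need to be computed. */
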
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[4], b[0], c2, c3, c1);
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  mul_add_c(a[0], b[4], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[0], b[5], c3, c1, c2);
  mul_add_c(a[1], b[4], c3, c1, c2);
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  mul_add_c(a[4], b[1], c3, c1, c2);
  mul_add_c(a[5], b[0], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[6], b[0], c1, c2, c3);
  mul_add_c(a[5], b[1], c1, c2, c3);
  mul_add_c(a[4], b[2], c1, c2, c3);
  mul_add_c(a[3], b[3], c1, c2, c3);
  mul_add_c(a[2], b[4], c1, c2, c3);
  mul_add_c(a[1], b[5], c1, c2, c3);
  mul_add_c(a[0], b[6], c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  mul_add_c(a[0], b[7], c2, c3, c1);
  mul_add_c(a[1], b[6], c2, c3, c1);
  mul_add_c(a[2], b[5], c2, c3, c1);
  mul_add_c(a[3], b[4], c2, c3, c1);
  mul_add_c(a[4], b[3], c2, c3, c1);
  mul_add_c(a[5], b[2], c2, c3, c1);
  mul_add_c(a[6], b[1], c2, c3, c1);
  mul_add_c(a[7], b[0], c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  mul_add_c(a[7], b[1], c3, c1, c2);
  mul_add_c(a[6], b[2], c3, c1, c2);
  mul_add_c(a[5], b[3], c3, c1, c2);
  mul_add_c(a[4], b[4], c3, c1, c2);
  mul_add_c(a[3], b[5], c3, c1, c2);
  mul_add_c(a[2], b[6], c3, c1, c2);
  mul_add_c(a[1], b[7], c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  mul_add_c(a[2], b[7], c1, c2, c3);
  mul_add_c(a[3], b[6], c1, c2, c3);
  mul_add_c(a[4], b[5], c1, c2, c3);
  mul_add_c(a[5], b[4], c1, c2, c3);
  mul_add_c(a[6], b[3], c1, c2, c3);
  mul_add_c(a[7], b[2], c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  mul_add_c(a[7], b[3], c2, c3, c1);
  mul_add_c(a[6], b[4], c2, c3, c1);
  mul_add_c(a[5], b[5], c2, c3, c1);
  mul_add_c(a[4], b[6], c2, c3, c1);
  mul_add_c(a[3], b[7], c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  mul_add_c(a[4], b[7], c3, c1, c2);
  mul_add_c(a[5], b[6], c3, c1, c2);
  mul_add_c(a[6], b[5], c3, c1, c2);
  mul_add_c(a[7], b[4], c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  mul_add_c(a[7], b[5], c1, c2, c3);
  mul_add_c(a[6], b[6], c1, c2, c3);
  mul_add_c(a[5], b[7], c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  mul_add_c(a[6], b[7], c2, c3, c1);
  mul_add_c(a[7], b[6], c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  mul_add_c(a[7], b[7], c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[3], b[3], c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}

void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  sqr_add_c2(a, 4, 0, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 5, 0, c3, c1, c2);
  sqr_add_c2(a, 4, 1, c3, c1, c2);
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  sqr_add_c2(a, 4, 2, c1, c2, c3);
  sqr_add_c2(a, 5, 1, c1, c2, c3);
  sqr_add_c2(a, 6, 0, c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 0, c2, c3, c1);
  sqr_add_c2(a, 6, 1, c2, c3, c1);
  sqr_add_c2(a, 5, 2, c2, c3, c1);
  sqr_add_c2(a, 4, 3, c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  sqr_add_c(a, 4, c3, c1, c2);
  sqr_add_c2(a, 5, 3, c3, c1, c2);
  sqr_add_c2(a, 6, 2, c3, c1, c2);
  sqr_add_c2(a, 7, 1, c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  sqr_add_c2(a, 7, 2, c1, c2, c3);
  sqr_add_c2(a, 6, 3, c1, c2, c3);
  sqr_add_c2(a, 5, 4, c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  sqr_add_c(a, 5, c2, c3, c1);
  sqr_add_c2(a, 6, 4, c2, c3, c1);
  sqr_add_c2(a, 7, 3, c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  sqr_add_c2(a, 7, 4, c3, c1, c2);
  sqr_add_c2(a, 6, 5, c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  sqr_add_c(a, 6, c1, c2, c3);
  sqr_add_c2(a, 7, 5, c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 6, c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  sqr_add_c(a, 7, c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}

#endif