
generic.c 29 KiB

/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.] */
#include <openssl/bn.h>

#include <assert.h>

#include "internal.h"
/* Generic implementations of most operations are needed for:
 * - Configurations without inline assembly.
 * - Architectures other than x86 or x86_64.
 * - Windows x86_64; x86_64-gcc.c does not build on MSVC. */
#if defined(OPENSSL_NO_ASM) || \
    (!defined(OPENSSL_X86_64) && !defined(OPENSSL_X86)) || \
    (defined(OPENSSL_X86_64) && defined(OPENSSL_WINDOWS))

#if defined(OPENSSL_WINDOWS)
#define alloca _alloca
#else
#include <alloca.h>
#endif
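
/* The macros below are the double-word primitives used by the word routines
 * in this file.  Whichever branch is selected, the contract is the same:
 *
 *   mul_add(r, a, w, c)  computes t = a * w + r + c, leaving the low word of
 *                        t in r and the high word in c;
 *   mul(r, a, w, c)      computes t = a * w + c, low word to r, high to c;
 *   sqr(r0, r1, a)       computes a * a, low word to r0, high word to r1.
 *
 * (In the no-long-long branch the multiplier w is passed as the two half
 * words bl/bh instead of a single word.) */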
#ifdef BN_LLONG

#define mul_add(r, a, w, c) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)w * (a) + (r) + (c); \
    (r) = Lw(t); \
    (c) = Hw(t); \
  }

#define mul(r, a, w, c) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)w * (a) + (c); \
    (r) = Lw(t); \
    (c) = Hw(t); \
  }

#define sqr(r0, r1, a) \
  { \
    BN_ULLONG t; \
    t = (BN_ULLONG)(a) * (a); \
    (r0) = Lw(t); \
    (r1) = Hw(t); \
  }

#elif defined(BN_UMULT_LOHI)

#define mul_add(r, a, w, c) \
  { \
    BN_ULONG high, low, ret, tmp = (a); \
    ret = (r); \
    BN_UMULT_LOHI(low, high, w, tmp); \
    ret += (c); \
    (c) = (ret < (c)) ? 1 : 0; \
    (c) += high; \
    ret += low; \
    (c) += (ret < low) ? 1 : 0; \
    (r) = ret; \
  }

#define mul(r, a, w, c) \
  { \
    BN_ULONG high, low, ret, ta = (a); \
    BN_UMULT_LOHI(low, high, w, ta); \
    ret = low + (c); \
    (c) = high; \
    (c) += (ret < low) ? 1 : 0; \
    (r) = ret; \
  }

#define sqr(r0, r1, a) \
  { \
    BN_ULONG tmp = (a); \
    BN_UMULT_LOHI(r0, r1, tmp, tmp); \
  }

#else

/*************************************************************
 * No long long type
 */
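
/* Without a double-width integer type, each BN_ULONG is handled as two
 * half-words of BN_BITS4 bits: LBITS(a) extracts the low half, HBITS(a) the
 * high half, and L2HBITS(a) shifts a low half back up.  mul64() and sqr64()
 * then assemble full double-word products from half-word products in the
 * usual schoolbook fashion, carrying by hand. */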
#define LBITS(a) ((a) & BN_MASK2l)
#define HBITS(a) (((a) >> BN_BITS4) & BN_MASK2l)
#define L2HBITS(a) (((a) << BN_BITS4) & BN_MASK2)

#define LLBITS(a) ((a) & BN_MASKl)
#define LHBITS(a) (((a) >> BN_BITS2) & BN_MASKl)
#define LL2HBITS(a) ((BN_ULLONG)((a) & BN_MASKl) << BN_BITS2)

#define mul64(l, h, bl, bh) \
  { \
    BN_ULONG m, m1, lt, ht; \
    \
    lt = l; \
    ht = h; \
    m = (bh) * (lt); \
    lt = (bl) * (lt); \
    m1 = (bl) * (ht); \
    ht = (bh) * (ht); \
    m = (m + m1) & BN_MASK2; \
    if (m < m1) \
      ht += L2HBITS((BN_ULONG)1); \
    ht += HBITS(m); \
    m1 = L2HBITS(m); \
    lt = (lt + m1) & BN_MASK2; \
    if (lt < m1) \
      ht++; \
    (l) = lt; \
    (h) = ht; \
  }

#define sqr64(lo, ho, in) \
  { \
    BN_ULONG l, h, m; \
    \
    h = (in); \
    l = LBITS(h); \
    h = HBITS(h); \
    m = (l) * (h); \
    l *= l; \
    h *= h; \
    h += (m & BN_MASK2h1) >> (BN_BITS4 - 1); \
    m = (m & BN_MASK2l) << (BN_BITS4 + 1); \
    l = (l + m) & BN_MASK2; \
    if (l < m) \
      h++; \
    (lo) = l; \
    (ho) = h; \
  }

#define mul_add(r, a, bl, bh, c) \
  { \
    BN_ULONG l, h; \
    \
    h = (a); \
    l = LBITS(h); \
    h = HBITS(h); \
    mul64(l, h, (bl), (bh)); \
    \
    /* non-multiply part */ \
    l = (l + (c)) & BN_MASK2; \
    if (l < (c)) \
      h++; \
    (c) = (r); \
    l = (l + (c)) & BN_MASK2; \
    if (l < (c)) \
      h++; \
    (c) = h & BN_MASK2; \
    (r) = l; \
  }

#define mul(r, a, bl, bh, c) \
  { \
    BN_ULONG l, h; \
    \
    h = (a); \
    l = LBITS(h); \
    h = HBITS(h); \
    mul64(l, h, (bl), (bh)); \
    \
    /* non-multiply part */ \
    l += (c); \
    if ((l & BN_MASK2) < (c)) \
      h++; \
    (c) = h & BN_MASK2; \
    (r) = l & BN_MASK2; \
  }

#endif /* !BN_LLONG */

#if defined(BN_LLONG) || defined(BN_UMULT_HIGH)

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w) {
  BN_ULONG c1 = 0;

  assert(num >= 0);
  if (num <= 0) {
    return c1;
  }
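
  /* num & ~3 is nonzero exactly when num >= 4, so the first loop handles four
   * words per iteration and the second mops up the remaining 0-3 words.  The
   * other word routines below use the same unrolling pattern. */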
  while (num & ~3) {
    mul_add(rp[0], ap[0], w, c1);
    mul_add(rp[1], ap[1], w, c1);
    mul_add(rp[2], ap[2], w, c1);
    mul_add(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  while (num) {
    mul_add(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }
  return c1;
}

BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) {
  BN_ULONG c1 = 0;

  assert(num >= 0);
  if (num <= 0) {
    return c1;
  }

  while (num & ~3) {
    mul(rp[0], ap[0], w, c1);
    mul(rp[1], ap[1], w, c1);
    mul(rp[2], ap[2], w, c1);
    mul(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  while (num) {
    mul(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }
  return c1;
}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) {
  assert(n >= 0);
  if (n <= 0) {
    return;
  }

  while (n & ~3) {
    sqr(r[0], r[1], a[0]);
    sqr(r[2], r[3], a[1]);
    sqr(r[4], r[5], a[2]);
    sqr(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }
  while (n) {
    sqr(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}

#else /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w) {
  BN_ULONG c = 0;
  BN_ULONG bl, bh;

  assert(num >= 0);
  if (num <= 0) {
    return (BN_ULONG)0;
  }

  bl = LBITS(w);
  bh = HBITS(w);

  while (num & ~3) {
    mul_add(rp[0], ap[0], bl, bh, c);
    mul_add(rp[1], ap[1], bl, bh, c);
    mul_add(rp[2], ap[2], bl, bh, c);
    mul_add(rp[3], ap[3], bl, bh, c);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  while (num) {
    mul_add(rp[0], ap[0], bl, bh, c);
    ap++;
    rp++;
    num--;
  }
  return c;
}

BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) {
  BN_ULONG carry = 0;
  BN_ULONG bl, bh;

  assert(num >= 0);
  if (num <= 0) {
    return (BN_ULONG)0;
  }

  bl = LBITS(w);
  bh = HBITS(w);

  while (num & ~3) {
    mul(rp[0], ap[0], bl, bh, carry);
    mul(rp[1], ap[1], bl, bh, carry);
    mul(rp[2], ap[2], bl, bh, carry);
    mul(rp[3], ap[3], bl, bh, carry);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  while (num) {
    mul(rp[0], ap[0], bl, bh, carry);
    ap++;
    rp++;
    num--;
  }
  return carry;
}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) {
  assert(n >= 0);
  if (n <= 0) {
    return;
  }

  while (n & ~3) {
    sqr64(r[0], r[1], a[0]);
    sqr64(r[2], r[3], a[1]);
    sqr64(r[4], r[5], a[2]);
    sqr64(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }
  while (n) {
    sqr64(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}

#endif /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */

#if defined(BN_LLONG)

BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  return (BN_ULONG)(((((BN_ULLONG)h) << BN_BITS2) | l) / (BN_ULLONG)d);
}

#else

/* Divide h,l by d and return the result. */
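/* This is the classical two-digit long-division step: d is first shifted left
 * so that its top bit is set (h and l are shifted to match), then two
 * half-word quotient digits are estimated from the top half of d and adjusted
 * downwards until the corresponding partial remainder is non-negative.
 * Callers are expected to arrange that h < d, so the quotient fits in a
 * single word. */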
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  BN_ULONG dh, dl, q, ret = 0, th, tl, t;
  int i, count = 2;

  if (d == 0) {
    return BN_MASK2;
  }

  i = BN_num_bits_word(d);
  assert((i == BN_BITS2) || (h <= (BN_ULONG)1 << i));

  i = BN_BITS2 - i;
  if (h >= d) {
    h -= d;
  }

  if (i) {
    d <<= i;
    h = (h << i) | (l >> (BN_BITS2 - i));
    l <<= i;
  }
  dh = (d & BN_MASK2h) >> BN_BITS4;
  dl = (d & BN_MASK2l);
  for (;;) {
    if ((h >> BN_BITS4) == dh) {
      q = BN_MASK2l;
    } else {
      q = h / dh;
    }

    th = q * dh;
    tl = dl * q;
    for (;;) {
      t = h - th;
      if ((t & BN_MASK2h) ||
          ((tl) <= ((t << BN_BITS4) | ((l & BN_MASK2h) >> BN_BITS4)))) {
        break;
      }
      q--;
      th -= dh;
      tl -= dl;
    }
    t = (tl >> BN_BITS4);
    tl = (tl << BN_BITS4) & BN_MASK2h;
    th += t;

    if (l < tl) {
      th++;
    }
    l -= tl;
    if (h < th) {
      h += d;
      q--;
    }
    h -= th;

    if (--count == 0) {
      break;
    }

    ret = q << BN_BITS4;
    h = ((h << BN_BITS4) | (l >> BN_BITS4)) & BN_MASK2;
    l = (l & BN_MASK2l) << BN_BITS4;
  }

  ret |= q;
  return ret;
}
#endif /* !defined(BN_LLONG) */

#ifdef BN_LLONG

BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULLONG ll = 0;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  while (n & ~3) {
    ll += (BN_ULLONG)a[0] + b[0];
    r[0] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    ll += (BN_ULLONG)a[1] + b[1];
    r[1] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    ll += (BN_ULLONG)a[2] + b[2];
    r[2] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    ll += (BN_ULLONG)a[3] + b[3];
    r[3] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }
  while (n) {
    ll += (BN_ULLONG)a[0] + b[0];
    r[0] = (BN_ULONG)ll & BN_MASK2;
    ll >>= BN_BITS2;
    a++;
    b++;
    r++;
    n--;
  }
  return (BN_ULONG)ll;
}

#else /* !BN_LLONG */

BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULONG c, l, t;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }

  c = 0;
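
  /* Carry detection without a wider type: after an unsigned addition
   * x = (x + y) & BN_MASK2, the sum wrapped around exactly when x < y, so the
   * comparisons below recover the carry bit from each partial sum. */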
  while (n & ~3) {
    t = a[0];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[0]) & BN_MASK2;
    c += (l < t);
    r[0] = l;
    t = a[1];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[1]) & BN_MASK2;
    c += (l < t);
    r[1] = l;
    t = a[2];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[2]) & BN_MASK2;
    c += (l < t);
    r[2] = l;
    t = a[3];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[3]) & BN_MASK2;
    c += (l < t);
    r[3] = l;
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }
  while (n) {
    t = a[0];
    t = (t + c) & BN_MASK2;
    c = (t < c);
    l = (t + b[0]) & BN_MASK2;
    c += (l < t);
    r[0] = l;
    a++;
    b++;
    r++;
    n--;
  }
  return (BN_ULONG)c;
}

#endif /* !BN_LLONG */

BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      int n) {
  BN_ULONG t1, t2;
  int c = 0;

  assert(n >= 0);
  if (n <= 0) {
    return (BN_ULONG)0;
  }
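
  /* Borrow propagation: when t1 != t2 the new borrow is simply (t1 < t2),
   * independent of the incoming borrow; when t1 == t2 the result word is
   * 0 - c, which borrows exactly when c was already set, so c is left
   * unchanged. */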
  while (n & ~3) {
    t1 = a[0];
    t2 = b[0];
    r[0] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    t1 = a[1];
    t2 = b[1];
    r[1] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    t1 = a[2];
    t2 = b[2];
    r[2] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    t1 = a[3];
    t2 = b[3];
    r[3] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }
  while (n) {
    t1 = a[0];
    t2 = b[0];
    r[0] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2) {
      c = (t1 < t2);
    }
    a++;
    b++;
    r++;
    n--;
  }
  return c;
}
/* mul_add_c(a,b,c0,c1,c2)    -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2)   -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2)    -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
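
/* (c2,c1,c0) acts as a 3*BN_BITS2-bit column accumulator for the comba
 * multiplication and squaring routines below: each r[k] is the sum of all
 * products a[i]*b[j] with i + j == k, plus the carry out of column k - 1.
 * The three words rotate roles from one column to the next, which is why the
 * argument order of c0/c1/c2 shifts between calls. */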
#ifdef BN_LLONG

/* Keep in mind that additions to multiplication result can not overflow,
 * because its high half cannot be all-ones. */
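/* (Both factors are at most 2^BN_BITS2 - 1, so the product is at most
 * 2^(2*BN_BITS2) - 2^(BN_BITS2+1) + 1 and its high word is at most
 * 2^BN_BITS2 - 2, leaving room for the carry that is added to it.) */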
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    BN_ULLONG tt = t + c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(tt); \
    hi = (BN_ULONG)Hw(tt); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG hi; \
    BN_ULLONG t = (BN_ULLONG)a[i] * a[i]; \
    t += c0; /* no carry */ \
    c0 = (BN_ULONG)Lw(t); \
    hi = (BN_ULONG)Hw(t); \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#elif defined(BN_UMULT_LOHI)

/* Keep in mind that additions to hi can not overflow, because the high word of
 * a multiplication result cannot be all-ones. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a), tb = (b); \
    BN_ULONG lo, hi; \
    BN_UMULT_LOHI(lo, hi, ta, tb); \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a), tb = (b); \
    BN_ULONG lo, hi, tt; \
    BN_UMULT_LOHI(lo, hi, ta, tb); \
    c0 += lo; \
    tt = hi + ((c0 < lo) ? 1 : 0); \
    c1 += tt; \
    c2 += (c1 < tt) ? 1 : 0; \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG ta = (a)[i]; \
    BN_ULONG lo, hi; \
    BN_UMULT_LOHI(lo, hi, ta, ta); \
    c0 += lo; \
    hi += (c0 < lo) ? 1 : 0; \
    c1 += hi; \
    c2 += (c1 < hi) ? 1 : 0; \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#else /* !BN_LLONG */

/* Keep in mind that additions to hi can not overflow, because
 * the high word of a multiplication result cannot be all-ones. */
#define mul_add_c(a, b, c0, c1, c2) \
  do { \
    BN_ULONG lo = LBITS(a), hi = HBITS(a); \
    BN_ULONG bl = LBITS(b), bh = HBITS(b); \
    mul64(lo, hi, bl, bh); \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do { \
    BN_ULONG tt; \
    BN_ULONG lo = LBITS(a), hi = HBITS(a); \
    BN_ULONG bl = LBITS(b), bh = HBITS(b); \
    mul64(lo, hi, bl, bh); \
    tt = hi; \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      tt++; \
    c1 = (c1 + tt) & BN_MASK2; \
    if (c1 < tt) \
      c2++; \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do { \
    BN_ULONG lo, hi; \
    sqr64(lo, hi, (a)[i]); \
    c0 = (c0 + lo) & BN_MASK2; \
    if (c0 < lo) \
      hi++; \
    c1 = (c1 + hi) & BN_MASK2; \
    if (c1 < hi) \
      c2++; \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#endif /* !BN_LLONG */
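
/* bn_mul_comba8 computes the 16-word product r = a * b for 8-word inputs, one
 * output column at a time ("comba" multiplication); bn_mul_comba4 below is
 * the 4-word variant producing an 8-word result. */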
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[4], b[0], c2, c3, c1);
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  mul_add_c(a[0], b[4], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[0], b[5], c3, c1, c2);
  mul_add_c(a[1], b[4], c3, c1, c2);
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  mul_add_c(a[4], b[1], c3, c1, c2);
  mul_add_c(a[5], b[0], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[6], b[0], c1, c2, c3);
  mul_add_c(a[5], b[1], c1, c2, c3);
  mul_add_c(a[4], b[2], c1, c2, c3);
  mul_add_c(a[3], b[3], c1, c2, c3);
  mul_add_c(a[2], b[4], c1, c2, c3);
  mul_add_c(a[1], b[5], c1, c2, c3);
  mul_add_c(a[0], b[6], c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  mul_add_c(a[0], b[7], c2, c3, c1);
  mul_add_c(a[1], b[6], c2, c3, c1);
  mul_add_c(a[2], b[5], c2, c3, c1);
  mul_add_c(a[3], b[4], c2, c3, c1);
  mul_add_c(a[4], b[3], c2, c3, c1);
  mul_add_c(a[5], b[2], c2, c3, c1);
  mul_add_c(a[6], b[1], c2, c3, c1);
  mul_add_c(a[7], b[0], c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  mul_add_c(a[7], b[1], c3, c1, c2);
  mul_add_c(a[6], b[2], c3, c1, c2);
  mul_add_c(a[5], b[3], c3, c1, c2);
  mul_add_c(a[4], b[4], c3, c1, c2);
  mul_add_c(a[3], b[5], c3, c1, c2);
  mul_add_c(a[2], b[6], c3, c1, c2);
  mul_add_c(a[1], b[7], c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  mul_add_c(a[2], b[7], c1, c2, c3);
  mul_add_c(a[3], b[6], c1, c2, c3);
  mul_add_c(a[4], b[5], c1, c2, c3);
  mul_add_c(a[5], b[4], c1, c2, c3);
  mul_add_c(a[6], b[3], c1, c2, c3);
  mul_add_c(a[7], b[2], c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  mul_add_c(a[7], b[3], c2, c3, c1);
  mul_add_c(a[6], b[4], c2, c3, c1);
  mul_add_c(a[5], b[5], c2, c3, c1);
  mul_add_c(a[4], b[6], c2, c3, c1);
  mul_add_c(a[3], b[7], c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  mul_add_c(a[4], b[7], c3, c1, c2);
  mul_add_c(a[5], b[6], c3, c1, c2);
  mul_add_c(a[6], b[5], c3, c1, c2);
  mul_add_c(a[7], b[4], c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  mul_add_c(a[7], b[5], c1, c2, c3);
  mul_add_c(a[6], b[6], c1, c2, c3);
  mul_add_c(a[5], b[7], c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  mul_add_c(a[6], b[7], c2, c3, c1);
  mul_add_c(a[7], b[6], c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  mul_add_c(a[7], b[7], c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[3], b[3], c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}
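
/* The squaring variants use the same column scheme, but add each diagonal
 * term a[i]^2 once via sqr_add_c and each off-diagonal term a[i]*a[j] (i > j)
 * twice via sqr_add_c2, which roughly halves the number of multiplications
 * compared with bn_mul_comba8(r, a, a). */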
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  sqr_add_c2(a, 4, 0, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 5, 0, c3, c1, c2);
  sqr_add_c2(a, 4, 1, c3, c1, c2);
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  sqr_add_c2(a, 4, 2, c1, c2, c3);
  sqr_add_c2(a, 5, 1, c1, c2, c3);
  sqr_add_c2(a, 6, 0, c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 0, c2, c3, c1);
  sqr_add_c2(a, 6, 1, c2, c3, c1);
  sqr_add_c2(a, 5, 2, c2, c3, c1);
  sqr_add_c2(a, 4, 3, c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  sqr_add_c(a, 4, c3, c1, c2);
  sqr_add_c2(a, 5, 3, c3, c1, c2);
  sqr_add_c2(a, 6, 2, c3, c1, c2);
  sqr_add_c2(a, 7, 1, c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  sqr_add_c2(a, 7, 2, c1, c2, c3);
  sqr_add_c2(a, 6, 3, c1, c2, c3);
  sqr_add_c2(a, 5, 4, c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  sqr_add_c(a, 5, c2, c3, c1);
  sqr_add_c2(a, 6, 4, c2, c3, c1);
  sqr_add_c2(a, 7, 3, c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  sqr_add_c2(a, 7, 4, c3, c1, c2);
  sqr_add_c2(a, 6, 5, c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  sqr_add_c(a, 6, c1, c2, c3);
  sqr_add_c2(a, 7, 5, c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 6, c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  sqr_add_c(a, 7, c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}
#if defined(OPENSSL_NO_ASM) || (!defined(OPENSSL_ARM) && !defined(OPENSSL_X86_64))

/* This is essentially a reference implementation, which may or may not
 * result in a performance improvement. E.g. on IA-32 this routine was
 * observed to give 40% faster rsa1024 private key operations and 10%
 * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
 * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
 * reference implementation, one to be used as a starting point for
 * platform-specific assembler. Mentioned numbers apply to compiler
 * generated code compiled with and without -DOPENSSL_BN_ASM_MONT and
 * can vary not only from platform to platform, but even for compiler
 * versions. Assembler vs. assembler improvement coefficients can
 * [and are known to] differ and are to be documented elsewhere. */
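/* bn_mul_mont computes rp = ap * bp / R mod np, where R = 2^(num * BN_BITS2)
 * and *n0p is the precomputed Montgomery constant -np[0]^(-1) mod
 * 2^BN_BITS2.  The multiplication and the word-by-word reduction are
 * interleaved: after each multiplication pass, a multiple of np is added so
 * that the lowest word of the accumulator becomes zero and can be shifted
 * out. */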
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                const BN_ULONG *np, const BN_ULONG *n0p, int num) {
  BN_ULONG c0, c1, ml, *tp, n0;
#ifdef mul64
  BN_ULONG mh;
#endif
  volatile BN_ULONG *vp;
  int i = 0, j;

#if 0 /* template for platform-specific implementation */
  if (ap==bp) return bn_sqr_mont(rp,ap,np,n0p,num);
#endif
  vp = tp = alloca((num + 2) * sizeof(BN_ULONG));

  n0 = *n0p;

  c0 = 0;
  ml = bp[0];
#ifdef mul64
  mh = HBITS(ml);
  ml = LBITS(ml);
  for (j = 0; j < num; ++j) {
    mul(tp[j], ap[j], ml, mh, c0);
  }
#else
  for (j = 0; j < num; ++j) {
    mul(tp[j], ap[j], ml, c0);
  }
#endif

  tp[num] = c0;
  tp[num + 1] = 0;
  goto enter;
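
  /* The first multiplication pass (by bp[0]) initialises tp with mul() rather
   * than mul_add(), so control jumps into the middle of the loop body to
   * share the reduction step; the loop then continues from i = 1. */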
  for (; i < num; i++) {
    c0 = 0;
    ml = bp[i];
#ifdef mul64
    mh = HBITS(ml);
    ml = LBITS(ml);
    for (j = 0; j < num; ++j) {
      mul_add(tp[j], ap[j], ml, mh, c0);
    }
#else
    for (j = 0; j < num; ++j) {
      mul_add(tp[j], ap[j], ml, c0);
    }
#endif
    c1 = (tp[num] + c0) & BN_MASK2;
    tp[num] = c1;
    tp[num + 1] = (c1 < c0 ? 1 : 0);

  enter:
    c1 = tp[0];
    ml = (c1 * n0) & BN_MASK2;
    c0 = 0;
#ifdef mul64
    mh = HBITS(ml);
    ml = LBITS(ml);
    mul_add(c1, np[0], ml, mh, c0);
#else
    mul_add(c1, ml, np[0], c0);
#endif
    for (j = 1; j < num; j++) {
      c1 = tp[j];
#ifdef mul64
      mul_add(c1, np[j], ml, mh, c0);
#else
      mul_add(c1, ml, np[j], c0);
#endif
      tp[j - 1] = c1 & BN_MASK2;
    }
    c1 = (tp[num] + c0) & BN_MASK2;
    tp[num - 1] = c1;
    tp[num] = tp[num + 1] + (c1 < c0 ? 1 : 0);
  }

  if (tp[num] != 0 || tp[num - 1] >= np[num - 1]) {
    c0 = bn_sub_words(rp, tp, np, num);
    if (tp[num] != 0 || c0 == 0) {
      for (i = 0; i < num + 2; i++) {
        vp[i] = 0;
      }
      return 1;
    }
  }
  for (i = 0; i < num; i++) {
    rp[i] = tp[i], vp[i] = 0;
  }
  vp[num] = 0;
  vp[num + 1] = 0;
  return 1;
}
#endif

#endif