#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for x86_64.
#
# March 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#               IALU/gcc-4.8(*) AVX(**)         AVX2
# P4            4.90/+120%      -
# Core 2        2.39/+90%       -
# Westmere      1.86/+120%      -
# Sandy Bridge  1.39/+140%      1.10
# Haswell       1.10/+175%      1.11            0.65
# Skylake       1.12/+120%      0.96            0.51
# Silvermont    2.83/+95%       -
# VIA Nano      1.82/+150%      -
# Sledgehammer  1.38/+160%      -
# Bulldozer     2.21/+130%      0.97
#
# (*)  improvement coefficients relative to clang are more modest and
#      are ~50% on most processors; in both cases we are comparing to
#      __int128 code;
# (**) an SSE2 implementation was attempted, but among non-AVX
#      processors it was faster than integer-only code only on older
#      Intel P4 and Core processors, by 30-50% (the newer the
#      processor, the smaller the gain), while being slower on
#      contemporary ones, e.g. almost 2x slower on Atom; as the
#      former are naturally disappearing, SSE2 is deemed unnecessary;
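#
# A reference model of what the generated functions compute (an
# illustrative sketch only, nothing below depends on it): with key
# pair (r,nonce), prime p = 2^130-5, and 16-byte little-endian
# blocks m,
#
#	h = 0
#	for each block m:
#		h = (h + m + padbit*2^128) * r mod p
#	tag = (h + nonce) mod 2^128
#
# padbit is supplied by the caller and is 1 for complete blocks.
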
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

$avx = 2;

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
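#
# A sketch of the identity being used (comments only, no code): the
# value h = h2*2^128 + h1*2^64 + h0 is multiplied by r = r1*2^64 + r0
# modulo p = 2^130-5, exploiting 2^130 = 5 (mod p). Product terms at
# 2^128 and above are folded down by substituting s1 = r1 + (r1>>2) =
# r1*5/4 for r1 (exact, since clamping clears the low two bits of r1),
# and bits above 2^130 are folded in the "last reduction step" below
# as (d3 & -4) + (d3>>2) = 5*(d3>>2), leaving h partially reduced.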
$code.=<<___;
mulq $h0 # h0*r1
mov %rax,$d2
mov $r0,%rax
mov %rdx,$d3

mulq $h0 # h0*r0
mov %rax,$h0 # future $h0
mov $r0,%rax
mov %rdx,$d1

mulq $h1 # h1*r0
add %rax,$d2
mov $s1,%rax
adc %rdx,$d3

mulq $h1 # h1*s1
mov $h2,$h1 # borrow $h1
add %rax,$h0
adc %rdx,$d1

imulq $s1,$h1 # h2*s1
add $h1,$d2
mov $d1,$h1
adc \$0,$d3

imulq $r0,$h2 # h2*r0
add $d2,$h1
mov \$-4,%rax # mask value
adc $h2,$d3

and $d3,%rax # last reduction step
mov $d3,$h2
shr \$2,$d3
and \$3,$h2
add $d3,%rax
add %rax,$h0
adc \$0,$h1
___
}
########################################################################
# Layout of opaque area is following.
#
# unsigned __int64 h[3]; # current hash value base 2^64
# unsigned __int64 r[2]; # key value base 2^64
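#
# or, as an equivalent C sketch of the same 40 bytes (illustrative
# only, no such struct is declared anywhere in this module):
#
#	struct poly1305_ctx {
#		uint64_t h[3];	/* offset  0: hash value, base 2^64 */
#		uint64_t r[2];	/* offset 24: clamped key */
#	};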

$code.=<<___;
.text

.extern OPENSSL_ia32cap_P

.globl poly1305_init
.globl poly1305_blocks
.globl poly1305_emit
.type poly1305_init,\@function,3
.align 32
poly1305_init:
xor %rax,%rax
mov %rax,0($ctx) # initialize hash value
mov %rax,8($ctx)
mov %rax,16($ctx)

cmp \$0,$inp
je .Lno_key

lea poly1305_blocks(%rip),%r10
lea poly1305_emit(%rip),%r11
___
$code.=<<___ if ($avx);
mov OPENSSL_ia32cap_P+4(%rip),%r9
lea poly1305_blocks_avx(%rip),%rax
lea poly1305_emit_avx(%rip),%rcx
bt \$`60-32`,%r9 # AVX?
cmovc %rax,%r10
cmovc %rcx,%r11
___
$code.=<<___ if ($avx>1);
lea poly1305_blocks_avx2(%rip),%rax
bt \$`5+32`,%r9 # AVX2?
cmovc %rax,%r10
___
$code.=<<___;
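# clamp the key per the Poly1305 spec: the two masks below clear the
# top four bits of key bytes 3, 7, 11, 15 and the low two bits of
# bytes 4, 8, 12, which is what makes the s1 = r1 + (r1>>2) trick in
# the block function exact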
mov \$0x0ffffffc0fffffff,%rax
mov \$0x0ffffffc0ffffffc,%rcx
and 0($inp),%rax
and 8($inp),%rcx
mov %rax,24($ctx)
mov %rcx,32($ctx)
___
$code.=<<___ if ($flavour !~ /elf32/);
mov %r10,0(%rdx)
mov %r11,8(%rdx)
___
$code.=<<___ if ($flavour =~ /elf32/);
mov %r10d,0(%rdx)
mov %r11d,4(%rdx)
___
$code.=<<___;
mov \$1,%eax
.Lno_key:
ret
.size poly1305_init,.-poly1305_init

.type poly1305_blocks,\@function,4
.align 32
poly1305_blocks:
.Lblocks:
sub \$16,$len # too short?
jc .Lno_data

push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
.Lblocks_body:

mov $len,%r15 # reassign $len

mov 24($ctx),$r0 # load r
mov 32($ctx),$s1

mov 0($ctx),$h0 # load hash value
mov 8($ctx),$h1
mov 16($ctx),$h2

mov $s1,$r1
shr \$2,$s1
mov $r1,%rax
add $r1,$s1 # s1 = r1 + (r1 >> 2)
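# s1 = r1*5/4 is exact thanks to key clamping; it lets the block
# iteration fold product terms at 2^128 and above back modulo 2^130-5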
jmp .Loop

.align 32
.Loop:
add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2
___
&poly1305_iteration();
$code.=<<___;
mov $r1,%rax
sub \$16,%r15 # len-=16
jnc .Loop

mov $h0,0($ctx) # store hash value
mov $h1,8($ctx)
mov $h2,16($ctx)

mov 0(%rsp),%r15
mov 8(%rsp),%r14
mov 16(%rsp),%r13
mov 24(%rsp),%r12
mov 32(%rsp),%rbp
mov 40(%rsp),%rbx
lea 48(%rsp),%rsp
.Lno_data:
.Lblocks_epilogue:
ret
.size poly1305_blocks,.-poly1305_blocks

.type poly1305_emit,\@function,3
.align 32
poly1305_emit:
.Lemit:
mov 0($ctx),%r8 # load hash value
mov 8($ctx),%r9
mov 16($ctx),%r10
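# branchless final reduction: compute h+5, and if that carries into
# bit 130 then h >= p = 2^130-5, in which case the tag must be built
# from the low 128 bits of h+5 (i.e. of h-p); select with cmov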
mov %r8,%rax
add \$5,%r8 # compare to modulus
mov %r9,%rcx
adc \$0,%r9
adc \$0,%r10
shr \$2,%r10 # did 130-bit value overflow?
cmovnz %r8,%rax
cmovnz %r9,%rcx

add 0($nonce),%rax # accumulate nonce
adc 8($nonce),%rcx
mov %rax,0($mac) # write result
mov %rcx,8($mac)
ret
.size poly1305_emit,.-poly1305_emit
___
if ($avx) {
########################################################################
# Layout of opaque area is following.
#
# unsigned __int32 h[5]; # current hash value base 2^26
# unsigned __int32 is_base2_26;
# unsigned __int64 r[2]; # key value base 2^64
# unsigned __int64 pad;
# struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are base 2^26 digits of degrees of multiplier key. There are
# 5 digits, but last four are interleaved with multiples of 5, totalling
# in 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
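#
# In other words, the nine 16-byte rows hold, in order (an illustrative
# expansion of the comment above): digit r0 of {r^2, r^1, r^4, r^3};
# digit r1 of the same four powers; 5*r1; then r2, 5*r2, r3, 5*r3,
# r4, 5*r4.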

my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
map("%xmm$_",(0..15));

$code.=<<___;
.type __poly1305_block,\@abi-omnipotent
.align 32
__poly1305_block:
___
&poly1305_iteration();
$code.=<<___;
ret
.size __poly1305_block,.-__poly1305_block

.type __poly1305_init_avx,\@abi-omnipotent
.align 32
__poly1305_init_avx:
mov $r0,$h0
mov $r1,$h1
xor $h2,$h2

lea 48+64($ctx),$ctx # size optimization

mov $r1,%rax
call __poly1305_block # r^2

mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
mov \$0x3ffffff,%edx
mov $h0,$d1
and $h0#d,%eax
mov $r0,$d2
and $r0#d,%edx
mov %eax,`16*0+0-64`($ctx)
shr \$26,$d1
mov %edx,`16*0+4-64`($ctx)
shr \$26,$d2

mov \$0x3ffffff,%eax
mov \$0x3ffffff,%edx
and $d1#d,%eax
and $d2#d,%edx
mov %eax,`16*1+0-64`($ctx)
lea (%rax,%rax,4),%eax # *5
mov %edx,`16*1+4-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
mov %eax,`16*2+0-64`($ctx)
shr \$26,$d1
mov %edx,`16*2+4-64`($ctx)
shr \$26,$d2

mov $h1,%rax
mov $r1,%rdx
shl \$12,%rax
shl \$12,%rdx
or $d1,%rax
or $d2,%rdx
and \$0x3ffffff,%eax
and \$0x3ffffff,%edx
mov %eax,`16*3+0-64`($ctx)
lea (%rax,%rax,4),%eax # *5
mov %edx,`16*3+4-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
mov %eax,`16*4+0-64`($ctx)
mov $h1,$d1
mov %edx,`16*4+4-64`($ctx)
mov $r1,$d2

mov \$0x3ffffff,%eax
mov \$0x3ffffff,%edx
shr \$14,$d1
shr \$14,$d2
and $d1#d,%eax
and $d2#d,%edx
mov %eax,`16*5+0-64`($ctx)
lea (%rax,%rax,4),%eax # *5
mov %edx,`16*5+4-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
mov %eax,`16*6+0-64`($ctx)
shr \$26,$d1
mov %edx,`16*6+4-64`($ctx)
shr \$26,$d2

mov $h2,%rax
shl \$24,%rax
or %rax,$d1
mov $d1#d,`16*7+0-64`($ctx)
lea ($d1,$d1,4),$d1 # *5
mov $d2#d,`16*7+4-64`($ctx)
lea ($d2,$d2,4),$d2 # *5
mov $d1#d,`16*8+0-64`($ctx)
mov $d2#d,`16*8+4-64`($ctx)

mov $r1,%rax
call __poly1305_block # r^3

mov \$0x3ffffff,%eax # save r^3 base 2^26
mov $h0,$d1
and $h0#d,%eax
shr \$26,$d1
mov %eax,`16*0+12-64`($ctx)

mov \$0x3ffffff,%edx
and $d1#d,%edx
mov %edx,`16*1+12-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
shr \$26,$d1
mov %edx,`16*2+12-64`($ctx)

mov $h1,%rax
shl \$12,%rax
or $d1,%rax
and \$0x3ffffff,%eax
mov %eax,`16*3+12-64`($ctx)
lea (%rax,%rax,4),%eax # *5
mov $h1,$d1
mov %eax,`16*4+12-64`($ctx)

mov \$0x3ffffff,%edx
shr \$14,$d1
and $d1#d,%edx
mov %edx,`16*5+12-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
shr \$26,$d1
mov %edx,`16*6+12-64`($ctx)

mov $h2,%rax
shl \$24,%rax
or %rax,$d1
mov $d1#d,`16*7+12-64`($ctx)
lea ($d1,$d1,4),$d1 # *5
mov $d1#d,`16*8+12-64`($ctx)

mov $r1,%rax
call __poly1305_block # r^4

mov \$0x3ffffff,%eax # save r^4 base 2^26
mov $h0,$d1
and $h0#d,%eax
shr \$26,$d1
mov %eax,`16*0+8-64`($ctx)

mov \$0x3ffffff,%edx
and $d1#d,%edx
mov %edx,`16*1+8-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
shr \$26,$d1
mov %edx,`16*2+8-64`($ctx)

mov $h1,%rax
shl \$12,%rax
or $d1,%rax
and \$0x3ffffff,%eax
mov %eax,`16*3+8-64`($ctx)
lea (%rax,%rax,4),%eax # *5
mov $h1,$d1
mov %eax,`16*4+8-64`($ctx)

mov \$0x3ffffff,%edx
shr \$14,$d1
and $d1#d,%edx
mov %edx,`16*5+8-64`($ctx)
lea (%rdx,%rdx,4),%edx # *5
shr \$26,$d1
mov %edx,`16*6+8-64`($ctx)

mov $h2,%rax
shl \$24,%rax
or %rax,$d1
mov $d1#d,`16*7+8-64`($ctx)
lea ($d1,$d1,4),$d1 # *5
mov $d1#d,`16*8+8-64`($ctx)

lea -48-64($ctx),$ctx # size [de-]optimization
ret
.size __poly1305_init_avx,.-__poly1305_init_avx

.type poly1305_blocks_avx,\@function,4
.align 32
poly1305_blocks_avx:
mov 20($ctx),%r8d # is_base2_26
cmp \$128,$len
jae .Lblocks_avx
test %r8d,%r8d
jz .Lblocks

.Lblocks_avx:
and \$-16,$len
jz .Lno_data_avx

vzeroupper

test %r8d,%r8d
jz .Lbase2_64_avx

test \$31,$len
jz .Leven_avx

push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
.Lblocks_avx_body:

mov $len,%r15 # reassign $len

mov 0($ctx),$d1 # load hash value
mov 8($ctx),$d2
mov 16($ctx),$h2#d

mov 24($ctx),$r0 # load r
mov 32($ctx),$s1

################################# base 2^26 -> base 2^64
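# i.e. repack h = h[0] + h[1]*2^26 + h[2]*2^52 + h[3]*2^78 + h[4]*2^104
# from five 26-bit digits (stored as 32-bit words, loaded 64 bits at a
# time above) into the three 64-bit words used by __poly1305_block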
mov $d1#d,$h0#d
and \$-1<<31,$d1
mov $d2,$r1 # borrow $r1
mov $d2#d,$h1#d
and \$-1<<31,$d2

shr \$6,$d1
shl \$52,$r1
add $d1,$h0
shr \$12,$h1
shr \$18,$d2
add $r1,$h0
adc $d2,$h1

mov $h2,$d1
shl \$40,$d1
shr \$24,$h2
add $d1,$h1
adc \$0,$h2 # can be partially reduced...

mov \$-4,$d2 # ... so reduce
mov $h2,$d1
and $h2,$d2
shr \$2,$d1
and \$3,$h2
add $d2,$d1 # =*5
add $d1,$h0
adc \$0,$h1

mov $s1,$r1
mov $s1,%rax
shr \$2,$s1
add $r1,$s1 # s1 = r1 + (r1 >> 2)

add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2

call __poly1305_block

test $padbit,$padbit # if $padbit is zero,
jz .Lstore_base2_64_avx # store hash in base 2^64 format

################################# base 2^64 -> base 2^26
mov $h0,%rax
mov $h0,%rdx
shr \$52,$h0
mov $h1,$r0
mov $h1,$r1
shr \$26,%rdx
and \$0x3ffffff,%rax # h[0]
shl \$12,$r0
and \$0x3ffffff,%rdx # h[1]
shr \$14,$h1
or $r0,$h0
shl \$24,$h2
and \$0x3ffffff,$h0 # h[2]
shr \$40,$r1
and \$0x3ffffff,$h1 # h[3]
or $r1,$h2 # h[4]

sub \$16,%r15
jz .Lstore_base2_26_avx

vmovd %rax#d,$H0
vmovd %rdx#d,$H1
vmovd $h0#d,$H2
vmovd $h1#d,$H3
vmovd $h2#d,$H4
jmp .Lproceed_avx

.align 32
.Lstore_base2_64_avx:
mov $h0,0($ctx)
mov $h1,8($ctx)
mov $h2,16($ctx) # note that is_base2_26 is zeroed
jmp .Ldone_avx

.align 16
.Lstore_base2_26_avx:
mov %rax#d,0($ctx) # store hash value base 2^26
mov %rdx#d,4($ctx)
mov $h0#d,8($ctx)
mov $h1#d,12($ctx)
mov $h2#d,16($ctx)

.align 16
.Ldone_avx:
mov 0(%rsp),%r15
mov 8(%rsp),%r14
mov 16(%rsp),%r13
mov 24(%rsp),%r12
mov 32(%rsp),%rbp
mov 40(%rsp),%rbx
lea 48(%rsp),%rsp
.Lno_data_avx:
.Lblocks_avx_epilogue:
ret

.align 32
.Lbase2_64_avx:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
.Lbase2_64_avx_body:

mov $len,%r15 # reassign $len

mov 24($ctx),$r0 # load r
mov 32($ctx),$s1

mov 0($ctx),$h0 # load hash value
mov 8($ctx),$h1
mov 16($ctx),$h2#d

mov $s1,$r1
mov $s1,%rax
shr \$2,$s1
add $r1,$s1 # s1 = r1 + (r1 >> 2)

test \$31,$len
jz .Linit_avx

add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2
sub \$16,%r15

call __poly1305_block

.Linit_avx:
################################# base 2^64 -> base 2^26
mov $h0,%rax
mov $h0,%rdx
shr \$52,$h0
mov $h1,$d1
mov $h1,$d2
shr \$26,%rdx
and \$0x3ffffff,%rax # h[0]
shl \$12,$d1
and \$0x3ffffff,%rdx # h[1]
shr \$14,$h1
or $d1,$h0
shl \$24,$h2
and \$0x3ffffff,$h0 # h[2]
shr \$40,$d2
and \$0x3ffffff,$h1 # h[3]
or $d2,$h2 # h[4]

vmovd %rax#d,$H0
vmovd %rdx#d,$H1
vmovd $h0#d,$H2
vmovd $h1#d,$H3
vmovd $h2#d,$H4
movl \$1,20($ctx) # set is_base2_26

call __poly1305_init_avx

.Lproceed_avx:
mov %r15,$len

mov 0(%rsp),%r15
mov 8(%rsp),%r14
mov 16(%rsp),%r13
mov 24(%rsp),%r12
mov 32(%rsp),%rbp
mov 40(%rsp),%rbx
lea 48(%rsp),%rax
lea 48(%rsp),%rsp
.Lbase2_64_avx_epilogue:
jmp .Ldo_avx

.align 32
.Leven_avx:
vmovd 4*0($ctx),$H0 # load hash value
vmovd 4*1($ctx),$H1
vmovd 4*2($ctx),$H2
vmovd 4*3($ctx),$H3
vmovd 4*4($ctx),$H4

.Ldo_avx:
___
$code.=<<___ if (!$win64);
lea -0x58(%rsp),%r11
sub \$0x178,%rsp
___
$code.=<<___ if ($win64);
lea -0xf8(%rsp),%r11
sub \$0x218,%rsp
vmovdqa %xmm6,0x50(%r11)
vmovdqa %xmm7,0x60(%r11)
vmovdqa %xmm8,0x70(%r11)
vmovdqa %xmm9,0x80(%r11)
vmovdqa %xmm10,0x90(%r11)
vmovdqa %xmm11,0xa0(%r11)
vmovdqa %xmm12,0xb0(%r11)
vmovdqa %xmm13,0xc0(%r11)
vmovdqa %xmm14,0xd0(%r11)
vmovdqa %xmm15,0xe0(%r11)
.Ldo_avx_body:
___
$code.=<<___;
sub \$64,$len
lea -32($inp),%rax
cmovc %rax,$inp

vmovdqu `16*3`($ctx),$D4 # preload r0^2
lea `16*3+64`($ctx),$ctx # size optimization
lea .Lconst(%rip),%rcx

################################################################
# load input
vmovdqu 16*2($inp),$T0
vmovdqu 16*3($inp),$T1
vmovdqa 64(%rcx),$MASK # .Lmask26

vpsrldq \$6,$T0,$T2 # splat input
vpsrldq \$6,$T1,$T3
vpunpckhqdq $T1,$T0,$T4 # 4
vpunpcklqdq $T1,$T0,$T0 # 0:1
vpunpcklqdq $T3,$T2,$T3 # 2:3

vpsrlq \$40,$T4,$T4 # 4
vpsrlq \$26,$T0,$T1
vpand $MASK,$T0,$T0 # 0
vpsrlq \$4,$T3,$T2
vpand $MASK,$T1,$T1 # 1
vpsrlq \$30,$T3,$T3
vpand $MASK,$T2,$T2 # 2
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always

jbe .Lskip_loop_avx

# expand and copy pre-calculated table to stack
vmovdqu `16*1-64`($ctx),$D1
vmovdqu `16*2-64`($ctx),$D2
vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
vmovdqa $D3,-0x90(%r11)
vmovdqa $D0,0x00(%rsp)
vpshufd \$0xEE,$D1,$D4
vmovdqu `16*3-64`($ctx),$D0
vpshufd \$0x44,$D1,$D1
vmovdqa $D4,-0x80(%r11)
vmovdqa $D1,0x10(%rsp)
vpshufd \$0xEE,$D2,$D3
vmovdqu `16*4-64`($ctx),$D1
vpshufd \$0x44,$D2,$D2
vmovdqa $D3,-0x70(%r11)
vmovdqa $D2,0x20(%rsp)
vpshufd \$0xEE,$D0,$D4
vmovdqu `16*5-64`($ctx),$D2
vpshufd \$0x44,$D0,$D0
vmovdqa $D4,-0x60(%r11)
vmovdqa $D0,0x30(%rsp)
vpshufd \$0xEE,$D1,$D3
vmovdqu `16*6-64`($ctx),$D0
vpshufd \$0x44,$D1,$D1
vmovdqa $D3,-0x50(%r11)
vmovdqa $D1,0x40(%rsp)
vpshufd \$0xEE,$D2,$D4
vmovdqu `16*7-64`($ctx),$D1
vpshufd \$0x44,$D2,$D2
vmovdqa $D4,-0x40(%r11)
vmovdqa $D2,0x50(%rsp)
vpshufd \$0xEE,$D0,$D3
vmovdqu `16*8-64`($ctx),$D2
vpshufd \$0x44,$D0,$D0
vmovdqa $D3,-0x30(%r11)
vmovdqa $D0,0x60(%rsp)
vpshufd \$0xEE,$D1,$D4
vpshufd \$0x44,$D1,$D1
vmovdqa $D4,-0x20(%r11)
vmovdqa $D1,0x70(%rsp)
vpshufd \$0xEE,$D2,$D3
vmovdqa 0x00(%rsp),$D4 # preload r0^2
vpshufd \$0x44,$D2,$D2
vmovdqa $D3,-0x10(%r11)
vmovdqa $D2,0x80(%rsp)

jmp .Loop_avx

.align 32
.Loop_avx:
################################################################
# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
#   \___________________/
# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
#   \___________________/ \____________________/
#
# Note that we start with inp[2:3]*r^2. This is because it
# doesn't depend on reduction in previous iteration.
################################################################
# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
#
# though note that $Tx and $Hx are "reversed" in this section,
# and $D4 is preloaded with r0^2...

vpmuludq $T0,$D4,$D0 # d0 = h0*r0
vpmuludq $T1,$D4,$D1 # d1 = h1*r0
vmovdqa $H2,0x20(%r11) # offload hash
vpmuludq $T2,$D4,$D2 # d2 = h2*r0
vmovdqa 0x10(%rsp),$H2 # r1^2
vpmuludq $T3,$D4,$D3 # d3 = h3*r0
vpmuludq $T4,$D4,$D4 # d4 = h4*r0
vmovdqa $H0,0x00(%r11) #
vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
vmovdqa $H1,0x10(%r11) #
vpmuludq $T3,$H2,$H1 # h3*r1
vpaddq $H0,$D0,$D0 # d0 += h4*s1
vpaddq $H1,$D4,$D4 # d4 += h3*r1
vmovdqa $H3,0x30(%r11) #
vpmuludq $T2,$H2,$H0 # h2*r1
vpmuludq $T1,$H2,$H1 # h1*r1
vpaddq $H0,$D3,$D3 # d3 += h2*r1
vmovdqa 0x30(%rsp),$H3 # r2^2
vpaddq $H1,$D2,$D2 # d2 += h1*r1
vmovdqa $H4,0x40(%r11) #
vpmuludq $T0,$H2,$H2 # h0*r1
vpmuludq $T2,$H3,$H0 # h2*r2
vpaddq $H2,$D1,$D1 # d1 += h0*r1
vmovdqa 0x40(%rsp),$H4 # s2^2
vpaddq $H0,$D4,$D4 # d4 += h2*r2
vpmuludq $T1,$H3,$H1 # h1*r2
vpmuludq $T0,$H3,$H3 # h0*r2
vpaddq $H1,$D3,$D3 # d3 += h1*r2
vmovdqa 0x50(%rsp),$H2 # r3^2
vpaddq $H3,$D2,$D2 # d2 += h0*r2
vpmuludq $T4,$H4,$H0 # h4*s2
vpmuludq $T3,$H4,$H4 # h3*s2
vpaddq $H0,$D1,$D1 # d1 += h4*s2
vmovdqa 0x60(%rsp),$H3 # s3^2
vpaddq $H4,$D0,$D0 # d0 += h3*s2
vmovdqa 0x80(%rsp),$H4 # s4^2
vpmuludq $T1,$H2,$H1 # h1*r3
vpmuludq $T0,$H2,$H2 # h0*r3
vpaddq $H1,$D4,$D4 # d4 += h1*r3
vpaddq $H2,$D3,$D3 # d3 += h0*r3
vpmuludq $T4,$H3,$H0 # h4*s3
vpmuludq $T3,$H3,$H1 # h3*s3
vpaddq $H0,$D2,$D2 # d2 += h4*s3
vmovdqu 16*0($inp),$H0 # load input
vpaddq $H1,$D1,$D1 # d1 += h3*s3
vpmuludq $T2,$H3,$H3 # h2*s3
vpmuludq $T2,$H4,$T2 # h2*s4
vpaddq $H3,$D0,$D0 # d0 += h2*s3
vmovdqu 16*1($inp),$H1 #
vpaddq $T2,$D1,$D1 # d1 += h2*s4
vpmuludq $T3,$H4,$T3 # h3*s4
vpmuludq $T4,$H4,$T4 # h4*s4
vpsrldq \$6,$H0,$H2 # splat input
vpaddq $T3,$D2,$D2 # d2 += h3*s4
vpaddq $T4,$D3,$D3 # d3 += h4*s4
vpsrldq \$6,$H1,$H3 #
vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
vpmuludq $T1,$H4,$T0 # h1*s4
vpunpckhqdq $H1,$H0,$H4 # 4
vpaddq $T4,$D4,$D4 # d4 += h0*r4
vmovdqa -0x90(%r11),$T4 # r0^4
vpaddq $T0,$D0,$D0 # d0 += h1*s4

vpunpcklqdq $H1,$H0,$H0 # 0:1
vpunpcklqdq $H3,$H2,$H3 # 2:3

#vpsrlq \$40,$H4,$H4 # 4
vpsrldq \$`40/8`,$H4,$H4 # 4
vpsrlq \$26,$H0,$H1
vpand $MASK,$H0,$H0 # 0
vpsrlq \$4,$H3,$H2
vpand $MASK,$H1,$H1 # 1
vpand 0(%rcx),$H4,$H4 # .Lmask24
vpsrlq \$30,$H3,$H3
vpand $MASK,$H2,$H2 # 2
vpand $MASK,$H3,$H3 # 3
vpor 32(%rcx),$H4,$H4 # padbit, yes, always

vpaddq 0x00(%r11),$H0,$H0 # add hash value
vpaddq 0x10(%r11),$H1,$H1
vpaddq 0x20(%r11),$H2,$H2
vpaddq 0x30(%r11),$H3,$H3
vpaddq 0x40(%r11),$H4,$H4

lea 16*2($inp),%rax
lea 16*4($inp),$inp
sub \$64,$len
cmovc %rax,$inp

################################################################
# Now we accumulate (inp[0:1]+hash)*r^4
################################################################
# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

vpmuludq $H0,$T4,$T0 # h0*r0
vpmuludq $H1,$T4,$T1 # h1*r0
vpaddq $T0,$D0,$D0
vpaddq $T1,$D1,$D1
vmovdqa -0x80(%r11),$T2 # r1^4
vpmuludq $H2,$T4,$T0 # h2*r0
vpmuludq $H3,$T4,$T1 # h3*r0
vpaddq $T0,$D2,$D2
vpaddq $T1,$D3,$D3
vpmuludq $H4,$T4,$T4 # h4*r0
vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
vpaddq $T4,$D4,$D4

vpaddq $T0,$D0,$D0 # d0 += h4*s1
vpmuludq $H2,$T2,$T1 # h2*r1
vpmuludq $H3,$T2,$T0 # h3*r1
vpaddq $T1,$D3,$D3 # d3 += h2*r1
vmovdqa -0x60(%r11),$T3 # r2^4
vpaddq $T0,$D4,$D4 # d4 += h3*r1
vpmuludq $H1,$T2,$T1 # h1*r1
vpmuludq $H0,$T2,$T2 # h0*r1
vpaddq $T1,$D2,$D2 # d2 += h1*r1
vpaddq $T2,$D1,$D1 # d1 += h0*r1

vmovdqa -0x50(%r11),$T4 # s2^4
vpmuludq $H2,$T3,$T0 # h2*r2
vpmuludq $H1,$T3,$T1 # h1*r2
vpaddq $T0,$D4,$D4 # d4 += h2*r2
vpaddq $T1,$D3,$D3 # d3 += h1*r2
vmovdqa -0x40(%r11),$T2 # r3^4
vpmuludq $H0,$T3,$T3 # h0*r2
vpmuludq $H4,$T4,$T0 # h4*s2
vpaddq $T3,$D2,$D2 # d2 += h0*r2
vpaddq $T0,$D1,$D1 # d1 += h4*s2
vmovdqa -0x30(%r11),$T3 # s3^4
vpmuludq $H3,$T4,$T4 # h3*s2
vpmuludq $H1,$T2,$T1 # h1*r3
vpaddq $T4,$D0,$D0 # d0 += h3*s2

vmovdqa -0x10(%r11),$T4 # s4^4
vpaddq $T1,$D4,$D4 # d4 += h1*r3
vpmuludq $H0,$T2,$T2 # h0*r3
vpmuludq $H4,$T3,$T0 # h4*s3
vpaddq $T2,$D3,$D3 # d3 += h0*r3
vpaddq $T0,$D2,$D2 # d2 += h4*s3
vmovdqu 16*2($inp),$T0 # load input
vpmuludq $H3,$T3,$T2 # h3*s3
vpmuludq $H2,$T3,$T3 # h2*s3
vpaddq $T2,$D1,$D1 # d1 += h3*s3
vmovdqu 16*3($inp),$T1 #
vpaddq $T3,$D0,$D0 # d0 += h2*s3

vpmuludq $H2,$T4,$H2 # h2*s4
vpmuludq $H3,$T4,$H3 # h3*s4
vpsrldq \$6,$T0,$T2 # splat input
vpaddq $H2,$D1,$D1 # d1 += h2*s4
vpmuludq $H4,$T4,$H4 # h4*s4
vpsrldq \$6,$T1,$T3 #
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
vpmuludq $H1,$T4,$H0
vpunpckhqdq $T1,$T0,$T4 # 4
vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4

vpunpcklqdq $T1,$T0,$T0 # 0:1
vpunpcklqdq $T3,$T2,$T3 # 2:3

#vpsrlq \$40,$T4,$T4 # 4
vpsrldq \$`40/8`,$T4,$T4 # 4
vpsrlq \$26,$T0,$T1
vmovdqa 0x00(%rsp),$D4 # preload r0^2
vpand $MASK,$T0,$T0 # 0
vpsrlq \$4,$T3,$T2
vpand $MASK,$T1,$T1 # 1
vpand 0(%rcx),$T4,$T4 # .Lmask24
vpsrlq \$30,$T3,$T3
vpand $MASK,$T2,$T2 # 2
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always

################################################################
# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
# and P. Schwabe
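#
# roughly speaking: carries are propagated one digit at a time and
# digits may stay slightly above 2^26; the two interleaved chains,
# h3->h4->h0->h1 and h0->h1->h2->h3->h4 (h4->h0 being the *5 fold via
# add plus shifted add), leave h bounded but not fully reduced, which
# is all the next multiplication needs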
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4

vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$D1,$H1 # h0 -> h1

vpsrlq \$26,$H4,$D0
vpand $MASK,$H4,$H4

vpsrlq \$26,$H1,$D1
vpand $MASK,$H1,$H1
vpaddq $D1,$H2,$H2 # h1 -> h2

vpaddq $D0,$H0,$H0
vpsllq \$2,$D0,$D0
vpaddq $D0,$H0,$H0 # h4 -> h0

vpsrlq \$26,$H2,$D2
vpand $MASK,$H2,$H2
vpaddq $D2,$H3,$H3 # h2 -> h3

vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1

vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4

ja .Loop_avx

.Lskip_loop_avx:
################################################################
# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
add \$32,$len
jnz .Long_tail_avx

vpaddq $H2,$T2,$T2
vpaddq $H0,$T0,$T0
vpaddq $H1,$T1,$T1
vpaddq $H3,$T3,$T3
vpaddq $H4,$T4,$T4

.Long_tail_avx:
vmovdqa $H2,0x20(%r11)
vmovdqa $H0,0x00(%r11)
vmovdqa $H1,0x10(%r11)
vmovdqa $H3,0x30(%r11)
vmovdqa $H4,0x40(%r11)

# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

vpmuludq $T2,$D4,$D2 # d2 = h2*r0
vpmuludq $T0,$D4,$D0 # d0 = h0*r0
vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
vpmuludq $T1,$D4,$D1 # d1 = h1*r0
vpmuludq $T3,$D4,$D3 # d3 = h3*r0
vpmuludq $T4,$D4,$D4 # d4 = h4*r0

vpmuludq $T3,$H2,$H0 # h3*r1
vpaddq $H0,$D4,$D4 # d4 += h3*r1
vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
vpmuludq $T2,$H2,$H1 # h2*r1
vpaddq $H1,$D3,$D3 # d3 += h2*r1
vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
vpmuludq $T1,$H2,$H0 # h1*r1
vpaddq $H0,$D2,$D2 # d2 += h1*r1
vpmuludq $T0,$H2,$H2 # h0*r1
vpaddq $H2,$D1,$D1 # d1 += h0*r1
vpmuludq $T4,$H3,$H3 # h4*s1
vpaddq $H3,$D0,$D0 # d0 += h4*s1

vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
vpmuludq $T2,$H4,$H1 # h2*r2
vpaddq $H1,$D4,$D4 # d4 += h2*r2
vpmuludq $T1,$H4,$H0 # h1*r2
vpaddq $H0,$D3,$D3 # d3 += h1*r2
vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
vpmuludq $T0,$H4,$H4 # h0*r2
vpaddq $H4,$D2,$D2 # d2 += h0*r2
vpmuludq $T4,$H2,$H1 # h4*s2
vpaddq $H1,$D1,$D1 # d1 += h4*s2
vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
vpmuludq $T3,$H2,$H2 # h3*s2
vpaddq $H2,$D0,$D0 # d0 += h3*s2

vpmuludq $T1,$H3,$H0 # h1*r3
vpaddq $H0,$D4,$D4 # d4 += h1*r3
vpmuludq $T0,$H3,$H3 # h0*r3
vpaddq $H3,$D3,$D3 # d3 += h0*r3
vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
vpmuludq $T4,$H4,$H1 # h4*s3
vpaddq $H1,$D2,$D2 # d2 += h4*s3
vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
vpmuludq $T3,$H4,$H0 # h3*s3
vpaddq $H0,$D1,$D1 # d1 += h3*s3
vpmuludq $T2,$H4,$H4 # h2*s3
vpaddq $H4,$D0,$D0 # d0 += h2*s3

vpmuludq $T0,$H2,$H2 # h0*r4
vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
vpmuludq $T4,$H3,$H1 # h4*s4
vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
vpmuludq $T3,$H3,$H0 # h3*s4
vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
vpmuludq $T2,$H3,$H1 # h2*s4
vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
vpmuludq $T1,$H3,$H3 # h1*s4
vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4

jz .Lshort_tail_avx

vmovdqu 16*0($inp),$H0 # load input
vmovdqu 16*1($inp),$H1

vpsrldq \$6,$H0,$H2 # splat input
vpsrldq \$6,$H1,$H3
vpunpckhqdq $H1,$H0,$H4 # 4
vpunpcklqdq $H1,$H0,$H0 # 0:1
vpunpcklqdq $H3,$H2,$H3 # 2:3

vpsrlq \$40,$H4,$H4 # 4
vpsrlq \$26,$H0,$H1
vpand $MASK,$H0,$H0 # 0
vpsrlq \$4,$H3,$H2
vpand $MASK,$H1,$H1 # 1
vpsrlq \$30,$H3,$H3
vpand $MASK,$H2,$H2 # 2
vpand $MASK,$H3,$H3 # 3
vpor 32(%rcx),$H4,$H4 # padbit, yes, always

vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
vpaddq 0x00(%r11),$H0,$H0
vpaddq 0x10(%r11),$H1,$H1
vpaddq 0x20(%r11),$H2,$H2
vpaddq 0x30(%r11),$H3,$H3
vpaddq 0x40(%r11),$H4,$H4

################################################################
# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

vpmuludq $H0,$T4,$T0 # h0*r0
vpaddq $T0,$D0,$D0 # d0 += h0*r0
vpmuludq $H1,$T4,$T1 # h1*r0
vpaddq $T1,$D1,$D1 # d1 += h1*r0
vpmuludq $H2,$T4,$T0 # h2*r0
vpaddq $T0,$D2,$D2 # d2 += h2*r0
vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
vpmuludq $H3,$T4,$T1 # h3*r0
vpaddq $T1,$D3,$D3 # d3 += h3*r0
vpmuludq $H4,$T4,$T4 # h4*r0
vpaddq $T4,$D4,$D4 # d4 += h4*r0

vpmuludq $H3,$T2,$T0 # h3*r1
vpaddq $T0,$D4,$D4 # d4 += h3*r1
vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
vpmuludq $H2,$T2,$T1 # h2*r1
vpaddq $T1,$D3,$D3 # d3 += h2*r1
vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
vpmuludq $H1,$T2,$T0 # h1*r1
vpaddq $T0,$D2,$D2 # d2 += h1*r1
vpmuludq $H0,$T2,$T2 # h0*r1
vpaddq $T2,$D1,$D1 # d1 += h0*r1
vpmuludq $H4,$T3,$T3 # h4*s1
vpaddq $T3,$D0,$D0 # d0 += h4*s1

vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
vpmuludq $H2,$T4,$T1 # h2*r2
vpaddq $T1,$D4,$D4 # d4 += h2*r2
vpmuludq $H1,$T4,$T0 # h1*r2
vpaddq $T0,$D3,$D3 # d3 += h1*r2
vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
vpmuludq $H0,$T4,$T4 # h0*r2
vpaddq $T4,$D2,$D2 # d2 += h0*r2
vpmuludq $H4,$T2,$T1 # h4*s2
vpaddq $T1,$D1,$D1 # d1 += h4*s2
vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
vpmuludq $H3,$T2,$T2 # h3*s2
vpaddq $T2,$D0,$D0 # d0 += h3*s2

vpmuludq $H1,$T3,$T0 # h1*r3
vpaddq $T0,$D4,$D4 # d4 += h1*r3
vpmuludq $H0,$T3,$T3 # h0*r3
vpaddq $T3,$D3,$D3 # d3 += h0*r3
vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
vpmuludq $H4,$T4,$T1 # h4*s3
vpaddq $T1,$D2,$D2 # d2 += h4*s3
vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
vpmuludq $H3,$T4,$T0 # h3*s3
vpaddq $T0,$D1,$D1 # d1 += h3*s3
vpmuludq $H2,$T4,$T4 # h2*s3
vpaddq $T4,$D0,$D0 # d0 += h2*s3

vpmuludq $H0,$T2,$T2 # h0*r4
vpaddq $T2,$D4,$D4 # d4 += h0*r4
vpmuludq $H4,$T3,$T1 # h4*s4
vpaddq $T1,$D3,$D3 # d3 += h4*s4
vpmuludq $H3,$T3,$T0 # h3*s4
vpaddq $T0,$D2,$D2 # d2 += h3*s4
vpmuludq $H2,$T3,$T1 # h2*s4
vpaddq $T1,$D1,$D1 # d1 += h2*s4
vpmuludq $H1,$T3,$T3 # h1*s4
vpaddq $T3,$D0,$D0 # d0 += h1*s4

.Lshort_tail_avx:
################################################################
# horizontal addition

vpsrldq \$8,$D4,$T4
vpsrldq \$8,$D3,$T3
vpsrldq \$8,$D1,$T1
vpsrldq \$8,$D0,$T0
vpsrldq \$8,$D2,$T2
vpaddq $T3,$D3,$D3
vpaddq $T4,$D4,$D4
vpaddq $T0,$D0,$D0
vpaddq $T1,$D1,$D1
vpaddq $T2,$D2,$D2

################################################################
# lazy reduction

vpsrlq \$26,$D3,$H3
vpand $MASK,$D3,$D3
vpaddq $H3,$D4,$D4 # h3 -> h4

vpsrlq \$26,$D0,$H0
vpand $MASK,$D0,$D0
vpaddq $H0,$D1,$D1 # h0 -> h1

vpsrlq \$26,$D4,$H4
vpand $MASK,$D4,$D4

vpsrlq \$26,$D1,$H1
vpand $MASK,$D1,$D1
vpaddq $H1,$D2,$D2 # h1 -> h2

vpaddq $H4,$D0,$D0
vpsllq \$2,$H4,$H4
vpaddq $H4,$D0,$D0 # h4 -> h0

vpsrlq \$26,$D2,$H2
vpand $MASK,$D2,$D2
vpaddq $H2,$D3,$D3 # h2 -> h3

vpsrlq \$26,$D0,$H0
vpand $MASK,$D0,$D0
vpaddq $H0,$D1,$D1 # h0 -> h1

vpsrlq \$26,$D3,$H3
vpand $MASK,$D3,$D3
vpaddq $H3,$D4,$D4 # h3 -> h4

vmovd $D0,`4*0-48-64`($ctx) # save partially reduced
vmovd $D1,`4*1-48-64`($ctx)
vmovd $D2,`4*2-48-64`($ctx)
vmovd $D3,`4*3-48-64`($ctx)
vmovd $D4,`4*4-48-64`($ctx)
___
$code.=<<___ if ($win64);
vmovdqa 0x50(%r11),%xmm6
vmovdqa 0x60(%r11),%xmm7
vmovdqa 0x70(%r11),%xmm8
vmovdqa 0x80(%r11),%xmm9
vmovdqa 0x90(%r11),%xmm10
vmovdqa 0xa0(%r11),%xmm11
vmovdqa 0xb0(%r11),%xmm12
vmovdqa 0xc0(%r11),%xmm13
vmovdqa 0xd0(%r11),%xmm14
vmovdqa 0xe0(%r11),%xmm15
lea 0xf8(%r11),%rsp
.Ldo_avx_epilogue:
___
$code.=<<___ if (!$win64);
lea 0x58(%r11),%rsp
___
$code.=<<___;
vzeroupper
ret
.size poly1305_blocks_avx,.-poly1305_blocks_avx

.type poly1305_emit_avx,\@function,3
.align 32
poly1305_emit_avx:
cmpl \$0,20($ctx) # is_base2_26?
je .Lemit

mov 0($ctx),%eax # load hash value base 2^26
mov 4($ctx),%ecx
mov 8($ctx),%r8d
mov 12($ctx),%r11d
mov 16($ctx),%r10d

shl \$26,%rcx # base 2^26 -> base 2^64
mov %r8,%r9
shl \$52,%r8
add %rcx,%rax
shr \$12,%r9
add %rax,%r8 # h0
adc \$0,%r9

shl \$14,%r11
mov %r10,%rax
shr \$24,%r10
add %r11,%r9
shl \$40,%rax
add %rax,%r9 # h1
adc \$0,%r10 # h2

mov %r10,%rax # could be partially reduced, so reduce
mov %r10,%rcx
and \$3,%r10
shr \$2,%rax
and \$-4,%rcx
add %rcx,%rax
add %rax,%r8
adc \$0,%r9

mov %r8,%rax
add \$5,%r8 # compare to modulus
mov %r9,%rcx
adc \$0,%r9
adc \$0,%r10
shr \$2,%r10 # did 130-bit value overflow?
cmovnz %r8,%rax
cmovnz %r9,%rcx

add 0($nonce),%rax # accumulate nonce
adc 8($nonce),%rcx
mov %rax,0($mac) # write result
mov %rcx,8($mac)
ret
.size poly1305_emit_avx,.-poly1305_emit_avx
___

if ($avx>1) {
my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
map("%ymm$_",(0..15));
my $S4=$MASK;

$code.=<<___;
.type poly1305_blocks_avx2,\@function,4
.align 32
poly1305_blocks_avx2:
mov 20($ctx),%r8d # is_base2_26
cmp \$128,$len
jae .Lblocks_avx2
test %r8d,%r8d
jz .Lblocks

.Lblocks_avx2:
and \$-16,$len
jz .Lno_data_avx2

vzeroupper

test %r8d,%r8d
jz .Lbase2_64_avx2

test \$63,$len
jz .Leven_avx2

push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
.Lblocks_avx2_body:

mov $len,%r15 # reassign $len

mov 0($ctx),$d1 # load hash value
mov 8($ctx),$d2
mov 16($ctx),$h2#d

mov 24($ctx),$r0 # load r
mov 32($ctx),$s1

################################# base 2^26 -> base 2^64
mov $d1#d,$h0#d
and \$-1<<31,$d1
mov $d2,$r1 # borrow $r1
mov $d2#d,$h1#d
and \$-1<<31,$d2

shr \$6,$d1
shl \$52,$r1
add $d1,$h0
shr \$12,$h1
shr \$18,$d2
add $r1,$h0
adc $d2,$h1

mov $h2,$d1
shl \$40,$d1
shr \$24,$h2
add $d1,$h1
adc \$0,$h2 # can be partially reduced...

mov \$-4,$d2 # ... so reduce
mov $h2,$d1
and $h2,$d2
shr \$2,$d1
and \$3,$h2
add $d2,$d1 # =*5
add $d1,$h0
adc \$0,$h1

mov $s1,$r1
mov $s1,%rax
shr \$2,$s1
add $r1,$s1 # s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2:
add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2
sub \$16,%r15

call __poly1305_block
mov $r1,%rax

test \$63,%r15
jnz .Lbase2_26_pre_avx2

test $padbit,$padbit # if $padbit is zero,
jz .Lstore_base2_64_avx2 # store hash in base 2^64 format

################################# base 2^64 -> base 2^26
mov $h0,%rax
mov $h0,%rdx
shr \$52,$h0
mov $h1,$r0
mov $h1,$r1
shr \$26,%rdx
and \$0x3ffffff,%rax # h[0]
shl \$12,$r0
and \$0x3ffffff,%rdx # h[1]
shr \$14,$h1
or $r0,$h0
shl \$24,$h2
and \$0x3ffffff,$h0 # h[2]
shr \$40,$r1
and \$0x3ffffff,$h1 # h[3]
or $r1,$h2 # h[4]

test %r15,%r15
jz .Lstore_base2_26_avx2

vmovd %rax#d,%x#$H0
vmovd %rdx#d,%x#$H1
vmovd $h0#d,%x#$H2
vmovd $h1#d,%x#$H3
vmovd $h2#d,%x#$H4
jmp .Lproceed_avx2

.align 32
.Lstore_base2_64_avx2:
mov $h0,0($ctx)
mov $h1,8($ctx)
mov $h2,16($ctx) # note that is_base2_26 is zeroed
jmp .Ldone_avx2

.align 16
.Lstore_base2_26_avx2:
mov %rax#d,0($ctx) # store hash value base 2^26
mov %rdx#d,4($ctx)
mov $h0#d,8($ctx)
mov $h1#d,12($ctx)
mov $h2#d,16($ctx)

.align 16
.Ldone_avx2:
mov 0(%rsp),%r15
mov 8(%rsp),%r14
mov 16(%rsp),%r13
mov 24(%rsp),%r12
mov 32(%rsp),%rbp
mov 40(%rsp),%rbx
lea 48(%rsp),%rsp
.Lno_data_avx2:
.Lblocks_avx2_epilogue:
ret

.align 32
.Lbase2_64_avx2:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
.Lbase2_64_avx2_body:

mov $len,%r15 # reassign $len

mov 24($ctx),$r0 # load r
mov 32($ctx),$s1

mov 0($ctx),$h0 # load hash value
mov 8($ctx),$h1
mov 16($ctx),$h2#d

mov $s1,$r1
mov $s1,%rax
shr \$2,$s1
add $r1,$s1 # s1 = r1 + (r1 >> 2)

test \$63,$len
jz .Linit_avx2

.Lbase2_64_pre_avx2:
add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2
sub \$16,%r15

call __poly1305_block
mov $r1,%rax

test \$63,%r15
jnz .Lbase2_64_pre_avx2

.Linit_avx2:
################################# base 2^64 -> base 2^26
mov $h0,%rax
mov $h0,%rdx
shr \$52,$h0
mov $h1,$d1
mov $h1,$d2
shr \$26,%rdx
and \$0x3ffffff,%rax # h[0]
shl \$12,$d1
and \$0x3ffffff,%rdx # h[1]
shr \$14,$h1
or $d1,$h0
shl \$24,$h2
and \$0x3ffffff,$h0 # h[2]
shr \$40,$d2
and \$0x3ffffff,$h1 # h[3]
or $d2,$h2 # h[4]

vmovd %rax#d,%x#$H0
vmovd %rdx#d,%x#$H1
vmovd $h0#d,%x#$H2
vmovd $h1#d,%x#$H3
vmovd $h2#d,%x#$H4
movl \$1,20($ctx) # set is_base2_26

call __poly1305_init_avx

.Lproceed_avx2:
mov %r15,$len

mov 0(%rsp),%r15
mov 8(%rsp),%r14
mov 16(%rsp),%r13
mov 24(%rsp),%r12
mov 32(%rsp),%rbp
mov 40(%rsp),%rbx
lea 48(%rsp),%rax
lea 48(%rsp),%rsp
.Lbase2_64_avx2_epilogue:
jmp .Ldo_avx2

.align 32
.Leven_avx2:
vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
vmovd 4*1($ctx),%x#$H1
vmovd 4*2($ctx),%x#$H2
vmovd 4*3($ctx),%x#$H3
vmovd 4*4($ctx),%x#$H4

.Ldo_avx2:
___
$code.=<<___ if (!$win64);
lea -8(%rsp),%r11
sub \$0x128,%rsp
___
$code.=<<___ if ($win64);
lea -0xf8(%rsp),%r11
sub \$0x1c8,%rsp
vmovdqa %xmm6,0x50(%r11)
vmovdqa %xmm7,0x60(%r11)
vmovdqa %xmm8,0x70(%r11)
vmovdqa %xmm9,0x80(%r11)
vmovdqa %xmm10,0x90(%r11)
vmovdqa %xmm11,0xa0(%r11)
vmovdqa %xmm12,0xb0(%r11)
vmovdqa %xmm13,0xc0(%r11)
vmovdqa %xmm14,0xd0(%r11)
vmovdqa %xmm15,0xe0(%r11)
.Ldo_avx2_body:
___
$code.=<<___;
lea 48+64($ctx),$ctx # size optimization
lea .Lconst(%rip),%rcx

# expand and copy pre-calculated table to stack
vmovdqu `16*0-64`($ctx),%x#$T2
and \$-512,%rsp
vmovdqu `16*1-64`($ctx),%x#$T3
vmovdqu `16*2-64`($ctx),%x#$T4
vmovdqu `16*3-64`($ctx),%x#$D0
vmovdqu `16*4-64`($ctx),%x#$D1
vmovdqu `16*5-64`($ctx),%x#$D2
vmovdqu `16*6-64`($ctx),%x#$D3
vpermq \$0x15,$T2,$T2 # 00003412 -> 12343434
vmovdqu `16*7-64`($ctx),%x#$D4
vpermq \$0x15,$T3,$T3
vpshufd \$0xc8,$T2,$T2 # 12343434 -> 14243444
vmovdqu `16*8-64`($ctx),%x#$MASK
vpermq \$0x15,$T4,$T4
vpshufd \$0xc8,$T3,$T3
vmovdqa $T2,0x00(%rsp)
vpermq \$0x15,$D0,$D0
vpshufd \$0xc8,$T4,$T4
vmovdqa $T3,0x20(%rsp)
vpermq \$0x15,$D1,$D1
vpshufd \$0xc8,$D0,$D0
vmovdqa $T4,0x40(%rsp)
vpermq \$0x15,$D2,$D2
vpshufd \$0xc8,$D1,$D1
vmovdqa $D0,0x60(%rsp)
vpermq \$0x15,$D3,$D3
vpshufd \$0xc8,$D2,$D2
vmovdqa $D1,0x80(%rsp)
vpermq \$0x15,$D4,$D4
vpshufd \$0xc8,$D3,$D3
vmovdqa $D2,0xa0(%rsp)
vpermq \$0x15,$MASK,$MASK
vpshufd \$0xc8,$D4,$D4
vmovdqa $D3,0xc0(%rsp)
vpshufd \$0xc8,$MASK,$MASK
vmovdqa $D4,0xe0(%rsp)
vmovdqa $MASK,0x100(%rsp)
vmovdqa 64(%rcx),$MASK # .Lmask26

################################################################
# load input
vmovdqu 16*0($inp),%x#$T0
vmovdqu 16*1($inp),%x#$T1
vinserti128 \$1,16*2($inp),$T0,$T0
vinserti128 \$1,16*3($inp),$T1,$T1
lea 16*4($inp),$inp

vpsrldq \$6,$T0,$T2 # splat input
vpsrldq \$6,$T1,$T3
vpunpckhqdq $T1,$T0,$T4 # 4
vpunpcklqdq $T3,$T2,$T2 # 2:3
vpunpcklqdq $T1,$T0,$T0 # 0:1

vpsrlq \$30,$T2,$T3
vpsrlq \$4,$T2,$T2
vpsrlq \$26,$T0,$T1
vpsrlq \$40,$T4,$T4 # 4
vpand $MASK,$T2,$T2 # 2
vpand $MASK,$T0,$T0 # 0
vpand $MASK,$T1,$T1 # 1
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always

lea 0x90(%rsp),%rax # size optimization
vpaddq $H2,$T2,$H2 # accumulate input
sub \$64,$len
jz .Ltail_avx2
jmp .Loop_avx2

.align 32
.Loop_avx2:
################################################################
# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
#   \________/\________/
################################################################
#vpaddq $H2,$T2,$H2 # accumulate input
vpaddq $H0,$T0,$H0
vmovdqa `32*0`(%rsp),$T0 # r0^4
vpaddq $H1,$T1,$H1
vmovdqa `32*1`(%rsp),$T1 # r1^4
vpaddq $H3,$T3,$H3
vmovdqa `32*3`(%rsp),$T2 # r2^4
vpaddq $H4,$T4,$H4
vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
vmovdqa `32*8-0x90`(%rax),$S4 # s4^4

# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
#
# however, as h2 is "chronologically" first one available pull
# corresponding operations up, so it's
#
# d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
# d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4

vpmuludq $H2,$T0,$D2 # d2 = h2*r0
vpmuludq $H2,$T1,$D3 # d3 = h2*r1
vpmuludq $H2,$T2,$D4 # d4 = h2*r2
vpmuludq $H2,$T3,$D0 # d0 = h2*s3
vpmuludq $H2,$S4,$D1 # d1 = h2*s4

vpmuludq $H0,$T1,$T4 # h0*r1
vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
vpaddq $T4,$D1,$D1 # d1 += h0*r1
vpaddq $H2,$D2,$D2 # d2 += h1*r1
vpmuludq $H3,$T1,$T4 # h3*r1
vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
vpaddq $T4,$D4,$D4 # d4 += h3*r1
vpaddq $H2,$D0,$D0 # d0 += h4*s1
vmovdqa `32*4-0x90`(%rax),$T1 # s2

vpmuludq $H0,$T0,$T4 # h0*r0
vpmuludq $H1,$T0,$H2 # h1*r0
vpaddq $T4,$D0,$D0 # d0 += h0*r0
vpaddq $H2,$D1,$D1 # d1 += h1*r0
vpmuludq $H3,$T0,$T4 # h3*r0
vpmuludq $H4,$T0,$H2 # h4*r0
vmovdqu 16*0($inp),%x#$T0 # load input
vpaddq $T4,$D3,$D3 # d3 += h3*r0
vpaddq $H2,$D4,$D4 # d4 += h4*r0
vinserti128 \$1,16*2($inp),$T0,$T0

vpmuludq $H3,$T1,$T4 # h3*s2
vpmuludq $H4,$T1,$H2 # h4*s2
vmovdqu 16*1($inp),%x#$T1
vpaddq $T4,$D0,$D0 # d0 += h3*s2
vpaddq $H2,$D1,$D1 # d1 += h4*s2
vmovdqa `32*5-0x90`(%rax),$H2 # r3
vpmuludq $H1,$T2,$T4 # h1*r2
vpmuludq $H0,$T2,$T2 # h0*r2
vpaddq $T4,$D3,$D3 # d3 += h1*r2
vpaddq $T2,$D2,$D2 # d2 += h0*r2
vinserti128 \$1,16*3($inp),$T1,$T1
lea 16*4($inp),$inp

vpmuludq $H1,$H2,$T4 # h1*r3
vpmuludq $H0,$H2,$H2 # h0*r3
vpsrldq \$6,$T0,$T2 # splat input
vpaddq $T4,$D4,$D4 # d4 += h1*r3
vpaddq $H2,$D3,$D3 # d3 += h0*r3
vpmuludq $H3,$T3,$T4 # h3*s3
vpmuludq $H4,$T3,$H2 # h4*s3
vpsrldq \$6,$T1,$T3
vpaddq $T4,$D1,$D1 # d1 += h3*s3
vpaddq $H2,$D2,$D2 # d2 += h4*s3
vpunpckhqdq $T1,$T0,$T4 # 4

vpmuludq $H3,$S4,$H3 # h3*s4
vpmuludq $H4,$S4,$H4 # h4*s4
vpunpcklqdq $T1,$T0,$T0 # 0:1
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4
vpunpcklqdq $T3,$T2,$T3 # 2:3
vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
vpmuludq $H1,$S4,$H0 # h1*s4
vmovdqa 64(%rcx),$MASK # .Lmask26
vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4

################################################################
# lazy reduction (interleaved with tail of input splat)

vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4

vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$D1,$H1 # h0 -> h1

vpsrlq \$26,$H4,$D4
vpand $MASK,$H4,$H4

vpsrlq \$4,$T3,$T2

vpsrlq \$26,$H1,$D1
vpand $MASK,$H1,$H1
vpaddq $D1,$H2,$H2 # h1 -> h2

vpaddq $D4,$H0,$H0
vpsllq \$2,$D4,$D4
vpaddq $D4,$H0,$H0 # h4 -> h0

vpand $MASK,$T2,$T2 # 2
vpsrlq \$26,$T0,$T1

vpsrlq \$26,$H2,$D2
vpand $MASK,$H2,$H2
vpaddq $D2,$H3,$H3 # h2 -> h3

vpaddq $T2,$H2,$H2 # modulo-scheduled
vpsrlq \$30,$T3,$T3

vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1

vpsrlq \$40,$T4,$T4 # 4

vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4

vpand $MASK,$T0,$T0 # 0
vpand $MASK,$T1,$T1 # 1
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always

sub \$64,$len
jnz .Loop_avx2

.byte 0x66,0x90
.Ltail_avx2:
################################################################
# while above multiplications were by r^4 in all lanes, in last
# iteration we multiply least significant lane by r^4 and most
# significant one by r, so copy of above except that references
# to the precomputed table are displaced by 4...

#vpaddq $H2,$T2,$H2 # accumulate input
vpaddq $H0,$T0,$H0
vmovdqu `32*0+4`(%rsp),$T0 # r0^4
vpaddq $H1,$T1,$H1
vmovdqu `32*1+4`(%rsp),$T1 # r1^4
vpaddq $H3,$T3,$H3
vmovdqu `32*3+4`(%rsp),$T2 # r2^4
vpaddq $H4,$T4,$H4
vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4

vpmuludq $H2,$T0,$D2 # d2 = h2*r0
vpmuludq $H2,$T1,$D3 # d3 = h2*r1
vpmuludq $H2,$T2,$D4 # d4 = h2*r2
vpmuludq $H2,$T3,$D0 # d0 = h2*s3
vpmuludq $H2,$S4,$D1 # d1 = h2*s4

vpmuludq $H0,$T1,$T4 # h0*r1
vpmuludq $H1,$T1,$H2 # h1*r1
vpaddq $T4,$D1,$D1 # d1 += h0*r1
vpaddq $H2,$D2,$D2 # d2 += h1*r1
vpmuludq $H3,$T1,$T4 # h3*r1
vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
vpaddq $T4,$D4,$D4 # d4 += h3*r1
vpaddq $H2,$D0,$D0 # d0 += h4*s1

vpmuludq $H0,$T0,$T4 # h0*r0
vpmuludq $H1,$T0,$H2 # h1*r0
vpaddq $T4,$D0,$D0 # d0 += h0*r0
vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
vpaddq $H2,$D1,$D1 # d1 += h1*r0
vpmuludq $H3,$T0,$T4 # h3*r0
vpmuludq $H4,$T0,$H2 # h4*r0
vpaddq $T4,$D3,$D3 # d3 += h3*r0
vpaddq $H2,$D4,$D4 # d4 += h4*r0

vpmuludq $H3,$T1,$T4 # h3*s2
vpmuludq $H4,$T1,$H2 # h4*s2
vpaddq $T4,$D0,$D0 # d0 += h3*s2
vpaddq $H2,$D1,$D1 # d1 += h4*s2
vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
vpmuludq $H1,$T2,$T4 # h1*r2
vpmuludq $H0,$T2,$T2 # h0*r2
vpaddq $T4,$D3,$D3 # d3 += h1*r2
vpaddq $T2,$D2,$D2 # d2 += h0*r2

vpmuludq $H1,$H2,$T4 # h1*r3
vpmuludq $H0,$H2,$H2 # h0*r3
vpaddq $T4,$D4,$D4 # d4 += h1*r3
vpaddq $H2,$D3,$D3 # d3 += h0*r3
vpmuludq $H3,$T3,$T4 # h3*s3
vpmuludq $H4,$T3,$H2 # h4*s3
vpaddq $T4,$D1,$D1 # d1 += h3*s3
vpaddq $H2,$D2,$D2 # d2 += h4*s3

vpmuludq $H3,$S4,$H3 # h3*s4
vpmuludq $H4,$S4,$H4 # h4*s4
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4
vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
vpmuludq $H1,$S4,$H0 # h1*s4
vmovdqa 64(%rcx),$MASK # .Lmask26
vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4

################################################################
# horizontal addition

vpsrldq \$8,$D1,$T1
vpsrldq \$8,$H2,$T2
vpsrldq \$8,$H3,$T3
vpsrldq \$8,$H4,$T4
vpsrldq \$8,$H0,$T0
vpaddq $T1,$D1,$D1
vpaddq $T2,$H2,$H2
vpaddq $T3,$H3,$H3
vpaddq $T4,$H4,$H4
vpaddq $T0,$H0,$H0

vpermq \$0x2,$H3,$T3
vpermq \$0x2,$H4,$T4
vpermq \$0x2,$H0,$T0
vpermq \$0x2,$D1,$T1
vpermq \$0x2,$H2,$T2
vpaddq $T3,$H3,$H3
vpaddq $T4,$H4,$H4
vpaddq $T0,$H0,$H0
vpaddq $T1,$D1,$D1
vpaddq $T2,$H2,$H2

################################################################
# lazy reduction

vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4

vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$D1,$H1 # h0 -> h1

vpsrlq \$26,$H4,$D4
vpand $MASK,$H4,$H4

vpsrlq \$26,$H1,$D1
vpand $MASK,$H1,$H1
vpaddq $D1,$H2,$H2 # h1 -> h2

vpaddq $D4,$H0,$H0
vpsllq \$2,$D4,$D4
vpaddq $D4,$H0,$H0 # h4 -> h0

vpsrlq \$26,$H2,$D2
vpand $MASK,$H2,$H2
vpaddq $D2,$H3,$H3 # h2 -> h3

vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1

vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4

vmovd %x#$H0,`4*0-48-64`($ctx) # save partially reduced
vmovd %x#$H1,`4*1-48-64`($ctx)
vmovd %x#$H2,`4*2-48-64`($ctx)
vmovd %x#$H3,`4*3-48-64`($ctx)
vmovd %x#$H4,`4*4-48-64`($ctx)
___
$code.=<<___ if ($win64);
vmovdqa 0x50(%r11),%xmm6
vmovdqa 0x60(%r11),%xmm7
vmovdqa 0x70(%r11),%xmm8
vmovdqa 0x80(%r11),%xmm9
vmovdqa 0x90(%r11),%xmm10
vmovdqa 0xa0(%r11),%xmm11
vmovdqa 0xb0(%r11),%xmm12
vmovdqa 0xc0(%r11),%xmm13
vmovdqa 0xd0(%r11),%xmm14
vmovdqa 0xe0(%r11),%xmm15
lea 0xf8(%r11),%rsp
.Ldo_avx2_epilogue:
___
$code.=<<___ if (!$win64);
lea 8(%r11),%rsp
___
$code.=<<___;
vzeroupper
ret
.size poly1305_blocks_avx2,.-poly1305_blocks_avx2
___
}

$code.=<<___;
.align 64
.Lconst:
.Lmask24:
.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.L129:
.long 1<<24,0,1<<24,0,1<<24,0,1<<24,0
.Lmask26:
.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.Lfive:
.long 5,0,5,0,5,0,5,0
___
}

$code.=<<___;
.asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align 16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp

mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip

mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData

mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<.Lprologue
jb .Lcommon_seh_tail

mov 152($context),%rax # pull context->Rsp

mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=.Lepilogue
jae .Lcommon_seh_tail

lea 48(%rax),%rax

mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
mov -40(%rax),%r14
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
jmp .Lcommon_seh_tail
.size se_handler,.-se_handler

.type avx_handler,\@abi-omnipotent
.align 16
avx_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp

mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip

mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData

mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lcommon_seh_tail

mov 152($context),%rax # pull context->Rsp

mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lcommon_seh_tail

mov 208($context),%rax # pull context->R11

lea 0x50(%rax),%rsi
lea 0xf8(%rax),%rax
lea 512($context),%rdi # &context.Xmm6
mov \$20,%ecx
.long 0xa548f3fc # cld; rep movsq

.Lcommon_seh_tail:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi

mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$154,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq

mov $disp,%rsi
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)

mov \$1,%eax # ExceptionContinueSearch
add \$64,%rsp
popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
pop %rdi
pop %rsi
ret
.size avx_handler,.-avx_handler

.section .pdata
.align 4
.rva .LSEH_begin_poly1305_init
.rva .LSEH_end_poly1305_init
.rva .LSEH_info_poly1305_init

.rva .LSEH_begin_poly1305_blocks
.rva .LSEH_end_poly1305_blocks
.rva .LSEH_info_poly1305_blocks

.rva .LSEH_begin_poly1305_emit
.rva .LSEH_end_poly1305_emit
.rva .LSEH_info_poly1305_emit
___
$code.=<<___ if ($avx);
.rva .LSEH_begin_poly1305_blocks_avx
.rva .Lbase2_64_avx
.rva .LSEH_info_poly1305_blocks_avx_1

.rva .Lbase2_64_avx
.rva .Leven_avx
.rva .LSEH_info_poly1305_blocks_avx_2

.rva .Leven_avx
.rva .LSEH_end_poly1305_blocks_avx
.rva .LSEH_info_poly1305_blocks_avx_3

.rva .LSEH_begin_poly1305_emit_avx
.rva .LSEH_end_poly1305_emit_avx
.rva .LSEH_info_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
.rva .LSEH_begin_poly1305_blocks_avx2
.rva .Lbase2_64_avx2
.rva .LSEH_info_poly1305_blocks_avx2_1

.rva .Lbase2_64_avx2
.rva .Leven_avx2
.rva .LSEH_info_poly1305_blocks_avx2_2

.rva .Leven_avx2
.rva .LSEH_end_poly1305_blocks_avx2
.rva .LSEH_info_poly1305_blocks_avx2_3
___
$code.=<<___;
.section .xdata
.align 8
.LSEH_info_poly1305_init:
.byte 9,0,0,0
.rva se_handler
.rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
.byte 9,0,0,0
.rva se_handler
.rva .Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
.byte 9,0,0,0
.rva se_handler
.rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
___
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
.byte 9,0,0,0
.rva se_handler
.rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
.byte 9,0,0,0
.rva se_handler
.rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
.byte 9,0,0,0
.rva avx_handler
.rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]

.LSEH_info_poly1305_emit_avx:
.byte 9,0,0,0
.rva se_handler
.rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
.byte 9,0,0,0
.rva se_handler
.rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
.byte 9,0,0,0
.rva se_handler
.rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
.byte 9,0,0,0
.rva avx_handler
.rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
___
}

foreach (split('\n',$code)) {
s/\`([^\`]*)\`/eval($1)/ge;
s/%r([a-z]+)#d/%e$1/g;
s/%r([0-9]+)#d/%r$1d/g;
s/%x#%y/%x/g;

print $_,"\n";
}
close STDOUT;