#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# The OpenSSL GCM implementation is organized in such a way that its
# performance is rather close to the sum of its streamed components:
# parallelized AES-NI CTR and modulo-scheduled PCLMULQDQ-enabled
# GHASH. Unfortunately, as no stitch implementation was observed to
# perform significantly better than the sum of the components on
# contemporary CPUs, the effort was deemed impossible to justify.
# This module is based on a combination of Intel submissions, [1]
# and [2], with a MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp., who verified that it reduces shuffle
# pressure with a notable relative improvement, achieving 1.0 cycle
# per byte processed with a 128-bit key on Haswell, and 0.74 on
# Broadwell. [Mentioned results are raw profiled measurements for a
# favourable packet size, one divisible by 96. Applications using the
# EVP interface will observe a few percent worse performance.]
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
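#
# Roughly, for every 6-block batch the stitched loop below interleaves
# the two halves of GCM that would otherwise run back to back:
#
#   C[i] = E_k(counter+i) ^ P[i],  i = 0..5              (AES-NI CTR)
#   Xi   = ((Xi ^ X[0])*H^6) ^ X[1]*H^5 ^ ... ^ X[5]*H   (aggregated GHASH)
#
# where X[0..5] are the six most recent ciphertext blocks. In the
# decrypt path those are the incoming blocks, so one pass suffices; the
# encrypt path has to run encryption ahead of GHASH, which is why it
# primes the pipeline with plain CTR batches first (see _aesni_ctr32_6x
# below).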
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable this after testing. $avx goes up to 2.
$avx = 0;

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if ($avx>1) {{{

($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");

($Ii,$T1,$T2,$Hkey,
 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));

($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));

($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");
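# Stack layout as seen from inside _aesni_ctr32_ghash_6x (the +8 in the
# offsets below compensates for the return address pushed by "call"):
#
#   16+8(%rsp)              postponed $Z3 term of the GHASH accumulator
#   0x20+8..0x78+8(%rsp)    six byte-swapped input blocks I[5]..I[0],
#                           refilled every iteration with MOVBE loads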
$code=<<___;
.text

.type _aesni_ctr32_ghash_6x,\@abi-omnipotent
.align 32
_aesni_ctr32_ghash_6x:
	vmovdqu 0x20($const),$T2	# borrow $T2, .Lone_msb
	sub \$6,$len
	vpxor $Z0,$Z0,$Z0		# $Z0 = 0
	vmovdqu 0x00-0x80($key),$rndkey
	vpaddb $T2,$T1,$inout1
	vpaddb $T2,$inout1,$inout2
	vpaddb $T2,$inout2,$inout3
	vpaddb $T2,$inout3,$inout4
	vpaddb $T2,$inout4,$inout5
	vpxor $rndkey,$T1,$inout0
	vmovdqu $Z0,16+8(%rsp)		# "$Z3" = 0
	jmp .Loop6x

.align 32
.Loop6x:
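	# $counter holds the 32-bit big-endian counter loaded with a plain
	# MOV, so the counter's least significant byte sits in the register's
	# top byte. Adding 6<<24 advances the counter by 6; a carry out means
	# the low byte wrapped, in which case the byte-wise vpaddb increments
	# would be wrong and .Lhandle_ctr32 must redo them with full 32-bit
	# additions.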
	add \$`6<<24`,$counter
	jc .Lhandle_ctr32		# discard $inout[1-5]?
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddb $T2,$inout5,$T1		# next counter value
	vpxor $rndkey,$inout1,$inout1
	vpxor $rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu $T1,($ivp)		# save next counter value
	vpclmulqdq \$0x10,$Hkey,$Z3,$Z1
	vpxor $rndkey,$inout3,$inout3
	vmovups 0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq \$0x01,$Hkey,$Z3,$Z2
	xor %r12,%r12
	cmp $in0,$end0

	vaesenc $T2,$inout0,$inout0
	vmovdqu 0x30+8(%rsp),$Ii	# I[4]
	vpxor $rndkey,$inout4,$inout4
	vpclmulqdq \$0x00,$Hkey,$Z3,$T1
	vaesenc $T2,$inout1,$inout1
	vpxor $rndkey,$inout5,$inout5
	setnc %r12b
	vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
	vaesenc $T2,$inout2,$inout2
	vmovdqu 0x10-0x20($Xip),$Hkey	# $Hkey^2
	neg %r12
	vaesenc $T2,$inout3,$inout3
	vpxor $Z1,$Z2,$Z2
	vpclmulqdq \$0x00,$Hkey,$Ii,$Z1
	vpxor $Z0,$Xi,$Xi		# modulo-scheduled
	vaesenc $T2,$inout4,$inout4
	vpxor $Z1,$T1,$Z0
	and \$0x60,%r12
	vmovups 0x20-0x80($key),$rndkey
	vpclmulqdq \$0x10,$Hkey,$Ii,$T1
	vaesenc $T2,$inout5,$inout5

	vpclmulqdq \$0x01,$Hkey,$Ii,$T2
	lea ($in0,%r12),$in0
	vaesenc $rndkey,$inout0,$inout0
	vpxor 16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq \$0x11,$Hkey,$Ii,$Hkey
	vmovdqu 0x40+8(%rsp),$Ii	# I[3]
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x58($in0),%r13
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x50($in0),%r12
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x20+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x28+8(%rsp)
	vmovdqu 0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x30-0x80($key),$rndkey
	vpxor $T1,$Z2,$Z2
	vpclmulqdq \$0x00,$Z1,$Ii,$T1
	vaesenc $rndkey,$inout0,$inout0
	vpxor $T2,$Z2,$Z2
	vpclmulqdq \$0x10,$Z1,$Ii,$T2
	vaesenc $rndkey,$inout1,$inout1
	vpxor $Hkey,$Z3,$Z3
	vpclmulqdq \$0x01,$Z1,$Ii,$Hkey
	vaesenc $rndkey,$inout2,$inout2
	vpclmulqdq \$0x11,$Z1,$Ii,$Z1
	vmovdqu 0x50+8(%rsp),$Ii	# I[2]
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vpxor $T1,$Z0,$Z0
	vmovdqu 0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x40-0x80($key),$rndkey
	vpxor $T2,$Z2,$Z2
	vpclmulqdq \$0x00,$T1,$Ii,$T2
	vaesenc $rndkey,$inout0,$inout0
	vpxor $Hkey,$Z2,$Z2
	vpclmulqdq \$0x10,$T1,$Ii,$Hkey
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x48($in0),%r13
	vpxor $Z1,$Z3,$Z3
	vpclmulqdq \$0x01,$T1,$Ii,$Z1
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x40($in0),%r12
	vpclmulqdq \$0x11,$T1,$Ii,$T1
	vmovdqu 0x60+8(%rsp),$Ii	# I[1]
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x30+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x38+8(%rsp)
	vpxor $T2,$Z0,$Z0
	vmovdqu 0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x50-0x80($key),$rndkey
	vpxor $Hkey,$Z2,$Z2
	vpclmulqdq \$0x00,$T2,$Ii,$Hkey
	vaesenc $rndkey,$inout0,$inout0
	vpxor $Z1,$Z2,$Z2
	vpclmulqdq \$0x10,$T2,$Ii,$Z1
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x38($in0),%r13
	vpxor $T1,$Z3,$Z3
	vpclmulqdq \$0x01,$T2,$Ii,$T1
	vpxor 0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x30($in0),%r12
	vpclmulqdq \$0x11,$T2,$Ii,$T2
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x40+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x48+8(%rsp)
	vpxor $Hkey,$Z0,$Z0
	vmovdqu 0x70-0x20($Xip),$Hkey	# $Hkey^6
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x60-0x80($key),$rndkey
	vpxor $Z1,$Z2,$Z2
	vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
	vaesenc $rndkey,$inout0,$inout0
	vpxor $T1,$Z2,$Z2
	vpclmulqdq \$0x01,$Hkey,$Xi,$T1
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x28($in0),%r13
	vpxor $T2,$Z3,$Z3
	vpclmulqdq \$0x00,$Hkey,$Xi,$T2
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x20($in0),%r12
	vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x50+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x58+8(%rsp)
	vpxor $Z1,$Z2,$Z2
	vaesenc $rndkey,$inout5,$inout5
	vpxor $T1,$Z2,$Z2

	vmovups 0x70-0x80($key),$rndkey
	vpslldq \$8,$Z2,$Z1
	vpxor $T2,$Z0,$Z0
	vmovdqu 0x10($const),$Hkey	# .Lpoly
	vaesenc $rndkey,$inout0,$inout0
	vpxor $Xi,$Z3,$Z3
	vaesenc $rndkey,$inout1,$inout1
	vpxor $Z1,$Z0,$Z0
	movbe 0x18($in0),%r13
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x10($in0),%r12
	vpalignr \$8,$Z0,$Z0,$Ii	# 1st phase
	vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
	mov %r13,0x60+8(%rsp)
	vaesenc $rndkey,$inout3,$inout3
	mov %r12,0x68+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	vmovups 0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	vaesenc $rndkey,$inout5,$inout5

	vaesenc $T1,$inout0,$inout0
	vmovups 0x90-0x80($key),$rndkey
	vaesenc $T1,$inout1,$inout1
	vpsrldq \$8,$Z2,$Z2
	vaesenc $T1,$inout2,$inout2
	vpxor $Z2,$Z3,$Z3
	vaesenc $T1,$inout3,$inout3
	vpxor $Ii,$Z0,$Z0
	movbe 0x08($in0),%r13
	vaesenc $T1,$inout4,$inout4
	movbe 0x00($in0),%r12
	vaesenc $T1,$inout5,$inout5
	vmovups 0xa0-0x80($key),$T1
	cmp \$11,$rounds
	jb .Lenc_tail			# 128-bit key

	vaesenc $rndkey,$inout0,$inout0
	vaesenc $rndkey,$inout1,$inout1
	vaesenc $rndkey,$inout2,$inout2
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vaesenc $rndkey,$inout5,$inout5

	vaesenc $T1,$inout0,$inout0
	vaesenc $T1,$inout1,$inout1
	vaesenc $T1,$inout2,$inout2
	vaesenc $T1,$inout3,$inout3
	vaesenc $T1,$inout4,$inout4
	vmovups 0xb0-0x80($key),$rndkey
	vaesenc $T1,$inout5,$inout5
	vmovups 0xc0-0x80($key),$T1
	je .Lenc_tail			# 192-bit key

	vaesenc $rndkey,$inout0,$inout0
	vaesenc $rndkey,$inout1,$inout1
	vaesenc $rndkey,$inout2,$inout2
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vaesenc $rndkey,$inout5,$inout5

	vaesenc $T1,$inout0,$inout0
	vaesenc $T1,$inout1,$inout1
	vaesenc $T1,$inout2,$inout2
	vaesenc $T1,$inout3,$inout3
	vaesenc $T1,$inout4,$inout4
	vmovups 0xd0-0x80($key),$rndkey
	vaesenc $T1,$inout5,$inout5
	vmovups 0xe0-0x80($key),$T1
	jmp .Lenc_tail			# 256-bit key

.align 32
.Lhandle_ctr32:
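	# Counter-wrap slow path: byte-swap the counter block into native
	# little-endian order, increment with full 32-bit vpaddd (using
	# .Lone_lsb/.Ltwo_lsb so carries propagate across bytes), then swap
	# each derived counter back to big-endian before rejoining the loop.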
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	vpshufb $Ii,$T1,$Z2		# byte-swap counter
	vmovdqu 0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd 0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd $Z1,$Z2,$inout2
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddd $Z1,$inout1,$inout3
	vpshufb $Ii,$inout1,$inout1
	vpaddd $Z1,$inout2,$inout4
	vpshufb $Ii,$inout2,$inout2
	vpxor $rndkey,$inout1,$inout1
	vpaddd $Z1,$inout3,$inout5
	vpshufb $Ii,$inout3,$inout3
	vpxor $rndkey,$inout2,$inout2
	vpaddd $Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb $Ii,$inout4,$inout4
	vpshufb $Ii,$inout5,$inout5
	vpshufb $Ii,$T1,$T1		# next counter value
	jmp .Lresume_ctr32

.align 32
.Lenc_tail:
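	# Last AES round for the batch: each block's last round key is XORed
	# with the corresponding input ahead of time, so a single vaesenclast
	# both completes the cipher and applies the key stream. The 2nd
	# reduction phase of GHASH and the next six counter values are
	# interleaved with it.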
	vaesenc $rndkey,$inout0,$inout0
	vmovdqu $Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr \$8,$Z0,$Z0,$Xi	# 2nd phase
	vaesenc $rndkey,$inout1,$inout1
	vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
	vpxor 0x00($inp),$T1,$T2
	vaesenc $rndkey,$inout2,$inout2
	vpxor 0x10($inp),$T1,$Ii
	vaesenc $rndkey,$inout3,$inout3
	vpxor 0x20($inp),$T1,$Z1
	vaesenc $rndkey,$inout4,$inout4
	vpxor 0x30($inp),$T1,$Z2
	vaesenc $rndkey,$inout5,$inout5
	vpxor 0x40($inp),$T1,$Z3
	vpxor 0x50($inp),$T1,$Hkey
	vmovdqu ($ivp),$T1		# load next counter value

	vaesenclast $T2,$inout0,$inout0
	vmovdqu 0x20($const),$T2	# borrow $T2, .Lone_msb
	vaesenclast $Ii,$inout1,$inout1
	vpaddb $T2,$T1,$Ii
	mov %r13,0x70+8(%rsp)
	lea 0x60($inp),$inp
	vaesenclast $Z1,$inout2,$inout2
	vpaddb $T2,$Ii,$Z1
	mov %r12,0x78+8(%rsp)
	lea 0x60($out),$out
	vmovdqu 0x00-0x80($key),$rndkey
	vaesenclast $Z2,$inout3,$inout3
	vpaddb $T2,$Z1,$Z2
	vaesenclast $Z3,$inout4,$inout4
	vpaddb $T2,$Z2,$Z3
	vaesenclast $Hkey,$inout5,$inout5
	vpaddb $T2,$Z3,$Hkey

	add \$0x60,$ret
	sub \$0x6,$len
	jc .L6x_done

	vmovups $inout0,-0x60($out)	# save output
	vpxor $rndkey,$T1,$inout0
	vmovups $inout1,-0x50($out)
	vmovdqa $Ii,$inout1		# 0 latency
	vmovups $inout2,-0x40($out)
	vmovdqa $Z1,$inout2		# 0 latency
	vmovups $inout3,-0x30($out)
	vmovdqa $Z2,$inout3		# 0 latency
	vmovups $inout4,-0x20($out)
	vmovdqa $Z3,$inout4		# 0 latency
	vmovups $inout5,-0x10($out)
	vmovdqa $Hkey,$inout5		# 0 latency
	vmovdqu 0x20+8(%rsp),$Z3	# I[5]
	jmp .Loop6x

.L6x_done:
	vpxor 16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor $Z0,$Xi,$Xi		# modulo-scheduled

	ret
.size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#                               const AES_KEY *key, unsigned char iv[16],
#                               struct { u128 Xi,H,Htbl[9]; } *Xip);
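#
# Both entry points return the number of bytes actually processed,
# always a multiple of 96; the caller handles any remainder. Inputs
# shorter than the minimum (96 bytes for decrypt, 288 for encrypt,
# since encryption runs two 6-block batches ahead of GHASH) are
# rejected with a return value of 0. Xip points at the GHASH state: Xi
# itself followed by H and the precomputed powers of H (with their
# xored halves) as laid out by the companion AVX GHASH code; the
# $Hkey^1..$Hkey^6 offsets below index into that Htbl.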
$code.=<<___;
.globl aesni_gcm_decrypt
.type aesni_gcm_decrypt,\@function,6
.align 32
aesni_gcm_decrypt:
	xor $ret,$ret
	cmp \$0x60,$len			# minimal accepted length
	jb .Lgcm_dec_abort

	lea (%rsp),%rax			# save stack pointer
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
___
$code.=<<___ if ($win64);
	lea -0xa8(%rsp),%rsp
	movaps %xmm6,-0xd8(%rax)
	movaps %xmm7,-0xc8(%rax)
	movaps %xmm8,-0xb8(%rax)
	movaps %xmm9,-0xa8(%rax)
	movaps %xmm10,-0x98(%rax)
	movaps %xmm11,-0x88(%rax)
	movaps %xmm12,-0x78(%rax)
	movaps %xmm13,-0x68(%rax)
	movaps %xmm14,-0x58(%rax)
	movaps %xmm15,-0x48(%rax)
.Lgcm_dec_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu ($ivp),$T1		# input counter value
	add \$-128,%rsp
	mov 12($ivp),$counter
	lea .Lbswap_mask(%rip),$const
	lea -0x80($key),$in0		# borrow $in0
	mov \$0xf80,$end0		# borrow $end0
	vmovdqu ($Xip),$Xi		# load Xi
	and \$-128,%rsp			# ensure stack alignment
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	lea 0x80($key),$key		# size optimization
	lea 0x20+0x20($Xip),$Xip	# size optimization
	mov 0xf0-0x80($key),$rounds
	vpshufb $Ii,$Xi,$Xi
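	# The masks below keep address bits 7-11, i.e. the position of the
	# key schedule and of %rsp within a 4KB page at 128-byte granularity.
	# If the scratch area would land within 768 bytes above the key
	# schedule in that sense, %rsp is lowered by the difference so the
	# stashed blocks and the round keys are less likely to contend for
	# the same cache sets.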
	and $end0,$in0
	and %rsp,$end0
	sub $in0,$end0
	jc .Ldec_no_key_aliasing
	cmp \$768,$end0
	jnc .Ldec_no_key_aliasing
	sub $end0,%rsp			# avoid aliasing with key
.Ldec_no_key_aliasing:

	vmovdqu 0x50($inp),$Z3		# I[5]
	lea ($inp),$in0
	vmovdqu 0x40($inp),$Z0
	lea -0xc0($inp,$len),$end0
	vmovdqu 0x30($inp),$Z1
	shr \$4,$len
	xor $ret,$ret
	vmovdqu 0x20($inp),$Z2
	vpshufb $Ii,$Z3,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu 0x10($inp),$T2
	vpshufb $Ii,$Z0,$Z0
	vmovdqu ($inp),$Hkey
	vpshufb $Ii,$Z1,$Z1
	vmovdqu $Z0,0x30(%rsp)
	vpshufb $Ii,$Z2,$Z2
	vmovdqu $Z1,0x40(%rsp)
	vpshufb $Ii,$T2,$T2
	vmovdqu $Z2,0x50(%rsp)
	vpshufb $Ii,$Hkey,$Hkey
	vmovdqu $T2,0x60(%rsp)
	vmovdqu $Hkey,0x70(%rsp)

	call _aesni_ctr32_ghash_6x

	vmovups $inout0,-0x60($out)	# save output
	vmovups $inout1,-0x50($out)
	vmovups $inout2,-0x40($out)
	vmovups $inout3,-0x30($out)
	vmovups $inout4,-0x20($out)
	vmovups $inout5,-0x10($out)

	vpshufb ($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu $Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps -0xd8(%rax),%xmm6
	movaps -0xc8(%rax),%xmm7
	movaps -0xb8(%rax),%xmm8
	movaps -0xa8(%rax),%xmm9
	movaps -0x98(%rax),%xmm10
	movaps -0x88(%rax),%xmm11
	movaps -0x78(%rax),%xmm12
	movaps -0x68(%rax),%xmm13
	movaps -0x58(%rax),%xmm14
	movaps -0x48(%rax),%xmm15
___
$code.=<<___;
	mov -48(%rax),%r15
	mov -40(%rax),%r14
	mov -32(%rax),%r13
	mov -24(%rax),%r12
	mov -16(%rax),%rbp
	mov -8(%rax),%rbx
	lea (%rax),%rsp			# restore %rsp
.Lgcm_dec_abort:
	mov $ret,%rax			# return value
	ret
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
___

$code.=<<___;
.type _aesni_ctr32_6x,\@abi-omnipotent
.align 32
_aesni_ctr32_6x:
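	# Plain 6-block CTR encryption, no GHASH. aesni_gcm_encrypt calls
	# this twice to get 12 blocks of ciphertext buffered before the
	# stitched loop starts hashing.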
	vmovdqu 0x00-0x80($key),$Z0	# borrow $Z0 for $rndkey
	vmovdqu 0x20($const),$T2	# borrow $T2, .Lone_msb
	lea -1($rounds),%r13
	vmovups 0x10-0x80($key),$rndkey
	lea 0x20-0x80($key),%r12
	vpxor $Z0,$T1,$inout0
	add \$`6<<24`,$counter
	jc .Lhandle_ctr32_2
	vpaddb $T2,$T1,$inout1
	vpaddb $T2,$inout1,$inout2
	vpxor $Z0,$inout1,$inout1
	vpaddb $T2,$inout2,$inout3
	vpxor $Z0,$inout2,$inout2
	vpaddb $T2,$inout3,$inout4
	vpxor $Z0,$inout3,$inout3
	vpaddb $T2,$inout4,$inout5
	vpxor $Z0,$inout4,$inout4
	vpaddb $T2,$inout5,$T1
	vpxor $Z0,$inout5,$inout5
	jmp .Loop_ctr32

.align 16
.Loop_ctr32:
	vaesenc $rndkey,$inout0,$inout0
	vaesenc $rndkey,$inout1,$inout1
	vaesenc $rndkey,$inout2,$inout2
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vaesenc $rndkey,$inout5,$inout5
	vmovups (%r12),$rndkey
	lea 0x10(%r12),%r12
	dec %r13d
	jnz .Loop_ctr32

	vmovdqu (%r12),$Hkey		# last round key
	vaesenc $rndkey,$inout0,$inout0
	vpxor 0x00($inp),$Hkey,$Z0
	vaesenc $rndkey,$inout1,$inout1
	vpxor 0x10($inp),$Hkey,$Z1
	vaesenc $rndkey,$inout2,$inout2
	vpxor 0x20($inp),$Hkey,$Z2
	vaesenc $rndkey,$inout3,$inout3
	vpxor 0x30($inp),$Hkey,$Xi
	vaesenc $rndkey,$inout4,$inout4
	vpxor 0x40($inp),$Hkey,$T2
	vaesenc $rndkey,$inout5,$inout5
	vpxor 0x50($inp),$Hkey,$Hkey
	lea 0x60($inp),$inp
	vaesenclast $Z0,$inout0,$inout0
	vaesenclast $Z1,$inout1,$inout1
	vaesenclast $Z2,$inout2,$inout2
	vaesenclast $Xi,$inout3,$inout3
	vaesenclast $T2,$inout4,$inout4
	vaesenclast $Hkey,$inout5,$inout5
	vmovups $inout0,0x00($out)
	vmovups $inout1,0x10($out)
	vmovups $inout2,0x20($out)
	vmovups $inout3,0x30($out)
	vmovups $inout4,0x40($out)
	vmovups $inout5,0x50($out)
	lea 0x60($out),$out

	ret
.align 32
.Lhandle_ctr32_2:
	vpshufb $Ii,$T1,$Z2		# byte-swap counter
	vmovdqu 0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd 0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd $Z1,$Z2,$inout2
	vpaddd $Z1,$inout1,$inout3
	vpshufb $Ii,$inout1,$inout1
	vpaddd $Z1,$inout2,$inout4
	vpshufb $Ii,$inout2,$inout2
	vpxor $Z0,$inout1,$inout1
	vpaddd $Z1,$inout3,$inout5
	vpshufb $Ii,$inout3,$inout3
	vpxor $Z0,$inout2,$inout2
	vpaddd $Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb $Ii,$inout4,$inout4
	vpxor $Z0,$inout3,$inout3
	vpshufb $Ii,$inout5,$inout5
	vpxor $Z0,$inout4,$inout4
	vpshufb $Ii,$T1,$T1		# next counter value
	vpxor $Z0,$inout5,$inout5
	jmp .Loop_ctr32
.size _aesni_ctr32_6x,.-_aesni_ctr32_6x

.globl aesni_gcm_encrypt
.type aesni_gcm_encrypt,\@function,6
.align 32
aesni_gcm_encrypt:
	xor $ret,$ret
	cmp \$0x60*3,$len		# minimal accepted length
	jb .Lgcm_enc_abort

	lea (%rsp),%rax			# save stack pointer
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
___
$code.=<<___ if ($win64);
	lea -0xa8(%rsp),%rsp
	movaps %xmm6,-0xd8(%rax)
	movaps %xmm7,-0xc8(%rax)
	movaps %xmm8,-0xb8(%rax)
	movaps %xmm9,-0xa8(%rax)
	movaps %xmm10,-0x98(%rax)
	movaps %xmm11,-0x88(%rax)
	movaps %xmm12,-0x78(%rax)
	movaps %xmm13,-0x68(%rax)
	movaps %xmm14,-0x58(%rax)
	movaps %xmm15,-0x48(%rax)
.Lgcm_enc_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu ($ivp),$T1		# input counter value
	add \$-128,%rsp
	mov 12($ivp),$counter
	lea .Lbswap_mask(%rip),$const
	lea -0x80($key),$in0		# borrow $in0
	mov \$0xf80,$end0		# borrow $end0
	lea 0x80($key),$key		# size optimization
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	and \$-128,%rsp			# ensure stack alignment
	mov 0xf0-0x80($key),$rounds

	and $end0,$in0
	and %rsp,$end0
	sub $in0,$end0
	jc .Lenc_no_key_aliasing
	cmp \$768,$end0
	jnc .Lenc_no_key_aliasing
	sub $end0,%rsp			# avoid aliasing with key
.Lenc_no_key_aliasing:

	lea ($out),$in0
	lea -0xc0($out,$len),$end0
	shr \$4,$len
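	# Prime the pipeline: encrypt and byte-swap two 6-block batches, the
	# first of which is parked on the stack for the stitched loop to
	# hash. GHASH therefore trails encryption by 12 blocks, which is why
	# the minimum accepted length is 0x60*3 bytes and $ret starts at
	# 0x60*2 below.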
	call _aesni_ctr32_6x

	vpshufb $Ii,$inout0,$Xi		# save bswapped output on stack
	vpshufb $Ii,$inout1,$T2
	vmovdqu $Xi,0x70(%rsp)
	vpshufb $Ii,$inout2,$Z0
	vmovdqu $T2,0x60(%rsp)
	vpshufb $Ii,$inout3,$Z1
	vmovdqu $Z0,0x50(%rsp)
	vpshufb $Ii,$inout4,$Z2
	vmovdqu $Z1,0x40(%rsp)
	vpshufb $Ii,$inout5,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu $Z2,0x30(%rsp)

	call _aesni_ctr32_6x

	vmovdqu ($Xip),$Xi		# load Xi
	lea 0x20+0x20($Xip),$Xip	# size optimization
	sub \$12,$len
	mov \$0x60*2,$ret
	vpshufb $Ii,$Xi,$Xi

	call _aesni_ctr32_ghash_6x
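
	# The stitched loop never hashes the final two batches: six
	# ciphertext blocks sit byte-swapped on the stack and six more are
	# still in $inout0-$inout5. The code below folds all twelve into
	# $Xi with two more rounds of the 6-way aggregation, using per-block
	# Karatsuba (low, high and xored halves).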
	vmovdqu 0x20(%rsp),$Z3		# I[5]
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq $Z3,$Z3,$T1
	vmovdqu 0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
	vmovups $inout0,-0x60($out)	# save output
	vpshufb $Ii,$inout0,$inout0	# but keep bswapped copy
	vpxor $Z3,$T1,$T1
	vmovups $inout1,-0x50($out)
	vpshufb $Ii,$inout1,$inout1
	vmovups $inout2,-0x40($out)
	vpshufb $Ii,$inout2,$inout2
	vmovups $inout3,-0x30($out)
	vpshufb $Ii,$inout3,$inout3
	vmovups $inout4,-0x20($out)
	vpshufb $Ii,$inout4,$inout4
	vmovups $inout5,-0x10($out)
	vpshufb $Ii,$inout5,$inout5
	vmovdqu $inout0,0x10(%rsp)	# free $inout0
___
{ my ($HK,$T3)=($rndkey,$inout0);
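# $HK tracks the precomputed xored halves of the H powers (stored
# alongside them in the Htbl), so each block needs only three VPCLMULQDQ:
# low half, high half, and the Karatsuba middle term.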
$code.=<<___;
	vmovdqu 0x30(%rsp),$Z2		# I[4]
	vmovdqu 0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpunpckhqdq $Z2,$Z2,$T2
	vpclmulqdq \$0x00,$Hkey,$Z3,$Z1
	vpxor $Z2,$T2,$T2
	vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
	vpclmulqdq \$0x00,$HK,$T1,$T1

	vmovdqu 0x40(%rsp),$T3		# I[3]
	vpclmulqdq \$0x00,$Ii,$Z2,$Z0
	vmovdqu 0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor $Z1,$Z0,$Z0
	vpunpckhqdq $T3,$T3,$Z1
	vpclmulqdq \$0x11,$Ii,$Z2,$Z2
	vpxor $T3,$Z1,$Z1
	vpxor $Z3,$Z2,$Z2
	vpclmulqdq \$0x10,$HK,$T2,$T2
	vmovdqu 0x50-0x20($Xip),$HK
	vpxor $T1,$T2,$T2

	vmovdqu 0x50(%rsp),$T1		# I[2]
	vpclmulqdq \$0x00,$Hkey,$T3,$Z3
	vmovdqu 0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor $Z0,$Z3,$Z3
	vpunpckhqdq $T1,$T1,$Z0
	vpclmulqdq \$0x11,$Hkey,$T3,$T3
	vpxor $T1,$Z0,$Z0
	vpxor $Z2,$T3,$T3
	vpclmulqdq \$0x00,$HK,$Z1,$Z1
	vpxor $T2,$Z1,$Z1

	vmovdqu 0x60(%rsp),$T2		# I[1]
	vpclmulqdq \$0x00,$Ii,$T1,$Z2
	vmovdqu 0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor $Z3,$Z2,$Z2
	vpunpckhqdq $T2,$T2,$Z3
	vpclmulqdq \$0x11,$Ii,$T1,$T1
	vpxor $T2,$Z3,$Z3
	vpxor $T3,$T1,$T1
	vpclmulqdq \$0x10,$HK,$Z0,$Z0
	vmovdqu 0x80-0x20($Xip),$HK
	vpxor $Z1,$Z0,$Z0

	vpxor 0x70(%rsp),$Xi,$Xi	# accumulate I[0]
	vpclmulqdq \$0x00,$Hkey,$T2,$Z1
	vmovdqu 0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpunpckhqdq $Xi,$Xi,$T3
	vpxor $Z2,$Z1,$Z1
	vpclmulqdq \$0x11,$Hkey,$T2,$T2
	vpxor $Xi,$T3,$T3
	vpxor $T1,$T2,$T2
	vpclmulqdq \$0x00,$HK,$Z3,$Z3
	vpxor $Z0,$Z3,$Z0

	vpclmulqdq \$0x00,$Ii,$Xi,$Z2
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq $inout5,$inout5,$T1
	vpclmulqdq \$0x11,$Ii,$Xi,$Xi
	vpxor $inout5,$T1,$T1
	vpxor $Z1,$Z2,$Z1
	vpclmulqdq \$0x10,$HK,$T3,$T3
	vmovdqu 0x20-0x20($Xip),$HK
	vpxor $T2,$Xi,$Z3
	vpxor $Z0,$T3,$Z2

	vmovdqu 0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpxor $Z1,$Z3,$T3		# aggregated Karatsuba post-processing
	vpclmulqdq \$0x00,$Hkey,$inout5,$Z0
	vpxor $T3,$Z2,$Z2
	vpunpckhqdq $inout4,$inout4,$T2
	vpclmulqdq \$0x11,$Hkey,$inout5,$inout5
	vpxor $inout4,$T2,$T2
	vpslldq \$8,$Z2,$T3
	vpclmulqdq \$0x00,$HK,$T1,$T1
	vpxor $T3,$Z1,$Xi
	vpsrldq \$8,$Z2,$Z2
	vpxor $Z2,$Z3,$Z3

	vpclmulqdq \$0x00,$Ii,$inout4,$Z1
	vmovdqu 0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor $Z0,$Z1,$Z1
	vpunpckhqdq $inout3,$inout3,$T3
	vpclmulqdq \$0x11,$Ii,$inout4,$inout4
	vpxor $inout3,$T3,$T3
	vpxor $inout5,$inout4,$inout4
	vpalignr \$8,$Xi,$Xi,$inout5	# 1st phase
	vpclmulqdq \$0x10,$HK,$T2,$T2
	vmovdqu 0x50-0x20($Xip),$HK
	vpxor $T1,$T2,$T2

	vpclmulqdq \$0x00,$Hkey,$inout3,$Z0
	vmovdqu 0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor $Z1,$Z0,$Z0
	vpunpckhqdq $inout2,$inout2,$T1
	vpclmulqdq \$0x11,$Hkey,$inout3,$inout3
	vpxor $inout2,$T1,$T1
	vpxor $inout4,$inout3,$inout3
	vxorps 0x10(%rsp),$Z3,$Z3	# accumulate $inout0
	vpclmulqdq \$0x00,$HK,$T3,$T3
	vpxor $T2,$T3,$T3
	vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
	vxorps $inout5,$Xi,$Xi

	vpclmulqdq \$0x00,$Ii,$inout2,$Z1
	vmovdqu 0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor $Z0,$Z1,$Z1
	vpunpckhqdq $inout1,$inout1,$T2
	vpclmulqdq \$0x11,$Ii,$inout2,$inout2
	vpxor $inout1,$T2,$T2
	vpalignr \$8,$Xi,$Xi,$inout5	# 2nd phase
	vpxor $inout3,$inout2,$inout2
	vpclmulqdq \$0x10,$HK,$T1,$T1
	vmovdqu 0x80-0x20($Xip),$HK
	vpxor $T3,$T1,$T1
	vxorps $Z3,$inout5,$inout5
	vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
	vxorps $inout5,$Xi,$Xi

	vpclmulqdq \$0x00,$Hkey,$inout1,$Z0
	vmovdqu 0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpxor $Z1,$Z0,$Z0
	vpunpckhqdq $Xi,$Xi,$T3
	vpclmulqdq \$0x11,$Hkey,$inout1,$inout1
	vpxor $Xi,$T3,$T3
	vpxor $inout2,$inout1,$inout1
	vpclmulqdq \$0x00,$HK,$T2,$T2
	vpxor $T1,$T2,$T2

	vpclmulqdq \$0x00,$Ii,$Xi,$Z1
	vpclmulqdq \$0x11,$Ii,$Xi,$Z3
	vpxor $Z0,$Z1,$Z1
	vpclmulqdq \$0x10,$HK,$T3,$Z2
	vpxor $inout1,$Z3,$Z3
	vpxor $T2,$Z2,$Z2

	vpxor $Z1,$Z3,$Z0		# aggregated Karatsuba post-processing
	vpxor $Z0,$Z2,$Z2
	vpslldq \$8,$Z2,$T1
	vmovdqu 0x10($const),$Hkey	# .Lpoly
	vpsrldq \$8,$Z2,$Z2
	vpxor $T1,$Z1,$Xi
	vpxor $Z2,$Z3,$Z3
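
	# Final reduction of the 256-bit product ($Z3:$Xi) modulo the GCM
	# polynomial: two identical folding phases, each multiplying the low
	# half by the reflected constant in .Lpoly and rotating with
	# vpalignr, bring the result back to 128 bits in $Xi.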
	vpalignr \$8,$Xi,$Xi,$T2	# 1st phase
	vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
	vpxor $T2,$Xi,$Xi

	vpalignr \$8,$Xi,$Xi,$T2	# 2nd phase
	vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
	vpxor $Z3,$T2,$T2
	vpxor $T2,$Xi,$Xi
___
}
$code.=<<___;
	vpshufb ($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu $Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps -0xd8(%rax),%xmm6
	movaps -0xc8(%rax),%xmm7
	movaps -0xb8(%rax),%xmm8
	movaps -0xa8(%rax),%xmm9
	movaps -0x98(%rax),%xmm10
	movaps -0x88(%rax),%xmm11
	movaps -0x78(%rax),%xmm12
	movaps -0x68(%rax),%xmm13
	movaps -0x58(%rax),%xmm14
	movaps -0x48(%rax),%xmm15
___
$code.=<<___;
	mov -48(%rax),%r15
	mov -40(%rax),%r14
	mov -32(%rax),%r13
	mov -24(%rax),%r12
	mov -16(%rax),%rbp
	mov -8(%rax),%rbx
	lea (%rax),%rsp			# restore %rsp
.Lgcm_enc_abort:
	mov $ret,%rax			# return value
	ret
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
___

$code.=<<___;
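# .Lbswap_mask reverses byte order; .Lpoly holds the reflected GCM
# reduction constant (0xc2 in the top byte); .Lone_msb increments a
# big-endian counter block via vpaddb, while .Lone_lsb and .Ltwo_lsb
# are the little-endian dword increments used on the counter-wrap
# paths.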
.align 64
.Lbswap_mask:
	.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
	.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
	.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
	.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
	.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.asciz "AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align 64
___

if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___
.extern __imp_RtlVirtualUnwind
.type gcm_se_handler,\@abi-omnipotent
.align 16
gcm_se_handler:
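	# Win64 SEH handler shared by both entry points: if the fault address
	# lies between the recorded prologue and epilogue labels, the saved
	# GPRs and the ten XMM registers are recovered from the frame
	# anchored at the function's saved stack pointer before
	# RtlVirtualUnwind continues the unwind.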
	push %rsi
	push %rdi
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
	pushfq
	sub \$64,%rsp

	mov 120($context),%rax		# pull context->Rax
	mov 248($context),%rbx		# pull context->Rip

	mov 8($disp),%rsi		# disp->ImageBase
	mov 56($disp),%r11		# disp->HandlerData

	mov 0(%r11),%r10d		# HandlerData[0]
	lea (%rsi,%r10),%r10		# prologue label
	cmp %r10,%rbx			# context->Rip<prologue label
	jb .Lcommon_seh_tail

	mov 152($context),%rax		# pull context->Rsp

	mov 4(%r11),%r10d		# HandlerData[1]
	lea (%rsi,%r10),%r10		# epilogue label
	cmp %r10,%rbx			# context->Rip>=epilogue label
	jae .Lcommon_seh_tail

	mov 120($context),%rax		# pull context->Rax

	mov -48(%rax),%r15
	mov -40(%rax),%r14
	mov -32(%rax),%r13
	mov -24(%rax),%r12
	mov -16(%rax),%rbp
	mov -8(%rax),%rbx
	mov %r15,240($context)
	mov %r14,232($context)
	mov %r13,224($context)
	mov %r12,216($context)
	mov %rbp,160($context)
	mov %rbx,144($context)

	lea -0xd8(%rax),%rsi		# %xmm save area
	lea 512($context),%rdi		# & context.Xmm6
	mov \$20,%ecx			# 10*sizeof(%xmm0)/sizeof(%rax)
	.long 0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov 8(%rax),%rdi
	mov 16(%rax),%rsi
	mov %rax,152($context)		# restore context->Rsp
	mov %rsi,168($context)		# restore context->Rsi
	mov %rdi,176($context)		# restore context->Rdi

	mov 40($disp),%rdi		# disp->ContextRecord
	mov $context,%rsi		# context
	mov \$154,%ecx			# sizeof(CONTEXT)
	.long 0xa548f3fc		# cld; rep movsq

	mov $disp,%rsi
	xor %rcx,%rcx			# arg1, UNW_FLAG_NHANDLER
	mov 8(%rsi),%rdx		# arg2, disp->ImageBase
	mov 0(%rsi),%r8			# arg3, disp->ControlPc
	mov 16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov 40(%rsi),%r10		# disp->ContextRecord
	lea 56(%rsi),%r11		# &disp->HandlerData
	lea 24(%rsi),%r12		# &disp->EstablisherFrame
	mov %r10,32(%rsp)		# arg5
	mov %r11,40(%rsp)		# arg6
	mov %r12,48(%rsp)		# arg7
	mov %rcx,56(%rsp)		# arg8, (NULL)
	call *__imp_RtlVirtualUnwind(%rip)

	mov \$1,%eax			# ExceptionContinueSearch
	add \$64,%rsp
	popfq
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbp
	pop %rbx
	pop %rdi
	pop %rsi
	ret
.size gcm_se_handler,.-gcm_se_handler

.section .pdata
.align 4
	.rva .LSEH_begin_aesni_gcm_decrypt
	.rva .LSEH_end_aesni_gcm_decrypt
	.rva .LSEH_gcm_dec_info

	.rva .LSEH_begin_aesni_gcm_encrypt
	.rva .LSEH_end_aesni_gcm_encrypt
	.rva .LSEH_gcm_enc_info
.section .xdata
.align 8
.LSEH_gcm_dec_info:
	.byte 9,0,0,0
	.rva gcm_se_handler
	.rva .Lgcm_dec_body,.Lgcm_dec_abort
.LSEH_gcm_enc_info:
	.byte 9,0,0,0
	.rva gcm_se_handler
	.rva .Lgcm_enc_body,.Lgcm_enc_abort
___
}
}}} else {{{
$code=<<___;	# assembler is too old
.text

.globl aesni_gcm_encrypt
.type aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
	xor %eax,%eax
	ret
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt

.globl aesni_gcm_decrypt
.type aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
	xor %eax,%eax
	ret
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
}}}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;