#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# The OpenSSL GCM implementation is organized in such a way that its
# performance is rather close to the sum of its streamed components, in
# this context parallelized AES-NI CTR and modulo-scheduled
# PCLMULQDQ-enabled GHASH. Unfortunately, as no stitch implementation
# was observed to perform significantly better than the sum of the
# components on contemporary CPUs, the effort was deemed impossible to
# justify. This module is based on a combination of Intel submissions,
# [1] and [2], with a MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp., who verified that it reduces shuffle
# pressure with a notable relative improvement, achieving 1.0 cycle per
# byte processed with a 128-bit key on Haswell processors, and 0.74 on
# Broadwell. [Mentioned results are raw profiled measurements for a
# favourable packet size, one divisible by 96. Applications using the
# EVP interface will observe a few percent worse performance.]
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# This must be kept in sync with |$avx| in ghash-x86_64.pl; otherwise tags will
# be computed incorrectly.
#
# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
$avx = 0;

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
if ($avx>1) {{{

($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");

($Ii,$T1,$T2,$Hkey,
 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));

($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));

($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");

$code=<<___;
.text
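# _aesni_ctr32_ghash_6x is the stitched core: each iteration of .Loop6x
# encrypts six fresh counter blocks while folding the six previous
# ciphertext blocks (staged on the stack by the caller) into the GHASH
# state in $Xi. On entry $T1 holds the byte-swapped counter block and
# $len the remaining length in 16-byte blocks.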
  57. .type _aesni_ctr32_ghash_6x,\@abi-omnipotent
  58. .align 32
  59. _aesni_ctr32_ghash_6x:
  60. vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
  61. sub \$6,$len
  62. vpxor $Z0,$Z0,$Z0 # $Z0 = 0
  63. vmovdqu 0x00-0x80($key),$rndkey
  64. vpaddb $T2,$T1,$inout1
  65. vpaddb $T2,$inout1,$inout2
  66. vpaddb $T2,$inout2,$inout3
  67. vpaddb $T2,$inout3,$inout4
  68. vpaddb $T2,$inout4,$inout5
  69. vpxor $rndkey,$T1,$inout0
  70. vmovdqu $Z0,16+8(%rsp) # "$Z3" = 0
  71. jmp .Loop6x
  72. .align 32
  73. .Loop6x:
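	# $counter caches the last four bytes of the counter block without
	# byte-swapping, so the counter's least significant byte sits in the
	# register's most significant byte. Adding 6<<24 advances it by 6; a
	# carry out of the 32-bit add means one of the next six single-byte
	# vpaddb increments would wrap that byte, so .Lhandle_ctr32 redoes
	# the increments as full byte-swapped 32-bit additions.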
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32		# discard $inout[1-5]?
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddb	$T2,$inout5,$T1		# next counter value
	vpxor	$rndkey,$inout1,$inout1
	vpxor	$rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu	$T1,($ivp)		# save next counter value
	vpclmulqdq	\$0x10,$Hkey,$Z3,$Z1
	vpxor	$rndkey,$inout3,$inout3
	vmovups	0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq	\$0x01,$Hkey,$Z3,$Z2

	# At this point, the current block of 96 (0x60) bytes has already been
	# loaded into registers. Concurrently with processing it, we want to
	# load the next 96 bytes of input for the next round. Obviously, we can
	# only do this if there are at least 96 more bytes of input beyond the
	# input we're currently processing, or else we'd read past the end of
	# the input buffer. Here, we set |%r12| to 96 if there are at least 96
	# bytes of input beyond the 96 bytes we're already processing, and we
	# set |%r12| to 0 otherwise. In the case where we set |%r12| to 96,
	# we'll read in the next block so that it is in registers for the next
	# loop iteration. In the case where we set |%r12| to 0, we'll re-read
	# the current block and then ignore what we re-read.
	#
	# At this point, |$in0| points to the current (already read into
	# registers) block, and |$end0| points to 2*96 bytes before the end of
	# the input. Thus, |$in0| > |$end0| means that we do not have the next
	# 96-byte block to read in, and |$in0| <= |$end0| means we do.
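	#
	# A rough C sketch of this bookkeeping (illustrative only, not part
	# of the generated code):
	#
	#	uint64_t r12 = (in0 <= end0) ? 96 : 0;	/* cmp; setnc */
	#	in0 += r12;				/* lea */
	#
	# The setnc/neg/and sequence below computes the same value
	# branchlessly: setnc yields 0 or 1, neg turns that into 0 or ~0,
	# and masking with 0x60 leaves 0 or 96.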
	xor	%r12,%r12
	cmp	$in0,$end0
	vaesenc	$T2,$inout0,$inout0
	vmovdqu	0x30+8(%rsp),$Ii	# I[4]
	vpxor	$rndkey,$inout4,$inout4
	vpclmulqdq	\$0x00,$Hkey,$Z3,$T1
	vaesenc	$T2,$inout1,$inout1
	vpxor	$rndkey,$inout5,$inout5
	setnc	%r12b
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vaesenc	$T2,$inout2,$inout2
	vmovdqu	0x10-0x20($Xip),$Hkey	# $Hkey^2
	neg	%r12
	vaesenc	$T2,$inout3,$inout3
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Z1
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled
	vaesenc	$T2,$inout4,$inout4
	vpxor	$Z1,$T1,$Z0
	and	\$0x60,%r12
	vmovups	0x20-0x80($key),$rndkey
	vpclmulqdq	\$0x10,$Hkey,$Ii,$T1
	vaesenc	$T2,$inout5,$inout5
	vpclmulqdq	\$0x01,$Hkey,$Ii,$T2
	lea	($in0,%r12),$in0
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Hkey
	vmovdqu	0x40+8(%rsp),$Ii	# I[3]
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x58($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x50($in0),%r12
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x20+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x28+8(%rsp)
	vmovdqu	0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x30-0x80($key),$rndkey
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Z1,$Ii,$T1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x10,$Z1,$Ii,$T2
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x01,$Z1,$Ii,$Hkey
	vaesenc	$rndkey,$inout2,$inout2
	vpclmulqdq	\$0x11,$Z1,$Ii,$Z1
	vmovdqu	0x50+8(%rsp),$Ii	# I[2]
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	$T1,$Z0,$Z0
	vmovdqu	0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x40-0x80($key),$rndkey
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x00,$T1,$Ii,$T2
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x10,$T1,$Ii,$Hkey
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x48($in0),%r13
	vpxor	$Z1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T1,$Ii,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x40($in0),%r12
	vpclmulqdq	\$0x11,$T1,$Ii,$T1
	vmovdqu	0x60+8(%rsp),$Ii	# I[1]
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x30+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x38+8(%rsp)
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x50-0x80($key),$rndkey
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x00,$T2,$Ii,$Hkey
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$T2,$Ii,$Z1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x38($in0),%r13
	vpxor	$T1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T2,$Ii,$T1
	vpxor	0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x30($in0),%r12
	vpclmulqdq	\$0x11,$T2,$Ii,$T2
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x40+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x48+8(%rsp)
	vpxor	$Hkey,$Z0,$Z0
	vmovdqu	0x70-0x20($Xip),$Hkey	# $Hkey^6
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x60-0x80($key),$rndkey
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Z1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x01,$Hkey,$Xi,$T1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x28($in0),%r13
	vpxor	$T2,$Z3,$Z3
	vpclmulqdq	\$0x00,$Hkey,$Xi,$T2
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x20($in0),%r12
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xi
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x50+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x58+8(%rsp)
	vpxor	$Z1,$Z2,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	$T1,$Z2,$Z2
	vmovups	0x70-0x80($key),$rndkey
	vpslldq	\$8,$Z2,$Z1
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Xi,$Z3,$Z3
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Z1,$Z0,$Z0
	movbe	0x18($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x10($in0),%r12
	vpalignr	\$8,$Z0,$Z0,$Ii	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	mov	%r13,0x60+8(%rsp)
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r12,0x68+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	vmovups	0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vmovups	0x90-0x80($key),$rndkey
	vaesenc	$T1,$inout1,$inout1
	vpsrldq	\$8,$Z2,$Z2
	vaesenc	$T1,$inout2,$inout2
	vpxor	$Z2,$Z3,$Z3
	vaesenc	$T1,$inout3,$inout3
	vpxor	$Ii,$Z0,$Z0
	movbe	0x08($in0),%r13
	vaesenc	$T1,$inout4,$inout4
	movbe	0x00($in0),%r12
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xa0-0x80($key),$T1
	cmp	\$11,$rounds
	jb	.Lenc_tail		# 128-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xb0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xc0-0x80($key),$T1
	je	.Lenc_tail		# 192-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xd0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xe0-0x80($key),$T1
	jmp	.Lenc_tail		# 256-bit key

.align	32
.Lhandle_ctr32:
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$rndkey,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$rndkey,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpshufb	$Ii,$inout5,$inout5
	vpshufb	$Ii,$T1,$T1		# next counter value
	jmp	.Lresume_ctr32

.align	32
.Lenc_tail:
	vaesenc	$rndkey,$inout0,$inout0
	vmovdqu	$Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr	\$8,$Z0,$Z0,$Xi	# 2nd phase
	vaesenc	$rndkey,$inout1,$inout1
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	vpxor	0x00($inp),$T1,$T2
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x10($inp),$T1,$Ii
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x20($inp),$T1,$Z1
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x30($inp),$T1,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x40($inp),$T1,$Z3
	vpxor	0x50($inp),$T1,$Hkey
	vmovdqu	($ivp),$T1		# load next counter value
	vaesenclast	$T2,$inout0,$inout0
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	vaesenclast	$Ii,$inout1,$inout1
	vpaddb	$T2,$T1,$Ii
	mov	%r13,0x70+8(%rsp)
	lea	0x60($inp),$inp
	vaesenclast	$Z1,$inout2,$inout2
	vpaddb	$T2,$Ii,$Z1
	mov	%r12,0x78+8(%rsp)
	lea	0x60($out),$out
	vmovdqu	0x00-0x80($key),$rndkey
	vaesenclast	$Z2,$inout3,$inout3
	vpaddb	$T2,$Z1,$Z2
	vaesenclast	$Z3,$inout4,$inout4
	vpaddb	$T2,$Z2,$Z3
	vaesenclast	$Hkey,$inout5,$inout5
	vpaddb	$T2,$Z3,$Hkey

	add	\$0x60,$ret
	sub	\$0x6,$len
	jc	.L6x_done

	vmovups	$inout0,-0x60($out)	# save output
	vpxor	$rndkey,$T1,$inout0
	vmovups	$inout1,-0x50($out)
	vmovdqa	$Ii,$inout1		# 0 latency
	vmovups	$inout2,-0x40($out)
	vmovdqa	$Z1,$inout2		# 0 latency
	vmovups	$inout3,-0x30($out)
	vmovdqa	$Z2,$inout3		# 0 latency
	vmovups	$inout4,-0x20($out)
	vmovdqa	$Z3,$inout4		# 0 latency
	vmovups	$inout5,-0x10($out)
	vmovdqa	$Hkey,$inout5		# 0 latency
	vmovdqu	0x20+8(%rsp),$Z3	# I[5]
	jmp	.Loop6x

.L6x_done:
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled
	ret
.size	_aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#		const AES_KEY *key, unsigned char iv[16],
#		struct { u128 Xi,H,Htbl[9]; } *Xip);
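#
# Both functions return the number of bytes actually processed, always a
# multiple of 96 and possibly less than |len| (0 if |len| is below the
# minimum). A sketch of the expected calling pattern from C, assuming a
# hypothetical |fallback_ctr32_ghash| that stands in for the caller's
# generic one-block-at-a-time CTR+GHASH path:
#
#	size_t done = aesni_gcm_decrypt(inp, out, len, key, iv, Xip);
#	if (done < len) {
#		/* Process the remaining len - done bytes generically. */
#		fallback_ctr32_ghash(inp + done, out + done, len - done,
#				     key, iv, Xip);
#	}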
$code.=<<___;
.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@function,6
.align	32
aesni_gcm_decrypt:
	xor	$ret,$ret

	# We call |_aesni_ctr32_ghash_6x|, which requires at least 96 (0x60)
	# bytes of input.
	cmp	\$0x60,$len		# minimal accepted length
	jb	.Lgcm_dec_abort

	lea	(%rsp),%rax		# save stack pointer
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_dec_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	vmovdqu	($Xip),$Xi		# load Xi
	and	\$-128,%rsp		# ensure stack alignment
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	lea	0x80($key),$key		# size optimization
	lea	0x20+0x20($Xip),$Xip	# size optimization
	mov	0xf0-0x80($key),$rounds
	vpshufb	$Ii,$Xi,$Xi
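	# The following guards against the stack frame landing at the same
	# offset within a 4K page as the expanded key: the 0xf80 mask keeps
	# bits 7-11 of each address. If the stack's page offset is less than
	# 768 bytes above the key schedule's, %rsp is moved down by the
	# difference so that round-key loads and stack spills do not
	# false-alias each other (4K aliasing) in the load/store unit.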
	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Ldec_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Ldec_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Ldec_no_key_aliasing:

	vmovdqu	0x50($inp),$Z3		# I[5]
	lea	($inp),$in0
	vmovdqu	0x40($inp),$Z0

	# |_aesni_ctr32_ghash_6x| requires |$end0| to point to 2*96 (0xc0)
	# bytes before the end of the input. Note, in particular, that this is
	# correct even if |$len| is not an even multiple of 96 or 16. XXX: This
	# seems to require that |$inp| + |$len| >= 2*96 (0xc0); i.e. |$inp| must
	# not be near the very beginning of the address space when |$len| < 2*96
	# (0xc0).
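	#
	# Illustrative arithmetic: with |$len| = 0x60 (a single 96-byte
	# group), |$end0| = |$inp| - 0x60, so |$in0| > |$end0| on the only
	# iteration and nothing is read ahead; the final group of any input
	# is handled the same way, with the loop re-reading (and discarding)
	# the current block rather than reading past the end of the buffer.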
	lea	-0xc0($inp,$len),$end0
	vmovdqu	0x30($inp),$Z1
	shr	\$4,$len
	xor	$ret,$ret
	vmovdqu	0x20($inp),$Z2
	vpshufb	$Ii,$Z3,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	0x10($inp),$T2
	vpshufb	$Ii,$Z0,$Z0
	vmovdqu	($inp),$Hkey
	vpshufb	$Ii,$Z1,$Z1
	vmovdqu	$Z0,0x30(%rsp)
	vpshufb	$Ii,$Z2,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$T2,$T2
	vmovdqu	$Z2,0x50(%rsp)
	vpshufb	$Ii,$Hkey,$Hkey
	vmovdqu	$T2,0x60(%rsp)
	vmovdqu	$Hkey,0x70(%rsp)

	call	_aesni_ctr32_ghash_6x

	vmovups	$inout0,-0x60($out)	# save output
	vmovups	$inout1,-0x50($out)
	vmovups	$inout2,-0x40($out)
	vmovups	$inout3,-0x30($out)
	vmovups	$inout4,-0x20($out)
	vmovups	$inout5,-0x10($out)

	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp		# restore %rsp
.Lgcm_dec_abort:
	mov	$ret,%rax		# return value
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___

$code.=<<___;
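# _aesni_ctr32_6x encrypts exactly six counter blocks (96 bytes) and
# writes the ciphertext to |$out|, with no GHASH work. aesni_gcm_encrypt
# below calls it twice to produce the first 192 bytes of ciphertext, so
# that _aesni_ctr32_ghash_6x always has a full 96 bytes of ciphertext to
# hash while it encrypts the next 96.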
.type	_aesni_ctr32_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_6x:
	vmovdqu	0x00-0x80($key),$Z0	# borrow $Z0 for $rndkey
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	lea	-1($rounds),%r13
	vmovups	0x10-0x80($key),$rndkey
	lea	0x20-0x80($key),%r12
	vpxor	$Z0,$T1,$inout0
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32_2
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddb	$T2,$inout2,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddb	$T2,$inout3,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpaddb	$T2,$inout4,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpaddb	$T2,$inout5,$T1
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32

.align	16
.Loop_ctr32:
	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	(%r12),$rndkey
	lea	0x10(%r12),%r12
	dec	%r13d
	jnz	.Loop_ctr32

	vmovdqu	(%r12),$Hkey		# last round key
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	0x00($inp),$Hkey,$Z0
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	0x10($inp),$Hkey,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x20($inp),$Hkey,$Z2
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x30($inp),$Hkey,$Xi
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x40($inp),$Hkey,$T2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x50($inp),$Hkey,$Hkey
	lea	0x60($inp),$inp
	vaesenclast	$Z0,$inout0,$inout0
	vaesenclast	$Z1,$inout1,$inout1
	vaesenclast	$Z2,$inout2,$inout2
	vaesenclast	$Xi,$inout3,$inout3
	vaesenclast	$T2,$inout4,$inout4
	vaesenclast	$Hkey,$inout5,$inout5
	vmovups	$inout0,0x00($out)
	vmovups	$inout1,0x10($out)
	vmovups	$inout2,0x20($out)
	vmovups	$inout3,0x30($out)
	vmovups	$inout4,0x40($out)
	vmovups	$inout5,0x50($out)
	lea	0x60($out),$out
	ret

.align	32
.Lhandle_ctr32_2:
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpshufb	$Ii,$inout5,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpshufb	$Ii,$T1,$T1		# next counter value
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32
.size	_aesni_ctr32_6x,.-_aesni_ctr32_6x

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@function,6
.align	32
aesni_gcm_encrypt:
	xor	$ret,$ret

	# We call |_aesni_ctr32_6x| twice, each call consuming 96 bytes of
	# input. Then we call |_aesni_ctr32_ghash_6x|, which requires at
	# least 96 more bytes of input.
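	# Hence the minimum accepted length below: 0x60*3 = 288 bytes, two
	# 96-byte groups for the priming calls plus at least one group for
	# the stitched loop.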
	cmp	\$0x60*3,$len		# minimal accepted length
	jb	.Lgcm_enc_abort

	lea	(%rsp),%rax		# save stack pointer
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_enc_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	lea	0x80($key),$key		# size optimization
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	and	\$-128,%rsp		# ensure stack alignment
	mov	0xf0-0x80($key),$rounds

	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Lenc_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Lenc_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Lenc_no_key_aliasing:

	lea	($out),$in0

	# |_aesni_ctr32_ghash_6x| requires |$end0| to point to 2*96 (0xc0)
	# bytes before the end of the input. Note, in particular, that this is
	# correct even if |$len| is not an even multiple of 96 or 16. Unlike in
	# the decryption case, there's no caveat that |$out| must not be near
	# the very beginning of the address space, because we know that
	# |$len| >= 3*96 from the check above, and so we know
	# |$out| + |$len| >= 2*96 (0xc0).
	lea	-0xc0($out,$len),$end0

	shr	\$4,$len
	call	_aesni_ctr32_6x
	vpshufb	$Ii,$inout0,$Xi		# save bswapped output on stack
	vpshufb	$Ii,$inout1,$T2
	vmovdqu	$Xi,0x70(%rsp)
	vpshufb	$Ii,$inout2,$Z0
	vmovdqu	$T2,0x60(%rsp)
	vpshufb	$Ii,$inout3,$Z1
	vmovdqu	$Z0,0x50(%rsp)
	vpshufb	$Ii,$inout4,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$inout5,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	$Z2,0x30(%rsp)
	call	_aesni_ctr32_6x

	vmovdqu	($Xip),$Xi		# load Xi
	lea	0x20+0x20($Xip),$Xip	# size optimization
	sub	\$12,$len
	mov	\$0x60*2,$ret
	vpshufb	$Ii,$Xi,$Xi

	call	_aesni_ctr32_ghash_6x
	vmovdqu	0x20(%rsp),$Z3		# I[5]
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$Z3,$Z3,$T1
	vmovdqu	0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
	vmovups	$inout0,-0x60($out)	# save output
	vpshufb	$Ii,$inout0,$inout0	# but keep bswapped copy
	vpxor	$Z3,$T1,$T1
	vmovups	$inout1,-0x50($out)
	vpshufb	$Ii,$inout1,$inout1
	vmovups	$inout2,-0x40($out)
	vpshufb	$Ii,$inout2,$inout2
	vmovups	$inout3,-0x30($out)
	vpshufb	$Ii,$inout3,$inout3
	vmovups	$inout4,-0x20($out)
	vpshufb	$Ii,$inout4,$inout4
	vmovups	$inout5,-0x10($out)
	vpshufb	$Ii,$inout5,$inout5
	vmovdqu	$inout0,0x10(%rsp)	# free $inout0
___
{ my ($HK,$T3)=($rndkey,$inout0);

$code.=<<___;
	vmovdqu	0x30(%rsp),$Z2		# I[4]
	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpunpckhqdq	$Z2,$Z2,$T2
	vpclmulqdq	\$0x00,$Hkey,$Z3,$Z1
	vpxor	$Z2,$T2,$T2
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x00,$HK,$T1,$T1
	vmovdqu	0x40(%rsp),$T3		# I[3]
	vpclmulqdq	\$0x00,$Ii,$Z2,$Z0
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$T3,$T3,$Z1
	vpclmulqdq	\$0x11,$Ii,$Z2,$Z2
	vpxor	$T3,$Z1,$Z1
	vpxor	$Z3,$Z2,$Z2
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vmovdqu	0x50(%rsp),$T1		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$T3,$Z3
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z0,$Z3,$Z3
	vpunpckhqdq	$T1,$T1,$Z0
	vpclmulqdq	\$0x11,$Hkey,$T3,$T3
	vpxor	$T1,$Z0,$Z0
	vpxor	$Z2,$T3,$T3
	vpclmulqdq	\$0x00,$HK,$Z1,$Z1
	vpxor	$T2,$Z1,$Z1

	vmovdqu	0x60(%rsp),$T2		# I[1]
	vpclmulqdq	\$0x00,$Ii,$T1,$Z2
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z3,$Z2,$Z2
	vpunpckhqdq	$T2,$T2,$Z3
	vpclmulqdq	\$0x11,$Ii,$T1,$T1
	vpxor	$T2,$Z3,$Z3
	vpxor	$T3,$T1,$T1
	vpclmulqdq	\$0x10,$HK,$Z0,$Z0
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$Z1,$Z0,$Z0

	vpxor	0x70(%rsp),$Xi,$Xi	# accumulate I[0]
	vpclmulqdq	\$0x00,$Hkey,$T2,$Z1
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpunpckhqdq	$Xi,$Xi,$T3
	vpxor	$Z2,$Z1,$Z1
	vpclmulqdq	\$0x11,$Hkey,$T2,$T2
	vpxor	$Xi,$T3,$T3
	vpxor	$T1,$T2,$T2
	vpclmulqdq	\$0x00,$HK,$Z3,$Z3
	vpxor	$Z0,$Z3,$Z0

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$inout5,$inout5,$T1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Xi
	vpxor	$inout5,$T1,$T1
	vpxor	$Z1,$Z2,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$T3
	vmovdqu	0x20-0x20($Xip),$HK
	vpxor	$T2,$Xi,$Z3
	vpxor	$Z0,$T3,$Z2

	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpxor	$Z1,$Z3,$T3		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$inout5,$Z0
	vpxor	$T3,$Z2,$Z2
	vpunpckhqdq	$inout4,$inout4,$T2
	vpclmulqdq	\$0x11,$Hkey,$inout5,$inout5
	vpxor	$inout4,$T2,$T2
	vpslldq	\$8,$Z2,$T3
	vpclmulqdq	\$0x00,$HK,$T1,$T1
	vpxor	$T3,$Z1,$Xi
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$Z2,$Z3,$Z3

	vpclmulqdq	\$0x00,$Ii,$inout4,$Z1
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout3,$inout3,$T3
	vpclmulqdq	\$0x11,$Ii,$inout4,$inout4
	vpxor	$inout3,$T3,$T3
	vpxor	$inout5,$inout4,$inout4
	vpalignr	\$8,$Xi,$Xi,$inout5	# 1st phase
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Hkey,$inout3,$Z0
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$inout2,$inout2,$T1
	vpclmulqdq	\$0x11,$Hkey,$inout3,$inout3
	vpxor	$inout2,$T1,$T1
	vpxor	$inout4,$inout3,$inout3
	vxorps	0x10(%rsp),$Z3,$Z3	# accumulate $inout0
	vpclmulqdq	\$0x00,$HK,$T3,$T3
	vpxor	$T2,$T3,$T3
	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Ii,$inout2,$Z1
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout1,$inout1,$T2
	vpclmulqdq	\$0x11,$Ii,$inout2,$inout2
	vpxor	$inout1,$T2,$T2
	vpalignr	\$8,$Xi,$Xi,$inout5	# 2nd phase
	vpxor	$inout3,$inout2,$inout2
	vpclmulqdq	\$0x10,$HK,$T1,$T1
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$T3,$T1,$T1
	vxorps	$Z3,$inout5,$inout5
	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Hkey,$inout1,$Z0
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$Xi,$Xi,$T3
	vpclmulqdq	\$0x11,$Hkey,$inout1,$inout1
	vpxor	$Xi,$T3,$T3
	vpxor	$inout2,$inout1,$inout1
	vpclmulqdq	\$0x00,$HK,$T2,$T2
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Z3
	vpxor	$Z0,$Z1,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$Z2
	vpxor	$inout1,$Z3,$Z3
	vpxor	$T2,$Z2,$Z2
	vpxor	$Z1,$Z3,$Z0		# aggregated Karatsuba post-processing
	vpxor	$Z0,$Z2,$Z2
	vpslldq	\$8,$Z2,$T1
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$T1,$Z1,$Xi
	vpxor	$Z2,$Z3,$Z3

	vpalignr	\$8,$Xi,$Xi,$T2	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	vpalignr	\$8,$Xi,$Xi,$T2	# 2nd phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$Z3,$T2,$T2
	vpxor	$T2,$Xi,$Xi
___
}
$code.=<<___;
	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp		# restore %rsp
.Lgcm_enc_abort:
	mov	$ret,%rax		# return value
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt
___

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
	.byte	2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
	.asciz	"AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___
.extern	__imp_RtlVirtualUnwind
.type	gcm_se_handler,\@abi-omnipotent
.align	16
gcm_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	120($context),%rax	# pull context->Rax

	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	mov	%r15,240($context)
	mov	%r14,232($context)
	mov	%r13,224($context)
	mov	%r12,216($context)
	mov	%rbp,160($context)
	mov	%rbx,144($context)

	lea	-0xd8(%rax),%rsi	# %xmm save area
	lea	512($context),%rdi	# & context.Xmm6
	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	gcm_se_handler,.-gcm_se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_aesni_gcm_decrypt
	.rva	.LSEH_end_aesni_gcm_decrypt
	.rva	.LSEH_gcm_dec_info

	.rva	.LSEH_begin_aesni_gcm_encrypt
	.rva	.LSEH_end_aesni_gcm_encrypt
	.rva	.LSEH_gcm_enc_info

.section	.xdata
.align	8
.LSEH_gcm_dec_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_dec_body,.Lgcm_dec_abort
.LSEH_gcm_enc_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_enc_body,.Lgcm_enc_abort
___
}
}}} else {{{
$code=<<___;	# assembler is too old
.text

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt

.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
}}}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;