
aesni-gcm-x86_64.pl

#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# The OpenSSL GCM implementation is organized in such a way that its
# performance is rather close to the sum of its streamed components,
# in this context parallelized AES-NI CTR and modulo-scheduled
# PCLMULQDQ-enabled GHASH. Unfortunately, as no stitch implementation
# was observed to perform significantly better than the sum of the
# components on contemporary CPUs, the effort was deemed impossible to
# justify. This module is based on a combination of Intel submissions,
# [1] and [2], with a MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp., who verified that it reduces shuffle
# pressure with notable relative improvement, achieving 1.0 cycle per
# byte processed with a 128-bit key on Haswell, and 0.74 on Broadwell.
# [Mentioned results are raw profiled measurements for a favourable
# packet size, one divisible by 96. Applications using the EVP
# interface will observe a few percent worse performance.]
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
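
# $avx ends up 0, 1 or 2 depending on the assembler's detected
# capabilities; the stitched implementation below is emitted only when
# $avx>1, otherwise the stub functions at the bottom of this file are
# generated instead.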
if ($avx>1) {{{

($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");

($Ii,$T1,$T2,$Hkey,
 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));

($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));

($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");

$code=<<___;
.text
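
# _aesni_ctr32_ghash_6x processes six 16-byte blocks per iteration:
# while six counter blocks make their way through the AES round keys,
# the previous six ciphertext blocks (stashed on the stack as I[0..5])
# are folded into the hash accumulator with the precomputed powers
# H^1..H^6 of the hash key and a single aggregated reduction.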
.type	_aesni_ctr32_ghash_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_ghash_6x:
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	sub	\$6,$len
	vpxor	$Z0,$Z0,$Z0		# $Z0 = 0
	vmovdqu	0x00-0x80($key),$rndkey
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpaddb	$T2,$inout2,$inout3
	vpaddb	$T2,$inout3,$inout4
	vpaddb	$T2,$inout4,$inout5
	vpxor	$rndkey,$T1,$inout0
	vmovdqu	$Z0,16+8(%rsp)		# "$Z3" = 0
	jmp	.Loop6x
.align	32
.Loop6x:
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32		# discard $inout[1-5]?
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddb	$T2,$inout5,$T1		# next counter value
	vpxor	$rndkey,$inout1,$inout1
	vpxor	$rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu	$T1,($ivp)		# save next counter value
	vpclmulqdq	\$0x10,$Hkey,$Z3,$Z1
	vpxor	$rndkey,$inout3,$inout3
	vmovups	0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq	\$0x01,$Hkey,$Z3,$Z2
	xor	%r12,%r12
	cmp	$in0,$end0

	vaesenc	$T2,$inout0,$inout0
	vmovdqu	0x30+8(%rsp),$Ii	# I[4]
	vpxor	$rndkey,$inout4,$inout4
	vpclmulqdq	\$0x00,$Hkey,$Z3,$T1
	vaesenc	$T2,$inout1,$inout1
	vpxor	$rndkey,$inout5,$inout5
	setnc	%r12b
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vaesenc	$T2,$inout2,$inout2
	vmovdqu	0x10-0x20($Xip),$Hkey	# $Hkey^2
	neg	%r12
	vaesenc	$T2,$inout3,$inout3
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Z1
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled
	vaesenc	$T2,$inout4,$inout4
	vpxor	$Z1,$T1,$Z0
	and	\$0x60,%r12
	vmovups	0x20-0x80($key),$rndkey
	vpclmulqdq	\$0x10,$Hkey,$Ii,$T1
	vaesenc	$T2,$inout5,$inout5

	vpclmulqdq	\$0x01,$Hkey,$Ii,$T2
	lea	($in0,%r12),$in0
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Hkey
	vmovdqu	0x40+8(%rsp),$Ii	# I[3]
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x58($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x50($in0),%r12
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x20+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x28+8(%rsp)
	vmovdqu	0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x30-0x80($key),$rndkey
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Z1,$Ii,$T1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x10,$Z1,$Ii,$T2
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x01,$Z1,$Ii,$Hkey
	vaesenc	$rndkey,$inout2,$inout2
	vpclmulqdq	\$0x11,$Z1,$Ii,$Z1
	vmovdqu	0x50+8(%rsp),$Ii	# I[2]
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	$T1,$Z0,$Z0
	vmovdqu	0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x40-0x80($key),$rndkey
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x00,$T1,$Ii,$T2
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x10,$T1,$Ii,$Hkey
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x48($in0),%r13
	vpxor	$Z1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T1,$Ii,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x40($in0),%r12
	vpclmulqdq	\$0x11,$T1,$Ii,$T1
	vmovdqu	0x60+8(%rsp),$Ii	# I[1]
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x30+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x38+8(%rsp)
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x50-0x80($key),$rndkey
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x00,$T2,$Ii,$Hkey
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$T2,$Ii,$Z1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x38($in0),%r13
	vpxor	$T1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T2,$Ii,$T1
	vpxor	0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x30($in0),%r12
	vpclmulqdq	\$0x11,$T2,$Ii,$T2
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x40+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x48+8(%rsp)
	vpxor	$Hkey,$Z0,$Z0
	vmovdqu	0x70-0x20($Xip),$Hkey	# $Hkey^6
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x60-0x80($key),$rndkey
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Z1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x01,$Hkey,$Xi,$T1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x28($in0),%r13
	vpxor	$T2,$Z3,$Z3
	vpclmulqdq	\$0x00,$Hkey,$Xi,$T2
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x20($in0),%r12
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xi
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x50+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x58+8(%rsp)
	vpxor	$Z1,$Z2,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	$T1,$Z2,$Z2

	vmovups	0x70-0x80($key),$rndkey
	vpslldq	\$8,$Z2,$Z1
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Xi,$Z3,$Z3
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Z1,$Z0,$Z0
	movbe	0x18($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x10($in0),%r12
	vpalignr	\$8,$Z0,$Z0,$Ii	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	mov	%r13,0x60+8(%rsp)
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r12,0x68+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	vmovups	0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vmovups	0x90-0x80($key),$rndkey
	vaesenc	$T1,$inout1,$inout1
	vpsrldq	\$8,$Z2,$Z2
	vaesenc	$T1,$inout2,$inout2
	vpxor	$Z2,$Z3,$Z3
	vaesenc	$T1,$inout3,$inout3
	vpxor	$Ii,$Z0,$Z0
	movbe	0x08($in0),%r13
	vaesenc	$T1,$inout4,$inout4
	movbe	0x00($in0),%r12
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xa0-0x80($key),$T1
	cmp	\$11,$rounds
	jb	.Lenc_tail		# 128-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xb0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xc0-0x80($key),$T1
	je	.Lenc_tail		# 192-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xd0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xe0-0x80($key),$T1
	jmp	.Lenc_tail		# 256-bit key
.align	32
.Lhandle_ctr32:
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$rndkey,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$rndkey,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpshufb	$Ii,$inout5,$inout5
	vpshufb	$Ii,$T1,$T1		# next counter value
	jmp	.Lresume_ctr32
.align	32
.Lenc_tail:
	vaesenc	$rndkey,$inout0,$inout0
	vmovdqu	$Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr	\$8,$Z0,$Z0,$Xi	# 2nd phase
	vaesenc	$rndkey,$inout1,$inout1
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	vpxor	0x00($inp),$T1,$T2
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x10($inp),$T1,$Ii
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x20($inp),$T1,$Z1
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x30($inp),$T1,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x40($inp),$T1,$Z3
	vpxor	0x50($inp),$T1,$Hkey

	vmovdqu	($ivp),$T1		# load next counter value
	vaesenclast	$T2,$inout0,$inout0
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	vaesenclast	$Ii,$inout1,$inout1
	vpaddb	$T2,$T1,$Ii
	mov	%r13,0x70+8(%rsp)
	lea	0x60($inp),$inp
	vaesenclast	$Z1,$inout2,$inout2
	vpaddb	$T2,$Ii,$Z1
	mov	%r12,0x78+8(%rsp)
	lea	0x60($out),$out
	vmovdqu	0x00-0x80($key),$rndkey
	vaesenclast	$Z2,$inout3,$inout3
	vpaddb	$T2,$Z1,$Z2
	vaesenclast	$Z3,$inout4,$inout4
	vpaddb	$T2,$Z2,$Z3
	vaesenclast	$Hkey,$inout5,$inout5
	vpaddb	$T2,$Z3,$Hkey

	add	\$0x60,$ret
	sub	\$0x6,$len
	jc	.L6x_done

	vmovups	$inout0,-0x60($out)	# save output
	vpxor	$rndkey,$T1,$inout0
	vmovups	$inout1,-0x50($out)
	vmovdqa	$Ii,$inout1		# 0 latency
	vmovups	$inout2,-0x40($out)
	vmovdqa	$Z1,$inout2		# 0 latency
	vmovups	$inout3,-0x30($out)
	vmovdqa	$Z2,$inout3		# 0 latency
	vmovups	$inout4,-0x20($out)
	vmovdqa	$Z3,$inout4		# 0 latency
	vmovups	$inout5,-0x10($out)
	vmovdqa	$Hkey,$inout5		# 0 latency
	vmovdqu	0x20+8(%rsp),$Z3	# I[5]
	jmp	.Loop6x

.L6x_done:
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled

	ret
.size	_aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#		const AES_KEY *key, unsigned char iv[16],
#		struct { u128 Xi,H,Htbl[9]; } *Xip);
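#
# Both entry points return the number of bytes actually processed (a
# multiple of 96, or 0 if the input is below the minimal accepted
# length); the caller is expected to finish the remaining tail by other
# means. A minimal sketch of a hypothetical C caller:
#
#	size_t done = aesni_gcm_encrypt(in, out, len, key, ivec, Xip);
#	in += done; out += done; len -= done;	/* finish tail elsewhere */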
$code.=<<___;
.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@function,6
.align	32
aesni_gcm_decrypt:
	xor	$ret,$ret
	cmp	\$0x60,$len		# minimal accepted length
	jb	.Lgcm_dec_abort

	lea	(%rsp),%rax		# save stack pointer
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_dec_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	vmovdqu	($Xip),$Xi		# load Xi
	and	\$-128,%rsp		# ensure stack alignment
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	lea	0x80($key),$key		# size optimization
	lea	0x20+0x20($Xip),$Xip	# size optimization
	mov	0xf0-0x80($key),$rounds
	vpshufb	$Ii,$Xi,$Xi
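
	# Nudge %rsp further down when the key schedule and the stack
	# scratch area land too close together modulo 4KB: the 0xf80 mask
	# compares their page offsets at 128-byte granularity, presumably
	# keeping round-key loads and scratch stores out of the same
	# cache sets.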
	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Ldec_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Ldec_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Ldec_no_key_aliasing:

	vmovdqu	0x50($inp),$Z3		# I[5]
	lea	($inp),$in0
	vmovdqu	0x40($inp),$Z0
	lea	-0xc0($inp,$len),$end0
	vmovdqu	0x30($inp),$Z1
	shr	\$4,$len
	xor	$ret,$ret
	vmovdqu	0x20($inp),$Z2
	vpshufb	$Ii,$Z3,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	0x10($inp),$T2
	vpshufb	$Ii,$Z0,$Z0
	vmovdqu	($inp),$Hkey
	vpshufb	$Ii,$Z1,$Z1
	vmovdqu	$Z0,0x30(%rsp)
	vpshufb	$Ii,$Z2,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$T2,$T2
	vmovdqu	$Z2,0x50(%rsp)
	vpshufb	$Ii,$Hkey,$Hkey
	vmovdqu	$T2,0x60(%rsp)
	vmovdqu	$Hkey,0x70(%rsp)

	call	_aesni_ctr32_ghash_6x

	vmovups	$inout0,-0x60($out)	# save output
	vmovups	$inout1,-0x50($out)
	vmovups	$inout2,-0x40($out)
	vmovups	$inout3,-0x30($out)
	vmovups	$inout4,-0x20($out)
	vmovups	$inout5,-0x10($out)

	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp		# restore %rsp
.Lgcm_dec_abort:
	mov	$ret,%rax		# return value
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___

$code.=<<___;
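# _aesni_ctr32_6x is a plain (unstitched) six-block AES-NI CTR helper;
# the encrypt path runs it twice up front to produce the first twelve
# blocks of ciphertext, giving the stitched loop a hashing backlog.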
.type	_aesni_ctr32_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_6x:
	vmovdqu	0x00-0x80($key),$Z0	# borrow $Z0 for $rndkey
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	lea	-1($rounds),%r13
	vmovups	0x10-0x80($key),$rndkey
	lea	0x20-0x80($key),%r12
	vpxor	$Z0,$T1,$inout0
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32_2
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddb	$T2,$inout2,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddb	$T2,$inout3,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpaddb	$T2,$inout4,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpaddb	$T2,$inout5,$T1
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32

.align	16
.Loop_ctr32:
	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	(%r12),$rndkey
	lea	0x10(%r12),%r12
	dec	%r13d
	jnz	.Loop_ctr32

	vmovdqu	(%r12),$Hkey		# last round key
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	0x00($inp),$Hkey,$Z0
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	0x10($inp),$Hkey,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x20($inp),$Hkey,$Z2
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x30($inp),$Hkey,$Xi
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x40($inp),$Hkey,$T2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x50($inp),$Hkey,$Hkey
	lea	0x60($inp),$inp

	vaesenclast	$Z0,$inout0,$inout0
	vaesenclast	$Z1,$inout1,$inout1
	vaesenclast	$Z2,$inout2,$inout2
	vaesenclast	$Xi,$inout3,$inout3
	vaesenclast	$T2,$inout4,$inout4
	vaesenclast	$Hkey,$inout5,$inout5
	vmovups	$inout0,0x00($out)
	vmovups	$inout1,0x10($out)
	vmovups	$inout2,0x20($out)
	vmovups	$inout3,0x30($out)
	vmovups	$inout4,0x40($out)
	vmovups	$inout5,0x50($out)
	lea	0x60($out),$out

	ret
.align	32
.Lhandle_ctr32_2:
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpshufb	$Ii,$inout5,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpshufb	$Ii,$T1,$T1		# next counter value
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32
.size	_aesni_ctr32_6x,.-_aesni_ctr32_6x
.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@function,6
.align	32
aesni_gcm_encrypt:
	xor	$ret,$ret
	cmp	\$0x60*3,$len		# minimal accepted length
	jb	.Lgcm_enc_abort

	lea	(%rsp),%rax		# save stack pointer
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_enc_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	lea	0x80($key),$key		# size optimization
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	and	\$-128,%rsp		# ensure stack alignment
	mov	0xf0-0x80($key),$rounds

	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Lenc_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Lenc_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Lenc_no_key_aliasing:

	lea	($out),$in0
	lea	-0xc0($out,$len),$end0
	shr	\$4,$len
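
	# Prime the pump: produce the first twelve blocks of ciphertext
	# with the plain CTR helper so the stitched loop below always has
	# six ciphertext blocks to hash while it encrypts the next six.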
	call	_aesni_ctr32_6x

	vpshufb	$Ii,$inout0,$Xi		# save bswapped output on stack
	vpshufb	$Ii,$inout1,$T2
	vmovdqu	$Xi,0x70(%rsp)
	vpshufb	$Ii,$inout2,$Z0
	vmovdqu	$T2,0x60(%rsp)
	vpshufb	$Ii,$inout3,$Z1
	vmovdqu	$Z0,0x50(%rsp)
	vpshufb	$Ii,$inout4,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$inout5,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	$Z2,0x30(%rsp)
	call	_aesni_ctr32_6x

	vmovdqu	($Xip),$Xi		# load Xi
	lea	0x20+0x20($Xip),$Xip	# size optimization
	sub	\$12,$len
	mov	\$0x60*2,$ret
	vpshufb	$Ii,$Xi,$Xi

	call	_aesni_ctr32_ghash_6x
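
	# The stitched loop leaves the last six ciphertext blocks in
	# registers and six more bswapped on the stack; the vpclmulqdq
	# sequence below folds all twelve into the hash with aggregated
	# Karatsuba multiplications against the precomputed key powers.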
	vmovdqu	0x20(%rsp),$Z3		# I[5]
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$Z3,$Z3,$T1
	vmovdqu	0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
	vmovups	$inout0,-0x60($out)	# save output
	vpshufb	$Ii,$inout0,$inout0	# but keep bswapped copy
	vpxor	$Z3,$T1,$T1
	vmovups	$inout1,-0x50($out)
	vpshufb	$Ii,$inout1,$inout1
	vmovups	$inout2,-0x40($out)
	vpshufb	$Ii,$inout2,$inout2
	vmovups	$inout3,-0x30($out)
	vpshufb	$Ii,$inout3,$inout3
	vmovups	$inout4,-0x20($out)
	vpshufb	$Ii,$inout4,$inout4
	vmovups	$inout5,-0x10($out)
	vpshufb	$Ii,$inout5,$inout5
	vmovdqu	$inout0,0x10(%rsp)	# free $inout0
___
{ my ($HK,$T3)=($rndkey,$inout0);

$code.=<<___;
	vmovdqu	0x30(%rsp),$Z2		# I[4]
	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpunpckhqdq	$Z2,$Z2,$T2
	vpclmulqdq	\$0x00,$Hkey,$Z3,$Z1
	vpxor	$Z2,$T2,$T2
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x00,$HK,$T1,$T1

	vmovdqu	0x40(%rsp),$T3		# I[3]
	vpclmulqdq	\$0x00,$Ii,$Z2,$Z0
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$T3,$T3,$Z1
	vpclmulqdq	\$0x11,$Ii,$Z2,$Z2
	vpxor	$T3,$Z1,$Z1
	vpxor	$Z3,$Z2,$Z2
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vmovdqu	0x50(%rsp),$T1		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$T3,$Z3
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z0,$Z3,$Z3
	vpunpckhqdq	$T1,$T1,$Z0
	vpclmulqdq	\$0x11,$Hkey,$T3,$T3
	vpxor	$T1,$Z0,$Z0
	vpxor	$Z2,$T3,$T3
	vpclmulqdq	\$0x00,$HK,$Z1,$Z1
	vpxor	$T2,$Z1,$Z1

	vmovdqu	0x60(%rsp),$T2		# I[1]
	vpclmulqdq	\$0x00,$Ii,$T1,$Z2
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z3,$Z2,$Z2
	vpunpckhqdq	$T2,$T2,$Z3
	vpclmulqdq	\$0x11,$Ii,$T1,$T1
	vpxor	$T2,$Z3,$Z3
	vpxor	$T3,$T1,$T1
	vpclmulqdq	\$0x10,$HK,$Z0,$Z0
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$Z1,$Z0,$Z0

	vpxor	0x70(%rsp),$Xi,$Xi	# accumulate I[0]
	vpclmulqdq	\$0x00,$Hkey,$T2,$Z1
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpunpckhqdq	$Xi,$Xi,$T3
	vpxor	$Z2,$Z1,$Z1
	vpclmulqdq	\$0x11,$Hkey,$T2,$T2
	vpxor	$Xi,$T3,$T3
	vpxor	$T1,$T2,$T2
	vpclmulqdq	\$0x00,$HK,$Z3,$Z3
	vpxor	$Z0,$Z3,$Z0

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$inout5,$inout5,$T1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Xi
	vpxor	$inout5,$T1,$T1
	vpxor	$Z1,$Z2,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$T3
	vmovdqu	0x20-0x20($Xip),$HK
	vpxor	$T2,$Xi,$Z3
	vpxor	$Z0,$T3,$Z2

	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpxor	$Z1,$Z3,$T3		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$inout5,$Z0
	vpxor	$T3,$Z2,$Z2
	vpunpckhqdq	$inout4,$inout4,$T2
	vpclmulqdq	\$0x11,$Hkey,$inout5,$inout5
	vpxor	$inout4,$T2,$T2
	vpslldq	\$8,$Z2,$T3
	vpclmulqdq	\$0x00,$HK,$T1,$T1
	vpxor	$T3,$Z1,$Xi
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$Z2,$Z3,$Z3

	vpclmulqdq	\$0x00,$Ii,$inout4,$Z1
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout3,$inout3,$T3
	vpclmulqdq	\$0x11,$Ii,$inout4,$inout4
	vpxor	$inout3,$T3,$T3
	vpxor	$inout5,$inout4,$inout4
	vpalignr	\$8,$Xi,$Xi,$inout5	# 1st phase
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Hkey,$inout3,$Z0
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$inout2,$inout2,$T1
	vpclmulqdq	\$0x11,$Hkey,$inout3,$inout3
	vpxor	$inout2,$T1,$T1
	vpxor	$inout4,$inout3,$inout3
	vxorps	0x10(%rsp),$Z3,$Z3	# accumulate $inout0
	vpclmulqdq	\$0x00,$HK,$T3,$T3
	vpxor	$T2,$T3,$T3

	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Ii,$inout2,$Z1
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout1,$inout1,$T2
	vpclmulqdq	\$0x11,$Ii,$inout2,$inout2
	vpxor	$inout1,$T2,$T2
	vpalignr	\$8,$Xi,$Xi,$inout5	# 2nd phase
	vpxor	$inout3,$inout2,$inout2
	vpclmulqdq	\$0x10,$HK,$T1,$T1
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$T3,$T1,$T1

	vxorps	$Z3,$inout5,$inout5
	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Hkey,$inout1,$Z0
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$Xi,$Xi,$T3
	vpclmulqdq	\$0x11,$Hkey,$inout1,$inout1
	vpxor	$Xi,$T3,$T3
	vpxor	$inout2,$inout1,$inout1
	vpclmulqdq	\$0x00,$HK,$T2,$T2
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Z3
	vpxor	$Z0,$Z1,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$Z2
	vpxor	$inout1,$Z3,$Z3
	vpxor	$T2,$Z2,$Z2

	vpxor	$Z1,$Z3,$Z0		# aggregated Karatsuba post-processing
	vpxor	$Z0,$Z2,$Z2
	vpslldq	\$8,$Z2,$T1
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$T1,$Z1,$Xi
	vpxor	$Z2,$Z3,$Z3

	vpalignr	\$8,$Xi,$Xi,$T2	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	vpalignr	\$8,$Xi,$Xi,$T2	# 2nd phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$Z3,$T2,$T2
	vpxor	$T2,$Xi,$Xi
___
}
$code.=<<___;
	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp		# restore %rsp
.Lgcm_enc_abort:
	mov	$ret,%rax		# return value
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt
___
$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
	.byte	2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
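
# .Lbswap_mask converts a 16-byte lane between the big-endian wire
# format and the little-endian order PCLMULQDQ works in. .Lpoly is the
# usual GHASH reduction constant: the 0xc2 byte encodes the reflected
# polynomial x^128+x^7+x^2+x+1 as consumed by the two-phase vpclmulqdq
# reduction above. .Lone_msb increments the byte-swapped counter, while
# .Ltwo_lsb/.Lone_lsb increment it in native dword order on the
# counter-wrap path.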
	.asciz	"AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
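
# gcm_se_handler is the Win64 structured-exception handler: when an
# exception unwinds through either entry point, it restores the saved
# non-volatile GPRs and the %xmm6-%xmm15 save area so the OS unwinder
# sees a consistent context.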
$code.=<<___
.extern	__imp_RtlVirtualUnwind
.type	gcm_se_handler,\@abi-omnipotent
.align	16
gcm_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	120($context),%rax	# pull context->Rax

	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	mov	%r15,240($context)
	mov	%r14,232($context)
	mov	%r13,224($context)
	mov	%r12,216($context)
	mov	%rbp,160($context)
	mov	%rbx,144($context)

	lea	-0xd8(%rax),%rsi	# %xmm save area
	lea	512($context),%rdi	# & context.Xmm6
	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	gcm_se_handler,.-gcm_se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_aesni_gcm_decrypt
	.rva	.LSEH_end_aesni_gcm_decrypt
	.rva	.LSEH_gcm_dec_info

	.rva	.LSEH_begin_aesni_gcm_encrypt
	.rva	.LSEH_end_aesni_gcm_encrypt
	.rva	.LSEH_gcm_enc_info
.section	.xdata
.align	8
.LSEH_gcm_dec_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_dec_body,.Lgcm_dec_abort
.LSEH_gcm_enc_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_enc_body,.Lgcm_enc_abort
___
}
}}} else {{{
$code=<<___;	# assembler is too old
.text

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt

.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
}}}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;