vpaes-x86_64.pl

#!/usr/bin/env perl
######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
######################################################################
# September 2011.
#
# Interface to OpenSSL as "almost" drop-in replacement for
# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
# doesn't handle partial vectors (doesn't have to if called from
# EVP only). "Drop-in" implies that this module doesn't share key
# schedule structure with the original nor does it make assumptions
# about its alignment...
#
# Performance summary. aes-x86_64.pl column lists large-block CBC
# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
# byte processed with 128-bit key, and vpaes-x86_64.pl column -
# [also large-block CBC] encrypt/decrypt.
#
#              aes-x86_64.pl   vpaes-x86_64.pl
#
# Core 2(**)   29.6/41.1/14.3  21.9/25.2(***)
# Nehalem      29.6/40.3/14.6  10.0/11.8
# Atom         57.3/74.2/32.1  60.9/77.2(***)
# Silvermont   52.7/64.0/19.5  48.8/60.8(***)
#
# (*) "Hyper-threading" in this context refers to cache shared among
# multiple cores rather than to Intel HTT specifically. As the vast
# majority of contemporary cores share cache, the slower code path is
# commonplace. In other words, "with-hyper-threading-off" results are
# presented mostly for reference purposes.
#
# (**) "Core 2" refers to the initial 65nm design, a.k.a. Conroe.
#
# (***) The less impressive improvement on Core 2 and Atom is due to
# slow pshufb, yet it's a respectable +36%/62% improvement on Core 2
# (as implied, over the "hyper-threading-safe" code path).
#
# <appro@openssl.org>
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
$PREFIX="vpaes";
$code.=<<___;
.text
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
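## The basic building block below is a pair of 16-entry tables kept in
## xmm registers and indexed by the low and high nibble of each byte via
## pshufb, so no secret-dependent memory load is ever issued.
## Functionally, each such lookup amounts to the following (illustrative
## C sketch only; tlo/thi stand in for the .Lk_* constant pairs):
##
##	#include <stdint.h>
##
##	static uint8_t nibble_lookup(uint8_t x, const uint8_t tlo[16],
##	                             const uint8_t thi[16])
##	{
##		return tlo[x & 0x0F] ^ thi[x >> 4];	/* 16 bytes at once in SSSE3 */
##	}
##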
.type _vpaes_encrypt_core,\@abi-omnipotent
.align 16
_vpaes_encrypt_core:
mov %rdx, %r9
mov \$16, %r11
mov 240(%rdx),%eax
movdqa %xmm9, %xmm1
movdqa .Lk_ipt(%rip), %xmm2 # iptlo
pandn %xmm0, %xmm1
movdqu (%r9), %xmm5 # round0 key
psrld \$4, %xmm1
pand %xmm9, %xmm0
pshufb %xmm0, %xmm2
movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
pshufb %xmm1, %xmm0
pxor %xmm5, %xmm2
add \$16, %r9
pxor %xmm2, %xmm0
lea .Lk_mc_backward(%rip),%r10
jmp .Lenc_entry
.align 16
.Lenc_loop:
# middle of middle round
movdqa %xmm13, %xmm4 # 4 : sb1u
movdqa %xmm12, %xmm0 # 0 : sb1t
pshufb %xmm2, %xmm4 # 4 = sb1u
pshufb %xmm3, %xmm0 # 0 = sb1t
pxor %xmm5, %xmm4 # 4 = sb1u + k
movdqa %xmm15, %xmm5 # 4 : sb2u
pxor %xmm4, %xmm0 # 0 = A
movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
pshufb %xmm2, %xmm5 # 4 = sb2u
movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
movdqa %xmm14, %xmm2 # 2 : sb2t
pshufb %xmm3, %xmm2 # 2 = sb2t
movdqa %xmm0, %xmm3 # 3 = A
pxor %xmm5, %xmm2 # 2 = 2A
pshufb %xmm1, %xmm0 # 0 = B
add \$16, %r9 # next key
pxor %xmm2, %xmm0 # 0 = 2A+B
pshufb %xmm4, %xmm3 # 3 = D
add \$16, %r11 # next mc
pxor %xmm0, %xmm3 # 3 = 2A+B+D
pshufb %xmm1, %xmm0 # 0 = 2B+C
and \$0x30, %r11 # ... mod 4
sub \$1,%rax # nr--
pxor %xmm3, %xmm0 # 0 = 2A+3B+C+D
.Lenc_entry:
# top of round
movdqa %xmm9, %xmm1 # 1 : i
movdqa %xmm11, %xmm5 # 2 : a/k
pandn %xmm0, %xmm1 # 1 = i<<4
psrld \$4, %xmm1 # 1 = i
pand %xmm9, %xmm0 # 0 = k
pshufb %xmm0, %xmm5 # 2 = a/k
movdqa %xmm10, %xmm3 # 3 : 1/i
pxor %xmm1, %xmm0 # 0 = j
pshufb %xmm1, %xmm3 # 3 = 1/i
movdqa %xmm10, %xmm4 # 4 : 1/j
pxor %xmm5, %xmm3 # 3 = iak = 1/i + a/k
pshufb %xmm0, %xmm4 # 4 = 1/j
movdqa %xmm10, %xmm2 # 2 : 1/iak
pxor %xmm5, %xmm4 # 4 = jak = 1/j + a/k
pshufb %xmm3, %xmm2 # 2 = 1/iak
movdqa %xmm10, %xmm3 # 3 : 1/jak
pxor %xmm0, %xmm2 # 2 = io
pshufb %xmm4, %xmm3 # 3 = 1/jak
movdqu (%r9), %xmm5
pxor %xmm1, %xmm3 # 3 = jo
jnz .Lenc_loop
# middle of last round
movdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
movdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
pshufb %xmm2, %xmm4 # 4 = sbou
pxor %xmm5, %xmm4 # 4 = sb1u + k
pshufb %xmm3, %xmm0 # 0 = sb1t
movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
pxor %xmm4, %xmm0 # 0 = A
pshufb %xmm1, %xmm0
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
##
## Decryption core
##
## Same API as encryption core.
##
.type _vpaes_decrypt_core,\@abi-omnipotent
.align 16
_vpaes_decrypt_core:
mov %rdx, %r9 # load key
mov 240(%rdx),%eax
movdqa %xmm9, %xmm1
movdqa .Lk_dipt(%rip), %xmm2 # iptlo
pandn %xmm0, %xmm1
mov %rax, %r11
psrld \$4, %xmm1
movdqu (%r9), %xmm5 # round0 key
shl \$4, %r11
pand %xmm9, %xmm0
pshufb %xmm0, %xmm2
movdqa .Lk_dipt+16(%rip), %xmm0 # ipthi
xor \$0x30, %r11
lea .Lk_dsbd(%rip),%r10
pshufb %xmm1, %xmm0
and \$0x30, %r11
pxor %xmm5, %xmm2
movdqa .Lk_mc_forward+48(%rip), %xmm5
pxor %xmm2, %xmm0
add \$16, %r9
add %r10, %r11
jmp .Ldec_entry
.align 16
.Ldec_loop:
##
## Inverse mix columns
##
movdqa -0x20(%r10),%xmm4 # 4 : sb9u
movdqa -0x10(%r10),%xmm1 # 0 : sb9t
pshufb %xmm2, %xmm4 # 4 = sb9u
pshufb %xmm3, %xmm1 # 0 = sb9t
pxor %xmm4, %xmm0
movdqa 0x00(%r10),%xmm4 # 4 : sbdu
pxor %xmm1, %xmm0 # 0 = ch
movdqa 0x10(%r10),%xmm1 # 0 : sbdt
pshufb %xmm2, %xmm4 # 4 = sbdu
pshufb %xmm5, %xmm0 # MC ch
pshufb %xmm3, %xmm1 # 0 = sbdt
pxor %xmm4, %xmm0 # 4 = ch
movdqa 0x20(%r10),%xmm4 # 4 : sbbu
pxor %xmm1, %xmm0 # 0 = ch
movdqa 0x30(%r10),%xmm1 # 0 : sbbt
pshufb %xmm2, %xmm4 # 4 = sbbu
pshufb %xmm5, %xmm0 # MC ch
pshufb %xmm3, %xmm1 # 0 = sbbt
pxor %xmm4, %xmm0 # 4 = ch
movdqa 0x40(%r10),%xmm4 # 4 : sbeu
pxor %xmm1, %xmm0 # 0 = ch
movdqa 0x50(%r10),%xmm1 # 0 : sbet
pshufb %xmm2, %xmm4 # 4 = sbeu
pshufb %xmm5, %xmm0 # MC ch
pshufb %xmm3, %xmm1 # 0 = sbet
pxor %xmm4, %xmm0 # 4 = ch
add \$16, %r9 # next round key
palignr \$12, %xmm5, %xmm5
pxor %xmm1, %xmm0 # 0 = ch
sub \$1,%rax # nr--
.Ldec_entry:
# top of round
movdqa %xmm9, %xmm1 # 1 : i
pandn %xmm0, %xmm1 # 1 = i<<4
movdqa %xmm11, %xmm2 # 2 : a/k
psrld \$4, %xmm1 # 1 = i
pand %xmm9, %xmm0 # 0 = k
pshufb %xmm0, %xmm2 # 2 = a/k
movdqa %xmm10, %xmm3 # 3 : 1/i
pxor %xmm1, %xmm0 # 0 = j
pshufb %xmm1, %xmm3 # 3 = 1/i
movdqa %xmm10, %xmm4 # 4 : 1/j
pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
pshufb %xmm0, %xmm4 # 4 = 1/j
pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
movdqa %xmm10, %xmm2 # 2 : 1/iak
pshufb %xmm3, %xmm2 # 2 = 1/iak
movdqa %xmm10, %xmm3 # 3 : 1/jak
pxor %xmm0, %xmm2 # 2 = io
pshufb %xmm4, %xmm3 # 3 = 1/jak
movdqu (%r9), %xmm0
pxor %xmm1, %xmm3 # 3 = jo
jnz .Ldec_loop
# middle of last round
movdqa 0x60(%r10), %xmm4 # 3 : sbou
pshufb %xmm2, %xmm4 # 4 = sbou
pxor %xmm0, %xmm4 # 4 = sb1u + k
movdqa 0x70(%r10), %xmm0 # 0 : sbot
movdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
pshufb %xmm3, %xmm0 # 0 = sb1t
pxor %xmm4, %xmm0 # 0 = A
pshufb %xmm2, %xmm0
ret
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
.type _vpaes_schedule_core,\@abi-omnipotent
.align 16
_vpaes_schedule_core:
# rdi = key
# rsi = size in bits
# rdx = buffer
# rcx = direction. 0=encrypt, 1=decrypt
call _vpaes_preheat # load the tables
movdqa .Lk_rcon(%rip), %xmm8 # load rcon
movdqu (%rdi), %xmm0 # load key (unaligned)
# input transform
movdqa %xmm0, %xmm3
lea .Lk_ipt(%rip), %r11
call _vpaes_schedule_transform
movdqa %xmm0, %xmm7
lea .Lk_sr(%rip),%r10
test %rcx, %rcx
jnz .Lschedule_am_decrypting
# encrypting, output zeroth round key after transform
movdqu %xmm0, (%rdx)
jmp .Lschedule_go
.Lschedule_am_decrypting:
# decrypting, output zeroth round key after shiftrows
movdqa (%r8,%r10),%xmm1
pshufb %xmm1, %xmm3
movdqu %xmm3, (%rdx)
xor \$0x30, %r8
.Lschedule_go:
cmp \$192, %esi
ja .Lschedule_256
je .Lschedule_192
# 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
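## Keeping count (informal): AES-128 needs 11 round keys.  Round key 0
## was already written out before .Lschedule_go; each pass through
## .Loop_schedule_128 below writes one key via _vpaes_schedule_mangle,
## and the tenth pass branches to .Lschedule_mangle_last for the final
## key, giving 1 + 9 + 1 = 11.
##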
.Lschedule_128:
mov \$10, %esi
.Loop_schedule_128:
call _vpaes_schedule_round
dec %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle # write output
jmp .Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
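## Keeping count (informal): AES-192 needs 13 round keys.  Round key 0
## was already written out before .Lschedule_go; each full pass through
## .Loop_schedule_192 below writes three keys (one per
## _vpaes_schedule_mangle call), and the fourth pass exits to
## .Lschedule_mangle_last after its first two mangles, giving
## 1 + 3*3 + 2 + 1 = 13.
##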
.align 16
.Lschedule_192:
movdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
call _vpaes_schedule_transform # input transform
movdqa %xmm0, %xmm6 # save short part
pxor %xmm4, %xmm4 # clear 4
movhlps %xmm4, %xmm6 # clobber low side with zeros
mov \$4, %esi
.Loop_schedule_192:
call _vpaes_schedule_round
palignr \$8,%xmm6,%xmm0
call _vpaes_schedule_mangle # save key n
call _vpaes_schedule_192_smear
call _vpaes_schedule_mangle # save key n+1
call _vpaes_schedule_round
dec %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle # save key n+2
call _vpaes_schedule_192_smear
jmp .Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
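## Keeping count (informal): AES-256 needs 15 round keys.  Round key 0
## was already written out before .Lschedule_go; each full pass through
## .Loop_schedule_256 below writes two keys (one per
## _vpaes_schedule_mangle call), and the seventh pass exits to
## .Lschedule_mangle_last after its first mangle, giving
## 1 + 6*2 + 1 + 1 = 15.
##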
.align 16
.Lschedule_256:
movdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
call _vpaes_schedule_transform # input transform
mov \$7, %esi
.Loop_schedule_256:
call _vpaes_schedule_mangle # output low result
movdqa %xmm0, %xmm6 # save cur_lo in xmm6
# high round
call _vpaes_schedule_round
dec %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle
# low round. swap xmm7 and xmm6
pshufd \$0xFF, %xmm0, %xmm0
movdqa %xmm7, %xmm5
movdqa %xmm6, %xmm7
call _vpaes_schedule_low_round
movdqa %xmm5, %xmm7
jmp .Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 16
.Lschedule_mangle_last:
# schedule last round key from xmm0
lea .Lk_deskew(%rip),%r11 # prepare to deskew
test %rcx, %rcx
jnz .Lschedule_mangle_last_dec
# encrypting
movdqa (%r8,%r10),%xmm1
pshufb %xmm1, %xmm0 # output permute
lea .Lk_opt(%rip), %r11 # prepare to output transform
add \$32, %rdx
.Lschedule_mangle_last_dec:
add \$-16, %rdx
pxor .Lk_s63(%rip), %xmm0
call _vpaes_schedule_transform # output transform
movdqu %xmm0, (%rdx) # save last key
# cleanup
pxor %xmm0, %xmm0
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.type _vpaes_schedule_192_smear,\@abi-omnipotent
.align 16
_vpaes_schedule_192_smear:
pshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
pxor %xmm1, %xmm6 # -> c+d c 0 0
pxor %xmm1, %xmm1
pxor %xmm0, %xmm6 # -> b+c+d b+c b a
movdqa %xmm6, %xmm0
movhlps %xmm1, %xmm6 # clobber low side with zeros
ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
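##
## The pslldq/pxor "smear" below is the vectorized form of the usual
## word-by-word AES key expansion.  A scalar C sketch of one such round,
## where prev[] is the previous round key and t is the rotated,
## substituted word with rcon already folded in (names are illustrative
## only, and the .Lk_s63/basis-change bookkeeping is omitted):
##
##	#include <stdint.h>
##
##	void expand_one_round(const uint32_t prev[4], uint32_t t,
##	                      uint32_t next[4])
##	{
##		next[0] = prev[0] ^ t;
##		next[1] = prev[1] ^ next[0];
##		next[2] = prev[2] ^ next[1];
##		next[3] = prev[3] ^ next[2];
##	}
##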
.type _vpaes_schedule_round,\@abi-omnipotent
.align 16
_vpaes_schedule_round:
# extract rcon from xmm8
pxor %xmm1, %xmm1
palignr \$15, %xmm8, %xmm1
palignr \$15, %xmm8, %xmm8
pxor %xmm1, %xmm7
# rotate
pshufd \$0xFF, %xmm0, %xmm0
palignr \$1, %xmm0, %xmm0
# fall through...
# low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
# smear xmm7
movdqa %xmm7, %xmm1
pslldq \$4, %xmm7
pxor %xmm1, %xmm7
movdqa %xmm7, %xmm1
pslldq \$8, %xmm7
pxor %xmm1, %xmm7
pxor .Lk_s63(%rip), %xmm7
# subbytes
movdqa %xmm9, %xmm1
pandn %xmm0, %xmm1
psrld \$4, %xmm1 # 1 = i
pand %xmm9, %xmm0 # 0 = k
movdqa %xmm11, %xmm2 # 2 : a/k
pshufb %xmm0, %xmm2 # 2 = a/k
pxor %xmm1, %xmm0 # 0 = j
movdqa %xmm10, %xmm3 # 3 : 1/i
pshufb %xmm1, %xmm3 # 3 = 1/i
pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
movdqa %xmm10, %xmm4 # 4 : 1/j
pshufb %xmm0, %xmm4 # 4 = 1/j
pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
movdqa %xmm10, %xmm2 # 2 : 1/iak
pshufb %xmm3, %xmm2 # 2 = 1/iak
pxor %xmm0, %xmm2 # 2 = io
movdqa %xmm10, %xmm3 # 3 : 1/jak
pshufb %xmm4, %xmm3 # 3 = 1/jak
pxor %xmm1, %xmm3 # 3 = jo
movdqa %xmm13, %xmm4 # 4 : sbou
pshufb %xmm2, %xmm4 # 4 = sbou
movdqa %xmm12, %xmm0 # 0 : sbot
pshufb %xmm3, %xmm0 # 0 = sb1t
pxor %xmm4, %xmm0 # 0 = sbox output
# add in smeared stuff
pxor %xmm7, %xmm0
movdqa %xmm0, %xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type _vpaes_schedule_transform,\@abi-omnipotent
.align 16
_vpaes_schedule_transform:
movdqa %xmm9, %xmm1
pandn %xmm0, %xmm1
psrld \$4, %xmm1
pand %xmm9, %xmm0
movdqa (%r11), %xmm2 # lo
pshufb %xmm0, %xmm2
movdqa 16(%r11), %xmm0 # hi
pshufb %xmm1, %xmm0
pxor %xmm2, %xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
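##
## "Multiply by circulant 0,1,1,1" means each output byte of a 4-byte
## column is the xor of the column's other three bytes; the three
## pshufb-by-.Lk_mc_forward steps in the encrypt path below accumulate
## rotations by 1, 2 and 3 bytes, which amounts to the same thing.  Per
## column (illustrative C sketch only):
##
##	#include <stdint.h>
##
##	void circulant_0111(const uint8_t in[4], uint8_t out[4])
##	{
##		for (int i = 0; i < 4; i++)
##			out[i] = in[(i + 1) & 3] ^ in[(i + 2) & 3] ^ in[(i + 3) & 3];
##	}
##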
.type _vpaes_schedule_mangle,\@abi-omnipotent
.align 16
_vpaes_schedule_mangle:
movdqa %xmm0, %xmm4 # save xmm0 for later
movdqa .Lk_mc_forward(%rip),%xmm5
test %rcx, %rcx
jnz .Lschedule_mangle_dec
# encrypting
add \$16, %rdx
pxor .Lk_s63(%rip),%xmm4
pshufb %xmm5, %xmm4
movdqa %xmm4, %xmm3
pshufb %xmm5, %xmm4
pxor %xmm4, %xmm3
pshufb %xmm5, %xmm4
pxor %xmm4, %xmm3
jmp .Lschedule_mangle_both
.align 16
.Lschedule_mangle_dec:
# inverse mix columns
lea .Lk_dksd(%rip),%r11
movdqa %xmm9, %xmm1
pandn %xmm4, %xmm1
psrld \$4, %xmm1 # 1 = hi
pand %xmm9, %xmm4 # 4 = lo
movdqa 0x00(%r11), %xmm2
pshufb %xmm4, %xmm2
movdqa 0x10(%r11), %xmm3
pshufb %xmm1, %xmm3
pxor %xmm2, %xmm3
pshufb %xmm5, %xmm3
movdqa 0x20(%r11), %xmm2
pshufb %xmm4, %xmm2
pxor %xmm3, %xmm2
movdqa 0x30(%r11), %xmm3
pshufb %xmm1, %xmm3
pxor %xmm2, %xmm3
pshufb %xmm5, %xmm3
movdqa 0x40(%r11), %xmm2
pshufb %xmm4, %xmm2
pxor %xmm3, %xmm2
movdqa 0x50(%r11), %xmm3
pshufb %xmm1, %xmm3
pxor %xmm2, %xmm3
pshufb %xmm5, %xmm3
movdqa 0x60(%r11), %xmm2
pshufb %xmm4, %xmm2
pxor %xmm3, %xmm2
movdqa 0x70(%r11), %xmm3
pshufb %xmm1, %xmm3
pxor %xmm2, %xmm3
add \$-16, %rdx
.Lschedule_mangle_both:
movdqa (%r8,%r10),%xmm1
pshufb %xmm1,%xmm3
add \$-16, %r8
and \$0x30, %r8
movdqu %xmm3, (%rdx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
#
# Interface to OpenSSL
#
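# The entry points below mirror the AES_* ones in aes-x86_64.pl; on the
# C side they are expected to be declared roughly as follows (assumed
# prototypes, shown for orientation only):
#
#	int  vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
#	                           AES_KEY *key);
#	int  vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
#	                           AES_KEY *key);
#	void vpaes_encrypt(const unsigned char *in, unsigned char *out,
#	                   const AES_KEY *key);
#	void vpaes_decrypt(const unsigned char *in, unsigned char *out,
#	                   const AES_KEY *key);
#	void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
#	                       size_t length, const AES_KEY *key,
#	                       unsigned char *ivp, const int enc);
#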
.globl ${PREFIX}_set_encrypt_key
.type ${PREFIX}_set_encrypt_key,\@function,3
.align 16
${PREFIX}_set_encrypt_key:
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
movaps %xmm6,0x10(%rsp)
movaps %xmm7,0x20(%rsp)
movaps %xmm8,0x30(%rsp)
movaps %xmm9,0x40(%rsp)
movaps %xmm10,0x50(%rsp)
movaps %xmm11,0x60(%rsp)
movaps %xmm12,0x70(%rsp)
movaps %xmm13,0x80(%rsp)
movaps %xmm14,0x90(%rsp)
movaps %xmm15,0xa0(%rsp)
.Lenc_key_body:
___
$code.=<<___;
mov %esi,%eax
shr \$5,%eax
add \$5,%eax
mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
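# worked out: 128 -> 128/32+5 = 9, 192 -> 11, 256 -> 13; note this is
# the number of .Lenc_loop/.Ldec_loop iterations the cores run, not
# the textbook 10/12/14 round count.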
mov \$0,%ecx
mov \$0x30,%r8d
call _vpaes_schedule_core
___
$code.=<<___ if ($win64);
movaps 0x10(%rsp),%xmm6
movaps 0x20(%rsp),%xmm7
movaps 0x30(%rsp),%xmm8
movaps 0x40(%rsp),%xmm9
movaps 0x50(%rsp),%xmm10
movaps 0x60(%rsp),%xmm11
movaps 0x70(%rsp),%xmm12
movaps 0x80(%rsp),%xmm13
movaps 0x90(%rsp),%xmm14
movaps 0xa0(%rsp),%xmm15
lea 0xb8(%rsp),%rsp
.Lenc_key_epilogue:
___
$code.=<<___;
xor %eax,%eax
ret
.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
.globl ${PREFIX}_set_decrypt_key
.type ${PREFIX}_set_decrypt_key,\@function,3
.align 16
${PREFIX}_set_decrypt_key:
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
movaps %xmm6,0x10(%rsp)
movaps %xmm7,0x20(%rsp)
movaps %xmm8,0x30(%rsp)
movaps %xmm9,0x40(%rsp)
movaps %xmm10,0x50(%rsp)
movaps %xmm11,0x60(%rsp)
movaps %xmm12,0x70(%rsp)
movaps %xmm13,0x80(%rsp)
movaps %xmm14,0x90(%rsp)
movaps %xmm15,0xa0(%rsp)
.Ldec_key_body:
___
$code.=<<___;
mov %esi,%eax
shr \$5,%eax
add \$5,%eax
mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
shl \$4,%eax
lea 16(%rdx,%rax),%rdx
mov \$1,%ecx
mov %esi,%r8d
shr \$1,%r8d
and \$32,%r8d
xor \$32,%r8d # nbits==192?0:32
call _vpaes_schedule_core
___
$code.=<<___ if ($win64);
movaps 0x10(%rsp),%xmm6
movaps 0x20(%rsp),%xmm7
movaps 0x30(%rsp),%xmm8
movaps 0x40(%rsp),%xmm9
movaps 0x50(%rsp),%xmm10
movaps 0x60(%rsp),%xmm11
movaps 0x70(%rsp),%xmm12
movaps 0x80(%rsp),%xmm13
movaps 0x90(%rsp),%xmm14
movaps 0xa0(%rsp),%xmm15
lea 0xb8(%rsp),%rsp
.Ldec_key_epilogue:
___
$code.=<<___;
xor %eax,%eax
ret
.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
.globl ${PREFIX}_encrypt
.type ${PREFIX}_encrypt,\@function,3
.align 16
${PREFIX}_encrypt:
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
movaps %xmm6,0x10(%rsp)
movaps %xmm7,0x20(%rsp)
movaps %xmm8,0x30(%rsp)
movaps %xmm9,0x40(%rsp)
movaps %xmm10,0x50(%rsp)
movaps %xmm11,0x60(%rsp)
movaps %xmm12,0x70(%rsp)
movaps %xmm13,0x80(%rsp)
movaps %xmm14,0x90(%rsp)
movaps %xmm15,0xa0(%rsp)
.Lenc_body:
___
$code.=<<___;
movdqu (%rdi),%xmm0
call _vpaes_preheat
call _vpaes_encrypt_core
movdqu %xmm0,(%rsi)
___
$code.=<<___ if ($win64);
movaps 0x10(%rsp),%xmm6
movaps 0x20(%rsp),%xmm7
movaps 0x30(%rsp),%xmm8
movaps 0x40(%rsp),%xmm9
movaps 0x50(%rsp),%xmm10
movaps 0x60(%rsp),%xmm11
movaps 0x70(%rsp),%xmm12
movaps 0x80(%rsp),%xmm13
movaps 0x90(%rsp),%xmm14
movaps 0xa0(%rsp),%xmm15
lea 0xb8(%rsp),%rsp
.Lenc_epilogue:
___
$code.=<<___;
ret
.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
.globl ${PREFIX}_decrypt
.type ${PREFIX}_decrypt,\@function,3
.align 16
${PREFIX}_decrypt:
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
movaps %xmm6,0x10(%rsp)
movaps %xmm7,0x20(%rsp)
movaps %xmm8,0x30(%rsp)
movaps %xmm9,0x40(%rsp)
movaps %xmm10,0x50(%rsp)
movaps %xmm11,0x60(%rsp)
movaps %xmm12,0x70(%rsp)
movaps %xmm13,0x80(%rsp)
movaps %xmm14,0x90(%rsp)
movaps %xmm15,0xa0(%rsp)
.Ldec_body:
___
$code.=<<___;
movdqu (%rdi),%xmm0
call _vpaes_preheat
call _vpaes_decrypt_core
movdqu %xmm0,(%rsi)
___
$code.=<<___ if ($win64);
movaps 0x10(%rsp),%xmm6
movaps 0x20(%rsp),%xmm7
movaps 0x30(%rsp),%xmm8
movaps 0x40(%rsp),%xmm9
movaps 0x50(%rsp),%xmm10
movaps 0x60(%rsp),%xmm11
movaps 0x70(%rsp),%xmm12
movaps 0x80(%rsp),%xmm13
movaps 0x90(%rsp),%xmm14
movaps 0xa0(%rsp),%xmm15
lea 0xb8(%rsp),%rsp
.Ldec_epilogue:
___
$code.=<<___;
ret
.size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
___
{
my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
#                       size_t length, const AES_KEY *key,
#                       unsigned char *ivp, const int enc);
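# The two loops below implement standard CBC chaining with %xmm6 holding
# the running IV/previous ciphertext block; in scalar terms (sketch):
#
#	encrypt:  C[i] = E(P[i] ^ C[i-1]),  C[-1] = IV
#	decrypt:  P[i] = D(C[i]) ^ C[i-1]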
$code.=<<___;
.globl ${PREFIX}_cbc_encrypt
.type ${PREFIX}_cbc_encrypt,\@function,6
.align 16
${PREFIX}_cbc_encrypt:
xchg $key,$len
___
($len,$key)=($key,$len);
$code.=<<___;
sub \$16,$len
jc .Lcbc_abort
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
movaps %xmm6,0x10(%rsp)
movaps %xmm7,0x20(%rsp)
movaps %xmm8,0x30(%rsp)
movaps %xmm9,0x40(%rsp)
movaps %xmm10,0x50(%rsp)
movaps %xmm11,0x60(%rsp)
movaps %xmm12,0x70(%rsp)
movaps %xmm13,0x80(%rsp)
movaps %xmm14,0x90(%rsp)
movaps %xmm15,0xa0(%rsp)
.Lcbc_body:
___
$code.=<<___;
movdqu ($ivp),%xmm6 # load IV
sub $inp,$out
call _vpaes_preheat
cmp \$0,${enc}d
je .Lcbc_dec_loop
jmp .Lcbc_enc_loop
.align 16
.Lcbc_enc_loop:
movdqu ($inp),%xmm0
pxor %xmm6,%xmm0
call _vpaes_encrypt_core
movdqa %xmm0,%xmm6
movdqu %xmm0,($out,$inp)
lea 16($inp),$inp
sub \$16,$len
jnc .Lcbc_enc_loop
jmp .Lcbc_done
.align 16
.Lcbc_dec_loop:
movdqu ($inp),%xmm0
movdqa %xmm0,%xmm7
call _vpaes_decrypt_core
pxor %xmm6,%xmm0
movdqa %xmm7,%xmm6
movdqu %xmm0,($out,$inp)
lea 16($inp),$inp
sub \$16,$len
jnc .Lcbc_dec_loop
.Lcbc_done:
movdqu %xmm6,($ivp) # save IV
___
$code.=<<___ if ($win64);
movaps 0x10(%rsp),%xmm6
movaps 0x20(%rsp),%xmm7
movaps 0x30(%rsp),%xmm8
movaps 0x40(%rsp),%xmm9
movaps 0x50(%rsp),%xmm10
movaps 0x60(%rsp),%xmm11
movaps 0x70(%rsp),%xmm12
movaps 0x80(%rsp),%xmm13
movaps 0x90(%rsp),%xmm14
movaps 0xa0(%rsp),%xmm15
lea 0xb8(%rsp),%rsp
.Lcbc_epilogue:
___
$code.=<<___;
.Lcbc_abort:
ret
.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
___
}
$code.=<<___;
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type _vpaes_preheat,\@abi-omnipotent
.align 16
_vpaes_preheat:
lea .Lk_s0F(%rip), %r10
movdqa -0x20(%r10), %xmm10 # .Lk_inv
movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
movdqa 0x00(%r10), %xmm9 # .Lk_s0F
movdqa 0x30(%r10), %xmm13 # .Lk_sb1
movdqa 0x40(%r10), %xmm12 # .Lk_sb1+16
movdqa 0x50(%r10), %xmm15 # .Lk_sb2
movdqa 0x60(%r10), %xmm14 # .Lk_sb2+16
ret
.size _vpaes_preheat,.-_vpaes_preheat
########################################################
##                                                    ##
##                     Constants                      ##
##                                                    ##
########################################################
.type _vpaes_consts,\@object
.align 64
_vpaes_consts:
.Lk_inv: # inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_s0F: # s0F
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
.Lk_ipt: # input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sb1: # sb1u, sb1t
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2: # sb2u, sb2t
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo: # sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_mc_forward: # mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:# mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr: # sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
.Lk_rcon: # rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_s63: # s63: all equal to 0x63 transformed
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
.Lk_opt: # output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew: # deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
##
## Decryption stuff
## Key schedule constants
##
.Lk_dksd: # decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb: # decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse: # decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9: # decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
##
## Decryption stuff
## Round function constants
##
.Lk_dipt: # decryption input transform
.quad 0x0F505B040B545F00, 0x154A411E114E451A
.quad 0x86E383E660056500, 0x12771772F491F194
.Lk_dsb9: # decryption sbox output *9*u, *9*t
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd: # decryption sbox output *D*u, *D*t
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb: # decryption sbox output *B*u, *B*t
.quad 0xD022649296B44200, 0x602646F6B0F2D404
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe: # decryption sbox output *E*u, *E*t
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
.Lk_dsbo: # decryption sbox final output
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.asciz "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
.align 64
.size _vpaes_consts,.-_vpaes_consts
___
if ($win64) {
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_prologue
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_prologue
lea 16(%rax),%rsi # %xmm save area
lea 512($context),%rdi # &context.Xmm6
mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
.long 0xa548f3fc # cld; rep movsq
lea 0xb8(%rax),%rax # adjust stack pointer
.Lin_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$`1232/8`,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
mov $disp,%rsi
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
add \$64,%rsp
popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
pop %rdi
pop %rsi
ret
.size se_handler,.-se_handler
.section .pdata
.align 4
.rva .LSEH_begin_${PREFIX}_set_encrypt_key
.rva .LSEH_end_${PREFIX}_set_encrypt_key
.rva .LSEH_info_${PREFIX}_set_encrypt_key
.rva .LSEH_begin_${PREFIX}_set_decrypt_key
.rva .LSEH_end_${PREFIX}_set_decrypt_key
.rva .LSEH_info_${PREFIX}_set_decrypt_key
.rva .LSEH_begin_${PREFIX}_encrypt
.rva .LSEH_end_${PREFIX}_encrypt
.rva .LSEH_info_${PREFIX}_encrypt
.rva .LSEH_begin_${PREFIX}_decrypt
.rva .LSEH_end_${PREFIX}_decrypt
.rva .LSEH_info_${PREFIX}_decrypt
.rva .LSEH_begin_${PREFIX}_cbc_encrypt
.rva .LSEH_end_${PREFIX}_cbc_encrypt
.rva .LSEH_info_${PREFIX}_cbc_encrypt
.section .xdata
.align 8
.LSEH_info_${PREFIX}_set_encrypt_key:
.byte 9,0,0,0
.rva se_handler
.rva .Lenc_key_body,.Lenc_key_epilogue # HandlerData[]
.LSEH_info_${PREFIX}_set_decrypt_key:
.byte 9,0,0,0
.rva se_handler
.rva .Ldec_key_body,.Ldec_key_epilogue # HandlerData[]
.LSEH_info_${PREFIX}_encrypt:
.byte 9,0,0,0
.rva se_handler
.rva .Lenc_body,.Lenc_epilogue # HandlerData[]
.LSEH_info_${PREFIX}_decrypt:
.byte 9,0,0,0
.rva se_handler
.rva .Ldec_body,.Ldec_epilogue # HandlerData[]
.LSEH_info_${PREFIX}_cbc_encrypt:
.byte 9,0,0,0
.rva se_handler
.rva .Lcbc_body,.Lcbc_epilogue # HandlerData[]
___
}
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
print $code;
close STDOUT;