vpaes-x86_64.pl

#!/usr/bin/env perl

######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
######################################################################
# September 2011.
#
# Interface to OpenSSL as "almost" drop-in replacement for
# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
# doesn't handle partial vectors (doesn't have to if called from
# EVP only). "Drop-in" implies that this module doesn't share key
# schedule structure with the original nor does it make assumptions
# about its alignment...
#
# Performance summary. The aes-x86_64.pl column lists large-block CBC
# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
# byte processed with 128-bit key, and the vpaes-x86_64.pl column -
# [also large-block CBC] encrypt/decrypt.
#
#               aes-x86_64.pl   vpaes-x86_64.pl
#
# Core 2(**)    29.6/41.1/14.3  21.9/25.2(***)
# Nehalem       29.6/40.3/14.6  10.0/11.8
# Atom          57.3/74.2/32.1  60.9/77.2(***)
#
# (*)  "Hyper-threading" in this context refers to cache shared among
#      multiple cores rather than to Intel HTT specifically. As the
#      vast majority of contemporary cores share cache, the slower
#      code path is commonplace. In other words, "with-hyper-threading-
#      off" results are presented mostly for reference purposes.
#
# (**) "Core 2" refers to the initial 65nm design, a.k.a. Conroe.
#
# (***) The less impressive improvement on Core 2 and Atom is due to
#      slow pshufb, yet it's a respectable +36%/62% improvement on
#      Core 2 (as implied, over the "hyper-threading-safe" code path).
#
#						<appro@openssl.org>
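#
# [Annotation, not part of the original header.] The central trick
# below is that pshufb with a 16-byte table performs sixteen parallel
# 4-bit lookups, which is what lets every S-box evaluation run without
# data-dependent memory loads. A rough scalar model of the instruction
# as this file uses it (illustration only, not part of the build):
#
#	sub pshufb_model {
#	    my ($table, $bytes) = @_;	# refs to 16-element byte arrays
#	    # each byte selects $table->[index & 0x0F]; the real
#	    # instruction yields 0 whenever bit 7 of the index is set,
#	    # which this file avoids by pre-masking with %xmm9 = 0x0F..0F
#	    return [ map { $_ & 0x80 ? 0 : $table->[$_ & 0x0F] } @$bytes ];
#	}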
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

$PREFIX="vpaes";

$code.=<<___;
.text

##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
##    %xmm0 = input
##    %xmm9-%xmm15 as in _vpaes_preheat
##   (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
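## [Annotation.] The round body below never indexes memory with secret
## data: the state is split into low and high nibbles using the 0x0F
## mask in %xmm9, the nibbles are pushed through the .Lk_inv tables
## (an inversion network over 4-bit values, per Hamburg's paper), and
## the .Lk_sb1/.Lk_sb2 output tables complete the S-box and feed the
## MixColumns combination done with pshufb/pxor.
##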
.type	_vpaes_encrypt_core,\@abi-omnipotent
.align	16
_vpaes_encrypt_core:
	mov	%rdx, %r9
	mov	\$16, %r11
	mov	240(%rdx),%eax
	movdqa	%xmm9, %xmm1
	movdqa	.Lk_ipt(%rip), %xmm2	# iptlo
	pandn	%xmm0, %xmm1
	movdqu	(%r9), %xmm5		# round0 key
	psrld	\$4, %xmm1
	pand	%xmm9, %xmm0
	pshufb	%xmm0, %xmm2
	movdqa	.Lk_ipt+16(%rip), %xmm0	# ipthi
	pshufb	%xmm1, %xmm0
	pxor	%xmm5, %xmm2
	add	\$16, %r9
	pxor	%xmm2, %xmm0
	lea	.Lk_mc_backward(%rip),%r10
	jmp	.Lenc_entry

.align 16
.Lenc_loop:
	# middle of middle round
	movdqa	%xmm13, %xmm4		# 4 : sb1u
	movdqa	%xmm12, %xmm0		# 0 : sb1t
	pshufb	%xmm2, %xmm4		# 4 = sb1u
	pshufb	%xmm3, %xmm0		# 0 = sb1t
	pxor	%xmm5, %xmm4		# 4 = sb1u + k
	movdqa	%xmm15, %xmm5		# 4 : sb2u
	pxor	%xmm4, %xmm0		# 0 = A
	movdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	pshufb	%xmm2, %xmm5		# 4 = sb2u
	movdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
	movdqa	%xmm14, %xmm2		# 2 : sb2t
	pshufb	%xmm3, %xmm2		# 2 = sb2t
	movdqa	%xmm0, %xmm3		# 3 = A
	pxor	%xmm5, %xmm2		# 2 = 2A
	pshufb	%xmm1, %xmm0		# 0 = B
	add	\$16, %r9		# next key
	pxor	%xmm2, %xmm0		# 0 = 2A+B
	pshufb	%xmm4, %xmm3		# 3 = D
	add	\$16, %r11		# next mc
	pxor	%xmm0, %xmm3		# 3 = 2A+B+D
	pshufb	%xmm1, %xmm0		# 0 = 2B+C
	and	\$0x30, %r11		# ... mod 4
	sub	\$1,%rax		# nr--
	pxor	%xmm3, %xmm0		# 0 = 2A+3B+C+D

.Lenc_entry:
	# top of round
	movdqa	%xmm9, %xmm1	# 1 : i
	movdqa	%xmm11, %xmm5	# 2 : a/k
	pandn	%xmm0, %xmm1	# 1 = i<<4
	psrld	\$4, %xmm1	# 1 = i
	pand	%xmm9, %xmm0	# 0 = k
	pshufb	%xmm0, %xmm5	# 2 = a/k
	movdqa	%xmm10, %xmm3	# 3 : 1/i
	pxor	%xmm1, %xmm0	# 0 = j
	pshufb	%xmm1, %xmm3	# 3 = 1/i
	movdqa	%xmm10, %xmm4	# 4 : 1/j
	pxor	%xmm5, %xmm3	# 3 = iak = 1/i + a/k
	pshufb	%xmm0, %xmm4	# 4 = 1/j
	movdqa	%xmm10, %xmm2	# 2 : 1/iak
	pxor	%xmm5, %xmm4	# 4 = jak = 1/j + a/k
	pshufb	%xmm3, %xmm2	# 2 = 1/iak
	movdqa	%xmm10, %xmm3	# 3 : 1/jak
	pxor	%xmm0, %xmm2	# 2 = io
	pshufb	%xmm4, %xmm3	# 3 = 1/jak
	movdqu	(%r9), %xmm5
	pxor	%xmm1, %xmm3	# 3 = jo
	jnz	.Lenc_loop

	# middle of last round
	movdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
	movdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	pshufb	%xmm2, %xmm4	# 4 = sbou
	pxor	%xmm5, %xmm4	# 4 = sb1u + k
	pshufb	%xmm3, %xmm0	# 0 = sb1t
	movdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	pxor	%xmm4, %xmm0	# 0 = A
	pshufb	%xmm1, %xmm0
	ret
.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
##
##  Decryption core
##
##  Same API as encryption core.
##
.type	_vpaes_decrypt_core,\@abi-omnipotent
.align	16
_vpaes_decrypt_core:
	mov	%rdx, %r9		# load key
	mov	240(%rdx),%eax
	movdqa	%xmm9, %xmm1
	movdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	pandn	%xmm0, %xmm1
	mov	%rax, %r11
	psrld	\$4, %xmm1
	movdqu	(%r9), %xmm5		# round0 key
	shl	\$4, %r11
	pand	%xmm9, %xmm0
	pshufb	%xmm0, %xmm2
	movdqa	.Lk_dipt+16(%rip), %xmm0 # ipthi
	xor	\$0x30, %r11
	lea	.Lk_dsbd(%rip),%r10
	pshufb	%xmm1, %xmm0
	and	\$0x30, %r11
	pxor	%xmm5, %xmm2
	movdqa	.Lk_mc_forward+48(%rip), %xmm5
	pxor	%xmm2, %xmm0
	add	\$16, %r9
	add	%r10, %r11
	jmp	.Ldec_entry

.align 16
.Ldec_loop:
##
## Inverse mix columns
##
	movdqa	-0x20(%r10),%xmm4	# 4 : sb9u
	movdqa	-0x10(%r10),%xmm1	# 0 : sb9t
	pshufb	%xmm2, %xmm4		# 4 = sb9u
	pshufb	%xmm3, %xmm1		# 0 = sb9t
	pxor	%xmm4, %xmm0
	movdqa	0x00(%r10),%xmm4	# 4 : sbdu
	pxor	%xmm1, %xmm0		# 0 = ch
	movdqa	0x10(%r10),%xmm1	# 0 : sbdt
	pshufb	%xmm2, %xmm4		# 4 = sbdu
	pshufb	%xmm5, %xmm0		# MC ch
	pshufb	%xmm3, %xmm1		# 0 = sbdt
	pxor	%xmm4, %xmm0		# 4 = ch
	movdqa	0x20(%r10),%xmm4	# 4 : sbbu
	pxor	%xmm1, %xmm0		# 0 = ch
	movdqa	0x30(%r10),%xmm1	# 0 : sbbt
	pshufb	%xmm2, %xmm4		# 4 = sbbu
	pshufb	%xmm5, %xmm0		# MC ch
	pshufb	%xmm3, %xmm1		# 0 = sbbt
	pxor	%xmm4, %xmm0		# 4 = ch
	movdqa	0x40(%r10),%xmm4	# 4 : sbeu
	pxor	%xmm1, %xmm0		# 0 = ch
	movdqa	0x50(%r10),%xmm1	# 0 : sbet
	pshufb	%xmm2, %xmm4		# 4 = sbeu
	pshufb	%xmm5, %xmm0		# MC ch
	pshufb	%xmm3, %xmm1		# 0 = sbet
	pxor	%xmm4, %xmm0		# 4 = ch
	add	\$16, %r9		# next round key
	palignr	\$12, %xmm5, %xmm5
	pxor	%xmm1, %xmm0		# 0 = ch
	sub	\$1,%rax		# nr--

.Ldec_entry:
	# top of round
	movdqa	%xmm9, %xmm1	# 1 : i
	pandn	%xmm0, %xmm1	# 1 = i<<4
	movdqa	%xmm11, %xmm2	# 2 : a/k
	psrld	\$4, %xmm1	# 1 = i
	pand	%xmm9, %xmm0	# 0 = k
	pshufb	%xmm0, %xmm2	# 2 = a/k
	movdqa	%xmm10, %xmm3	# 3 : 1/i
	pxor	%xmm1, %xmm0	# 0 = j
	pshufb	%xmm1, %xmm3	# 3 = 1/i
	movdqa	%xmm10, %xmm4	# 4 : 1/j
	pxor	%xmm2, %xmm3	# 3 = iak = 1/i + a/k
	pshufb	%xmm0, %xmm4	# 4 = 1/j
	pxor	%xmm2, %xmm4	# 4 = jak = 1/j + a/k
	movdqa	%xmm10, %xmm2	# 2 : 1/iak
	pshufb	%xmm3, %xmm2	# 2 = 1/iak
	movdqa	%xmm10, %xmm3	# 3 : 1/jak
	pxor	%xmm0, %xmm2	# 2 = io
	pshufb	%xmm4, %xmm3	# 3 = 1/jak
	movdqu	(%r9), %xmm0
	pxor	%xmm1, %xmm3	# 3 = jo
	jnz	.Ldec_loop

	# middle of last round
	movdqa	0x60(%r10), %xmm4	# 3 : sbou
	pshufb	%xmm2, %xmm4	# 4 = sbou
	pxor	%xmm0, %xmm4	# 4 = sb1u + k
	movdqa	0x70(%r10), %xmm0	# 0 : sbot
	movdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	pshufb	%xmm3, %xmm0	# 0 = sb1t
	pxor	%xmm4, %xmm0	# 0 = A
	pshufb	%xmm2, %xmm0
	ret
.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
.type	_vpaes_schedule_core,\@abi-omnipotent
.align	16
_vpaes_schedule_core:
	# rdi = key
	# rsi = size in bits
	# rdx = buffer
	# rcx = direction.  0=encrypt, 1=decrypt

	call	_vpaes_preheat		# load the tables
	movdqa	.Lk_rcon(%rip), %xmm8	# load rcon
	movdqu	(%rdi), %xmm0		# load key (unaligned)

	# input transform
	movdqa	%xmm0, %xmm3
	lea	.Lk_ipt(%rip), %r11
	call	_vpaes_schedule_transform
	movdqa	%xmm0, %xmm7

	lea	.Lk_sr(%rip),%r10
	test	%rcx, %rcx
	jnz	.Lschedule_am_decrypting

	# encrypting, output zeroth round key after transform
	movdqu	%xmm0, (%rdx)
	jmp	.Lschedule_go

.Lschedule_am_decrypting:
	# decrypting, output zeroth round key after shiftrows
	movdqa	(%r8,%r10),%xmm1
	pshufb	%xmm1, %xmm3
	movdqu	%xmm3, (%rdx)
	xor	\$0x30, %r8

.Lschedule_go:
	cmp	\$192, %esi
	ja	.Lschedule_256
	je	.Lschedule_192
	# 128: fall through

##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
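## [Annotation.] With %esi = 10, the loop below emits the remaining
## keys: _vpaes_schedule_mangle stores keys 1-9 and
## .Lschedule_mangle_last stores key 10; together with the round-0 key
## already written, that is the 11 round keys AES-128 needs.
##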
.Lschedule_128:
	mov	\$10, %esi

.Loop_schedule_128:
	call	_vpaes_schedule_round
	dec	%rsi
	jz	.Lschedule_mangle_last
	call	_vpaes_schedule_mangle	# write output
	jmp	.Loop_schedule_128

##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
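## [Annotation.] AES-192 needs 13 round keys. Each pass of the loop
## below stores 3 keys via the smear, so 4 passes (the last one
## finishing in .Lschedule_mangle_last) give 12 keys, and the round-0
## key written earlier brings the total to 13.
##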
.align	16
.Lschedule_192:
	movdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
	call	_vpaes_schedule_transform	# input transform
	movdqa	%xmm0, %xmm6		# save short part
	pxor	%xmm4, %xmm4		# clear 4
	movhlps	%xmm4, %xmm6		# clobber low side with zeros
	mov	\$4, %esi

.Loop_schedule_192:
	call	_vpaes_schedule_round
	palignr	\$8,%xmm6,%xmm0
	call	_vpaes_schedule_mangle	# save key n
	call	_vpaes_schedule_192_smear
	call	_vpaes_schedule_mangle	# save key n+1
	call	_vpaes_schedule_round
	dec	%rsi
	jz	.Lschedule_mangle_last
	call	_vpaes_schedule_mangle	# save key n+2
	call	_vpaes_schedule_192_smear
	jmp	.Loop_schedule_192

##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
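## [Annotation.] AES-256 needs 15 round keys. Each pass of the loop
## below stores a low-side key and a high-side key, so 7 passes (the
## last one finishing in .Lschedule_mangle_last) give 14 keys, and the
## round-0 key written earlier brings the total to 15.
##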
.align	16
.Lschedule_256:
	movdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
	call	_vpaes_schedule_transform	# input transform
	mov	\$7, %esi

.Loop_schedule_256:
	call	_vpaes_schedule_mangle	# output low result
	movdqa	%xmm0, %xmm6		# save cur_lo in xmm6

	# high round
	call	_vpaes_schedule_round
	dec	%rsi
	jz	.Lschedule_mangle_last
	call	_vpaes_schedule_mangle

	# low round. swap xmm7 and xmm6
	pshufd	\$0xFF, %xmm0, %xmm0
	movdqa	%xmm7, %xmm5
	movdqa	%xmm6, %xmm7
	call	_vpaes_schedule_low_round
	movdqa	%xmm5, %xmm7

	jmp	.Loop_schedule_256

##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
##	when encrypting, outputs out(%xmm0) ^ 63
##	when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align	16
.Lschedule_mangle_last:
	# schedule last round key from xmm0
	lea	.Lk_deskew(%rip),%r11	# prepare to deskew
	test	%rcx, %rcx
	jnz	.Lschedule_mangle_last_dec

	# encrypting
	movdqa	(%r8,%r10),%xmm1
	pshufb	%xmm1, %xmm0		# output permute
	lea	.Lk_opt(%rip), %r11	# prepare to output transform
	add	\$32, %rdx

.Lschedule_mangle_last_dec:
	add	\$-16, %rdx
	pxor	.Lk_s63(%rip), %xmm0
	call	_vpaes_schedule_transform # output transform
	movdqu	%xmm0, (%rdx)		# save last key

	# cleanup
	pxor	%xmm0, %xmm0
	pxor	%xmm1, %xmm1
	pxor	%xmm2, %xmm2
	pxor	%xmm3, %xmm3
	pxor	%xmm4, %xmm4
	pxor	%xmm5, %xmm5
	pxor	%xmm6, %xmm6
	pxor	%xmm7, %xmm7
	ret
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
##	%xmm7: high side, b a x y
##	%xmm6: low side, d c 0 0
##	%xmm13: 0
##
## Outputs:
##	%xmm6: b+c+d b+c 0 0
##	%xmm0: b+c+d b+c b a
##
.type	_vpaes_schedule_192_smear,\@abi-omnipotent
.align	16
_vpaes_schedule_192_smear:
	pshufd	\$0x80, %xmm6, %xmm1	# d c 0 0 -> c 0 0 0
	pshufd	\$0xFE, %xmm7, %xmm0	# b a _ _ -> b b b a
	pxor	%xmm1, %xmm6		# -> c+d c 0 0
	pxor	%xmm1, %xmm1
	pxor	%xmm0, %xmm6		# -> b+c+d b+c b a
	movdqa	%xmm6, %xmm0
	movhlps	%xmm1, %xmm6		# clobber low side with zeros
	ret
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
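## [Annotation.] The two palignr \$15 instructions below shift the
## 32-byte concatenation of destination and source right by 15 bytes:
## the first isolates one rcon byte into %xmm1 (the remaining bytes
## are zero, since %xmm1 was cleared), and the second rotates %xmm8
## by one byte so the next round picks up the next rcon value.
##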
.type	_vpaes_schedule_round,\@abi-omnipotent
.align	16
_vpaes_schedule_round:
	# extract rcon from xmm8
	pxor	%xmm1, %xmm1
	palignr	\$15, %xmm8, %xmm1
	palignr	\$15, %xmm8, %xmm8
	pxor	%xmm1, %xmm7

	# rotate
	pshufd	\$0xFF, %xmm0, %xmm0
	palignr	\$1, %xmm0, %xmm0

	# fall through...

	# low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	# smear xmm7
	movdqa	%xmm7, %xmm1
	pslldq	\$4, %xmm7
	pxor	%xmm1, %xmm7
	movdqa	%xmm7, %xmm1
	pslldq	\$8, %xmm7
	pxor	%xmm1, %xmm7
	pxor	.Lk_s63(%rip), %xmm7

	# subbytes
	movdqa	%xmm9, %xmm1
	pandn	%xmm0, %xmm1
	psrld	\$4, %xmm1	# 1 = i
	pand	%xmm9, %xmm0	# 0 = k
	movdqa	%xmm11, %xmm2	# 2 : a/k
	pshufb	%xmm0, %xmm2	# 2 = a/k
	pxor	%xmm1, %xmm0	# 0 = j
	movdqa	%xmm10, %xmm3	# 3 : 1/i
	pshufb	%xmm1, %xmm3	# 3 = 1/i
	pxor	%xmm2, %xmm3	# 3 = iak = 1/i + a/k
	movdqa	%xmm10, %xmm4	# 4 : 1/j
	pshufb	%xmm0, %xmm4	# 4 = 1/j
	pxor	%xmm2, %xmm4	# 4 = jak = 1/j + a/k
	movdqa	%xmm10, %xmm2	# 2 : 1/iak
	pshufb	%xmm3, %xmm2	# 2 = 1/iak
	pxor	%xmm0, %xmm2	# 2 = io
	movdqa	%xmm10, %xmm3	# 3 : 1/jak
	pshufb	%xmm4, %xmm3	# 3 = 1/jak
	pxor	%xmm1, %xmm3	# 3 = jo
	movdqa	%xmm13, %xmm4	# 4 : sbou
	pshufb	%xmm2, %xmm4	# 4 = sbou
	movdqa	%xmm12, %xmm0	# 0 : sbot
	pshufb	%xmm3, %xmm0	# 0 = sb1t
	pxor	%xmm4, %xmm0	# 0 = sbox output

	# add in smeared stuff
	pxor	%xmm7, %xmm0
	movdqa	%xmm0, %xmm7
	ret
.size	_vpaes_schedule_round,.-_vpaes_schedule_round

##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type	_vpaes_schedule_transform,\@abi-omnipotent
.align	16
_vpaes_schedule_transform:
	movdqa	%xmm9, %xmm1
	pandn	%xmm0, %xmm1
	psrld	\$4, %xmm1
	pand	%xmm9, %xmm0
	movdqa	(%r11), %xmm2	# lo
	pshufb	%xmm0, %xmm2
	movdqa	16(%r11), %xmm0	# hi
	pshufb	%xmm1, %xmm0
	pxor	%xmm2, %xmm0
	ret
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform

##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
##	xor with 0x63
##	multiply by circulant 0,1,1,1
##	apply shiftrows transform
##
## On decrypt,
##	xor with 0x63
##	multiply by "inverse mixcolumns" circulant E,B,D,9
##	deskew
##	apply shiftrows transform
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.type	_vpaes_schedule_mangle,\@abi-omnipotent
.align	16
_vpaes_schedule_mangle:
	movdqa	%xmm0, %xmm4	# save xmm0 for later
	movdqa	.Lk_mc_forward(%rip),%xmm5
	test	%rcx, %rcx
	jnz	.Lschedule_mangle_dec

	# encrypting
	add	\$16, %rdx
	pxor	.Lk_s63(%rip),%xmm4
	pshufb	%xmm5, %xmm4
	movdqa	%xmm4, %xmm3
	pshufb	%xmm5, %xmm4
	pxor	%xmm4, %xmm3
	pshufb	%xmm5, %xmm4
	pxor	%xmm4, %xmm3
	jmp	.Lschedule_mangle_both

.align	16
.Lschedule_mangle_dec:
	# inverse mix columns
	lea	.Lk_dksd(%rip),%r11
	movdqa	%xmm9, %xmm1
	pandn	%xmm4, %xmm1
	psrld	\$4, %xmm1	# 1 = hi
	pand	%xmm9, %xmm4	# 4 = lo

	movdqa	0x00(%r11), %xmm2
	pshufb	%xmm4, %xmm2
	movdqa	0x10(%r11), %xmm3
	pshufb	%xmm1, %xmm3
	pxor	%xmm2, %xmm3
	pshufb	%xmm5, %xmm3

	movdqa	0x20(%r11), %xmm2
	pshufb	%xmm4, %xmm2
	pxor	%xmm3, %xmm2
	movdqa	0x30(%r11), %xmm3
	pshufb	%xmm1, %xmm3
	pxor	%xmm2, %xmm3
	pshufb	%xmm5, %xmm3

	movdqa	0x40(%r11), %xmm2
	pshufb	%xmm4, %xmm2
	pxor	%xmm3, %xmm2
	movdqa	0x50(%r11), %xmm3
	pshufb	%xmm1, %xmm3
	pxor	%xmm2, %xmm3
	pshufb	%xmm5, %xmm3

	movdqa	0x60(%r11), %xmm2
	pshufb	%xmm4, %xmm2
	pxor	%xmm3, %xmm2
	movdqa	0x70(%r11), %xmm3
	pshufb	%xmm1, %xmm3
	pxor	%xmm2, %xmm3

	add	\$-16, %rdx

.Lschedule_mangle_both:
	movdqa	(%r8,%r10),%xmm1
	pshufb	%xmm1,%xmm3
	add	\$-16, %r8
	and	\$0x30, %r8
	movdqu	%xmm3, (%rdx)
	ret
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle

#
# Interface to OpenSSL
#
.globl	${PREFIX}_set_encrypt_key
.type	${PREFIX}_set_encrypt_key,\@function,3
.align	16
${PREFIX}_set_encrypt_key:
___
$code.=<<___ if ($win64);
	lea	-0xb8(%rsp),%rsp
	movaps	%xmm6,0x10(%rsp)
	movaps	%xmm7,0x20(%rsp)
	movaps	%xmm8,0x30(%rsp)
	movaps	%xmm9,0x40(%rsp)
	movaps	%xmm10,0x50(%rsp)
	movaps	%xmm11,0x60(%rsp)
	movaps	%xmm12,0x70(%rsp)
	movaps	%xmm13,0x80(%rsp)
	movaps	%xmm14,0x90(%rsp)
	movaps	%xmm15,0xa0(%rsp)
.Lenc_key_body:
___
$code.=<<___;
	mov	%esi,%eax
	shr	\$5,%eax
	add	\$5,%eax
	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
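	# e.g. 128 -> 9, 192 -> 11, 256 -> 13; note this is one less than
	# the conventional 10/12/14 -- the vpaes cores use ->rounds as the
	# number of full (non-final) rounds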
	mov	\$0,%ecx
	mov	\$0x30,%r8d
	call	_vpaes_schedule_core
___
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm6
	movaps	0x20(%rsp),%xmm7
	movaps	0x30(%rsp),%xmm8
	movaps	0x40(%rsp),%xmm9
	movaps	0x50(%rsp),%xmm10
	movaps	0x60(%rsp),%xmm11
	movaps	0x70(%rsp),%xmm12
	movaps	0x80(%rsp),%xmm13
	movaps	0x90(%rsp),%xmm14
	movaps	0xa0(%rsp),%xmm15
	lea	0xb8(%rsp),%rsp
.Lenc_key_epilogue:
___
$code.=<<___;
	xor	%eax,%eax
	ret
.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key

.globl	${PREFIX}_set_decrypt_key
.type	${PREFIX}_set_decrypt_key,\@function,3
.align	16
${PREFIX}_set_decrypt_key:
___
$code.=<<___ if ($win64);
	lea	-0xb8(%rsp),%rsp
	movaps	%xmm6,0x10(%rsp)
	movaps	%xmm7,0x20(%rsp)
	movaps	%xmm8,0x30(%rsp)
	movaps	%xmm9,0x40(%rsp)
	movaps	%xmm10,0x50(%rsp)
	movaps	%xmm11,0x60(%rsp)
	movaps	%xmm12,0x70(%rsp)
	movaps	%xmm13,0x80(%rsp)
	movaps	%xmm14,0x90(%rsp)
	movaps	%xmm15,0xa0(%rsp)
.Ldec_key_body:
___
$code.=<<___;
	mov	%esi,%eax
	shr	\$5,%eax
	add	\$5,%eax
	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	shl	\$4,%eax
	lea	16(%rdx,%rax),%rdx
	mov	\$1,%ecx
	mov	%esi,%r8d
	shr	\$1,%r8d
	and	\$32,%r8d
	xor	\$32,%r8d	# nbits==192?0:32
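	# e.g. 128 -> (64&32)^32 = 32, 192 -> (96&32)^32 = 0,
	# 256 -> (128&32)^32 = 32, selecting the initial .Lk_sr rotation
	# used by _vpaes_schedule_mangle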
	call	_vpaes_schedule_core
___
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm6
	movaps	0x20(%rsp),%xmm7
	movaps	0x30(%rsp),%xmm8
	movaps	0x40(%rsp),%xmm9
	movaps	0x50(%rsp),%xmm10
	movaps	0x60(%rsp),%xmm11
	movaps	0x70(%rsp),%xmm12
	movaps	0x80(%rsp),%xmm13
	movaps	0x90(%rsp),%xmm14
	movaps	0xa0(%rsp),%xmm15
	lea	0xb8(%rsp),%rsp
.Ldec_key_epilogue:
___
$code.=<<___;
	xor	%eax,%eax
	ret
.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key

.globl	${PREFIX}_encrypt
.type	${PREFIX}_encrypt,\@function,3
.align	16
${PREFIX}_encrypt:
___
$code.=<<___ if ($win64);
	lea	-0xb8(%rsp),%rsp
	movaps	%xmm6,0x10(%rsp)
	movaps	%xmm7,0x20(%rsp)
	movaps	%xmm8,0x30(%rsp)
	movaps	%xmm9,0x40(%rsp)
	movaps	%xmm10,0x50(%rsp)
	movaps	%xmm11,0x60(%rsp)
	movaps	%xmm12,0x70(%rsp)
	movaps	%xmm13,0x80(%rsp)
	movaps	%xmm14,0x90(%rsp)
	movaps	%xmm15,0xa0(%rsp)
.Lenc_body:
___
$code.=<<___;
	movdqu	(%rdi),%xmm0
	call	_vpaes_preheat
	call	_vpaes_encrypt_core
	movdqu	%xmm0,(%rsi)
___
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm6
	movaps	0x20(%rsp),%xmm7
	movaps	0x30(%rsp),%xmm8
	movaps	0x40(%rsp),%xmm9
	movaps	0x50(%rsp),%xmm10
	movaps	0x60(%rsp),%xmm11
	movaps	0x70(%rsp),%xmm12
	movaps	0x80(%rsp),%xmm13
	movaps	0x90(%rsp),%xmm14
	movaps	0xa0(%rsp),%xmm15
	lea	0xb8(%rsp),%rsp
.Lenc_epilogue:
___
$code.=<<___;
	ret
.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt

.globl	${PREFIX}_decrypt
.type	${PREFIX}_decrypt,\@function,3
.align	16
${PREFIX}_decrypt:
___
$code.=<<___ if ($win64);
	lea	-0xb8(%rsp),%rsp
	movaps	%xmm6,0x10(%rsp)
	movaps	%xmm7,0x20(%rsp)
	movaps	%xmm8,0x30(%rsp)
	movaps	%xmm9,0x40(%rsp)
	movaps	%xmm10,0x50(%rsp)
	movaps	%xmm11,0x60(%rsp)
	movaps	%xmm12,0x70(%rsp)
	movaps	%xmm13,0x80(%rsp)
	movaps	%xmm14,0x90(%rsp)
	movaps	%xmm15,0xa0(%rsp)
.Ldec_body:
___
$code.=<<___;
	movdqu	(%rdi),%xmm0
	call	_vpaes_preheat
	call	_vpaes_decrypt_core
	movdqu	%xmm0,(%rsi)
___
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm6
	movaps	0x20(%rsp),%xmm7
	movaps	0x30(%rsp),%xmm8
	movaps	0x40(%rsp),%xmm9
	movaps	0x50(%rsp),%xmm10
	movaps	0x60(%rsp),%xmm11
	movaps	0x70(%rsp),%xmm12
	movaps	0x80(%rsp),%xmm13
	movaps	0x90(%rsp),%xmm14
	movaps	0xa0(%rsp),%xmm15
	lea	0xb8(%rsp),%rsp
.Ldec_epilogue:
___
$code.=<<___;
	ret
.size	${PREFIX}_decrypt,.-${PREFIX}_decrypt
___
{
my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
#                       size_t length, const AES_KEY *key,
#                       unsigned char *ivp,const int enc);
$code.=<<___;
.globl	${PREFIX}_cbc_encrypt
.type	${PREFIX}_cbc_encrypt,\@function,6
.align	16
${PREFIX}_cbc_encrypt:
	xchg	$key,$len
___
($len,$key)=($key,$len);
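# The xchg above puts the key in %rdx, where the _vpaes_*_core routines
# expect the schedule, and the length in %rcx; swapping $len/$key at
# the Perl level keeps the code below reading naturally.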
$code.=<<___;
	sub	\$16,$len
	jc	.Lcbc_abort
___
$code.=<<___ if ($win64);
	lea	-0xb8(%rsp),%rsp
	movaps	%xmm6,0x10(%rsp)
	movaps	%xmm7,0x20(%rsp)
	movaps	%xmm8,0x30(%rsp)
	movaps	%xmm9,0x40(%rsp)
	movaps	%xmm10,0x50(%rsp)
	movaps	%xmm11,0x60(%rsp)
	movaps	%xmm12,0x70(%rsp)
	movaps	%xmm13,0x80(%rsp)
	movaps	%xmm14,0x90(%rsp)
	movaps	%xmm15,0xa0(%rsp)
.Lcbc_body:
___
$code.=<<___;
	movdqu	($ivp),%xmm6		# load IV
	sub	$inp,$out
	call	_vpaes_preheat
	cmp	\$0,${enc}d
	je	.Lcbc_dec_loop
	jmp	.Lcbc_enc_loop

.align	16
.Lcbc_enc_loop:
	movdqu	($inp),%xmm0
	pxor	%xmm6,%xmm0
	call	_vpaes_encrypt_core
	movdqa	%xmm0,%xmm6
	movdqu	%xmm0,($out,$inp)
	lea	16($inp),$inp
	sub	\$16,$len
	jnc	.Lcbc_enc_loop
	jmp	.Lcbc_done

.align	16
.Lcbc_dec_loop:
	movdqu	($inp),%xmm0
	movdqa	%xmm0,%xmm7
	call	_vpaes_decrypt_core
	pxor	%xmm6,%xmm0
	movdqa	%xmm7,%xmm6
	movdqu	%xmm0,($out,$inp)
	lea	16($inp),$inp
	sub	\$16,$len
	jnc	.Lcbc_dec_loop

.Lcbc_done:
	movdqu	%xmm6,($ivp)		# save IV
___
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm6
	movaps	0x20(%rsp),%xmm7
	movaps	0x30(%rsp),%xmm8
	movaps	0x40(%rsp),%xmm9
	movaps	0x50(%rsp),%xmm10
	movaps	0x60(%rsp),%xmm11
	movaps	0x70(%rsp),%xmm12
	movaps	0x80(%rsp),%xmm13
	movaps	0x90(%rsp),%xmm14
	movaps	0xa0(%rsp),%xmm15
	lea	0xb8(%rsp),%rsp
.Lcbc_epilogue:
___
$code.=<<___;
.Lcbc_abort:
	ret
.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
___
}
$code.=<<___;
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
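## [Annotation.] On return: %xmm9 holds the 0x0F nibble mask (.Lk_s0F),
## %xmm10/%xmm11 the .Lk_inv inversion tables, %xmm13/%xmm12 the
## .Lk_sb1 pair and %xmm15/%xmm14 the .Lk_sb2 pair, matching the
## per-line comments below.
##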
.type	_vpaes_preheat,\@abi-omnipotent
.align	16
_vpaes_preheat:
	lea	.Lk_s0F(%rip), %r10
	movdqa	-0x20(%r10), %xmm10	# .Lk_inv
	movdqa	-0x10(%r10), %xmm11	# .Lk_inv+16
	movdqa	0x00(%r10), %xmm9	# .Lk_s0F
	movdqa	0x30(%r10), %xmm13	# .Lk_sb1
	movdqa	0x40(%r10), %xmm12	# .Lk_sb1+16
	movdqa	0x50(%r10), %xmm15	# .Lk_sb2
	movdqa	0x60(%r10), %xmm14	# .Lk_sb2+16
	ret
.size	_vpaes_preheat,.-_vpaes_preheat

########################################################
##                                                    ##
##                     Constants                      ##
##                                                    ##
########################################################
.type	_vpaes_consts,\@object
.align	64
_vpaes_consts:
.Lk_inv:	# inv, inva
	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
	.quad	0x01040A060F0B0780, 0x030D0E0C02050809

.Lk_s0F:	# s0F
	.quad	0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F

.Lk_ipt:	# input transform (lo, hi)
	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81

.Lk_sb1:	# sb1u, sb1t
	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2:	# sb2u, sb2t
	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo:	# sbou, sbot
	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA

.Lk_mc_forward:	# mc_forward
	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
	.quad	0x000302010C0F0E0D, 0x080B0A0904070605

.Lk_mc_backward:# mc_backward
	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
	.quad	0x0A09080B06050407, 0x020100030E0D0C0F

.Lk_sr:		# sr
	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
	.quad	0x0F060D040B020900, 0x070E050C030A0108
	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

.Lk_rcon:	# rcon
	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_s63:	# s63: all equal to 0x63 transformed
	.quad	0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B

.Lk_opt:	# output transform
	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0

.Lk_deskew:	# deskew tables: inverts the sbox's "skew"
	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

##
## Decryption stuff
## Key schedule constants
##
.Lk_dksd:	# decryption key schedule: invskew x*D
	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:	# decryption key schedule: invskew x*B
	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:	# decryption key schedule: invskew x*E + 0x63
	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:	# decryption key schedule: invskew x*9
	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

##
## Decryption stuff
## Round function constants
##
.Lk_dipt:	# decryption input transform
	.quad	0x0F505B040B545F00, 0x154A411E114E451A
	.quad	0x86E383E660056500, 0x12771772F491F194
.Lk_dsb9:	# decryption sbox output *9*u, *9*t
	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:	# decryption sbox output *D*u, *D*t
	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:	# decryption sbox output *B*u, *B*t
	.quad	0xD022649296B44200, 0x602646F6B0F2D404
	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:	# decryption sbox output *E*u, *E*t
	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
.Lk_dsbo:	# decryption sbox final output
	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
	.asciz	"Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
.align	64
.size	_vpaes_consts,.-_vpaes_consts
___

if ($win64) {
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
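# se_handler tells RtlVirtualUnwind how to step over these frames:
# between the body and epilogue labels the xmm6-xmm15 save area lives
# at 16(%rsp), so the handler copies those registers back into the
# CONTEXT and bumps the stack pointer by 0xb8 before resuming the
# unwind.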
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	lea	16(%rax),%rsi		# %xmm save area
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
	.long	0xa548f3fc		# cld; rep movsq
	lea	0xb8(%rax),%rax		# adjust stack pointer

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_${PREFIX}_set_encrypt_key
	.rva	.LSEH_end_${PREFIX}_set_encrypt_key
	.rva	.LSEH_info_${PREFIX}_set_encrypt_key

	.rva	.LSEH_begin_${PREFIX}_set_decrypt_key
	.rva	.LSEH_end_${PREFIX}_set_decrypt_key
	.rva	.LSEH_info_${PREFIX}_set_decrypt_key

	.rva	.LSEH_begin_${PREFIX}_encrypt
	.rva	.LSEH_end_${PREFIX}_encrypt
	.rva	.LSEH_info_${PREFIX}_encrypt

	.rva	.LSEH_begin_${PREFIX}_decrypt
	.rva	.LSEH_end_${PREFIX}_decrypt
	.rva	.LSEH_info_${PREFIX}_decrypt

	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
	.rva	.LSEH_info_${PREFIX}_cbc_encrypt

.section	.xdata
.align	8
.LSEH_info_${PREFIX}_set_encrypt_key:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lenc_key_body,.Lenc_key_epilogue	# HandlerData[]
.LSEH_info_${PREFIX}_set_decrypt_key:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Ldec_key_body,.Ldec_key_epilogue	# HandlerData[]
.LSEH_info_${PREFIX}_encrypt:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lenc_body,.Lenc_epilogue		# HandlerData[]
.LSEH_info_${PREFIX}_decrypt:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Ldec_body,.Ldec_epilogue		# HandlerData[]
.LSEH_info_${PREFIX}_cbc_encrypt:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lcbc_body,.Lcbc_epilogue		# HandlerData[]
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;