#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the number of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's engaged only for longer inputs,
# but not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#               gcc     icc     x86 asm(*)      SIMD    x86_64 asm(**)
# Pentium       46      57      40/38           -       -
# PIII          36      33      27/24           -       -
# P4            41      38      28              -       17.3
# AMD K8        27      25      19/15.5         -       14.9
# Core2         26      23      18/15.6         14.3    13.8
# Westmere      27      -       19/15.7         13.4    12.3
# Sandy Bridge  25      -       15.9            12.4    11.6
# Ivy Bridge    24      -       15.0            11.4    10.3
# Haswell       22      -       13.9            9.46    7.80
# Bulldozer     36      -       27/22           17.0    13.6
# VIA Nano      36      -       25/22           16.8    16.5
# Atom          50      -       30/25           21.9    18.9
#
# (*)  numbers after slash are for unrolled loop, where applicable;
# (**) x86_64 assembly performance is presented for reference
#      purposes, results are best-available;
  60. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  61. push(@INC,"${dir}","${dir}../../perlasm");
  62. require "x86asm.pl";
  63. &asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386");
  64. $xmm=$avx=0;
  65. for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
  66. if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  67. =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
  68. $avx = ($1>=2.19) + ($1>=2.22);
  69. }
  70. if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
  71. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
  72. $avx = ($1>=2.03) + ($1>=2.10);
  73. }
  74. if ($xmm && !$avx && $ARGV[0] eq "win32" &&
  75. `ml 2>&1` =~ /Version ([0-9]+)\./) {
  76. $avx = ($1>=10) + ($1>=11);
  77. }
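# Illustrative invocation (a sketch only; the real flavour, defines and
# output name come from the OpenSSL build system):
#
#	perl sha256-586.pl elf -DOPENSSL_IA32_SSE2 > sha256-586.s
#
# The compiler/nasm/ml probes above only decide how much AVX code the chosen
# assembler can be trusted with; they do not change what the code computes.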
  78. $shaext=$xmm; ### set to zero if compiling for 1.0.1
$unroll_after = 64*4;	# If pre-evicted from the L1P cache, the first spin
			# of the fully unrolled loop was measured to run about
			# 3-4x slower. If the slowdown coefficient is N and the
			# unrolled loop is m times faster, you break even at
			# (N-1)/(m-1) blocks. That figure then has to be scaled
			# by the probability of the code being evicted,
			# code size/cache size = 1/4. Typical m is 1.15...
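			# (An illustrative reading, not part of the original
			# note: with N~3.5 and m~1.15, (N-1)/(m-1) is about 17
			# blocks; scaled by the ~1/4 eviction probability that
			# is about 4 blocks, i.e. 4*64 = 256 bytes, the 64*4
			# threshold chosen above.)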
  86. $A="eax";
  87. $E="edx";
  88. $T="ebx";
  89. $Aoff=&DWP(4,"esp");
  90. $Boff=&DWP(8,"esp");
  91. $Coff=&DWP(12,"esp");
  92. $Doff=&DWP(16,"esp");
  93. $Eoff=&DWP(20,"esp");
  94. $Foff=&DWP(24,"esp");
  95. $Goff=&DWP(28,"esp");
  96. $Hoff=&DWP(32,"esp");
  97. $Xoff=&DWP(36,"esp");
  98. $K256="ebp";
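# The two subs below implement the scalar round with only a, e and the
# temporary T kept in registers; the remaining working variables live in the
# stack slots defined above, and the slot at esp+0 caches (b^c) for the Maj
# shortcut. For reference, the FIPS 180-4 message schedule computed by
# BODY_16_63 is
#
#	sigma0(x) = ROTR7(x) ^ ROTR18(x) ^ (x>>3)
#	sigma1(x) = ROTR17(x) ^ ROTR19(x) ^ (x>>10)
#	X[i]      = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16]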
  99. sub BODY_16_63() {
  100. &mov ($T,"ecx"); # "ecx" is preloaded
  101. &mov ("esi",&DWP(4*(9+15+16-14),"esp"));
  102. &ror ("ecx",18-7);
  103. &mov ("edi","esi");
  104. &ror ("esi",19-17);
  105. &xor ("ecx",$T);
  106. &shr ($T,3);
  107. &ror ("ecx",7);
  108. &xor ("esi","edi");
  109. &xor ($T,"ecx"); # T = sigma0(X[-15])
  110. &ror ("esi",17);
  111. &add ($T,&DWP(4*(9+15+16),"esp")); # T += X[-16]
  112. &shr ("edi",10);
  113. &add ($T,&DWP(4*(9+15+16-9),"esp")); # T += X[-7]
  114. #&xor ("edi","esi") # sigma1(X[-2])
  115. # &add ($T,"edi"); # T += sigma1(X[-2])
  116. # &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
  117. &BODY_00_15(1);
  118. }
  119. sub BODY_00_15() {
  120. my $in_16_63=shift;
  121. &mov ("ecx",$E);
  122. &xor ("edi","esi") if ($in_16_63); # sigma1(X[-2])
  123. &mov ("esi",$Foff);
  124. &ror ("ecx",25-11);
  125. &add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2])
  126. &mov ("edi",$Goff);
  127. &xor ("ecx",$E);
  128. &xor ("esi","edi");
  129. &mov ($T,&DWP(4*(9+15),"esp")) if (!$in_16_63);
  130. &mov (&DWP(4*(9+15),"esp"),$T) if ($in_16_63); # save X[0]
  131. &ror ("ecx",11-6);
  132. &and ("esi",$E);
  133. &mov ($Eoff,$E); # modulo-scheduled
  134. &xor ($E,"ecx");
  135. &add ($T,$Hoff); # T += h
  136. &xor ("esi","edi"); # Ch(e,f,g)
  137. &ror ($E,6); # Sigma1(e)
  138. &mov ("ecx",$A);
  139. &add ($T,"esi"); # T += Ch(e,f,g)
  140. &ror ("ecx",22-13);
  141. &add ($T,$E); # T += Sigma1(e)
  142. &mov ("edi",$Boff);
  143. &xor ("ecx",$A);
  144. &mov ($Aoff,$A); # modulo-scheduled
  145. &lea ("esp",&DWP(-4,"esp"));
  146. &ror ("ecx",13-2);
  147. &mov ("esi",&DWP(0,$K256));
  148. &xor ("ecx",$A);
  149. &mov ($E,$Eoff); # e in next iteration, d in this one
  150. &xor ($A,"edi"); # a ^= b
  151. &ror ("ecx",2); # Sigma0(a)
  152. &add ($T,"esi"); # T+= K[i]
  153. &mov (&DWP(0,"esp"),$A); # (b^c) in next round
  154. &add ($E,$T); # d += T
  155. &and ($A,&DWP(4,"esp")); # a &= (b^c)
  156. &add ($T,"ecx"); # T += Sigma0(a)
  157. &xor ($A,"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
  158. &mov ("ecx",&DWP(4*(9+15+16-1),"esp")) if ($in_16_63); # preload T
  159. &add ($K256,4);
  160. &add ($A,$T); # h += T
  161. }
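# Note on the "alternative Maj" mentioned above: the code computes
# Maj(a,b,c) = ((a^b) & (b^c)) ^ b (equivalently Ch(a^b,c,b)), and the (a^b)
# value produced in one round is stored at the top of the stack so it can be
# reused as the next round's (b^c), leaving one AND and two XORs per round.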
  162. &external_label("OPENSSL_ia32cap_P") if (!$i386);
  163. &function_begin("sha256_block_data_order");
  164. &mov ("esi",wparam(0)); # ctx
  165. &mov ("edi",wparam(1)); # inp
  166. &mov ("eax",wparam(2)); # num
  167. &mov ("ebx","esp"); # saved sp
  168. &call (&label("pic_point")); # make it PIC!
  169. &set_label("pic_point");
  170. &blindpop($K256);
  171. &lea ($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));
  172. &sub ("esp",16);
  173. &and ("esp",-64);
  174. &shl ("eax",6);
  175. &add ("eax","edi");
  176. &mov (&DWP(0,"esp"),"esi"); # ctx
  177. &mov (&DWP(4,"esp"),"edi"); # inp
  178. &mov (&DWP(8,"esp"),"eax"); # inp+num*128
  179. &mov (&DWP(12,"esp"),"ebx"); # saved sp
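# Dispatch summary (reading OPENSSL_ia32cap_P): P4 always takes the compact
# loop; without FXSR the SIMD paths are skipped; otherwise SHA Extensions win
# if present, then AVX/AVX+BMI (Intel CPUs only), then SSSE3. Whatever falls
# through goes to the unrolled-vs-compact size check below.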
  180. if (!$i386 && $xmm) {
  181. &picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
  182. &mov ("ecx",&DWP(0,"edx"));
  183. &mov ("ebx",&DWP(4,"edx"));
  184. &test ("ecx",1<<20); # check for P4
  185. &jnz (&label("loop"));
  186. &mov ("edx",&DWP(8,"edx")) if ($xmm);
  187. &test ("ecx",1<<24); # check for FXSR
  188. &jz ($unroll_after?&label("no_xmm"):&label("loop"));
  189. &and ("ecx",1<<30); # mask "Intel CPU" bit
  190. &and ("ebx",1<<28|1<<9); # mask AVX and SSSE3 bits
  191. &test ("edx",1<<29) if ($shaext); # check for SHA
  192. &jnz (&label("shaext")) if ($shaext);
  193. &or ("ecx","ebx");
  194. &and ("ecx",1<<28|1<<30);
  195. &cmp ("ecx",1<<28|1<<30);
  196. if ($xmm) {
  197. &je (&label("AVX")) if ($avx);
  198. &test ("ebx",1<<9); # check for SSSE3
  199. &jnz (&label("SSSE3"));
  200. } else {
  201. &je (&label("loop_shrd"));
  202. }
  203. if ($unroll_after) {
  204. &set_label("no_xmm");
  205. &sub ("eax","edi");
  206. &cmp ("eax",$unroll_after);
  207. &jae (&label("unrolled"));
  208. } }
  209. &jmp (&label("loop"));
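# COMPACT_LOOP keeps the whole working set on the stack: the byte-swapped
# input block is pushed, then A..H are stored below it. Instead of keeping a
# round counter it ends each phase when the constant just consumed matches
# the last one expected: 0xc19bf174 is K[15], 0xc67178f2 is K[63].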
  210. sub COMPACT_LOOP() {
  211. my $suffix=shift;
  212. &set_label("loop$suffix",$suffix?32:16);
  213. # copy input block to stack reversing byte and dword order
  214. for($i=0;$i<4;$i++) {
  215. &mov ("eax",&DWP($i*16+0,"edi"));
  216. &mov ("ebx",&DWP($i*16+4,"edi"));
  217. &mov ("ecx",&DWP($i*16+8,"edi"));
  218. &bswap ("eax");
  219. &mov ("edx",&DWP($i*16+12,"edi"));
  220. &bswap ("ebx");
  221. &push ("eax");
  222. &bswap ("ecx");
  223. &push ("ebx");
  224. &bswap ("edx");
  225. &push ("ecx");
  226. &push ("edx");
  227. }
  228. &add ("edi",64);
  229. &lea ("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
  230. &mov (&DWP(4*(9+16)+4,"esp"),"edi");
  231. # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
  232. &mov ($A,&DWP(0,"esi"));
  233. &mov ("ebx",&DWP(4,"esi"));
  234. &mov ("ecx",&DWP(8,"esi"));
  235. &mov ("edi",&DWP(12,"esi"));
  236. # &mov ($Aoff,$A);
  237. &mov ($Boff,"ebx");
  238. &xor ("ebx","ecx");
  239. &mov ($Coff,"ecx");
  240. &mov ($Doff,"edi");
  241. &mov (&DWP(0,"esp"),"ebx"); # magic
  242. &mov ($E,&DWP(16,"esi"));
  243. &mov ("ebx",&DWP(20,"esi"));
  244. &mov ("ecx",&DWP(24,"esi"));
  245. &mov ("edi",&DWP(28,"esi"));
  246. # &mov ($Eoff,$E);
  247. &mov ($Foff,"ebx");
  248. &mov ($Goff,"ecx");
  249. &mov ($Hoff,"edi");
  250. &set_label("00_15$suffix",16);
  251. &BODY_00_15();
  252. &cmp ("esi",0xc19bf174);
  253. &jne (&label("00_15$suffix"));
  254. &mov ("ecx",&DWP(4*(9+15+16-1),"esp")); # preloaded in BODY_00_15(1)
  255. &jmp (&label("16_63$suffix"));
  256. &set_label("16_63$suffix",16);
  257. &BODY_16_63();
  258. &cmp ("esi",0xc67178f2);
  259. &jne (&label("16_63$suffix"));
  260. &mov ("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
  261. # &mov ($A,$Aoff);
  262. &mov ("ebx",$Boff);
  263. # &mov ("edi",$Coff);
  264. &mov ("ecx",$Doff);
  265. &add ($A,&DWP(0,"esi"));
  266. &add ("ebx",&DWP(4,"esi"));
  267. &add ("edi",&DWP(8,"esi"));
  268. &add ("ecx",&DWP(12,"esi"));
  269. &mov (&DWP(0,"esi"),$A);
  270. &mov (&DWP(4,"esi"),"ebx");
  271. &mov (&DWP(8,"esi"),"edi");
  272. &mov (&DWP(12,"esi"),"ecx");
  273. # &mov ($E,$Eoff);
  274. &mov ("eax",$Foff);
  275. &mov ("ebx",$Goff);
  276. &mov ("ecx",$Hoff);
  277. &mov ("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
  278. &add ($E,&DWP(16,"esi"));
  279. &add ("eax",&DWP(20,"esi"));
  280. &add ("ebx",&DWP(24,"esi"));
  281. &add ("ecx",&DWP(28,"esi"));
  282. &mov (&DWP(16,"esi"),$E);
  283. &mov (&DWP(20,"esi"),"eax");
  284. &mov (&DWP(24,"esi"),"ebx");
  285. &mov (&DWP(28,"esi"),"ecx");
  286. &lea ("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
  287. &sub ($K256,4*64); # rewind K
  288. &cmp ("edi",&DWP(8,"esp")); # are we done yet?
  289. &jb (&label("loop$suffix"));
  290. }
  291. &COMPACT_LOOP();
  292. &mov ("esp",&DWP(12,"esp")); # restore sp
  293. &function_end_A();
  294. if (!$i386 && !$xmm) {
  295. # ~20% improvement on Sandy Bridge
  296. local *ror = sub { &shrd(@_[0],@_) };
  297. &COMPACT_LOOP("_shrd");
  298. &mov ("esp",&DWP(12,"esp")); # restore sp
  299. &function_end_A();
  300. }
  301. &set_label("K256",64); # Yes! I keep it in the code segment!
  302. @K256=( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
  303. 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
  304. 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
  305. 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
  306. 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
  307. 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
  308. 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
  309. 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
  310. 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
  311. 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
  312. 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
  313. 0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
  314. 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
  315. 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
  316. 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
  317. 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
  318. &data_word(@K256);
  319. &data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # byte swap mask
  320. &asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
  321. ($a,$b,$c,$d,$e,$f,$g,$h)=(0..7); # offsets
  322. sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
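# off() maps a logical index (a..h above) to its current stack slot; because
# $i is subtracted modulo 8, bumping $i each round rotates the mapping, so
# the usual a..h renaming costs no moves. @AH is reversed in step to swap
# the two register-resident words (a and h).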
  323. if (!$i386 && $unroll_after) {
  324. my @AH=($A,$K256);
  325. &set_label("unrolled",16);
  326. &lea ("esp",&DWP(-96,"esp"));
  327. # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
  328. &mov ($AH[0],&DWP(0,"esi"));
  329. &mov ($AH[1],&DWP(4,"esi"));
  330. &mov ("ecx",&DWP(8,"esi"));
  331. &mov ("ebx",&DWP(12,"esi"));
  332. #&mov (&DWP(0,"esp"),$AH[0]);
  333. &mov (&DWP(4,"esp"),$AH[1]);
  334. &xor ($AH[1],"ecx"); # magic
  335. &mov (&DWP(8,"esp"),"ecx");
  336. &mov (&DWP(12,"esp"),"ebx");
  337. &mov ($E,&DWP(16,"esi"));
  338. &mov ("ebx",&DWP(20,"esi"));
  339. &mov ("ecx",&DWP(24,"esi"));
  340. &mov ("esi",&DWP(28,"esi"));
  341. #&mov (&DWP(16,"esp"),$E);
  342. &mov (&DWP(20,"esp"),"ebx");
  343. &mov (&DWP(24,"esp"),"ecx");
  344. &mov (&DWP(28,"esp"),"esi");
  345. &jmp (&label("grand_loop"));
  346. &set_label("grand_loop",16);
  347. # copy input block to stack reversing byte order
  348. for($i=0;$i<5;$i++) {
  349. &mov ("ebx",&DWP(12*$i+0,"edi"));
  350. &mov ("ecx",&DWP(12*$i+4,"edi"));
  351. &bswap ("ebx");
  352. &mov ("esi",&DWP(12*$i+8,"edi"));
  353. &bswap ("ecx");
  354. &mov (&DWP(32+12*$i+0,"esp"),"ebx");
  355. &bswap ("esi");
  356. &mov (&DWP(32+12*$i+4,"esp"),"ecx");
  357. &mov (&DWP(32+12*$i+8,"esp"),"esi");
  358. }
  359. &mov ("ebx",&DWP($i*12,"edi"));
  360. &add ("edi",64);
  361. &bswap ("ebx");
  362. &mov (&DWP(96+4,"esp"),"edi");
  363. &mov (&DWP(32+12*$i,"esp"),"ebx");
  364. my ($t1,$t2) = ("ecx","esi");
  365. for ($i=0;$i<64;$i++) {
  366. if ($i>=16) {
  367. &mov ($T,$t1); # $t1 is preloaded
  368. # &mov ($t2,&DWP(32+4*(($i+14)&15),"esp"));
  369. &ror ($t1,18-7);
  370. &mov ("edi",$t2);
  371. &ror ($t2,19-17);
  372. &xor ($t1,$T);
  373. &shr ($T,3);
  374. &ror ($t1,7);
  375. &xor ($t2,"edi");
  376. &xor ($T,$t1); # T = sigma0(X[-15])
  377. &ror ($t2,17);
  378. &add ($T,&DWP(32+4*($i&15),"esp")); # T += X[-16]
  379. &shr ("edi",10);
  380. &add ($T,&DWP(32+4*(($i+9)&15),"esp")); # T += X[-7]
  381. #&xor ("edi",$t2) # sigma1(X[-2])
  382. # &add ($T,"edi"); # T += sigma1(X[-2])
  383. # &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
  384. }
  385. &mov ($t1,$E);
  386. &xor ("edi",$t2) if ($i>=16); # sigma1(X[-2])
  387. &mov ($t2,&off($f));
  388. &ror ($E,25-11);
  389. &add ($T,"edi") if ($i>=16); # T += sigma1(X[-2])
  390. &mov ("edi",&off($g));
  391. &xor ($E,$t1);
  392. &mov ($T,&DWP(32+4*($i&15),"esp")) if ($i<16); # X[i]
  393. &mov (&DWP(32+4*($i&15),"esp"),$T) if ($i>=16 && $i<62); # save X[0]
  394. &xor ($t2,"edi");
  395. &ror ($E,11-6);
  396. &and ($t2,$t1);
  397. &mov (&off($e),$t1); # save $E, modulo-scheduled
  398. &xor ($E,$t1);
  399. &add ($T,&off($h)); # T += h
  400. &xor ("edi",$t2); # Ch(e,f,g)
  401. &ror ($E,6); # Sigma1(e)
  402. &mov ($t1,$AH[0]);
  403. &add ($T,"edi"); # T += Ch(e,f,g)
  404. &ror ($t1,22-13);
  405. &mov ($t2,$AH[0]);
  406. &mov ("edi",&off($b));
  407. &xor ($t1,$AH[0]);
  408. &mov (&off($a),$AH[0]); # save $A, modulo-scheduled
  409. &xor ($AH[0],"edi"); # a ^= b, (b^c) in next round
  410. &ror ($t1,13-2);
  411. &and ($AH[1],$AH[0]); # (b^c) &= (a^b)
&lea ($E,&DWP(@K256[$i],$T,$E)); # T += Sigma1(e)+K[i]
  413. &xor ($t1,$t2);
  414. &xor ($AH[1],"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
  415. &mov ($t2,&DWP(32+4*(($i+2)&15),"esp")) if ($i>=15 && $i<63);
  416. &ror ($t1,2); # Sigma0(a)
  417. &add ($AH[1],$E); # h += T
  418. &add ($E,&off($d)); # d += T
  419. &add ($AH[1],$t1); # h += Sigma0(a)
  420. &mov ($t1,&DWP(32+4*(($i+15)&15),"esp")) if ($i>=15 && $i<63);
  421. @AH = reverse(@AH); # rotate(a,h)
  422. ($t1,$t2) = ($t2,$t1); # rotate(t1,t2)
  423. }
  424. &mov ("esi",&DWP(96,"esp")); #ctx
  425. #&mov ($AH[0],&DWP(0,"esp"));
  426. &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
  427. #&mov ("edi", &DWP(8,"esp"));
  428. &mov ("ecx",&DWP(12,"esp"));
  429. &add ($AH[0],&DWP(0,"esi"));
  430. &add ($AH[1],&DWP(4,"esi"));
  431. &add ("edi",&DWP(8,"esi"));
  432. &add ("ecx",&DWP(12,"esi"));
  433. &mov (&DWP(0,"esi"),$AH[0]);
  434. &mov (&DWP(4,"esi"),$AH[1]);
  435. &mov (&DWP(8,"esi"),"edi");
  436. &mov (&DWP(12,"esi"),"ecx");
  437. #&mov (&DWP(0,"esp"),$AH[0]);
  438. &mov (&DWP(4,"esp"),$AH[1]);
  439. &xor ($AH[1],"edi"); # magic
  440. &mov (&DWP(8,"esp"),"edi");
  441. &mov (&DWP(12,"esp"),"ecx");
  442. #&mov ($E,&DWP(16,"esp"));
  443. &mov ("edi",&DWP(20,"esp"));
  444. &mov ("ebx",&DWP(24,"esp"));
  445. &mov ("ecx",&DWP(28,"esp"));
  446. &add ($E,&DWP(16,"esi"));
  447. &add ("edi",&DWP(20,"esi"));
  448. &add ("ebx",&DWP(24,"esi"));
  449. &add ("ecx",&DWP(28,"esi"));
  450. &mov (&DWP(16,"esi"),$E);
  451. &mov (&DWP(20,"esi"),"edi");
  452. &mov (&DWP(24,"esi"),"ebx");
  453. &mov (&DWP(28,"esi"),"ecx");
  454. #&mov (&DWP(16,"esp"),$E);
  455. &mov (&DWP(20,"esp"),"edi");
  456. &mov ("edi",&DWP(96+4,"esp")); # inp
  457. &mov (&DWP(24,"esp"),"ebx");
  458. &mov (&DWP(28,"esp"),"ecx");
  459. &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
  460. &jb (&label("grand_loop"));
  461. &mov ("esp",&DWP(96+12,"esp")); # restore sp
  462. &function_end_A();
  463. }
  464. if (!$i386 && $xmm) {{{
  465. if ($shaext) {
  466. ######################################################################
  467. # Intel SHA Extensions implementation of SHA256 update function.
  468. #
  469. my ($ctx,$inp,$end)=("esi","edi","eax");
  470. my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
  471. my @MSG=map("xmm$_",(3..6));
  472. sub sha256op38 {
  473. my ($opcodelet,$dst,$src)=@_;
  474. if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
  475. { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
  476. }
  477. sub sha256rnds2 { sha256op38(0xcb,@_); }
  478. sub sha256msg1 { sha256op38(0xcc,@_); }
  479. sub sha256msg2 { sha256op38(0xcd,@_); }
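# The three wrappers above hand-assemble the SHA instructions as raw bytes
# (0x0f,0x38 plus opcode 0xcb/0xcc/0xcd and a register-register ModRM byte),
# so the script does not depend on assembler support for the sha256rnds2/
# sha256msg1/sha256msg2 mnemonics.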
  480. &set_label("shaext",32);
  481. &sub ("esp",32);
  482. &movdqu ($ABEF,&QWP(0,$ctx)); # DCBA
  483. &lea ($K256,&DWP(0x80,$K256));
  484. &movdqu ($CDGH,&QWP(16,$ctx)); # HGFE
  485. &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
  486. &pshufd ($Wi,$ABEF,0x1b); # ABCD
  487. &pshufd ($ABEF,$ABEF,0xb1); # CDAB
  488. &pshufd ($CDGH,$CDGH,0x1b); # EFGH
  489. &palignr ($ABEF,$CDGH,8); # ABEF
  490. &punpcklqdq ($CDGH,$Wi); # CDGH
  491. &jmp (&label("loop_shaext"));
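# sha256rnds2 performs two rounds per invocation and takes its two
# message+constant words implicitly from the low half of xmm0 ($Wi); hence
# each 4-round group below does a paddd into $Wi, one sha256rnds2, a
# pshufd 0x0e to move the upper pair of words into the low half, and a
# second sha256rnds2 with the operands swapped.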
  492. &set_label("loop_shaext",16);
  493. &movdqu (@MSG[0],&QWP(0,$inp));
  494. &movdqu (@MSG[1],&QWP(0x10,$inp));
  495. &movdqu (@MSG[2],&QWP(0x20,$inp));
  496. &pshufb (@MSG[0],$TMP);
  497. &movdqu (@MSG[3],&QWP(0x30,$inp));
  498. &movdqa (&QWP(16,"esp"),$CDGH); # offload
  499. &movdqa ($Wi,&QWP(0*16-0x80,$K256));
  500. &paddd ($Wi,@MSG[0]);
  501. &pshufb (@MSG[1],$TMP);
  502. &sha256rnds2 ($CDGH,$ABEF); # 0-3
  503. &pshufd ($Wi,$Wi,0x0e);
  504. &nop ();
  505. &movdqa (&QWP(0,"esp"),$ABEF); # offload
  506. &sha256rnds2 ($ABEF,$CDGH);
  507. &movdqa ($Wi,&QWP(1*16-0x80,$K256));
  508. &paddd ($Wi,@MSG[1]);
  509. &pshufb (@MSG[2],$TMP);
  510. &sha256rnds2 ($CDGH,$ABEF); # 4-7
  511. &pshufd ($Wi,$Wi,0x0e);
  512. &lea ($inp,&DWP(0x40,$inp));
  513. &sha256msg1 (@MSG[0],@MSG[1]);
  514. &sha256rnds2 ($ABEF,$CDGH);
  515. &movdqa ($Wi,&QWP(2*16-0x80,$K256));
  516. &paddd ($Wi,@MSG[2]);
  517. &pshufb (@MSG[3],$TMP);
  518. &sha256rnds2 ($CDGH,$ABEF); # 8-11
  519. &pshufd ($Wi,$Wi,0x0e);
  520. &movdqa ($TMP,@MSG[3]);
  521. &palignr ($TMP,@MSG[2],4);
  522. &nop ();
  523. &paddd (@MSG[0],$TMP);
  524. &sha256msg1 (@MSG[1],@MSG[2]);
  525. &sha256rnds2 ($ABEF,$CDGH);
  526. &movdqa ($Wi,&QWP(3*16-0x80,$K256));
  527. &paddd ($Wi,@MSG[3]);
  528. &sha256msg2 (@MSG[0],@MSG[3]);
  529. &sha256rnds2 ($CDGH,$ABEF); # 12-15
  530. &pshufd ($Wi,$Wi,0x0e);
  531. &movdqa ($TMP,@MSG[0]);
  532. &palignr ($TMP,@MSG[3],4);
  533. &nop ();
  534. &paddd (@MSG[1],$TMP);
  535. &sha256msg1 (@MSG[2],@MSG[3]);
  536. &sha256rnds2 ($ABEF,$CDGH);
  537. for($i=4;$i<16-3;$i++) {
  538. &movdqa ($Wi,&QWP($i*16-0x80,$K256));
  539. &paddd ($Wi,@MSG[0]);
  540. &sha256msg2 (@MSG[1],@MSG[0]);
  541. &sha256rnds2 ($CDGH,$ABEF); # 16-19...
  542. &pshufd ($Wi,$Wi,0x0e);
  543. &movdqa ($TMP,@MSG[1]);
  544. &palignr ($TMP,@MSG[0],4);
  545. &nop ();
  546. &paddd (@MSG[2],$TMP);
  547. &sha256msg1 (@MSG[3],@MSG[0]);
  548. &sha256rnds2 ($ABEF,$CDGH);
  549. push(@MSG,shift(@MSG));
  550. }
  551. &movdqa ($Wi,&QWP(13*16-0x80,$K256));
  552. &paddd ($Wi,@MSG[0]);
  553. &sha256msg2 (@MSG[1],@MSG[0]);
  554. &sha256rnds2 ($CDGH,$ABEF); # 52-55
  555. &pshufd ($Wi,$Wi,0x0e);
&movdqa ($TMP,@MSG[1]);
  557. &palignr ($TMP,@MSG[0],4);
  558. &sha256rnds2 ($ABEF,$CDGH);
  559. &paddd (@MSG[2],$TMP);
  560. &movdqa ($Wi,&QWP(14*16-0x80,$K256));
  561. &paddd ($Wi,@MSG[1]);
  562. &sha256rnds2 ($CDGH,$ABEF); # 56-59
  563. &pshufd ($Wi,$Wi,0x0e);
  564. &sha256msg2 (@MSG[2],@MSG[1]);
  565. &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
  566. &sha256rnds2 ($ABEF,$CDGH);
  567. &movdqa ($Wi,&QWP(15*16-0x80,$K256));
  568. &paddd ($Wi,@MSG[2]);
  569. &nop ();
  570. &sha256rnds2 ($CDGH,$ABEF); # 60-63
  571. &pshufd ($Wi,$Wi,0x0e);
  572. &cmp ($end,$inp);
  573. &nop ();
  574. &sha256rnds2 ($ABEF,$CDGH);
  575. &paddd ($CDGH,&QWP(16,"esp"));
  576. &paddd ($ABEF,&QWP(0,"esp"));
  577. &jnz (&label("loop_shaext"));
  578. &pshufd ($CDGH,$CDGH,0xb1); # DCHG
  579. &pshufd ($TMP,$ABEF,0x1b); # FEBA
  580. &pshufd ($ABEF,$ABEF,0xb1); # BAFE
  581. &punpckhqdq ($ABEF,$CDGH); # DCBA
  582. &palignr ($CDGH,$TMP,8); # HGFE
  583. &mov ("esp",&DWP(32+12,"esp"));
  584. &movdqu (&QWP(0,$ctx),$ABEF);
  585. &movdqu (&QWP(16,$ctx),$CDGH);
  586. &function_end_A();
  587. }
  588. my @X = map("xmm$_",(0..3));
  589. my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
  590. my @AH = ($A,$T);
  591. &set_label("SSSE3",32);
  592. &lea ("esp",&DWP(-96,"esp"));
  593. # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
  594. &mov ($AH[0],&DWP(0,"esi"));
  595. &mov ($AH[1],&DWP(4,"esi"));
  596. &mov ("ecx",&DWP(8,"esi"));
  597. &mov ("edi",&DWP(12,"esi"));
  598. #&mov (&DWP(0,"esp"),$AH[0]);
  599. &mov (&DWP(4,"esp"),$AH[1]);
  600. &xor ($AH[1],"ecx"); # magic
  601. &mov (&DWP(8,"esp"),"ecx");
  602. &mov (&DWP(12,"esp"),"edi");
  603. &mov ($E,&DWP(16,"esi"));
  604. &mov ("edi",&DWP(20,"esi"));
  605. &mov ("ecx",&DWP(24,"esi"));
  606. &mov ("esi",&DWP(28,"esi"));
  607. #&mov (&DWP(16,"esp"),$E);
  608. &mov (&DWP(20,"esp"),"edi");
  609. &mov ("edi",&DWP(96+4,"esp")); # inp
  610. &mov (&DWP(24,"esp"),"ecx");
  611. &mov (&DWP(28,"esp"),"esi");
  612. &movdqa ($t3,&QWP(256,$K256));
  613. &jmp (&label("grand_ssse3"));
  614. &set_label("grand_ssse3",16);
  615. # load input, reverse byte order, add K256[0..15], save to stack
  616. &movdqu (@X[0],&QWP(0,"edi"));
  617. &movdqu (@X[1],&QWP(16,"edi"));
  618. &movdqu (@X[2],&QWP(32,"edi"));
  619. &movdqu (@X[3],&QWP(48,"edi"));
  620. &add ("edi",64);
  621. &pshufb (@X[0],$t3);
  622. &mov (&DWP(96+4,"esp"),"edi");
  623. &pshufb (@X[1],$t3);
  624. &movdqa ($t0,&QWP(0,$K256));
  625. &pshufb (@X[2],$t3);
  626. &movdqa ($t1,&QWP(16,$K256));
  627. &paddd ($t0,@X[0]);
  628. &pshufb (@X[3],$t3);
  629. &movdqa ($t2,&QWP(32,$K256));
  630. &paddd ($t1,@X[1]);
  631. &movdqa ($t3,&QWP(48,$K256));
  632. &movdqa (&QWP(32+0,"esp"),$t0);
  633. &paddd ($t2,@X[2]);
  634. &movdqa (&QWP(32+16,"esp"),$t1);
  635. &paddd ($t3,@X[3]);
  636. &movdqa (&QWP(32+32,"esp"),$t2);
  637. &movdqa (&QWP(32+48,"esp"),$t3);
  638. &jmp (&label("ssse3_00_47"));
  639. &set_label("ssse3_00_47",16);
  640. &add ($K256,64);
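# Each SSSE3_00_47 call below advances the message schedule by four words
# using vector code (sigma0 via psrld/pslld/pxor, sigma1 on two words at a
# time via the pshufd/psrlq path), interleaving the ~120 single-instruction
# strings returned by the scalar $body so integer and SIMD work overlap; the
# resulting X[i]+K[i] values are written back to the stack for the round code.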
  641. sub SSSE3_00_47 () {
  642. my $j = shift;
  643. my $body = shift;
  644. my @X = @_;
  645. my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
  646. eval(shift(@insns));
  647. &movdqa ($t0,@X[1]);
  648. eval(shift(@insns)); # @
  649. eval(shift(@insns));
  650. &movdqa ($t3,@X[3]);
  651. eval(shift(@insns));
  652. eval(shift(@insns));
  653. &palignr ($t0,@X[0],4); # X[1..4]
  654. eval(shift(@insns));
  655. eval(shift(@insns)); # @
  656. eval(shift(@insns));
  657. &palignr ($t3,@X[2],4); # X[9..12]
  658. eval(shift(@insns));
  659. eval(shift(@insns));
  660. eval(shift(@insns));
  661. &movdqa ($t1,$t0);
  662. eval(shift(@insns)); # @
  663. eval(shift(@insns));
  664. &movdqa ($t2,$t0);
  665. eval(shift(@insns));
  666. eval(shift(@insns));
  667. &psrld ($t0,3);
  668. eval(shift(@insns));
  669. eval(shift(@insns)); # @
  670. &paddd (@X[0],$t3); # X[0..3] += X[9..12]
  671. eval(shift(@insns));
  672. eval(shift(@insns));
  673. &psrld ($t2,7);
  674. eval(shift(@insns));
  675. eval(shift(@insns));
  676. eval(shift(@insns)); # @
  677. eval(shift(@insns));
  678. &pshufd ($t3,@X[3],0b11111010); # X[14..15]
  679. eval(shift(@insns));
  680. eval(shift(@insns));
  681. &pslld ($t1,32-18);
  682. eval(shift(@insns));
  683. eval(shift(@insns)); # @
  684. &pxor ($t0,$t2);
  685. eval(shift(@insns));
  686. eval(shift(@insns));
  687. &psrld ($t2,18-7);
  688. eval(shift(@insns));
  689. eval(shift(@insns));
  690. eval(shift(@insns)); # @
  691. &pxor ($t0,$t1);
  692. eval(shift(@insns));
  693. eval(shift(@insns));
  694. &pslld ($t1,18-7);
  695. eval(shift(@insns));
  696. eval(shift(@insns));
  697. eval(shift(@insns)); # @
  698. &pxor ($t0,$t2);
  699. eval(shift(@insns));
  700. eval(shift(@insns));
  701. &movdqa ($t2,$t3);
  702. eval(shift(@insns));
  703. eval(shift(@insns));
  704. eval(shift(@insns)); # @
  705. &pxor ($t0,$t1); # sigma0(X[1..4])
  706. eval(shift(@insns));
  707. eval(shift(@insns));
  708. &psrld ($t3,10);
  709. eval(shift(@insns));
  710. eval(shift(@insns));
  711. eval(shift(@insns)); # @
  712. &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
  713. eval(shift(@insns));
  714. eval(shift(@insns));
  715. &psrlq ($t2,17);
  716. eval(shift(@insns));
  717. eval(shift(@insns));
  718. eval(shift(@insns)); # @
  719. &pxor ($t3,$t2);
  720. eval(shift(@insns));
  721. eval(shift(@insns));
  722. &psrlq ($t2,19-17);
  723. eval(shift(@insns));
  724. eval(shift(@insns));
  725. eval(shift(@insns)); # @
  726. &pxor ($t3,$t2);
  727. eval(shift(@insns));
  728. eval(shift(@insns));
  729. &pshufd ($t3,$t3,0b10000000);
  730. eval(shift(@insns));
  731. eval(shift(@insns));
  732. eval(shift(@insns)); # @
  733. eval(shift(@insns));
  734. eval(shift(@insns));
  735. eval(shift(@insns));
  736. eval(shift(@insns));
  737. eval(shift(@insns)); # @
  738. eval(shift(@insns));
  739. &psrldq ($t3,8);
  740. eval(shift(@insns));
  741. eval(shift(@insns));
  742. eval(shift(@insns));
  743. &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
  744. eval(shift(@insns)); # @
  745. eval(shift(@insns));
  746. eval(shift(@insns));
  747. eval(shift(@insns));
  748. eval(shift(@insns));
  749. eval(shift(@insns)); # @
  750. eval(shift(@insns));
  751. &pshufd ($t3,@X[0],0b01010000); # X[16..17]
  752. eval(shift(@insns));
  753. eval(shift(@insns));
  754. eval(shift(@insns));
  755. &movdqa ($t2,$t3);
  756. eval(shift(@insns)); # @
  757. &psrld ($t3,10);
  758. eval(shift(@insns));
  759. &psrlq ($t2,17);
  760. eval(shift(@insns));
  761. eval(shift(@insns));
  762. eval(shift(@insns));
  763. eval(shift(@insns)); # @
  764. &pxor ($t3,$t2);
  765. eval(shift(@insns));
  766. eval(shift(@insns));
  767. &psrlq ($t2,19-17);
  768. eval(shift(@insns));
  769. eval(shift(@insns));
  770. eval(shift(@insns)); # @
  771. &pxor ($t3,$t2);
  772. eval(shift(@insns));
  773. eval(shift(@insns));
  774. eval(shift(@insns));
  775. &pshufd ($t3,$t3,0b00001000);
  776. eval(shift(@insns));
  777. eval(shift(@insns)); # @
  778. &movdqa ($t2,&QWP(16*$j,$K256));
  779. eval(shift(@insns));
  780. eval(shift(@insns));
  781. &pslldq ($t3,8);
  782. eval(shift(@insns));
  783. eval(shift(@insns));
  784. eval(shift(@insns)); # @
  785. eval(shift(@insns));
  786. eval(shift(@insns));
  787. eval(shift(@insns));
  788. eval(shift(@insns));
  789. eval(shift(@insns)); # @
  790. &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
  791. eval(shift(@insns));
  792. eval(shift(@insns));
  793. eval(shift(@insns));
  794. eval(shift(@insns));
  795. &paddd ($t2,@X[0]);
  796. eval(shift(@insns)); # @
  797. foreach (@insns) { eval; } # remaining instructions
  798. &movdqa (&QWP(32+16*$j,"esp"),$t2);
  799. }
  800. sub body_00_15 () {
  801. (
  802. '&mov ("ecx",$E);',
  803. '&ror ($E,25-11);',
  804. '&mov ("esi",&off($f));',
  805. '&xor ($E,"ecx");',
  806. '&mov ("edi",&off($g));',
  807. '&xor ("esi","edi");',
  808. '&ror ($E,11-6);',
  809. '&and ("esi","ecx");',
  810. '&mov (&off($e),"ecx");', # save $E, modulo-scheduled
  811. '&xor ($E,"ecx");',
  812. '&xor ("edi","esi");', # Ch(e,f,g)
  813. '&ror ($E,6);', # T = Sigma1(e)
  814. '&mov ("ecx",$AH[0]);',
  815. '&add ($E,"edi");', # T += Ch(e,f,g)
  816. '&mov ("edi",&off($b));',
  817. '&mov ("esi",$AH[0]);',
  818. '&ror ("ecx",22-13);',
  819. '&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
  820. '&xor ("ecx",$AH[0]);',
  821. '&xor ($AH[0],"edi");', # a ^= b, (b^c) in next round
  822. '&add ($E,&off($h));', # T += h
  823. '&ror ("ecx",13-2);',
  824. '&and ($AH[1],$AH[0]);', # (b^c) &= (a^b)
  825. '&xor ("ecx","esi");',
  826. '&add ($E,&DWP(32+4*($i&15),"esp"));', # T += K[i]+X[i]
  827. '&xor ($AH[1],"edi");', # h = Maj(a,b,c) = Ch(a^b,c,b)
  828. '&ror ("ecx",2);', # Sigma0(a)
  829. '&add ($AH[1],$E);', # h += T
  830. '&add ($E,&off($d));', # d += T
  831. '&add ($AH[1],"ecx");'. # h += Sigma0(a)
  832. '@AH = reverse(@AH); $i++;' # rotate(a,h)
  833. );
  834. }
  835. for ($i=0,$j=0; $j<4; $j++) {
  836. &SSSE3_00_47($j,\&body_00_15,@X);
  837. push(@X,shift(@X)); # rotate(@X)
  838. }
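# The cmp below exploits the fact that the byte-swap mask is stored right
# after the 64 K256 constants: once the "next" block of constants $K256
# points at is the mask (first dword 0x00010203), every schedule word up to
# X[63] has been produced, and control falls through to the 16 plain round
# bodies that finish the block.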
  839. &cmp (&DWP(16*$j,$K256),0x00010203);
  840. &jne (&label("ssse3_00_47"));
  841. for ($i=0; $i<16; ) {
  842. foreach(body_00_15()) { eval; }
  843. }
  844. &mov ("esi",&DWP(96,"esp")); #ctx
  845. #&mov ($AH[0],&DWP(0,"esp"));
  846. &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
  847. #&mov ("edi", &DWP(8,"esp"));
  848. &mov ("ecx",&DWP(12,"esp"));
  849. &add ($AH[0],&DWP(0,"esi"));
  850. &add ($AH[1],&DWP(4,"esi"));
  851. &add ("edi",&DWP(8,"esi"));
  852. &add ("ecx",&DWP(12,"esi"));
  853. &mov (&DWP(0,"esi"),$AH[0]);
  854. &mov (&DWP(4,"esi"),$AH[1]);
  855. &mov (&DWP(8,"esi"),"edi");
  856. &mov (&DWP(12,"esi"),"ecx");
  857. #&mov (&DWP(0,"esp"),$AH[0]);
  858. &mov (&DWP(4,"esp"),$AH[1]);
  859. &xor ($AH[1],"edi"); # magic
  860. &mov (&DWP(8,"esp"),"edi");
  861. &mov (&DWP(12,"esp"),"ecx");
  862. #&mov ($E,&DWP(16,"esp"));
  863. &mov ("edi",&DWP(20,"esp"));
  864. &mov ("ecx",&DWP(24,"esp"));
  865. &add ($E,&DWP(16,"esi"));
  866. &add ("edi",&DWP(20,"esi"));
  867. &add ("ecx",&DWP(24,"esi"));
  868. &mov (&DWP(16,"esi"),$E);
  869. &mov (&DWP(20,"esi"),"edi");
  870. &mov (&DWP(20,"esp"),"edi");
  871. &mov ("edi",&DWP(28,"esp"));
  872. &mov (&DWP(24,"esi"),"ecx");
  873. #&mov (&DWP(16,"esp"),$E);
  874. &add ("edi",&DWP(28,"esi"));
  875. &mov (&DWP(24,"esp"),"ecx");
  876. &mov (&DWP(28,"esi"),"edi");
  877. &mov (&DWP(28,"esp"),"edi");
  878. &mov ("edi",&DWP(96+4,"esp")); # inp
  879. &movdqa ($t3,&QWP(64,$K256));
  880. &sub ($K256,3*64); # rewind K
  881. &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
  882. &jb (&label("grand_ssse3"));
  883. &mov ("esp",&DWP(96+12,"esp")); # restore sp
  884. &function_end_A();
  885. if ($avx) {
  886. &set_label("AVX",32);
  887. if ($avx>1) {
  888. &and ("edx",1<<8|1<<3); # check for BMI2+BMI1
  889. &cmp ("edx",1<<8|1<<3);
  890. &je (&label("AVX_BMI"));
  891. }
  892. &lea ("esp",&DWP(-96,"esp"));
  893. &vzeroall ();
  894. # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
  895. &mov ($AH[0],&DWP(0,"esi"));
  896. &mov ($AH[1],&DWP(4,"esi"));
  897. &mov ("ecx",&DWP(8,"esi"));
  898. &mov ("edi",&DWP(12,"esi"));
  899. #&mov (&DWP(0,"esp"),$AH[0]);
  900. &mov (&DWP(4,"esp"),$AH[1]);
  901. &xor ($AH[1],"ecx"); # magic
  902. &mov (&DWP(8,"esp"),"ecx");
  903. &mov (&DWP(12,"esp"),"edi");
  904. &mov ($E,&DWP(16,"esi"));
  905. &mov ("edi",&DWP(20,"esi"));
  906. &mov ("ecx",&DWP(24,"esi"));
  907. &mov ("esi",&DWP(28,"esi"));
  908. #&mov (&DWP(16,"esp"),$E);
  909. &mov (&DWP(20,"esp"),"edi");
  910. &mov ("edi",&DWP(96+4,"esp")); # inp
  911. &mov (&DWP(24,"esp"),"ecx");
  912. &mov (&DWP(28,"esp"),"esi");
  913. &vmovdqa ($t3,&QWP(256,$K256));
  914. &jmp (&label("grand_avx"));
  915. &set_label("grand_avx",32);
  916. # load input, reverse byte order, add K256[0..15], save to stack
  917. &vmovdqu (@X[0],&QWP(0,"edi"));
  918. &vmovdqu (@X[1],&QWP(16,"edi"));
  919. &vmovdqu (@X[2],&QWP(32,"edi"));
  920. &vmovdqu (@X[3],&QWP(48,"edi"));
  921. &add ("edi",64);
  922. &vpshufb (@X[0],@X[0],$t3);
  923. &mov (&DWP(96+4,"esp"),"edi");
  924. &vpshufb (@X[1],@X[1],$t3);
  925. &vpshufb (@X[2],@X[2],$t3);
  926. &vpaddd ($t0,@X[0],&QWP(0,$K256));
  927. &vpshufb (@X[3],@X[3],$t3);
  928. &vpaddd ($t1,@X[1],&QWP(16,$K256));
  929. &vpaddd ($t2,@X[2],&QWP(32,$K256));
  930. &vpaddd ($t3,@X[3],&QWP(48,$K256));
  931. &vmovdqa (&QWP(32+0,"esp"),$t0);
  932. &vmovdqa (&QWP(32+16,"esp"),$t1);
  933. &vmovdqa (&QWP(32+32,"esp"),$t2);
  934. &vmovdqa (&QWP(32+48,"esp"),$t3);
  935. &jmp (&label("avx_00_47"));
  936. &set_label("avx_00_47",16);
  937. &add ($K256,64);
  938. sub Xupdate_AVX () {
  939. (
  940. '&vpalignr ($t0,@X[1],@X[0],4);', # X[1..4]
  941. '&vpalignr ($t3,@X[3],@X[2],4);', # X[9..12]
  942. '&vpsrld ($t2,$t0,7);',
  943. '&vpaddd (@X[0],@X[0],$t3);', # X[0..3] += X[9..16]
  944. '&vpsrld ($t3,$t0,3);',
  945. '&vpslld ($t1,$t0,14);',
  946. '&vpxor ($t0,$t3,$t2);',
  947. '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
  948. '&vpsrld ($t2,$t2,18-7);',
  949. '&vpxor ($t0,$t0,$t1);',
  950. '&vpslld ($t1,$t1,25-14);',
  951. '&vpxor ($t0,$t0,$t2);',
  952. '&vpsrld ($t2,$t3,10);',
  953. '&vpxor ($t0,$t0,$t1);', # sigma0(X[1..4])
  954. '&vpsrlq ($t1,$t3,17);',
  955. '&vpaddd (@X[0],@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
  956. '&vpxor ($t2,$t2,$t1);',
  957. '&vpsrlq ($t3,$t3,19);',
'&vpxor ($t2,$t2,$t3);', # sigma1(X[14..15])
  959. '&vpshufd ($t3,$t2,0b10000100);',
  960. '&vpsrldq ($t3,$t3,8);',
  961. '&vpaddd (@X[0],@X[0],$t3);', # X[0..1] += sigma1(X[14..15])
  962. '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
  963. '&vpsrld ($t2,$t3,10);',
  964. '&vpsrlq ($t1,$t3,17);',
  965. '&vpxor ($t2,$t2,$t1);',
  966. '&vpsrlq ($t3,$t3,19);',
'&vpxor ($t2,$t2,$t3);', # sigma1(X[16..17])
  968. '&vpshufd ($t3,$t2,0b11101000);',
  969. '&vpslldq ($t3,$t3,8);',
  970. '&vpaddd (@X[0],@X[0],$t3);' # X[2..3] += sigma1(X[16..17])
  971. );
  972. }
  973. local *ror = sub { &shrd(@_[0],@_) };
  974. sub AVX_00_47 () {
  975. my $j = shift;
  976. my $body = shift;
  977. my @X = @_;
  978. my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
  979. my $insn;
  980. foreach (Xupdate_AVX()) { # 31 instructions
  981. eval;
  982. eval(shift(@insns));
  983. eval(shift(@insns));
  984. eval($insn = shift(@insns));
  985. eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
  986. }
  987. &vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
  988. foreach (@insns) { eval; } # remaining instructions
  989. &vmovdqa (&QWP(32+16*$j,"esp"),$t2);
  990. }
  991. for ($i=0,$j=0; $j<4; $j++) {
  992. &AVX_00_47($j,\&body_00_15,@X);
  993. push(@X,shift(@X)); # rotate(@X)
  994. }
  995. &cmp (&DWP(16*$j,$K256),0x00010203);
  996. &jne (&label("avx_00_47"));
  997. for ($i=0; $i<16; ) {
  998. foreach(body_00_15()) { eval; }
  999. }
  1000. &mov ("esi",&DWP(96,"esp")); #ctx
  1001. #&mov ($AH[0],&DWP(0,"esp"));
  1002. &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
  1003. #&mov ("edi", &DWP(8,"esp"));
  1004. &mov ("ecx",&DWP(12,"esp"));
  1005. &add ($AH[0],&DWP(0,"esi"));
  1006. &add ($AH[1],&DWP(4,"esi"));
  1007. &add ("edi",&DWP(8,"esi"));
  1008. &add ("ecx",&DWP(12,"esi"));
  1009. &mov (&DWP(0,"esi"),$AH[0]);
  1010. &mov (&DWP(4,"esi"),$AH[1]);
  1011. &mov (&DWP(8,"esi"),"edi");
  1012. &mov (&DWP(12,"esi"),"ecx");
  1013. #&mov (&DWP(0,"esp"),$AH[0]);
  1014. &mov (&DWP(4,"esp"),$AH[1]);
  1015. &xor ($AH[1],"edi"); # magic
  1016. &mov (&DWP(8,"esp"),"edi");
  1017. &mov (&DWP(12,"esp"),"ecx");
  1018. #&mov ($E,&DWP(16,"esp"));
  1019. &mov ("edi",&DWP(20,"esp"));
  1020. &mov ("ecx",&DWP(24,"esp"));
  1021. &add ($E,&DWP(16,"esi"));
  1022. &add ("edi",&DWP(20,"esi"));
  1023. &add ("ecx",&DWP(24,"esi"));
  1024. &mov (&DWP(16,"esi"),$E);
  1025. &mov (&DWP(20,"esi"),"edi");
  1026. &mov (&DWP(20,"esp"),"edi");
  1027. &mov ("edi",&DWP(28,"esp"));
  1028. &mov (&DWP(24,"esi"),"ecx");
  1029. #&mov (&DWP(16,"esp"),$E);
  1030. &add ("edi",&DWP(28,"esi"));
  1031. &mov (&DWP(24,"esp"),"ecx");
  1032. &mov (&DWP(28,"esi"),"edi");
  1033. &mov (&DWP(28,"esp"),"edi");
  1034. &mov ("edi",&DWP(96+4,"esp")); # inp
  1035. &vmovdqa ($t3,&QWP(64,$K256));
  1036. &sub ($K256,3*64); # rewind K
  1037. &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
  1038. &jb (&label("grand_avx"));
  1039. &mov ("esp",&DWP(96+12,"esp")); # restore sp
  1040. &vzeroall ();
  1041. &function_end_A();
  1042. if ($avx>1) {
  1043. sub bodyx_00_15 () { # +10%
  1044. (
  1045. '&rorx ("ecx",$E,6)',
  1046. '&rorx ("esi",$E,11)',
  1047. '&mov (&off($e),$E)', # save $E, modulo-scheduled
  1048. '&rorx ("edi",$E,25)',
  1049. '&xor ("ecx","esi")',
  1050. '&andn ("esi",$E,&off($g))',
  1051. '&xor ("ecx","edi")', # Sigma1(e)
  1052. '&and ($E,&off($f))',
  1053. '&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
  1054. '&or ($E,"esi")', # T = Ch(e,f,g)
  1055. '&rorx ("edi",$AH[0],2)',
  1056. '&rorx ("esi",$AH[0],13)',
  1057. '&lea ($E,&DWP(0,$E,"ecx"))', # T += Sigma1(e)
  1058. '&rorx ("ecx",$AH[0],22)',
  1059. '&xor ("esi","edi")',
  1060. '&mov ("edi",&off($b))',
  1061. '&xor ("ecx","esi")', # Sigma0(a)
  1062. '&xor ($AH[0],"edi")', # a ^= b, (b^c) in next round
  1063. '&add ($E,&off($h))', # T += h
  1064. '&and ($AH[1],$AH[0])', # (b^c) &= (a^b)
  1065. '&add ($E,&DWP(32+4*($i&15),"esp"))', # T += K[i]+X[i]
  1066. '&xor ($AH[1],"edi")', # h = Maj(a,b,c) = Ch(a^b,c,b)
  1067. '&add ("ecx",$E)', # h += T
  1068. '&add ($E,&off($d))', # d += T
  1069. '&lea ($AH[1],&DWP(0,$AH[1],"ecx"));'. # h += Sigma0(a)
  1070. '@AH = reverse(@AH); $i++;' # rotate(a,h)
  1071. );
  1072. }
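# In this BMI variant rorx produces each rotate in a destination register
# without touching the flags, and andn supplies ~e&g directly, so
# Ch(e,f,g) = (e&f)|(~e&g) needs no separate complement or extra moves.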
  1073. &set_label("AVX_BMI",32);
  1074. &lea ("esp",&DWP(-96,"esp"));
  1075. &vzeroall ();
  1076. # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
  1077. &mov ($AH[0],&DWP(0,"esi"));
  1078. &mov ($AH[1],&DWP(4,"esi"));
  1079. &mov ("ecx",&DWP(8,"esi"));
  1080. &mov ("edi",&DWP(12,"esi"));
  1081. #&mov (&DWP(0,"esp"),$AH[0]);
  1082. &mov (&DWP(4,"esp"),$AH[1]);
  1083. &xor ($AH[1],"ecx"); # magic
  1084. &mov (&DWP(8,"esp"),"ecx");
  1085. &mov (&DWP(12,"esp"),"edi");
  1086. &mov ($E,&DWP(16,"esi"));
  1087. &mov ("edi",&DWP(20,"esi"));
  1088. &mov ("ecx",&DWP(24,"esi"));
  1089. &mov ("esi",&DWP(28,"esi"));
  1090. #&mov (&DWP(16,"esp"),$E);
  1091. &mov (&DWP(20,"esp"),"edi");
  1092. &mov ("edi",&DWP(96+4,"esp")); # inp
  1093. &mov (&DWP(24,"esp"),"ecx");
  1094. &mov (&DWP(28,"esp"),"esi");
  1095. &vmovdqa ($t3,&QWP(256,$K256));
  1096. &jmp (&label("grand_avx_bmi"));
  1097. &set_label("grand_avx_bmi",32);
  1098. # load input, reverse byte order, add K256[0..15], save to stack
  1099. &vmovdqu (@X[0],&QWP(0,"edi"));
  1100. &vmovdqu (@X[1],&QWP(16,"edi"));
  1101. &vmovdqu (@X[2],&QWP(32,"edi"));
  1102. &vmovdqu (@X[3],&QWP(48,"edi"));
  1103. &add ("edi",64);
  1104. &vpshufb (@X[0],@X[0],$t3);
  1105. &mov (&DWP(96+4,"esp"),"edi");
  1106. &vpshufb (@X[1],@X[1],$t3);
  1107. &vpshufb (@X[2],@X[2],$t3);
  1108. &vpaddd ($t0,@X[0],&QWP(0,$K256));
  1109. &vpshufb (@X[3],@X[3],$t3);
  1110. &vpaddd ($t1,@X[1],&QWP(16,$K256));
  1111. &vpaddd ($t2,@X[2],&QWP(32,$K256));
  1112. &vpaddd ($t3,@X[3],&QWP(48,$K256));
  1113. &vmovdqa (&QWP(32+0,"esp"),$t0);
  1114. &vmovdqa (&QWP(32+16,"esp"),$t1);
  1115. &vmovdqa (&QWP(32+32,"esp"),$t2);
  1116. &vmovdqa (&QWP(32+48,"esp"),$t3);
  1117. &jmp (&label("avx_bmi_00_47"));
  1118. &set_label("avx_bmi_00_47",16);
  1119. &add ($K256,64);
  1120. for ($i=0,$j=0; $j<4; $j++) {
  1121. &AVX_00_47($j,\&bodyx_00_15,@X);
  1122. push(@X,shift(@X)); # rotate(@X)
  1123. }
  1124. &cmp (&DWP(16*$j,$K256),0x00010203);
  1125. &jne (&label("avx_bmi_00_47"));
  1126. for ($i=0; $i<16; ) {
  1127. foreach(bodyx_00_15()) { eval; }
  1128. }
  1129. &mov ("esi",&DWP(96,"esp")); #ctx
  1130. #&mov ($AH[0],&DWP(0,"esp"));
  1131. &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
  1132. #&mov ("edi", &DWP(8,"esp"));
  1133. &mov ("ecx",&DWP(12,"esp"));
  1134. &add ($AH[0],&DWP(0,"esi"));
  1135. &add ($AH[1],&DWP(4,"esi"));
  1136. &add ("edi",&DWP(8,"esi"));
  1137. &add ("ecx",&DWP(12,"esi"));
  1138. &mov (&DWP(0,"esi"),$AH[0]);
  1139. &mov (&DWP(4,"esi"),$AH[1]);
  1140. &mov (&DWP(8,"esi"),"edi");
  1141. &mov (&DWP(12,"esi"),"ecx");
  1142. #&mov (&DWP(0,"esp"),$AH[0]);
  1143. &mov (&DWP(4,"esp"),$AH[1]);
  1144. &xor ($AH[1],"edi"); # magic
  1145. &mov (&DWP(8,"esp"),"edi");
  1146. &mov (&DWP(12,"esp"),"ecx");
  1147. #&mov ($E,&DWP(16,"esp"));
  1148. &mov ("edi",&DWP(20,"esp"));
  1149. &mov ("ecx",&DWP(24,"esp"));
  1150. &add ($E,&DWP(16,"esi"));
  1151. &add ("edi",&DWP(20,"esi"));
  1152. &add ("ecx",&DWP(24,"esi"));
  1153. &mov (&DWP(16,"esi"),$E);
  1154. &mov (&DWP(20,"esi"),"edi");
  1155. &mov (&DWP(20,"esp"),"edi");
  1156. &mov ("edi",&DWP(28,"esp"));
  1157. &mov (&DWP(24,"esi"),"ecx");
  1158. #&mov (&DWP(16,"esp"),$E);
  1159. &add ("edi",&DWP(28,"esi"));
  1160. &mov (&DWP(24,"esp"),"ecx");
  1161. &mov (&DWP(28,"esi"),"edi");
  1162. &mov (&DWP(28,"esp"),"edi");
  1163. &mov ("edi",&DWP(96+4,"esp")); # inp
  1164. &vmovdqa ($t3,&QWP(64,$K256));
  1165. &sub ($K256,3*64); # rewind K
  1166. &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
  1167. &jb (&label("grand_avx_bmi"));
  1168. &mov ("esp",&DWP(96+12,"esp")); # restore sp
  1169. &vzeroall ();
  1170. &function_end_A();
  1171. }
  1172. }
  1173. }}}
  1174. &function_end_B("sha256_block_data_order");
  1175. &asm_finish();