#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA512 block transform for x86. September 2007.
#
# May 2013.
#
# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
#
# Performance in clock cycles per processed byte (less is better):
#
#			gcc	icc	x86 asm	SIMD(*)	x86_64(**)
# Pentium		100	97	61	-	-
# PIII			75	77	56	-	-
# P4			116	95	82	34.6	30.8
# AMD K8		54	55	36	20.7	9.57
# Core2			66	57	40	15.9	9.97
# Westmere		70	-	38	12.2	9.58
# Sandy Bridge		58	-	35	11.9	11.2
# Ivy Bridge		50	-	33	11.5	8.17
# Haswell		46	-	29	11.3	7.66
# Bulldozer		121	-	50	14.0	13.5
# VIA Nano		91	-	52	33	14.7
# Atom			126	-	68	48(***)	14.7
# Silvermont		97	-	58	42(***)	17.5
#
# (*)	whichever best applies;
# (**)	x86_64 assembler performance is presented for reference
#	purposes only; the results are for integer-only code;
# (***)	paddq is incredibly slow on Atom.
#
# IALU code-path is optimized for older Pentiums. On vanilla Pentium
# the performance improvement over compiler-generated code reaches
# ~60%, while on PIII it is ~35%. On newer µ-archs the improvement
# varies from 15% to 50%, but it matters less there, as they are
# expected to execute the SSE2 code-path, which is commonly ~2-3x
# faster [than compiler-generated code]. The SSE2 code-path is as
# fast as the original sha512-sse2.pl, even though it does not use
# 128-bit operations, which means that an SSE2-aware kernel is no
# longer required to execute the code. Another difference is that the
# new code optimizes the amount of writes, at the cost of a data
# cache "footprint" increased by 1/2KB.
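#
# The module follows the usual perlasm convention: the output flavour
# is taken from the first command-line argument and the generated
# assembly is written to standard output, so a typical invocation
# (flavour and flags here are illustrative, adjust to your build)
# might look like:
#
#	perl sha512-586.pl elf -DOPENSSL_IA32_SSE2 > sha512-586.S
#
# Passing -DOPENSSL_IA32_SSE2 enables the SSE2/SSSE3 code-paths below,
# and a trailing "386" argument restricts output to 386-compatible
# instructions.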
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386");

$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }

&external_label("OPENSSL_ia32cap_P") if ($sse2);
$Tlo=&DWP(0,"esp");	$Thi=&DWP(4,"esp");
$Alo=&DWP(8,"esp");	$Ahi=&DWP(8+4,"esp");
$Blo=&DWP(16,"esp");	$Bhi=&DWP(16+4,"esp");
$Clo=&DWP(24,"esp");	$Chi=&DWP(24+4,"esp");
$Dlo=&DWP(32,"esp");	$Dhi=&DWP(32+4,"esp");
$Elo=&DWP(40,"esp");	$Ehi=&DWP(40+4,"esp");
$Flo=&DWP(48,"esp");	$Fhi=&DWP(48+4,"esp");
$Glo=&DWP(56,"esp");	$Ghi=&DWP(56+4,"esp");
$Hlo=&DWP(64,"esp");	$Hhi=&DWP(64+4,"esp");
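# Note: the x86 code-path keeps T1 and all eight 64-bit working
# variables on the stack as lo/hi 32-bit pairs at the offsets above.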
$K512="ebp";

$Asse2=&QWP(0,"esp");
$Bsse2=&QWP(8,"esp");
$Csse2=&QWP(16,"esp");
$Dsse2=&QWP(24,"esp");
$Esse2=&QWP(32,"esp");
$Fsse2=&QWP(40,"esp");
$Gsse2=&QWP(48,"esp");
$Hsse2=&QWP(56,"esp");
  73. $A="mm0"; # B-D and
  74. $E="mm4"; # F-H are commonly loaded to respectively mm1-mm3 and
  75. # mm5-mm7, but it's done on on-demand basis...
  76. $BxC="mm2"; # ... except for B^C
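# The $BxC "magic" in the SIMD code-paths below relies on the identity
#
#	Maj(a,b,c) = ((a^b)&(b^c))^b
#
# a^b computed in one round serves as b^c in the next one (this
# round's a and b become next round's b and c), which saves one XOR
# per round at the price of keeping b^c in a dedicated register.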
sub BODY_00_15_sse2 {
    my $phase=shift;

	#&movq	("mm5",$Fsse2);			# load f
	#&movq	("mm6",$Gsse2);			# load g

	&movq	("mm1",$E);			# %mm1 is sliding right
	&pxor	("mm5","mm6");			# f^=g
	&psrlq	("mm1",14);
	&movq	($Esse2,$E);			# modulo-scheduled save e
	&pand	("mm5",$E);			# f&=e
	&psllq	($E,23);			# $E is sliding left
	&movq	($A,"mm3") if ($phase<2);
	&movq	(&QWP(8*9,"esp"),"mm7");	# save X[i]
	&movq	("mm3","mm1");			# %mm3 is T1
	&psrlq	("mm1",4);
	&pxor	("mm5","mm6");			# Ch(e,f,g)
	&pxor	("mm3",$E);
	&psllq	($E,23);
	&pxor	("mm3","mm1");
	&movq	($Asse2,$A);			# modulo-scheduled save a
	&paddq	("mm7","mm5");			# X[i]+=Ch(e,f,g)
	&pxor	("mm3",$E);
	&psrlq	("mm1",23);
	&paddq	("mm7",$Hsse2);			# X[i]+=h
	&pxor	("mm3","mm1");
	&psllq	($E,4);
	&paddq	("mm7",&QWP(0,$K512));		# X[i]+=K512[i]
	&pxor	("mm3",$E);			# T1=Sigma1_512(e)
	&movq	($E,$Dsse2);			# e = load d, e in next round
	&paddq	("mm3","mm7");			# T1+=X[i]
	&movq	("mm5",$A);			# %mm5 is sliding right
	&psrlq	("mm5",28);
	&paddq	($E,"mm3");			# d += T1
	&movq	("mm6",$A);			# %mm6 is sliding left
	&movq	("mm7","mm5");
	&psllq	("mm6",25);
	&movq	("mm1",$Bsse2);			# load b
	&psrlq	("mm5",6);
	&pxor	("mm7","mm6");
	&sub	("esp",8);
	&psllq	("mm6",5);
	&pxor	("mm7","mm5");
	&pxor	($A,"mm1");			# a^b, b^c in next round
	&psrlq	("mm5",5);
	&pxor	("mm7","mm6");
	&pand	($BxC,$A);			# (b^c)&(a^b)
	&psllq	("mm6",6);
	&pxor	("mm7","mm5");
	&pxor	($BxC,"mm1");			# [h=]Maj(a,b,c)
	&pxor	("mm6","mm7");			# Sigma0_512(a)
	&movq	("mm7",&QWP(8*(9+16-1),"esp")) if ($phase!=0);	# pre-fetch
	&movq	("mm5",$Fsse2) if ($phase==0);	# load f

	if ($phase>1) {
		&paddq	($BxC,"mm6");		# h+=Sigma0(a)
		&add	($K512,8);
		#&paddq	($BxC,"mm3");		# h+=T1

		($A,$BxC) = ($BxC,$A);		# rotate registers
	} else {
		&paddq	("mm3",$BxC);		# T1+=Maj(a,b,c)
		&movq	($BxC,$A);
		&add	($K512,8);
		&paddq	("mm3","mm6");		# T1+=Sigma0(a)
		&movq	("mm6",$Gsse2) if ($phase==0);	# load g
		#&movq	($A,"mm3");		# h=T1
	}
}
sub BODY_00_15_x86 {
	#define Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
	#	LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
	#	HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
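	# For reference, the lo/hi splits above follow from how a 64-bit
	# rotate decomposes over 32-bit halves; a sketch in C notation
	# (not part of this module):
	#
	#	/* r < 32: */
	#	lo' = (lo >> r) | (hi << (32-r));
	#	hi' = (hi >> r) | (lo << (32-r));
	#	/* r > 32, e.g. ROTR((x),41): */
	#	lo' = (hi >> (r-32)) | (lo << (64-r));
	#	hi' = (lo >> (r-32)) | (hi << (64-r));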
  146. &mov ("ecx",$Elo);
  147. &mov ("edx",$Ehi);
  148. &mov ("esi","ecx");
  149. &shr ("ecx",9); # lo>>9
  150. &mov ("edi","edx");
  151. &shr ("edx",9); # hi>>9
  152. &mov ("ebx","ecx");
  153. &shl ("esi",14); # lo<<14
  154. &mov ("eax","edx");
  155. &shl ("edi",14); # hi<<14
  156. &xor ("ebx","esi");
  157. &shr ("ecx",14-9); # lo>>14
  158. &xor ("eax","edi");
  159. &shr ("edx",14-9); # hi>>14
  160. &xor ("eax","ecx");
  161. &shl ("esi",18-14); # lo<<18
  162. &xor ("ebx","edx");
  163. &shl ("edi",18-14); # hi<<18
  164. &xor ("ebx","esi");
  165. &shr ("ecx",18-14); # lo>>18
  166. &xor ("eax","edi");
  167. &shr ("edx",18-14); # hi>>18
  168. &xor ("eax","ecx");
  169. &shl ("esi",23-18); # lo<<23
  170. &xor ("ebx","edx");
  171. &shl ("edi",23-18); # hi<<23
  172. &xor ("eax","esi");
  173. &xor ("ebx","edi"); # T1 = Sigma1(e)
  174. &mov ("ecx",$Flo);
  175. &mov ("edx",$Fhi);
  176. &mov ("esi",$Glo);
  177. &mov ("edi",$Ghi);
  178. &add ("eax",$Hlo);
  179. &adc ("ebx",$Hhi); # T1 += h
  180. &xor ("ecx","esi");
  181. &xor ("edx","edi");
  182. &and ("ecx",$Elo);
  183. &and ("edx",$Ehi);
  184. &add ("eax",&DWP(8*(9+15)+0,"esp"));
  185. &adc ("ebx",&DWP(8*(9+15)+4,"esp")); # T1 += X[0]
  186. &xor ("ecx","esi");
  187. &xor ("edx","edi"); # Ch(e,f,g) = (f^g)&e)^g
  188. &mov ("esi",&DWP(0,$K512));
  189. &mov ("edi",&DWP(4,$K512)); # K[i]
  190. &add ("eax","ecx");
  191. &adc ("ebx","edx"); # T1 += Ch(e,f,g)
  192. &mov ("ecx",$Dlo);
  193. &mov ("edx",$Dhi);
  194. &add ("eax","esi");
  195. &adc ("ebx","edi"); # T1 += K[i]
  196. &mov ($Tlo,"eax");
  197. &mov ($Thi,"ebx"); # put T1 away
  198. &add ("eax","ecx");
  199. &adc ("ebx","edx"); # d += T1
  200. #define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
  201. # LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
  202. # HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
  203. &mov ("ecx",$Alo);
  204. &mov ("edx",$Ahi);
  205. &mov ($Dlo,"eax");
  206. &mov ($Dhi,"ebx");
  207. &mov ("esi","ecx");
  208. &shr ("ecx",2); # lo>>2
  209. &mov ("edi","edx");
  210. &shr ("edx",2); # hi>>2
  211. &mov ("ebx","ecx");
  212. &shl ("esi",4); # lo<<4
  213. &mov ("eax","edx");
  214. &shl ("edi",4); # hi<<4
  215. &xor ("ebx","esi");
  216. &shr ("ecx",7-2); # lo>>7
  217. &xor ("eax","edi");
  218. &shr ("edx",7-2); # hi>>7
  219. &xor ("ebx","ecx");
  220. &shl ("esi",25-4); # lo<<25
  221. &xor ("eax","edx");
  222. &shl ("edi",25-4); # hi<<25
  223. &xor ("eax","esi");
  224. &shr ("ecx",28-7); # lo>>28
  225. &xor ("ebx","edi");
  226. &shr ("edx",28-7); # hi>>28
  227. &xor ("eax","ecx");
  228. &shl ("esi",30-25); # lo<<30
  229. &xor ("ebx","edx");
  230. &shl ("edi",30-25); # hi<<30
  231. &xor ("eax","esi");
  232. &xor ("ebx","edi"); # Sigma0(a)
  233. &mov ("ecx",$Alo);
  234. &mov ("edx",$Ahi);
  235. &mov ("esi",$Blo);
  236. &mov ("edi",$Bhi);
  237. &add ("eax",$Tlo);
  238. &adc ("ebx",$Thi); # T1 = Sigma0(a)+T1
  239. &or ("ecx","esi");
  240. &or ("edx","edi");
  241. &and ("ecx",$Clo);
  242. &and ("edx",$Chi);
  243. &and ("esi",$Alo);
  244. &and ("edi",$Ahi);
  245. &or ("ecx","esi");
  246. &or ("edx","edi"); # Maj(a,b,c) = ((a|b)&c)|(a&b)
  247. &add ("eax","ecx");
  248. &adc ("ebx","edx"); # T1 += Maj(a,b,c)
  249. &mov ($Tlo,"eax");
  250. &mov ($Thi,"ebx");
  251. &mov (&LB("edx"),&BP(0,$K512)); # pre-fetch LSB of *K
  252. &sub ("esp",8);
  253. &lea ($K512,&DWP(8,$K512)); # K++
  254. }
  255. &function_begin("sha512_block_data_order");
  256. &mov ("esi",wparam(0)); # ctx
  257. &mov ("edi",wparam(1)); # inp
  258. &mov ("eax",wparam(2)); # num
  259. &mov ("ebx","esp"); # saved sp
  260. &call (&label("pic_point")); # make it PIC!
  261. &set_label("pic_point");
  262. &blindpop($K512);
  263. &lea ($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
  264. &sub ("esp",16);
  265. &and ("esp",-64);
  266. &shl ("eax",7);
  267. &add ("eax","edi");
  268. &mov (&DWP(0,"esp"),"esi"); # ctx
  269. &mov (&DWP(4,"esp"),"edi"); # inp
  270. &mov (&DWP(8,"esp"),"eax"); # inp+num*128
  271. &mov (&DWP(12,"esp"),"ebx"); # saved sp
  272. if ($sse2) {
  273. &picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
  274. &mov ("ecx",&DWP(0,"edx"));
  275. &test ("ecx",1<<26);
  276. &jz (&label("loop_x86"));
  277. &mov ("edx",&DWP(4,"edx"));
  278. # load ctx->h[0-7]
  279. &movq ($A,&QWP(0,"esi"));
  280. &and ("ecx",1<<24); # XMM registers availability
  281. &movq ("mm1",&QWP(8,"esi"));
  282. &and ("edx",1<<9); # SSSE3 bit
  283. &movq ($BxC,&QWP(16,"esi"));
  284. &or ("ecx","edx");
  285. &movq ("mm3",&QWP(24,"esi"));
  286. &movq ($E,&QWP(32,"esi"));
  287. &movq ("mm5",&QWP(40,"esi"));
  288. &movq ("mm6",&QWP(48,"esi"));
  289. &movq ("mm7",&QWP(56,"esi"));
  290. &cmp ("ecx",1<<24|1<<9);
  291. &je (&label("SSSE3"));
  292. &sub ("esp",8*10);
  293. &jmp (&label("loop_sse2"));
  294. &set_label("loop_sse2",16);
  295. #&movq ($Asse2,$A);
  296. &movq ($Bsse2,"mm1");
  297. &movq ($Csse2,$BxC);
  298. &movq ($Dsse2,"mm3");
  299. #&movq ($Esse2,$E);
  300. &movq ($Fsse2,"mm5");
  301. &movq ($Gsse2,"mm6");
  302. &pxor ($BxC,"mm1"); # magic
  303. &movq ($Hsse2,"mm7");
  304. &movq ("mm3",$A); # magic
  305. &mov ("eax",&DWP(0,"edi"));
  306. &mov ("ebx",&DWP(4,"edi"));
  307. &add ("edi",8);
  308. &mov ("edx",15); # counter
  309. &bswap ("eax");
  310. &bswap ("ebx");
  311. &jmp (&label("00_14_sse2"));
  312. &set_label("00_14_sse2",16);
  313. &movd ("mm1","eax");
  314. &mov ("eax",&DWP(0,"edi"));
  315. &movd ("mm7","ebx");
  316. &mov ("ebx",&DWP(4,"edi"));
  317. &add ("edi",8);
  318. &bswap ("eax");
  319. &bswap ("ebx");
  320. &punpckldq("mm7","mm1");
  321. &BODY_00_15_sse2();
  322. &dec ("edx");
  323. &jnz (&label("00_14_sse2"));
  324. &movd ("mm1","eax");
  325. &movd ("mm7","ebx");
  326. &punpckldq("mm7","mm1");
  327. &BODY_00_15_sse2(1);
  328. &pxor ($A,$A); # A is in %mm3
  329. &mov ("edx",32); # counter
  330. &jmp (&label("16_79_sse2"));
  331. &set_label("16_79_sse2",16);
  332. for ($j=0;$j<2;$j++) { # 2x unroll
  333. #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15
  334. &movq ("mm5",&QWP(8*(9+16-14),"esp"));
  335. &movq ("mm1","mm7");
  336. &psrlq ("mm7",1);
  337. &movq ("mm6","mm5");
  338. &psrlq ("mm5",6);
  339. &psllq ("mm1",56);
  340. &paddq ($A,"mm3"); # from BODY_00_15
  341. &movq ("mm3","mm7");
  342. &psrlq ("mm7",7-1);
  343. &pxor ("mm3","mm1");
  344. &psllq ("mm1",63-56);
  345. &pxor ("mm3","mm7");
  346. &psrlq ("mm7",8-7);
  347. &pxor ("mm3","mm1");
  348. &movq ("mm1","mm5");
  349. &psrlq ("mm5",19-6);
  350. &pxor ("mm7","mm3"); # sigma0
  351. &psllq ("mm6",3);
  352. &pxor ("mm1","mm5");
  353. &paddq ("mm7",&QWP(8*(9+16),"esp"));
  354. &pxor ("mm1","mm6");
  355. &psrlq ("mm5",61-19);
  356. &paddq ("mm7",&QWP(8*(9+16-9),"esp"));
  357. &pxor ("mm1","mm5");
  358. &psllq ("mm6",45-3);
  359. &movq ("mm5",$Fsse2); # load f
  360. &pxor ("mm1","mm6"); # sigma1
  361. &movq ("mm6",$Gsse2); # load g
  362. &paddq ("mm7","mm1"); # X[i]
  363. #&movq (&QWP(8*9,"esp"),"mm7"); # moved to BODY_00_15
  364. &BODY_00_15_sse2(2);
  365. }
  366. &dec ("edx");
  367. &jnz (&label("16_79_sse2"));
  368. #&movq ($A,$Asse2);
  369. &paddq ($A,"mm3"); # from BODY_00_15
  370. &movq ("mm1",$Bsse2);
  371. #&movq ($BxC,$Csse2);
  372. &movq ("mm3",$Dsse2);
  373. #&movq ($E,$Esse2);
  374. &movq ("mm5",$Fsse2);
  375. &movq ("mm6",$Gsse2);
  376. &movq ("mm7",$Hsse2);
  377. &pxor ($BxC,"mm1"); # de-magic
  378. &paddq ($A,&QWP(0,"esi"));
  379. &paddq ("mm1",&QWP(8,"esi"));
  380. &paddq ($BxC,&QWP(16,"esi"));
  381. &paddq ("mm3",&QWP(24,"esi"));
  382. &paddq ($E,&QWP(32,"esi"));
  383. &paddq ("mm5",&QWP(40,"esi"));
  384. &paddq ("mm6",&QWP(48,"esi"));
  385. &paddq ("mm7",&QWP(56,"esi"));
  386. &mov ("eax",8*80);
  387. &movq (&QWP(0,"esi"),$A);
  388. &movq (&QWP(8,"esi"),"mm1");
  389. &movq (&QWP(16,"esi"),$BxC);
  390. &movq (&QWP(24,"esi"),"mm3");
  391. &movq (&QWP(32,"esi"),$E);
  392. &movq (&QWP(40,"esi"),"mm5");
  393. &movq (&QWP(48,"esi"),"mm6");
  394. &movq (&QWP(56,"esi"),"mm7");
  395. &lea ("esp",&DWP(0,"esp","eax")); # destroy frame
  396. &sub ($K512,"eax"); # rewind K
  397. &cmp ("edi",&DWP(8*10+8,"esp")); # are we done yet?
  398. &jb (&label("loop_sse2"));
  399. &mov ("esp",&DWP(8*10+12,"esp")); # restore sp
  400. &emms ();
  401. &function_end_A();
  402. &set_label("SSSE3",32);
  403. { my ($cnt,$frame)=("ecx","edx");
  404. my @X=map("xmm$_",(0..7));
  405. my $j;
  406. my $i=0;
  407. &lea ($frame,&DWP(-64,"esp"));
  408. &sub ("esp",256);
  409. # fixed stack frame layout
  410. #
  411. # +0 A B C D E F G H # backing store
  412. # +64 X[0]+K[i] .. X[15]+K[i] # XMM->MM xfer area
  413. # +192 # XMM off-load ring buffer
  414. # +256 # saved parameters
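	# Note that $frame points 192 bytes above the adjusted %esp, so
	# the xfer area at +64 is addressed as &QWP(-128,$frame), the
	# ring buffer at +192 as &QWP(0,$frame), and the saved
	# parameters at +256 as &DWP(64,$frame).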
  415. &movdqa (@X[1],&QWP(80*8,$K512)); # byte swap mask
  416. &movdqu (@X[0],&QWP(0,"edi"));
  417. &pshufb (@X[0],@X[1]);
  418. for ($j=0;$j<8;$j++) {
  419. &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
  420. &movdqa (@X[3],&QWP(16*($j%8),$K512));
  421. &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
  422. &movdqu (@X[1],&QWP(16*($j+1),"edi")) if ($j<7); # next input
  423. &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);# restore @X[0]
  424. &paddq (@X[3],@X[0]);
  425. &pshufb (@X[1],@X[2]) if ($j<7);
  426. &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]); # xfer X[i]+K[i]
  427. push(@X,shift(@X)); # rotate(@X)
  428. }
  429. #&jmp (&label("loop_ssse3"));
  430. &nop ();
  431. &set_label("loop_ssse3",32);
  432. &movdqa (@X[2],&QWP(16*(($j+1)%4),$frame)); # pre-restore @X[1]
  433. &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]); # off-load @X[3]
  434. &lea ($K512,&DWP(16*8,$K512));
  435. #&movq ($Asse2,$A); # off-load A-H
  436. &movq ($Bsse2,"mm1");
  437. &mov ("ebx","edi");
  438. &movq ($Csse2,$BxC);
  439. &lea ("edi",&DWP(128,"edi")); # advance input
  440. &movq ($Dsse2,"mm3");
  441. &cmp ("edi","eax");
  442. #&movq ($Esse2,$E);
  443. &movq ($Fsse2,"mm5");
  444. &cmovb ("ebx","edi");
  445. &movq ($Gsse2,"mm6");
  446. &mov ("ecx",4); # loop counter
  447. &pxor ($BxC,"mm1"); # magic
  448. &movq ($Hsse2,"mm7");
  449. &pxor ("mm3","mm3"); # magic
  450. &jmp (&label("00_47_ssse3"));
sub BODY_00_15_ssse3 {		# "phase-less" copy of BODY_00_15_sse2
	(
	'&movq	("mm1",$E)',				# %mm1 is sliding right
	'&movq	("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
	'&pxor	("mm5","mm6")',				# f^=g
	'&psrlq	("mm1",14)',
	'&movq	(&QWP(8*($i+4)%64,"esp"),$E)',		# modulo-scheduled save e
	'&pand	("mm5",$E)',				# f&=e
	'&psllq	($E,23)',				# $E is sliding left
	'&paddq	($A,"mm3")',				# [h+=Maj(a,b,c)]
	'&movq	("mm3","mm1")',				# %mm3 is T1
	'&psrlq	("mm1",4)',
	'&pxor	("mm5","mm6")',				# Ch(e,f,g)
	'&pxor	("mm3",$E)',
	'&psllq	($E,23)',
	'&pxor	("mm3","mm1")',
	'&movq	(&QWP(8*$i%64,"esp"),$A)',		# modulo-scheduled save a
	'&paddq	("mm7","mm5")',				# X[i]+=Ch(e,f,g)
	'&pxor	("mm3",$E)',
	'&psrlq	("mm1",23)',
	'&paddq	("mm7",&QWP(8*($i+7)%64,"esp"))',	# X[i]+=h
	'&pxor	("mm3","mm1")',
	'&psllq	($E,4)',
	'&pxor	("mm3",$E)',				# T1=Sigma1_512(e)
	'&movq	($E,&QWP(8*($i+3)%64,"esp"))',		# e = load d, e in next round
	'&paddq	("mm3","mm7")',				# T1+=X[i]
	'&movq	("mm5",$A)',				# %mm5 is sliding right
	'&psrlq	("mm5",28)',
	'&paddq	($E,"mm3")',				# d += T1
	'&movq	("mm6",$A)',				# %mm6 is sliding left
	'&movq	("mm7","mm5")',
	'&psllq	("mm6",25)',
	'&movq	("mm1",&QWP(8*($i+1)%64,"esp"))',	# load b
	'&psrlq	("mm5",6)',
	'&pxor	("mm7","mm6")',
	'&psllq	("mm6",5)',
	'&pxor	("mm7","mm5")',
	'&pxor	($A,"mm1")',				# a^b, b^c in next round
	'&psrlq	("mm5",5)',
	'&pxor	("mm7","mm6")',
	'&pand	($BxC,$A)',				# (b^c)&(a^b)
	'&psllq	("mm6",6)',
	'&pxor	("mm7","mm5")',
	'&pxor	($BxC,"mm1")',				# [h=]Maj(a,b,c)
	'&pxor	("mm6","mm7")',				# Sigma0_512(a)
	'&movq	("mm5",&QWP(8*($i+5-1)%64,"esp"))',	# pre-load f
	'&paddq	($BxC,"mm6")',				# h+=Sigma0(a)
	'&movq	("mm6",&QWP(8*($i+6-1)%64,"esp"))',	# pre-load g
	'($A,$BxC) = ($BxC,$A); $i--;'
	);
}
  502. &set_label("00_47_ssse3",32);
  503. for(;$j<16;$j++) {
  504. my ($t0,$t2,$t1)=@X[2..4];
  505. my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
  506. &movdqa ($t2,@X[5]);
  507. &movdqa (@X[1],$t0); # restore @X[1]
  508. &palignr ($t0,@X[0],8); # X[1..2]
  509. &movdqa (&QWP(16*($j%4),$frame),@X[4]); # off-load @X[4]
  510. &palignr ($t2,@X[4],8); # X[9..10]
  511. &movdqa ($t1,$t0);
  512. &psrlq ($t0,7);
  513. &paddq (@X[0],$t2); # X[0..1] += X[9..10]
  514. &movdqa ($t2,$t1);
  515. &psrlq ($t1,1);
  516. &psllq ($t2,64-8);
  517. &pxor ($t0,$t1);
  518. &psrlq ($t1,8-1);
  519. &pxor ($t0,$t2);
  520. &psllq ($t2,8-1);
  521. &pxor ($t0,$t1);
  522. &movdqa ($t1,@X[7]);
  523. &pxor ($t0,$t2); # sigma0(X[1..2])
  524. &movdqa ($t2,@X[7]);
  525. &psrlq ($t1,6);
  526. &paddq (@X[0],$t0); # X[0..1] += sigma0(X[1..2])
  527. &movdqa ($t0,@X[7]);
  528. &psrlq ($t2,19);
  529. &psllq ($t0,64-61);
  530. &pxor ($t1,$t2);
  531. &psrlq ($t2,61-19);
  532. &pxor ($t1,$t0);
  533. &psllq ($t0,61-19);
  534. &pxor ($t1,$t2);
  535. &movdqa ($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1]
  536. &pxor ($t1,$t0); # sigma0(X[1..2])
  537. &movdqa ($t0,&QWP(16*($j%8),$K512));
  538. eval(shift(@insns));
  539. &paddq (@X[0],$t1); # X[0..1] += sigma0(X[14..15])
  540. eval(shift(@insns));
  541. eval(shift(@insns));
  542. eval(shift(@insns));
  543. eval(shift(@insns));
  544. &paddq ($t0,@X[0]);
  545. foreach(@insns) { eval; }
  546. &movdqa (&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i]
  547. push(@X,shift(@X)); # rotate(@X)
  548. }
  549. &lea ($K512,&DWP(16*8,$K512));
  550. &dec ("ecx");
  551. &jnz (&label("00_47_ssse3"));
  552. &movdqa (@X[1],&QWP(0,$K512)); # byte swap mask
  553. &lea ($K512,&DWP(-80*8,$K512)); # rewind
  554. &movdqu (@X[0],&QWP(0,"ebx"));
  555. &pshufb (@X[0],@X[1]);
  556. for ($j=0;$j<8;$j++) { # load next or same block
  557. my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
  558. &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
  559. &movdqa (@X[3],&QWP(16*($j%8),$K512));
  560. &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
  561. &movdqu (@X[1],&QWP(16*($j+1),"ebx")) if ($j<7); # next input
  562. &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);# restore @X[0]
  563. &paddq (@X[3],@X[0]);
  564. &pshufb (@X[1],@X[2]) if ($j<7);
  565. foreach(@insns) { eval; }
  566. &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i]
  567. push(@X,shift(@X)); # rotate(@X)
  568. }
  569. #&movq ($A,$Asse2); # load A-H
  570. &movq ("mm1",$Bsse2);
  571. &paddq ($A,"mm3"); # from BODY_00_15
  572. #&movq ($BxC,$Csse2);
  573. &movq ("mm3",$Dsse2);
  574. #&movq ($E,$Esse2);
  575. #&movq ("mm5",$Fsse2);
  576. #&movq ("mm6",$Gsse2);
  577. &movq ("mm7",$Hsse2);
  578. &pxor ($BxC,"mm1"); # de-magic
  579. &paddq ($A,&QWP(0,"esi"));
  580. &paddq ("mm1",&QWP(8,"esi"));
  581. &paddq ($BxC,&QWP(16,"esi"));
  582. &paddq ("mm3",&QWP(24,"esi"));
  583. &paddq ($E,&QWP(32,"esi"));
  584. &paddq ("mm5",&QWP(40,"esi"));
  585. &paddq ("mm6",&QWP(48,"esi"));
  586. &paddq ("mm7",&QWP(56,"esi"));
  587. &movq (&QWP(0,"esi"),$A);
  588. &movq (&QWP(8,"esi"),"mm1");
  589. &movq (&QWP(16,"esi"),$BxC);
  590. &movq (&QWP(24,"esi"),"mm3");
  591. &movq (&QWP(32,"esi"),$E);
  592. &movq (&QWP(40,"esi"),"mm5");
  593. &movq (&QWP(48,"esi"),"mm6");
  594. &movq (&QWP(56,"esi"),"mm7");
  595. &cmp ("edi","eax") # are we done yet?
  596. &jb (&label("loop_ssse3"));
  597. &mov ("esp",&DWP(64+12,$frame)); # restore sp
  598. &emms ();
  599. }
  600. &function_end_A();
  601. }
  602. &set_label("loop_x86",16);
  603. # copy input block to stack reversing byte and qword order
  604. for ($i=0;$i<8;$i++) {
  605. &mov ("eax",&DWP($i*16+0,"edi"));
  606. &mov ("ebx",&DWP($i*16+4,"edi"));
  607. &mov ("ecx",&DWP($i*16+8,"edi"));
  608. &mov ("edx",&DWP($i*16+12,"edi"));
  609. &bswap ("eax");
  610. &bswap ("ebx");
  611. &bswap ("ecx");
  612. &bswap ("edx");
  613. &push ("eax");
  614. &push ("ebx");
  615. &push ("ecx");
  616. &push ("edx");
  617. }
  618. &add ("edi",128);
  619. &sub ("esp",9*8); # place for T,A,B,C,D,E,F,G,H
  620. &mov (&DWP(8*(9+16)+4,"esp"),"edi");
  621. # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
  622. &lea ("edi",&DWP(8,"esp"));
  623. &mov ("ecx",16);
  624. &data_word(0xA5F3F689); # rep movsd
  625. &set_label("00_15_x86",16);
  626. &BODY_00_15_x86();
  627. &cmp (&LB("edx"),0x94);
  628. &jne (&label("00_15_x86"));
  629. &set_label("16_79_x86",16);
  630. #define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
  631. # LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
  632. # HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
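	# (the HI half ends with a bare hi>>7 because the last term of
	# sigma0 is a plain shift, not a rotation, so zeroes shift in
	# from the top; the same applies to the hi>>6 term of sigma1
	# further below)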
  633. &mov ("ecx",&DWP(8*(9+15+16-1)+0,"esp"));
  634. &mov ("edx",&DWP(8*(9+15+16-1)+4,"esp"));
  635. &mov ("esi","ecx");
  636. &shr ("ecx",1); # lo>>1
  637. &mov ("edi","edx");
  638. &shr ("edx",1); # hi>>1
  639. &mov ("eax","ecx");
  640. &shl ("esi",24); # lo<<24
  641. &mov ("ebx","edx");
  642. &shl ("edi",24); # hi<<24
  643. &xor ("ebx","esi");
  644. &shr ("ecx",7-1); # lo>>7
  645. &xor ("eax","edi");
  646. &shr ("edx",7-1); # hi>>7
  647. &xor ("eax","ecx");
  648. &shl ("esi",31-24); # lo<<31
  649. &xor ("ebx","edx");
  650. &shl ("edi",25-24); # hi<<25
  651. &xor ("ebx","esi");
  652. &shr ("ecx",8-7); # lo>>8
  653. &xor ("eax","edi");
  654. &shr ("edx",8-7); # hi>>8
  655. &xor ("eax","ecx");
  656. &shl ("edi",31-25); # hi<<31
  657. &xor ("ebx","edx");
  658. &xor ("eax","edi"); # T1 = sigma0(X[-15])
  659. &mov (&DWP(0,"esp"),"eax");
  660. &mov (&DWP(4,"esp"),"ebx"); # put T1 away
  661. #define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
  662. # LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
  663. # HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
  664. &mov ("ecx",&DWP(8*(9+15+16-14)+0,"esp"));
  665. &mov ("edx",&DWP(8*(9+15+16-14)+4,"esp"));
  666. &mov ("esi","ecx");
  667. &shr ("ecx",6); # lo>>6
  668. &mov ("edi","edx");
  669. &shr ("edx",6); # hi>>6
  670. &mov ("eax","ecx");
  671. &shl ("esi",3); # lo<<3
  672. &mov ("ebx","edx");
  673. &shl ("edi",3); # hi<<3
  674. &xor ("eax","esi");
  675. &shr ("ecx",19-6); # lo>>19
  676. &xor ("ebx","edi");
  677. &shr ("edx",19-6); # hi>>19
  678. &xor ("eax","ecx");
  679. &shl ("esi",13-3); # lo<<13
  680. &xor ("ebx","edx");
  681. &shl ("edi",13-3); # hi<<13
  682. &xor ("ebx","esi");
  683. &shr ("ecx",29-19); # lo>>29
  684. &xor ("eax","edi");
  685. &shr ("edx",29-19); # hi>>29
  686. &xor ("ebx","ecx");
  687. &shl ("edi",26-13); # hi<<26
  688. &xor ("eax","edx");
  689. &xor ("eax","edi"); # sigma1(X[-2])
  690. &mov ("ecx",&DWP(8*(9+15+16)+0,"esp"));
  691. &mov ("edx",&DWP(8*(9+15+16)+4,"esp"));
  692. &add ("eax",&DWP(0,"esp"));
  693. &adc ("ebx",&DWP(4,"esp")); # T1 = sigma1(X[-2])+T1
  694. &mov ("esi",&DWP(8*(9+15+16-9)+0,"esp"));
  695. &mov ("edi",&DWP(8*(9+15+16-9)+4,"esp"));
  696. &add ("eax","ecx");
  697. &adc ("ebx","edx"); # T1 += X[-16]
  698. &add ("eax","esi");
  699. &adc ("ebx","edi"); # T1 += X[-7]
  700. &mov (&DWP(8*(9+15)+0,"esp"),"eax");
  701. &mov (&DWP(8*(9+15)+4,"esp"),"ebx"); # save X[0]
  702. &BODY_00_15_x86();
  703. &cmp (&LB("edx"),0x17);
  704. &jne (&label("16_79_x86"));
  705. &mov ("esi",&DWP(8*(9+16+80)+0,"esp"));# ctx
  706. &mov ("edi",&DWP(8*(9+16+80)+4,"esp"));# inp
  707. for($i=0;$i<4;$i++) {
  708. &mov ("eax",&DWP($i*16+0,"esi"));
  709. &mov ("ebx",&DWP($i*16+4,"esi"));
  710. &mov ("ecx",&DWP($i*16+8,"esi"));
  711. &mov ("edx",&DWP($i*16+12,"esi"));
  712. &add ("eax",&DWP(8+($i*16)+0,"esp"));
  713. &adc ("ebx",&DWP(8+($i*16)+4,"esp"));
  714. &mov (&DWP($i*16+0,"esi"),"eax");
  715. &mov (&DWP($i*16+4,"esi"),"ebx");
  716. &add ("ecx",&DWP(8+($i*16)+8,"esp"));
  717. &adc ("edx",&DWP(8+($i*16)+12,"esp"));
  718. &mov (&DWP($i*16+8,"esi"),"ecx");
  719. &mov (&DWP($i*16+12,"esi"),"edx");
  720. }
  721. &add ("esp",8*(9+16+80)); # destroy frame
  722. &sub ($K512,8*80); # rewind K
  723. &cmp ("edi",&DWP(8,"esp")); # are we done yet?
  724. &jb (&label("loop_x86"));
  725. &mov ("esp",&DWP(12,"esp")); # restore sp
  726. &function_end_A();
  727. &set_label("K512",64); # Yes! I keep it in the code segment!
  728. &data_word(0xd728ae22,0x428a2f98); # u64
  729. &data_word(0x23ef65cd,0x71374491); # u64
  730. &data_word(0xec4d3b2f,0xb5c0fbcf); # u64
  731. &data_word(0x8189dbbc,0xe9b5dba5); # u64
  732. &data_word(0xf348b538,0x3956c25b); # u64
  733. &data_word(0xb605d019,0x59f111f1); # u64
  734. &data_word(0xaf194f9b,0x923f82a4); # u64
  735. &data_word(0xda6d8118,0xab1c5ed5); # u64
  736. &data_word(0xa3030242,0xd807aa98); # u64
  737. &data_word(0x45706fbe,0x12835b01); # u64
  738. &data_word(0x4ee4b28c,0x243185be); # u64
  739. &data_word(0xd5ffb4e2,0x550c7dc3); # u64
  740. &data_word(0xf27b896f,0x72be5d74); # u64
  741. &data_word(0x3b1696b1,0x80deb1fe); # u64
  742. &data_word(0x25c71235,0x9bdc06a7); # u64
  743. &data_word(0xcf692694,0xc19bf174); # u64
  744. &data_word(0x9ef14ad2,0xe49b69c1); # u64
  745. &data_word(0x384f25e3,0xefbe4786); # u64
  746. &data_word(0x8b8cd5b5,0x0fc19dc6); # u64
  747. &data_word(0x77ac9c65,0x240ca1cc); # u64
  748. &data_word(0x592b0275,0x2de92c6f); # u64
  749. &data_word(0x6ea6e483,0x4a7484aa); # u64
  750. &data_word(0xbd41fbd4,0x5cb0a9dc); # u64
  751. &data_word(0x831153b5,0x76f988da); # u64
  752. &data_word(0xee66dfab,0x983e5152); # u64
  753. &data_word(0x2db43210,0xa831c66d); # u64
  754. &data_word(0x98fb213f,0xb00327c8); # u64
  755. &data_word(0xbeef0ee4,0xbf597fc7); # u64
  756. &data_word(0x3da88fc2,0xc6e00bf3); # u64
  757. &data_word(0x930aa725,0xd5a79147); # u64
  758. &data_word(0xe003826f,0x06ca6351); # u64
  759. &data_word(0x0a0e6e70,0x14292967); # u64
  760. &data_word(0x46d22ffc,0x27b70a85); # u64
  761. &data_word(0x5c26c926,0x2e1b2138); # u64
  762. &data_word(0x5ac42aed,0x4d2c6dfc); # u64
  763. &data_word(0x9d95b3df,0x53380d13); # u64
  764. &data_word(0x8baf63de,0x650a7354); # u64
  765. &data_word(0x3c77b2a8,0x766a0abb); # u64
  766. &data_word(0x47edaee6,0x81c2c92e); # u64
  767. &data_word(0x1482353b,0x92722c85); # u64
  768. &data_word(0x4cf10364,0xa2bfe8a1); # u64
  769. &data_word(0xbc423001,0xa81a664b); # u64
  770. &data_word(0xd0f89791,0xc24b8b70); # u64
  771. &data_word(0x0654be30,0xc76c51a3); # u64
  772. &data_word(0xd6ef5218,0xd192e819); # u64
  773. &data_word(0x5565a910,0xd6990624); # u64
  774. &data_word(0x5771202a,0xf40e3585); # u64
  775. &data_word(0x32bbd1b8,0x106aa070); # u64
  776. &data_word(0xb8d2d0c8,0x19a4c116); # u64
  777. &data_word(0x5141ab53,0x1e376c08); # u64
  778. &data_word(0xdf8eeb99,0x2748774c); # u64
  779. &data_word(0xe19b48a8,0x34b0bcb5); # u64
  780. &data_word(0xc5c95a63,0x391c0cb3); # u64
  781. &data_word(0xe3418acb,0x4ed8aa4a); # u64
  782. &data_word(0x7763e373,0x5b9cca4f); # u64
  783. &data_word(0xd6b2b8a3,0x682e6ff3); # u64
  784. &data_word(0x5defb2fc,0x748f82ee); # u64
  785. &data_word(0x43172f60,0x78a5636f); # u64
  786. &data_word(0xa1f0ab72,0x84c87814); # u64
  787. &data_word(0x1a6439ec,0x8cc70208); # u64
  788. &data_word(0x23631e28,0x90befffa); # u64
  789. &data_word(0xde82bde9,0xa4506ceb); # u64
  790. &data_word(0xb2c67915,0xbef9a3f7); # u64
  791. &data_word(0xe372532b,0xc67178f2); # u64
  792. &data_word(0xea26619c,0xca273ece); # u64
  793. &data_word(0x21c0c207,0xd186b8c7); # u64
  794. &data_word(0xcde0eb1e,0xeada7dd6); # u64
  795. &data_word(0xee6ed178,0xf57d4f7f); # u64
  796. &data_word(0x72176fba,0x06f067aa); # u64
  797. &data_word(0xa2c898a6,0x0a637dc5); # u64
  798. &data_word(0xbef90dae,0x113f9804); # u64
  799. &data_word(0x131c471b,0x1b710b35); # u64
  800. &data_word(0x23047d84,0x28db77f5); # u64
  801. &data_word(0x40c72493,0x32caab7b); # u64
  802. &data_word(0x15c9bebc,0x3c9ebe0a); # u64
  803. &data_word(0x9c100d4c,0x431d67c4); # u64
  804. &data_word(0xcb3e42b6,0x4cc5d4be); # u64
  805. &data_word(0xfc657e2a,0x597f299c); # u64
  806. &data_word(0x3ad6faec,0x5fcb6fab); # u64
  807. &data_word(0x4a475817,0x6c44198c); # u64
  808. &data_word(0x04050607,0x00010203); # byte swap
  809. &data_word(0x0c0d0e0f,0x08090a0b); # mask
  810. &function_end_B("sha512_block_data_order");
  811. &asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
  812. &asm_finish();