#!/usr/bin/env perl
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPL terms is granted.
# ====================================================================
# SHA512 block procedure for ARMv4. September 2007.
#
# This code is ~4.5 (four and a half) times faster than code generated
# by gcc 3.4 and spends ~72 clock cycles per byte [on single-issue
# Xscale PXA250 core].
#
# July 2010.
#
# Rescheduling for dual-issue pipeline resulted in 6% improvement on
# Cortex-A8 core and ~40 cycles per processed byte.
#
# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in 7%
# improvement on Cortex-A8 core and ~38 cycles per byte.
#
# March 2011.
#
# Add NEON implementation. On Cortex-A8 it was measured to process
# one byte in 23.3 cycles or ~60% faster than integer-only code.
#
# August 2012.
#
# Improve NEON performance by 12% on Snapdragon S4. In absolute
# terms it's 22.6 cycles per byte, which is a disappointing result.
# Technical writers asserted that the 3-way S4 pipeline can sustain
# multiple NEON instructions per cycle, but dual NEON issue could
# not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
# for further details. On a side note, Cortex-A15 processes one byte
# in 16 cycles.
# Byte order [in]dependence. =========================================
#
# Originally the caller was expected to maintain a specific *dword*
# order in h[0-7], namely with the most significant dword at the
# *lower* address, which was reflected in the two parameters below
# as 0 and 4. Now the caller is expected to maintain native byte
# order for whole 64-bit values.
$hi="HI";
$lo="LO";
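# $hi/$lo expand to the HI/LO symbols #define-d in the preamble below,
# which resolve to 0 or 4 depending on target endianness. E.g. on a
# little-endian target (LO=0, HI=4), with $Eoff=32 as set further down:
#
#	ldr	$Elo,[$ctx,#$Eoff+$lo]	@ emits "ldr r7,[r0,#32+LO]", e.lo
#	ldr	$Ehi,[$ctx,#$Eoff+$hi]	@ emits "ldr r8,[r0,#32+HI]", e.hi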
# ====================================================================

$flavour = shift;
if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
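# The generator is typically invoked as "perl sha512-armv4.pl <flavour>
# <output.S>"; a recognized flavour (e.g. "linux32") pipes the output
# through arm-xlate.pl for assembler-dialect translation, while "void"
# or a bare output path writes the raw perlasm text directly. Flavour
# names are supplied by the build system rather than defined here.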
$ctx="r0";	# parameter block
$inp="r1";
$len="r2";

$Tlo="r3";
$Thi="r4";
$Alo="r5";
$Ahi="r6";
$Elo="r7";
$Ehi="r8";
$t0="r9";
$t1="r10";
$t2="r11";
$t3="r12";
############	r13 is stack pointer
$Ktbl="r14";
############	r15 is program counter

$Aoff=8*0;
$Boff=8*1;
$Coff=8*2;
$Doff=8*3;
$Eoff=8*4;
$Foff=8*5;
$Goff=8*6;
$Hoff=8*7;
$Xoff=8*8;
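# The *off constants serve double duty: as byte offsets of a..h within
# the context ($ctx) and within the on-stack working copy. The X[i]
# window of message words begins at sp+$Xoff; BODY_00_15 lowers sp by 8
# every round, so after 80 rounds the window has grown by the 640 bytes
# that the epilogue reclaims with "add sp,sp,#640".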
sub BODY_00_15() {
my $magic = shift;
$code.=<<___;
	@ Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
	@ LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
	@ HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
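	@ General rule for the two-word decomposition used throughout:
	@ for n<32, ROTR64(x,n).lo = lo>>n ^ hi<<(32-n) and likewise for
	@ .hi with the halves exchanged; for n>=32 the halves swap roles
	@ and n becomes n-32, e.g. ROTR 41 contributes hi>>9^lo<<23 above.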
	mov	$t0,$Elo,lsr#14
	str	$Tlo,[sp,#$Xoff+0]
	mov	$t1,$Ehi,lsr#14
	str	$Thi,[sp,#$Xoff+4]
	eor	$t0,$t0,$Ehi,lsl#18
	ldr	$t2,[sp,#$Hoff+0]	@ h.lo
	eor	$t1,$t1,$Elo,lsl#18
	ldr	$t3,[sp,#$Hoff+4]	@ h.hi
	eor	$t0,$t0,$Elo,lsr#18
	eor	$t1,$t1,$Ehi,lsr#18
	eor	$t0,$t0,$Ehi,lsl#14
	eor	$t1,$t1,$Elo,lsl#14
	eor	$t0,$t0,$Ehi,lsr#9
	eor	$t1,$t1,$Elo,lsr#9
	eor	$t0,$t0,$Elo,lsl#23
	eor	$t1,$t1,$Ehi,lsl#23	@ Sigma1(e)
	adds	$Tlo,$Tlo,$t0
	ldr	$t0,[sp,#$Foff+0]	@ f.lo
	adc	$Thi,$Thi,$t1		@ T += Sigma1(e)
	ldr	$t1,[sp,#$Foff+4]	@ f.hi
	adds	$Tlo,$Tlo,$t2
	ldr	$t2,[sp,#$Goff+0]	@ g.lo
	adc	$Thi,$Thi,$t3		@ T += h
	ldr	$t3,[sp,#$Goff+4]	@ g.hi
	eor	$t0,$t0,$t2
	str	$Elo,[sp,#$Eoff+0]
	eor	$t1,$t1,$t3
	str	$Ehi,[sp,#$Eoff+4]
	and	$t0,$t0,$Elo
	str	$Alo,[sp,#$Aoff+0]
	and	$t1,$t1,$Ehi
	str	$Ahi,[sp,#$Aoff+4]
	eor	$t0,$t0,$t2
	ldr	$t2,[$Ktbl,#$lo]	@ K[i].lo
	eor	$t1,$t1,$t3		@ Ch(e,f,g)
	ldr	$t3,[$Ktbl,#$hi]	@ K[i].hi

	adds	$Tlo,$Tlo,$t0
	ldr	$Elo,[sp,#$Doff+0]	@ d.lo
	adc	$Thi,$Thi,$t1		@ T += Ch(e,f,g)
	ldr	$Ehi,[sp,#$Doff+4]	@ d.hi
	adds	$Tlo,$Tlo,$t2
	and	$t0,$t2,#0xff
	adc	$Thi,$Thi,$t3		@ T += K[i]
	adds	$Elo,$Elo,$Tlo
	ldr	$t2,[sp,#$Boff+0]	@ b.lo
	adc	$Ehi,$Ehi,$Thi		@ d += T
	teq	$t0,#$magic

	ldr	$t3,[sp,#$Coff+0]	@ c.lo
#if __ARM_ARCH__>=7
	it	eq			@ Thumb2 thing, sanity check in ARM
#endif
	orreq	$Ktbl,$Ktbl,#1

	@ Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
	@ LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
	@ HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
	mov	$t0,$Alo,lsr#28
	mov	$t1,$Ahi,lsr#28
	eor	$t0,$t0,$Ahi,lsl#4
	eor	$t1,$t1,$Alo,lsl#4
	eor	$t0,$t0,$Ahi,lsr#2
	eor	$t1,$t1,$Alo,lsr#2
	eor	$t0,$t0,$Alo,lsl#30
	eor	$t1,$t1,$Ahi,lsl#30
	eor	$t0,$t0,$Ahi,lsr#7
	eor	$t1,$t1,$Alo,lsr#7
	eor	$t0,$t0,$Alo,lsl#25
	eor	$t1,$t1,$Ahi,lsl#25	@ Sigma0(a)
	adds	$Tlo,$Tlo,$t0
	and	$t0,$Alo,$t2
	adc	$Thi,$Thi,$t1		@ T += Sigma0(a)
	ldr	$t1,[sp,#$Boff+4]	@ b.hi
	orr	$Alo,$Alo,$t2
	ldr	$t2,[sp,#$Coff+4]	@ c.hi
	and	$Alo,$Alo,$t3
	and	$t3,$Ahi,$t1
	orr	$Ahi,$Ahi,$t1
	orr	$Alo,$Alo,$t0		@ Maj(a,b,c).lo
	and	$Ahi,$Ahi,$t2
	adds	$Alo,$Alo,$Tlo
	orr	$Ahi,$Ahi,$t3		@ Maj(a,b,c).hi
	sub	sp,sp,#8
	adc	$Ahi,$Ahi,$Thi		@ h += T
	tst	$Ktbl,#1
	add	$Ktbl,$Ktbl,#8
___
}
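# For reference, the dataflow interleaved with loads/stores above is one
# standard FIPS 180-4 round; an illustrative sketch (not used by the
# generator, additions understood modulo 2^64):
#
#	$T1 = $h + Sigma1($e) + Ch($e,$f,$g) + $K[$i] + $W[$i];
#	($h,$g,$f,$e) = ($g,$f,$e,$d + $T1);
#	($d,$c,$b,$a) = ($c,$b,$a,$T1 + Sigma0($a) + Maj($a,$b,$c));
#
# The and/teq/orreq sequence is not part of the round: it sets bit 0 of
# $Ktbl when the distinctive low byte ($magic) of the final K[i] of a
# phase comes up, and the callers below test that bit to exit their
# loops.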
$code=<<___;
#ifndef __KERNEL__
# include "arm_arch.h"
# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif

#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1)	.word	lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1)	.word	hi0,lo0, hi1,lo1
#endif
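@ WORD64 lays each 64-bit constant out in native dword order, so the
@ 32-bit ldr pairs (through the HI/LO offsets) and the NEON vld1.64
@ loads below both observe the same 64-bit values on either endianness.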
.text
#if __ARM_ARCH__<7 || defined(__APPLE__)
.code	32
#else
.syntax unified
# ifdef __thumb2__
# define adrl adr
.thumb
# else
.code	32
# endif
#endif

.type	K512,%object
.align	5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size	K512,.-K512
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.Lsha512_block_data_order
.skip	32-4
#else
.skip	32
#endif
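@ K512 is 80 quadwords (640 bytes); with the 32 bytes reserved above it
@ ends exactly 672 bytes before .Lsha512_block_data_order, which is how
@ "sub $Ktbl,r3,#672" below finds the table, and why both code paths
@ rewind it by 640 per block.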
.global	sha512_block_data_order
.type	sha512_block_data_order,%function
sha512_block_data_order:
.Lsha512_block_data_order:
#if __ARM_ARCH__<7
	sub	r3,pc,#8		@ sha512_block_data_order
#else
	adr	r3,sha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap
	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
#ifdef __APPLE__
	ldr	r12,[r12]
#endif
	tst	r12,#1
	bne	.LNEON
#endif
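@ tst r12,#1 tests the NEON capability bit recorded in OPENSSL_armcap_P;
@ if it is set the NEON path at .LNEON is taken, otherwise execution
@ falls through to the integer-only path below.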
	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
	stmdb	sp!,{r4-r12,lr}
	sub	$Ktbl,r3,#672		@ K512
	sub	sp,sp,#9*8

	ldr	$Elo,[$ctx,#$Eoff+$lo]
	ldr	$Ehi,[$ctx,#$Eoff+$hi]
	ldr	$t0, [$ctx,#$Goff+$lo]
	ldr	$t1, [$ctx,#$Goff+$hi]
	ldr	$t2, [$ctx,#$Hoff+$lo]
	ldr	$t3, [$ctx,#$Hoff+$hi]
.Loop:
	str	$t0, [sp,#$Goff+0]
	str	$t1, [sp,#$Goff+4]
	str	$t2, [sp,#$Hoff+0]
	str	$t3, [sp,#$Hoff+4]
	ldr	$Alo,[$ctx,#$Aoff+$lo]
	ldr	$Ahi,[$ctx,#$Aoff+$hi]
	ldr	$Tlo,[$ctx,#$Boff+$lo]
	ldr	$Thi,[$ctx,#$Boff+$hi]
	ldr	$t0, [$ctx,#$Coff+$lo]
	ldr	$t1, [$ctx,#$Coff+$hi]
	ldr	$t2, [$ctx,#$Doff+$lo]
	ldr	$t3, [$ctx,#$Doff+$hi]
	str	$Tlo,[sp,#$Boff+0]
	str	$Thi,[sp,#$Boff+4]
	str	$t0, [sp,#$Coff+0]
	str	$t1, [sp,#$Coff+4]
	str	$t2, [sp,#$Doff+0]
	str	$t3, [sp,#$Doff+4]
	ldr	$Tlo,[$ctx,#$Foff+$lo]
	ldr	$Thi,[$ctx,#$Foff+$hi]
	str	$Tlo,[sp,#$Foff+0]
	str	$Thi,[sp,#$Foff+4]

.L00_15:
#if __ARM_ARCH__<7
	ldrb	$Tlo,[$inp,#7]
	ldrb	$t0, [$inp,#6]
	ldrb	$t1, [$inp,#5]
	ldrb	$t2, [$inp,#4]
	ldrb	$Thi,[$inp,#3]
	ldrb	$t3, [$inp,#2]
	orr	$Tlo,$Tlo,$t0,lsl#8
	ldrb	$t0, [$inp,#1]
	orr	$Tlo,$Tlo,$t1,lsl#16
	ldrb	$t1, [$inp],#8
	orr	$Tlo,$Tlo,$t2,lsl#24
	orr	$Thi,$Thi,$t3,lsl#8
	orr	$Thi,$Thi,$t0,lsl#16
	orr	$Thi,$Thi,$t1,lsl#24
#else
	ldr	$Tlo,[$inp,#4]
	ldr	$Thi,[$inp],#8
#ifdef __ARMEL__
	rev	$Tlo,$Tlo
	rev	$Thi,$Thi
#endif
#endif
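	@ Either branch above has fetched the next big-endian 64-bit
	@ message word into $Thi:$Tlo: pre-ARMv7 cores have neither rev
	@ nor unaligned word loads, so the word is assembled byte by
	@ byte, while ARMv7+ uses two word loads plus rev on little-endian.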
___
	&BODY_00_15(0x94);
$code.=<<___;
	tst	$Ktbl,#1
	beq	.L00_15
	ldr	$t0,[sp,#`$Xoff+8*(16-1)`+0]
	ldr	$t1,[sp,#`$Xoff+8*(16-1)`+4]
	bic	$Ktbl,$Ktbl,#1
.L16_79:
	@ sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
	@ LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
	@ HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
	mov	$Tlo,$t0,lsr#1
	ldr	$t2,[sp,#`$Xoff+8*(16-14)`+0]
	mov	$Thi,$t1,lsr#1
	ldr	$t3,[sp,#`$Xoff+8*(16-14)`+4]
	eor	$Tlo,$Tlo,$t1,lsl#31
	eor	$Thi,$Thi,$t0,lsl#31
	eor	$Tlo,$Tlo,$t0,lsr#8
	eor	$Thi,$Thi,$t1,lsr#8
	eor	$Tlo,$Tlo,$t1,lsl#24
	eor	$Thi,$Thi,$t0,lsl#24
	eor	$Tlo,$Tlo,$t0,lsr#7
	eor	$Thi,$Thi,$t1,lsr#7
	eor	$Tlo,$Tlo,$t1,lsl#25

	@ sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
	@ LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
	@ HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
	mov	$t0,$t2,lsr#19
	mov	$t1,$t3,lsr#19
	eor	$t0,$t0,$t3,lsl#13
	eor	$t1,$t1,$t2,lsl#13
	eor	$t0,$t0,$t3,lsr#29
	eor	$t1,$t1,$t2,lsr#29
	eor	$t0,$t0,$t2,lsl#3
	eor	$t1,$t1,$t3,lsl#3
	eor	$t0,$t0,$t2,lsr#6
	eor	$t1,$t1,$t3,lsr#6
	ldr	$t2,[sp,#`$Xoff+8*(16-9)`+0]
	eor	$t0,$t0,$t3,lsl#26
	ldr	$t3,[sp,#`$Xoff+8*(16-9)`+4]
	adds	$Tlo,$Tlo,$t0
	ldr	$t0,[sp,#`$Xoff+8*16`+0]
	adc	$Thi,$Thi,$t1
	ldr	$t1,[sp,#`$Xoff+8*16`+4]
	adds	$Tlo,$Tlo,$t2
	adc	$Thi,$Thi,$t3
	adds	$Tlo,$Tlo,$t0
	adc	$Thi,$Thi,$t1
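	@ At this point $Thi:$Tlo = sigma0(X[i-15]) + sigma1(X[i-2]) +
	@ X[i-7] + X[i-16], i.e. the next message word; the offsets work
	@ because the frame slides down 8 bytes per round, leaving
	@ [sp,#$Xoff+8*k] holding X[i-k] for k=1..16.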
___
	&BODY_00_15(0x17);
$code.=<<___;
#if __ARM_ARCH__>=7
	ittt	eq			@ Thumb2 thing, sanity check in ARM
#endif
	ldreq	$t0,[sp,#`$Xoff+8*(16-1)`+0]
	ldreq	$t1,[sp,#`$Xoff+8*(16-1)`+4]
	beq	.L16_79
	bic	$Ktbl,$Ktbl,#1

	ldr	$Tlo,[sp,#$Boff+0]
	ldr	$Thi,[sp,#$Boff+4]
	ldr	$t0, [$ctx,#$Aoff+$lo]
	ldr	$t1, [$ctx,#$Aoff+$hi]
	ldr	$t2, [$ctx,#$Boff+$lo]
	ldr	$t3, [$ctx,#$Boff+$hi]
	adds	$t0,$Alo,$t0
	str	$t0, [$ctx,#$Aoff+$lo]
	adc	$t1,$Ahi,$t1
	str	$t1, [$ctx,#$Aoff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Boff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Boff+$hi]

	ldr	$Alo,[sp,#$Coff+0]
	ldr	$Ahi,[sp,#$Coff+4]
	ldr	$Tlo,[sp,#$Doff+0]
	ldr	$Thi,[sp,#$Doff+4]
	ldr	$t0, [$ctx,#$Coff+$lo]
	ldr	$t1, [$ctx,#$Coff+$hi]
	ldr	$t2, [$ctx,#$Doff+$lo]
	ldr	$t3, [$ctx,#$Doff+$hi]
	adds	$t0,$Alo,$t0
	str	$t0, [$ctx,#$Coff+$lo]
	adc	$t1,$Ahi,$t1
	str	$t1, [$ctx,#$Coff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Doff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Doff+$hi]

	ldr	$Tlo,[sp,#$Foff+0]
	ldr	$Thi,[sp,#$Foff+4]
	ldr	$t0, [$ctx,#$Eoff+$lo]
	ldr	$t1, [$ctx,#$Eoff+$hi]
	ldr	$t2, [$ctx,#$Foff+$lo]
	ldr	$t3, [$ctx,#$Foff+$hi]
	adds	$Elo,$Elo,$t0
	str	$Elo,[$ctx,#$Eoff+$lo]
	adc	$Ehi,$Ehi,$t1
	str	$Ehi,[$ctx,#$Eoff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Foff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Foff+$hi]

	ldr	$Alo,[sp,#$Goff+0]
	ldr	$Ahi,[sp,#$Goff+4]
	ldr	$Tlo,[sp,#$Hoff+0]
	ldr	$Thi,[sp,#$Hoff+4]
	ldr	$t0, [$ctx,#$Goff+$lo]
	ldr	$t1, [$ctx,#$Goff+$hi]
	ldr	$t2, [$ctx,#$Hoff+$lo]
	ldr	$t3, [$ctx,#$Hoff+$hi]
	adds	$t0,$Alo,$t0
	str	$t0, [$ctx,#$Goff+$lo]
	adc	$t1,$Ahi,$t1
	str	$t1, [$ctx,#$Goff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Hoff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Hoff+$hi]

	add	sp,sp,#640
	sub	$Ktbl,$Ktbl,#640

	teq	$inp,$len
	bne	.Loop

	add	sp,sp,#8*9		@ destroy frame
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	sha512_block_data_order,.-sha512_block_data_order
___
{
my @Sigma0=(28,34,39);
my @Sigma1=(14,18,41);
my @sigma0=(1, 8, 7);
my @sigma1=(19,61,6);

my $Ktbl="r3";
my $cnt="r12";	# volatile register known as ip, intra-procedure-call scratch

my @X=map("d$_",(0..15));
my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));

sub NEON_00_15() {
my $i=shift;
my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31));	# temps

$code.=<<___ if ($i<16 || $i&1);
	vshr.u64	$t0,$e,#@Sigma1[0]	@ $i
#if $i<16
	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
#endif
	vshr.u64	$t1,$e,#@Sigma1[1]
#if $i>0
	vadd.i64	$a,$Maj			@ h+=Maj from the past
#endif
	vshr.u64	$t2,$e,#@Sigma1[2]
___
$code.=<<___;
	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
	vsli.64		$t0,$e,#`64-@Sigma1[0]`
	vsli.64		$t1,$e,#`64-@Sigma1[1]`
	vmov		$Ch,$e
	vsli.64		$t2,$e,#`64-@Sigma1[2]`
#if $i<16 && defined(__ARMEL__)
	vrev64.8	@X[$i],@X[$i]
#endif
	veor		$t1,$t0
	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
	vshr.u64	$t0,$a,#@Sigma0[0]
	veor		$t2,$t1			@ Sigma1(e)
	vadd.i64	$T1,$Ch,$h
	vshr.u64	$t1,$a,#@Sigma0[1]
	vsli.64		$t0,$a,#`64-@Sigma0[0]`
	vadd.i64	$T1,$t2
	vshr.u64	$t2,$a,#@Sigma0[2]
	vadd.i64	$K,@X[$i%16]
	vsli.64		$t1,$a,#`64-@Sigma0[1]`
	veor		$Maj,$a,$b
	vsli.64		$t2,$a,#`64-@Sigma0[2]`
	veor		$h,$t0,$t1
	vadd.i64	$T1,$K
	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
	veor		$h,$t2			@ Sigma0(a)
	vadd.i64	$d,$T1
	vadd.i64	$Maj,$T1
	@ vadd.i64	$h,$Maj
___
}
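# Note the commented-out "vadd.i64 $h,$Maj": rather than folding
# Maj(a,b,c) into h immediately, the result is left in d30 and added at
# the start of the next round ("h+=Maj from the past"), which takes the
# vbsl off the current round's critical path. The last outstanding Maj
# is flushed explicitly before the context is stored back.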
sub NEON_16_79() {
my $i=shift;

if ($i&1)	{ &NEON_00_15($i,@_); return; }

# 2x-vectorized, therefore runs every 2nd round
my @X=map("q$_",(0..7));			# view @X as 128-bit vector
my ($t0,$t1,$s0,$s1) = map("q$_",(12..15));	# temps
my ($d0,$d1,$d2) = map("d$_",(24..26));		# temps from NEON_00_15
my $e=@_[4];					# $e from NEON_00_15
$i /= 2;
$code.=<<___;
	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
	vadd.i64	@_[0],d30		@ h+=Maj from the past
	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
	vsli.64		$t1,@X[($i+7)%8],#`64-@sigma1[1]`
	veor		$s1,$t0
	vshr.u64	$t0,$s0,#@sigma0[0]
	veor		$s1,$t1				@ sigma1(X[i+14])
	vshr.u64	$t1,$s0,#@sigma0[1]
	vadd.i64	@X[$i%8],$s1
	vshr.u64	$s1,$s0,#@sigma0[2]
	vsli.64		$t0,$s0,#`64-@sigma0[0]`
	vsli.64		$t1,$s0,#`64-@sigma0[1]`
	vext.8		$s0,@X[($i+4)%8],@X[($i+5)%8],#8	@ X[i+9]
	veor		$s1,$t0
	vshr.u64	$d0,$e,#@Sigma1[0]		@ from NEON_00_15
	vadd.i64	@X[$i%8],$s0
	vshr.u64	$d1,$e,#@Sigma1[1]		@ from NEON_00_15
	veor		$s1,$t1				@ sigma0(X[i+1])
	vshr.u64	$d2,$e,#@Sigma1[2]		@ from NEON_00_15
	vadd.i64	@X[$i%8],$s1
___
	&NEON_00_15(2*$i,@_);
}
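# NEON_16_79 advances the message schedule two rounds at a time by
# viewing the sixteen 64-bit X slots as eight 128-bit q registers: one
# pass computes sigma0/sigma1 for a pair of W words, and the opening
# Sigma1 shifts of the following round (the "from NEON_00_15" lines)
# are interleaved to keep the NEON pipeline busy.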
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.global	sha512_block_data_order_neon
.type	sha512_block_data_order_neon,%function
.align	4
sha512_block_data_order_neon:
.LNEON:
	dmb				@ errata #451034 on early Cortex A8
	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
	adr	$Ktbl,K512
	VFP_ABI_PUSH
	vldmia	$ctx,{$A-$H}		@ load context
.Loop_neon:
___
for($i=0;$i<16;$i++)	{ &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
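# unshift(@V,pop(@V)) rotates the register names a..h by one position,
# so consecutive rounds are emitted with permuted arguments instead of
# spending instructions on moving data between registers.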
$code.=<<___;
	mov	$cnt,#4
.L16_79_neon:
	subs	$cnt,#1
___
for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
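# Each .L16_79_neon iteration covers 16 rounds, so the 4-pass loop
# completes rounds 16..79.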
$code.=<<___;
	bne	.L16_79_neon

	vadd.i64	$A,d30		@ h+=Maj from the past
	vldmia	$ctx,{d24-d31}		@ load context to temp
	vadd.i64	q8,q12		@ vectorized accumulate
	vadd.i64	q9,q13
	vadd.i64	q10,q14
	vadd.i64	q11,q15
	vstmia	$ctx,{$A-$H}		@ save context
	teq	$inp,$len
	sub	$Ktbl,#640		@ rewind K512
	bne	.Loop_neon

	VFP_ABI_POP
	ret				@ bx lr
.size	sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
___
}
$code.=<<___;
.asciz	"SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.comm	OPENSSL_armcap_P,4,4
.hidden	OPENSSL_armcap_P
#endif
___
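# Post-process the accumulated text: evaluate the `...` constant
# expressions (e.g. `64-@Sigma1[0]`), then rewrite "bx lr" as the raw
# opcode .word 0xe12fff1e so the integer path assembles even with
# -march=armv4, and finally expand the NEON path's "ret" into the
# "bx lr" it stands for.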
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
$code =~ s/\bret\b/bx lr/gm;
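# Reproduce this file's leading comment block (up to the first line
# that is neither a comment nor empty) at the top of the output, with
# Perl "#" comments rewritten as assembler "@" comments.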
open SELF,$0;
while(<SELF>) {
	next if (/^#!/);
	last if (!s/^#/@/ and !/^$/);
	print;
}
close SELF;

print $code;
close STDOUT;	# enforce flush