#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for ARMv8.
#
# June 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone.
#
#               IALU/gcc-4.9    NEON
#
# Apple A7      1.86/+5%        0.72
# Cortex-A53    2.63/+58%       1.47
# Cortex-A57    2.70/+7%        1.14
# Denver        1.39/+50%       1.18(*)
# X-Gene        2.00/+68%       2.19
#
# (*) the estimate based on resource availability is less than 1.0,
#     i.e. the measured result is worse than expected, presumably
#     because the binary translator is not almighty;
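#
# A brief orientation note (added commentary, not part of the original
# header): both the scalar and the NEON paths below implement the usual
# Poly1305 update
#
#     h = ((h + m) * r) mod (2^130 - 5)
#
# for each 16-byte block m with the pad bit appended, and poly1305_emit
# returns (h + nonce) mod 2^128. The scalar code keeps h in base 2^64
# (two 64-bit limbs plus a small top word), while the NEON code switches
# to five 26-bit limbs and consumes four blocks per loop iteration.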
$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
my ($mac,$nonce)=($inp,$len);
my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));

$code.=<<___;
#include <openssl/arm_arch.h>

.text

// forward "declarations" are required for Apple
.extern OPENSSL_armcap_P
.globl poly1305_blocks
.globl poly1305_emit

.globl poly1305_init
.type poly1305_init,%function
.align 5
poly1305_init:
cmp $inp,xzr
stp xzr,xzr,[$ctx] // zero hash value
stp xzr,xzr,[$ctx,#16] // [along with is_base2_26]
csel x0,xzr,x0,eq
b.eq .Lno_key
#ifdef __ILP32__
ldrsw $t1,.LOPENSSL_armcap_P
#else
ldr $t1,.LOPENSSL_armcap_P
#endif
adr $t0,.LOPENSSL_armcap_P
ldp $r0,$r1,[$inp] // load key
mov $s1,#0xfffffffc0fffffff
movk $s1,#0x0fff,lsl#48
ldr w17,[$t0,$t1]
#ifdef __ARMEB__
rev $r0,$r0 // flip bytes
rev $r1,$r1
#endif
and $r0,$r0,$s1 // &=0ffffffc0fffffff
and $s1,$s1,#-4
and $r1,$r1,$s1 // &=0ffffffc0ffffffc
stp $r0,$r1,[$ctx,#32] // save key value
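// Added commentary: the third argument to poly1305_init is a small
// function-pointer table; depending on the ARMV7_NEON capability bit,
// pointers to either the scalar or the _neon variants of
// poly1305_blocks and poly1305_emit are stored there for the caller.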
tst w17,#ARMV7_NEON
adr $d0,poly1305_blocks
adr $r0,poly1305_blocks_neon
adr $d1,poly1305_emit
adr $r1,poly1305_emit_neon
csel $d0,$d0,$r0,eq
csel $d1,$d1,$r1,eq
stp $d0,$d1,[$len]
mov x0,#1
.Lno_key:
ret
.size poly1305_init,.-poly1305_init

.type poly1305_blocks,%function
.align 5
poly1305_blocks:
ands $len,$len,#-16
b.eq .Lno_data
ldp $h0,$h1,[$ctx] // load hash value
ldp $r0,$r1,[$ctx,#32] // load key value
ldr $h2,[$ctx,#16]
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
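// Added commentary: key clamping clears the low two bits of r1, so
// r1>>2 is exact and s1 = 5*(r1/4). Since 2^130 == 5 (mod 2^130-5),
// partial products that would land at bit 128 and above can be folded
// back in as multiples of s1, which is what the h1*s1 and h2*s1 terms
// in the loop below do.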
b .Loop

.align 5
.Loop:
ldp $t0,$t1,[$inp],#16 // load input
sub $len,$len,#16
#ifdef __ARMEB__
rev $t0,$t0
rev $t1,$t1
#endif
adds $h0,$h0,$t0 // accumulate input
adcs $h1,$h1,$t1
mul $d0,$h0,$r0 // h0*r0
adc $h2,$h2,$padbit
umulh $d1,$h0,$r0
mul $t0,$h1,$s1 // h1*5*r1
umulh $t1,$h1,$s1
adds $d0,$d0,$t0
mul $t0,$h0,$r1 // h0*r1
adc $d1,$d1,$t1
umulh $d2,$h0,$r1
adds $d1,$d1,$t0
mul $t0,$h1,$r0 // h1*r0
adc $d2,$d2,xzr
umulh $t1,$h1,$r0
adds $d1,$d1,$t0
mul $t0,$h2,$s1 // h2*5*r1
adc $d2,$d2,$t1
mul $t1,$h2,$r0 // h2*r0
adds $d1,$d1,$t0
adc $d2,$d2,$t1
and $t0,$d2,#-4 // final reduction
and $h2,$d2,#3
add $t0,$t0,$d2,lsr#2
adds $h0,$d0,$t0
adc $h1,$d1,xzr
cbnz $len,.Loop
stp $h0,$h1,[$ctx] // store hash value
str $h2,[$ctx,#16]
.Lno_data:
ret
.size poly1305_blocks,.-poly1305_blocks

.type poly1305_emit,%function
.align 5
poly1305_emit:
ldp $h0,$h1,[$ctx] // load hash base 2^64
ldr $h2,[$ctx,#16]
ldp $t0,$t1,[$nonce] // load nonce
adds $d0,$h0,#5 // compare to modulus
adcs $d1,$h1,xzr
adc $d2,$h2,xzr
tst $d2,#-4 // see if it's carried/borrowed
csel $h0,$h0,$d0,eq
csel $h1,$h1,$d1,eq
#ifdef __ARMEB__
ror $t0,$t0,#32 // flip nonce words
ror $t1,$t1,#32
#endif
adds $h0,$h0,$t0 // accumulate nonce
adc $h1,$h1,$t1
#ifdef __ARMEB__
rev $h0,$h0 // flip output bytes
rev $h1,$h1
#endif
stp $h0,$h1,[$mac] // write result
ret
.size poly1305_emit,.-poly1305_emit
___

my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
my ($T0,$T1,$MASK) = map("v$_",(29..31));
my ($in2,$zeros)=("x16","x17");
my $is_base2_26 = $zeros; # borrow

$code.=<<___;
.type poly1305_mult,%function
.align 5
poly1305_mult:
mul $d0,$h0,$r0 // h0*r0
umulh $d1,$h0,$r0
mul $t0,$h1,$s1 // h1*5*r1
umulh $t1,$h1,$s1
adds $d0,$d0,$t0
mul $t0,$h0,$r1 // h0*r1
adc $d1,$d1,$t1
umulh $d2,$h0,$r1
adds $d1,$d1,$t0
mul $t0,$h1,$r0 // h1*r0
adc $d2,$d2,xzr
umulh $t1,$h1,$r0
adds $d1,$d1,$t0
mul $t0,$h2,$s1 // h2*5*r1
adc $d2,$d2,$t1
mul $t1,$h2,$r0 // h2*r0
adds $d1,$d1,$t0
adc $d2,$d2,$t1
and $t0,$d2,#-4 // final reduction
and $h2,$d2,#3
add $t0,$t0,$d2,lsr#2
adds $h0,$d0,$t0
adc $h1,$d1,xzr
ret
.size poly1305_mult,.-poly1305_mult
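// Added commentary: poly1305_splat converts the value in h0:h1:h2 from
// base 2^64 into five 26-bit limbs and stores each limb (and, for limbs
// 1-4, its *5 multiple) into one 32-bit lane of the r^n table consumed
// by the NEON code.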
.type poly1305_splat,%function
.align 5
poly1305_splat:
and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26
ubfx x13,$h0,#26,#26
extr x14,$h1,$h0,#52
and x14,x14,#0x03ffffff
ubfx x15,$h1,#14,#26
extr x16,$h2,$h1,#40
str w12,[$ctx,#16*0] // r0
add w12,w13,w13,lsl#2 // r1*5
str w13,[$ctx,#16*1] // r1
add w13,w14,w14,lsl#2 // r2*5
str w12,[$ctx,#16*2] // s1
str w14,[$ctx,#16*3] // r2
add w14,w15,w15,lsl#2 // r3*5
str w13,[$ctx,#16*4] // s2
str w15,[$ctx,#16*5] // r3
add w15,w16,w16,lsl#2 // r4*5
str w14,[$ctx,#16*6] // s3
str w16,[$ctx,#16*7] // r4
str w15,[$ctx,#16*8] // s4
ret
.size poly1305_splat,.-poly1305_splat

.type poly1305_blocks_neon,%function
.align 5
poly1305_blocks_neon:
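// Added commentary: inputs shorter than 128 bytes are handed back to the
// scalar poly1305_blocks unless the hash has already been converted to
// base 2^26; everything else takes the vector path below.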
ldr $is_base2_26,[$ctx,#24]
cmp $len,#128
b.hs .Lblocks_neon
cbz $is_base2_26,poly1305_blocks

.Lblocks_neon:
stp x29,x30,[sp,#-80]!
add x29,sp,#0
ands $len,$len,#-16
b.eq .Lno_data_neon
cbz $is_base2_26,.Lbase2_64_neon
ldp w10,w11,[$ctx] // load hash value base 2^26
ldp w12,w13,[$ctx,#8]
ldr w14,[$ctx,#16]
tst $len,#31
b.eq .Leven_neon
ldp $r0,$r1,[$ctx,#32] // load key value
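// Added commentary: the five 26-bit limbs in w10-w14 represent
// h = h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104; the shifts and adds
// below repack that into two 64-bit limbs plus a small top word so the
// scalar poly1305_mult can absorb an odd leading block.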
add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr $h1,x12,#12
adds $h0,$h0,x12,lsl#52
add $h1,$h1,x13,lsl#14
adc $h1,$h1,xzr
lsr $h2,x14,#24
adds $h1,$h1,x14,lsl#40
adc $d2,$h2,xzr // can be partially reduced...
ldp $d0,$d1,[$inp],#16 // load input
sub $len,$len,#16
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
and $t0,$d2,#-4 // ... so reduce
and $h2,$d2,#3
add $t0,$t0,$d2,lsr#2
adds $h0,$h0,$t0
adc $h1,$h1,xzr
#ifdef __ARMEB__
rev $d0,$d0
rev $d1,$d1
#endif
adds $h0,$h0,$d0 // accumulate input
adcs $h1,$h1,$d1
adc $h2,$h2,$padbit
bl poly1305_mult
ldr x30,[sp,#8]
cbz $padbit,.Lstore_base2_64_neon
and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,$h0,#26,#26
extr x12,$h1,$h0,#52
and x12,x12,#0x03ffffff
ubfx x13,$h1,#14,#26
extr x14,$h2,$h1,#40
cbnz $len,.Leven_neon
stp w10,w11,[$ctx] // store hash value base 2^26
stp w12,w13,[$ctx,#8]
str w14,[$ctx,#16]
b .Lno_data_neon

.align 4
.Lstore_base2_64_neon:
stp $h0,$h1,[$ctx] // store hash value base 2^64
stp $h2,xzr,[$ctx,#16] // note that is_base2_26 is zeroed
b .Lno_data_neon

.align 4
.Lbase2_64_neon:
ldp $r0,$r1,[$ctx,#32] // load key value
ldp $h0,$h1,[$ctx] // load hash value base 2^64
ldr $h2,[$ctx,#16]
tst $len,#31
b.eq .Linit_neon
ldp $d0,$d1,[$inp],#16 // load input
sub $len,$len,#16
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
#ifdef __ARMEB__
rev $d0,$d0
rev $d1,$d1
#endif
adds $h0,$h0,$d0 // accumulate input
adcs $h1,$h1,$d1
adc $h2,$h2,$padbit
bl poly1305_mult

.Linit_neon:
and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,$h0,#26,#26
extr x12,$h1,$h0,#52
and x12,x12,#0x03ffffff
ubfx x13,$h1,#14,#26
extr x14,$h2,$h1,#40
stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
fmov ${H0},x10
fmov ${H1},x11
fmov ${H2},x12
fmov ${H3},x13
fmov ${H4},x14
////////////////////////////////// initialize r^n table
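// Added commentary: r^1, r^2, r^3 and r^4 are computed with the scalar
// poly1305_mult and splatted into the table at offsets that step back by
// 4 bytes each time, so every 16-byte table entry ends up with one power
// per 32-bit lane (lane 0 = r^4, ..., lane 3 = r^1).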
mov $h0,$r0 // r^1
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
mov $h1,$r1
mov $h2,xzr
add $ctx,$ctx,#48+12
bl poly1305_splat

bl poly1305_mult // r^2
sub $ctx,$ctx,#4
bl poly1305_splat

bl poly1305_mult // r^3
sub $ctx,$ctx,#4
bl poly1305_splat

bl poly1305_mult // r^4
sub $ctx,$ctx,#4
bl poly1305_splat
ldr x30,[sp,#8]

add $in2,$inp,#32
adr $zeros,.Lzeros
subs $len,$len,#64
csel $in2,$zeros,$in2,lo

mov x4,#1
str x4,[$ctx,#-24] // set is_base2_26
sub $ctx,$ctx,#48 // restore original $ctx
b .Ldo_neon

.align 4
.Leven_neon:
add $in2,$inp,#32
adr $zeros,.Lzeros
subs $len,$len,#64
csel $in2,$zeros,$in2,lo

stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
fmov ${H0},x10
fmov ${H1},x11
fmov ${H2},x12
fmov ${H3},x13
fmov ${H4},x14

.Ldo_neon:
ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
ldp x9,x13,[$in2],#48
lsl $padbit,$padbit,#24
add x15,$ctx,#48
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov $IN23_0,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,$padbit,x12,lsr#40
add x13,$padbit,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov $IN23_1,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
fmov $IN23_2,x8
fmov $IN23_3,x10
fmov $IN23_4,x12

ldp x8,x12,[$inp],#16 // inp[0:1]
ldp x9,x13,[$inp],#48
ld1 {$R0,$R1,$S1,$R2},[x15],#64
ld1 {$S2,$R3,$S3,$R4},[x15],#64
ld1 {$S4},[x15]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov $IN01_0,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,$padbit,x12,lsr#40
add x13,$padbit,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov $IN01_1,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
fmov $IN01_2,x8
fmov $IN01_3,x10
fmov $IN01_4,x12

b.ls .Lskip_loop

.align 4
.Loop_neon:
////////////////////////////////////////////////////////////////
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
// \___________________/
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
// \___________________/ \____________________/
//
// Note that we start with inp[2:3]*r^2. This is because it
// doesn't depend on reduction in previous iteration.
////////////////////////////////////////////////////////////////
// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
subs $len,$len,#64
umull $ACC4,$IN23_0,${R4}[2]
csel $in2,$zeros,$in2,lo
umull $ACC3,$IN23_0,${R3}[2]
umull $ACC2,$IN23_0,${R2}[2]
ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
umull $ACC1,$IN23_0,${R1}[2]
ldp x9,x13,[$in2],#48
umull $ACC0,$IN23_0,${R0}[2]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
umlal $ACC4,$IN23_1,${R3}[2]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal $ACC3,$IN23_1,${R2}[2]
and x5,x9,#0x03ffffff
umlal $ACC2,$IN23_1,${R1}[2]
ubfx x6,x8,#26,#26
umlal $ACC1,$IN23_1,${R0}[2]
ubfx x7,x9,#26,#26
umlal $ACC0,$IN23_1,${S4}[2]
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal $ACC4,$IN23_2,${R2}[2]
extr x8,x12,x8,#52
umlal $ACC3,$IN23_2,${R1}[2]
extr x9,x13,x9,#52
umlal $ACC2,$IN23_2,${R0}[2]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal $ACC1,$IN23_2,${S4}[2]
fmov $IN23_0,x4
umlal $ACC0,$IN23_2,${S3}[2]
and x8,x8,#0x03ffffff
umlal $ACC4,$IN23_3,${R1}[2]
and x9,x9,#0x03ffffff
umlal $ACC3,$IN23_3,${R0}[2]
ubfx x10,x12,#14,#26
umlal $ACC2,$IN23_3,${S4}[2]
ubfx x11,x13,#14,#26
umlal $ACC1,$IN23_3,${S3}[2]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal $ACC0,$IN23_3,${S2}[2]
fmov $IN23_1,x6
add $IN01_2,$IN01_2,$H2
add x12,$padbit,x12,lsr#40
umlal $ACC4,$IN23_4,${R0}[2]
add x13,$padbit,x13,lsr#40
umlal $ACC3,$IN23_4,${S4}[2]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal $ACC2,$IN23_4,${S3}[2]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal $ACC1,$IN23_4,${S2}[2]
fmov $IN23_2,x8
umlal $ACC0,$IN23_4,${S1}[2]
fmov $IN23_3,x10

////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4 and accumulate
add $IN01_0,$IN01_0,$H0
fmov $IN23_4,x12
umlal $ACC3,$IN01_2,${R1}[0]
ldp x8,x12,[$inp],#16 // inp[0:1]
umlal $ACC0,$IN01_2,${S3}[0]
ldp x9,x13,[$inp],#48
umlal $ACC4,$IN01_2,${R2}[0]
umlal $ACC1,$IN01_2,${S4}[0]
umlal $ACC2,$IN01_2,${R0}[0]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
add $IN01_1,$IN01_1,$H1
umlal $ACC3,$IN01_0,${R3}[0]
umlal $ACC4,$IN01_0,${R4}[0]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal $ACC2,$IN01_0,${R2}[0]
and x5,x9,#0x03ffffff
umlal $ACC0,$IN01_0,${R0}[0]
ubfx x6,x8,#26,#26
umlal $ACC1,$IN01_0,${R1}[0]
ubfx x7,x9,#26,#26
add $IN01_3,$IN01_3,$H3
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal $ACC3,$IN01_1,${R2}[0]
extr x8,x12,x8,#52
umlal $ACC4,$IN01_1,${R3}[0]
extr x9,x13,x9,#52
umlal $ACC0,$IN01_1,${S4}[0]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal $ACC2,$IN01_1,${R1}[0]
fmov $IN01_0,x4
umlal $ACC1,$IN01_1,${R0}[0]
and x8,x8,#0x03ffffff
add $IN01_4,$IN01_4,$H4
and x9,x9,#0x03ffffff
umlal $ACC3,$IN01_3,${R0}[0]
ubfx x10,x12,#14,#26
umlal $ACC0,$IN01_3,${S2}[0]
ubfx x11,x13,#14,#26
umlal $ACC4,$IN01_3,${R1}[0]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal $ACC1,$IN01_3,${S3}[0]
fmov $IN01_1,x6
umlal $ACC2,$IN01_3,${S4}[0]
add x12,$padbit,x12,lsr#40
umlal $ACC3,$IN01_4,${S4}[0]
add x13,$padbit,x13,lsr#40
umlal $ACC0,$IN01_4,${S1}[0]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal $ACC4,$IN01_4,${R0}[0]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal $ACC1,$IN01_4,${S2}[0]
fmov $IN01_2,x8
umlal $ACC2,$IN01_4,${S3}[0]
fmov $IN01_3,x10

/////////////////////////////////////////////////////////////////
// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
// and P. Schwabe
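// Added commentary: carries are propagated only one step along two
// interleaved chains instead of fully normalizing the limbs; the carry
// out of h4 is folded back into h0 multiplied by 5 (2^130 == 5 mod p),
// and limbs may stay slightly above 26 bits, which the next round of
// multiplications tolerates.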
ushr $T0.2d,$ACC3,#26
fmov $IN01_4,x12
xtn $H3,$ACC3
ushr $T1.2d,$ACC0,#26
xtn $H0,$ACC0
add $ACC4,$ACC4,$T0.2d // h3 -> h4
bic $H3,#0xfc,lsl#24 // &=0x03ffffff
add $ACC1,$ACC1,$T1.2d // h0 -> h1
bic $H0,#0xfc,lsl#24
shrn $T0.2s,$ACC4,#26
xtn $H4,$ACC4
ushr $T1.2d,$ACC1,#26
xtn $H1,$ACC1
add $ACC2,$ACC2,$T1.2d // h1 -> h2
bic $H4,#0xfc,lsl#24
bic $H1,#0xfc,lsl#24
add $H0,$H0,$T0.2s
shl $T0.2s,$T0.2s,#2
shrn $T1.2s,$ACC2,#26
xtn $H2,$ACC2
add $H0,$H0,$T0.2s // h4 -> h0
add $H3,$H3,$T1.2s // h2 -> h3
bic $H2,#0xfc,lsl#24
ushr $T0.2s,$H0,#26
bic $H0,#0xfc,lsl#24
ushr $T1.2s,$H3,#26
bic $H3,#0xfc,lsl#24
add $H1,$H1,$T0.2s // h0 -> h1
add $H4,$H4,$T1.2s // h3 -> h4

b.hi .Loop_neon

.Lskip_loop:
dup $IN23_2,${IN23_2}[0]
movi $MASK.2d,#-1
add $IN01_2,$IN01_2,$H2
ushr $MASK.2d,$MASK.2d,#38

////////////////////////////////////////////////////////////////
// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
adds $len,$len,#32
b.ne .Long_tail

dup $IN23_2,${IN01_2}[0]
add $IN23_0,$IN01_0,$H0
add $IN23_3,$IN01_3,$H3
add $IN23_1,$IN01_1,$H1
add $IN23_4,$IN01_4,$H4

.Long_tail:
dup $IN23_0,${IN23_0}[0]
umull2 $ACC0,$IN23_2,${S3}
umull2 $ACC3,$IN23_2,${R1}
umull2 $ACC4,$IN23_2,${R2}
umull2 $ACC2,$IN23_2,${R0}
umull2 $ACC1,$IN23_2,${S4}
dup $IN23_1,${IN23_1}[0]
umlal2 $ACC0,$IN23_0,${R0}
umlal2 $ACC2,$IN23_0,${R2}
umlal2 $ACC3,$IN23_0,${R3}
umlal2 $ACC4,$IN23_0,${R4}
umlal2 $ACC1,$IN23_0,${R1}
dup $IN23_3,${IN23_3}[0]
umlal2 $ACC0,$IN23_1,${S4}
umlal2 $ACC3,$IN23_1,${R2}
umlal2 $ACC2,$IN23_1,${R1}
umlal2 $ACC4,$IN23_1,${R3}
umlal2 $ACC1,$IN23_1,${R0}
dup $IN23_4,${IN23_4}[0]
umlal2 $ACC3,$IN23_3,${R0}
umlal2 $ACC4,$IN23_3,${R1}
umlal2 $ACC0,$IN23_3,${S2}
umlal2 $ACC1,$IN23_3,${S3}
umlal2 $ACC2,$IN23_3,${S4}
umlal2 $ACC3,$IN23_4,${S4}
umlal2 $ACC0,$IN23_4,${S1}
umlal2 $ACC4,$IN23_4,${R0}
umlal2 $ACC1,$IN23_4,${S2}
umlal2 $ACC2,$IN23_4,${S3}

b.eq .Lshort_tail

////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4:r^3 and accumulate
add $IN01_0,$IN01_0,$H0
umlal $ACC3,$IN01_2,${R1}
umlal $ACC0,$IN01_2,${S3}
umlal $ACC4,$IN01_2,${R2}
umlal $ACC1,$IN01_2,${S4}
umlal $ACC2,$IN01_2,${R0}
add $IN01_1,$IN01_1,$H1
umlal $ACC3,$IN01_0,${R3}
umlal $ACC0,$IN01_0,${R0}
umlal $ACC4,$IN01_0,${R4}
umlal $ACC1,$IN01_0,${R1}
umlal $ACC2,$IN01_0,${R2}
add $IN01_3,$IN01_3,$H3
umlal $ACC3,$IN01_1,${R2}
umlal $ACC0,$IN01_1,${S4}
umlal $ACC4,$IN01_1,${R3}
umlal $ACC1,$IN01_1,${R0}
umlal $ACC2,$IN01_1,${R1}
add $IN01_4,$IN01_4,$H4
umlal $ACC3,$IN01_3,${R0}
umlal $ACC0,$IN01_3,${S2}
umlal $ACC4,$IN01_3,${R1}
umlal $ACC1,$IN01_3,${S3}
umlal $ACC2,$IN01_3,${S4}
umlal $ACC3,$IN01_4,${S4}
umlal $ACC0,$IN01_4,${S1}
umlal $ACC4,$IN01_4,${R0}
umlal $ACC1,$IN01_4,${S2}
umlal $ACC2,$IN01_4,${S3}

.Lshort_tail:
////////////////////////////////////////////////////////////////
// horizontal add
addp $ACC3,$ACC3,$ACC3
ldp d8,d9,[sp,#16] // meet ABI requirements
addp $ACC0,$ACC0,$ACC0
ldp d10,d11,[sp,#32]
addp $ACC4,$ACC4,$ACC4
ldp d12,d13,[sp,#48]
addp $ACC1,$ACC1,$ACC1
ldp d14,d15,[sp,#64]
addp $ACC2,$ACC2,$ACC2

////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
ushr $T0.2d,$ACC3,#26
and $ACC3,$ACC3,$MASK.2d
ushr $T1.2d,$ACC0,#26
and $ACC0,$ACC0,$MASK.2d
add $ACC4,$ACC4,$T0.2d // h3 -> h4
add $ACC1,$ACC1,$T1.2d // h0 -> h1
ushr $T0.2d,$ACC4,#26
and $ACC4,$ACC4,$MASK.2d
ushr $T1.2d,$ACC1,#26
and $ACC1,$ACC1,$MASK.2d
add $ACC2,$ACC2,$T1.2d // h1 -> h2
add $ACC0,$ACC0,$T0.2d
shl $T0.2d,$T0.2d,#2
ushr $T1.2d,$ACC2,#26
and $ACC2,$ACC2,$MASK.2d
add $ACC0,$ACC0,$T0.2d // h4 -> h0
add $ACC3,$ACC3,$T1.2d // h2 -> h3
ushr $T0.2d,$ACC0,#26
and $ACC0,$ACC0,$MASK.2d
ushr $T1.2d,$ACC3,#26
and $ACC3,$ACC3,$MASK.2d
add $ACC1,$ACC1,$T0.2d // h0 -> h1
add $ACC4,$ACC4,$T1.2d // h3 -> h4

////////////////////////////////////////////////////////////////
// write the result, can be partially reduced
st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
st1 {$ACC4}[0],[$ctx]

.Lno_data_neon:
ldr x29,[sp],#80
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon

.type poly1305_emit_neon,%function
.align 5
poly1305_emit_neon:
ldr $is_base2_26,[$ctx,#24]
cbz $is_base2_26,poly1305_emit
ldp w10,w11,[$ctx] // load hash value base 2^26
ldp w12,w13,[$ctx,#8]
ldr w14,[$ctx,#16]
add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr $h1,x12,#12
adds $h0,$h0,x12,lsl#52
add $h1,$h1,x13,lsl#14
adc $h1,$h1,xzr
lsr $h2,x14,#24
adds $h1,$h1,x14,lsl#40
adc $h2,$h2,xzr // can be partially reduced...
ldp $t0,$t1,[$nonce] // load nonce
and $d0,$h2,#-4 // ... so reduce
add $d0,$d0,$h2,lsr#2
and $h2,$h2,#3
adds $h0,$h0,$d0
adc $h1,$h1,xzr
adds $d0,$h0,#5 // compare to modulus
adcs $d1,$h1,xzr
adc $d2,$h2,xzr
tst $d2,#-4 // see if it's carried/borrowed
csel $h0,$h0,$d0,eq
csel $h1,$h1,$d1,eq
#ifdef __ARMEB__
ror $t0,$t0,#32 // flip nonce words
ror $t1,$t1,#32
#endif
adds $h0,$h0,$t0 // accumulate nonce
adc $h1,$h1,$t1
#ifdef __ARMEB__
rev $h0,$h0 // flip output bytes
rev $h1,$h1
#endif
stp $h0,$h1,[$mac] // write result
ret
.size poly1305_emit_neon,.-poly1305_emit_neon

.align 5
.Lzeros:
.long 0,0,0,0,0,0,0,0
.LOPENSSL_armcap_P:
#ifdef __ILP32__
.long OPENSSL_armcap_P-.
#else
.quad OPENSSL_armcap_P-.
#endif
.asciz "Poly1305 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
___
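# Added commentary: the loop below massages the generic vector syntax used
# in the template above into the operand forms the assembler expects:
# fmov from a general register gets a d-register destination, shrn/dup/
# eor/and/st1-st4 get their arrangement specifiers adjusted, umull/umlal
# and their "2" forms get matching element sizes, and lane references drop
# the element count (e.g. ".4s[2]" becomes ".s[2]").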
foreach (split("\n",$code)) {
    s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or
    s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or
    (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or
    (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or
    (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or
    (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or
    (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));

    s/\.[124]([sd])\[/.$1\[/;

    print $_,"\n";
}
close STDOUT;