#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# January 2007.
#
# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide a +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to the pre-bn_mul_mont code
# base and to compiler-generated code with inlined umull and even umlal
# instructions. The latter means that this code didn't really have an
# "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. A Windows CE port would be trivial, as it's exclusively
# about decorations; ABI and instruction syntax are identical.

# November 2013.
#
# Add a NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 the improvement was measured to vary from ~70% to
# an incredible ~380%, i.e. 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different: it's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for the integer-only code. The NEON path is
# nevertheless chosen for execution on all NEON-capable processors,
# because the gain on the others outweighs the marginal loss on
# Cortex-A9.
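
# Below, purely for illustration (a minimal sketch, not executed by this
# generator; mont_mul_ref is a hypothetical name and the operands are
# assumed to be Math::BigInt values rather than the 32-bit word arrays
# the assembly walks), the word-level Montgomery multiplication that
# both code paths implement:
#
#	use Math::BigInt;
#	sub mont_mul_ref {
#	    my ($a,$b,$n,$n0,$num) = @_;	# $n0 = -1/n mod 2^32
#	    my $t = Math::BigInt->new(0);
#	    for my $i (0..$num-1) {
#	        my $bi = ($b >> (32*$i)) & 0xffffffff;	# bp[i]
#	        $t += $a * $bi;				# tp[] += ap[]*bp[i]
#	        my $m = (($t & 0xffffffff) * $n0) & 0xffffffff;
#	        $t = ($t + $m * $n) >> 32;		# low word cancels exactly
#	    }
#	    $t -= $n if $t >= $n;	# conditional final subtraction
#	    return $t;			# = a*b*2^(-32*num) mod n
#	}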
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";

$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter
#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;
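
# i.e. the integer-only path builds the following frame, growing down
# from the caller's sp (offsets relative to $num = &tp[num-1], as
# derivable from the prologue below):
#
#	sp .. sp+4*num		tp[0..num] (one extra word on top)
#	$num+2*4 .. $num+11*4	saved r4-r12,lr
#	$num+12*4, $num+13*4	saved r0 (rp) and r2 (bp)
#	$num+14*4, $num+15*4	caller's stack arguments: &n0, num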
$code=<<___;
#include "arm_arch.h"

.text
.code	32

#if __ARM_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-bn_mul_mont
#endif

.global	bn_mul_mont
.hidden	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
bn_mul_mont:
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_ARCH__>=7
	tst	ip,#7
	bne	.Lialu
	adr	r0,bn_mul_mont
	ldr	r2,.LOPENSSL_armcap
	ldr	r0,[r0,r2]
	tst	r0,#1			@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	$num,ip			@ load num
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt

	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]

	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$n0,[$_n0]		@ &n0
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	ldr	$n0,[$n0]		@ *n0
	str	$tp,[$_bpend]		@ save &bp[num]

	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov	$tp,sp
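
@ 1st outer iteration: compute tp[] = (ap[]*bp[0] + m*np[])>>32,
@ where m = ap[0]*bp[0]*n0 mod 2^32 makes the dropped low word zero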
.L1st:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	mov	$alo,$ahi
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.L1st

	adds	$nlo,$nlo,$ahi
	ldr	$tp,[$_bp]		@ restore bp
	mov	$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=
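
@ remaining outer iterations accumulate: tp[] = (tp[]+ap[]*bp[i]+m*np[])>>32,
@ with m recomputed from the updated tp[0] each time round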
.Louter:
	sub	$tj,$num,sp		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	mul	$n0,$alo,$n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov	$tp,sp

.Linner:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adc	$ahi,$ahi,#0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.Linner

	adds	$nlo,$nlo,$ahi
	mov	$nhi,#0
	ldr	$tp,[$_bp]		@ restore bp
	adc	$nhi,$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adds	$nlo,$nlo,$tj
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	cmp	$tp,$tj
	bne	.Louter

	ldr	$rp,[$_rp]		@ pull rp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,sp		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

	subs	$tj,$tj,$tj		@ "clear" carry flag
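
@ conditional final subtraction: compute rp[] = tp[] - np[]; the borrow
@ then decides whether .Lcopy publishes tp[] or rp[], and tp[] is
@ zapped either way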
.Lsub:	ldr	$tj,[$tp],#4
	ldr	$nj,[$np],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	bne	.Lsub
	sbcs	$nhi,$nhi,#0		@ upmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp

	and	$ap,$tp,$nhi
	bic	$np,$rp,$nhi
	orr	$ap,$ap,$np		@ ap=borrow?tp:rp

.Lcopy:	ldr	$tj,[$ap],#4		@ copy or in-place refresh
	str	sp,[$tp],#4		@ zap tp
	str	$tj,[$rp],#4
	cmp	$tp,$num
	bne	.Lcopy

	add	sp,$num,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	bn_mul_mont,.-bn_mul_mont
___
{
sub Dlo()	{ shift=~m|q([1]?[0-9])|?"d".($1*2):"";   }	# low 64-bit half of a q register
sub Dhi()	{ shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }	# high 64-bit half

my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my ($A0xB,$A1xB,$A2xB,$A3xB,$A4xB,$A5xB,$A6xB,$A7xB)=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero=&Dlo($Z);
my $temp=&Dlo($Temp);

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer)=map("r$_",(6..9));
$code.=<<___;
#if __ARM_ARCH__>=7
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block

	sub		$toutptr,sp,#16
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	sub		$toutptr,$toutptr,$num,lsl#4
	vld1.32		{$A0-$A3},  [$aptr]!		@ can't specify :32 :-(
	and		$toutptr,$toutptr,#-64
	vld1.32		{${M0}[0]}, [$n0,:32]
	mov		sp,$toutptr			@ alloca
	veor		$zero,$zero,$zero
	subs		$inner,$num,#8
	vzip.16		$Bi,$zero

	vmull.u32	$A0xB,$Bi,${A0}[0]
	vmull.u32	$A1xB,$Bi,${A0}[1]
	vmull.u32	$A2xB,$Bi,${A1}[0]
	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	vmull.u32	$A3xB,$Bi,${A1}[1]

	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	veor		$zero,$zero,$zero
	vmul.u32	$Ni,$temp,$M0

	vmull.u32	$A4xB,$Bi,${A2}[0]
	vld1.32		{$N0-$N3}, [$nptr]!
	vmull.u32	$A5xB,$Bi,${A2}[1]
	vmull.u32	$A6xB,$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmull.u32	$A7xB,$Bi,${A3}[1]

	bne	.LNEON_1st
	@ special case for num=8, everything is in register bank...

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	sub		$outer,$num,#1
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vmov		$Temp,$A0xB
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vmov		$A0xB,$A1xB
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmov		$A1xB,$A2xB
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vmov		$A2xB,$A3xB
	vmov		$A3xB,$A4xB
	vshr.u64	$temp,$temp,#16
	vmov		$A4xB,$A5xB
	vmov		$A5xB,$A6xB
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vmov		$A6xB,$A7xB
	veor		$A7xB,$A7xB
	vshr.u64	$temp,$temp,#16

	b	.LNEON_outer8

.align	4
.LNEON_outer8:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	vmlal.u32	$A3xB,$Bi,${A1}[1]

	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	veor		$zero,$zero,$zero
	subs		$outer,$outer,#1
	vmul.u32	$Ni,$temp,$M0

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vmov		$Temp,$A0xB
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vmov		$A0xB,$A1xB
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmov		$A1xB,$A2xB
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vmov		$A2xB,$A3xB
	vmov		$A3xB,$A4xB
	vshr.u64	$temp,$temp,#16
	vmov		$A4xB,$A5xB
	vmov		$A5xB,$A6xB
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vmov		$A6xB,$A7xB
	veor		$A7xB,$A7xB
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer8

	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	mov		$toutptr,sp
	vshr.u64	$temp,`&Dlo("$A0xB")`,#16
	mov		$inner,$num
	vadd.u64	`&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
	add		$tinptr,sp,#16
	vshr.u64	$temp,`&Dhi("$A0xB")`,#16
	vzip.16		`&Dlo("$A0xB")`,`&Dhi("$A0xB")`

	b	.LNEON_tail2
.align	4
.LNEON_1st:
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vld1.32		{$A0-$A3}, [$aptr]!
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	subs		$inner,$inner,#8
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vld1.32		{$N0-$N1}, [$nptr]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!

	vmull.u32	$A0xB,$Bi,${A0}[0]
	vld1.32		{$N2-$N3}, [$nptr]!
	vmull.u32	$A1xB,$Bi,${A0}[1]
	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!
	vmull.u32	$A2xB,$Bi,${A1}[0]
	vmull.u32	$A3xB,$Bi,${A1}[1]
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!

	vmull.u32	$A4xB,$Bi,${A2}[0]
	vmull.u32	$A5xB,$Bi,${A2}[1]
	vmull.u32	$A6xB,$Bi,${A3}[0]
	vmull.u32	$A7xB,$Bi,${A3}[1]

	bne	.LNEON_1st

	vmlal.u32	$A0xB,$Ni,${N0}[0]
	add		$tinptr,sp,#16
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	sub		$aptr,$aptr,$num,lsl#2		@ rewind $aptr
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vld1.64		{$Temp}, [sp,:128]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	sub		$outer,$num,#1

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vshr.u64	$temp,$temp,#16
	vld1.64		{$A0xB},       [$tinptr, :128]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]

	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!
	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	veor		$Z,$Z,$Z
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!
	vld1.64		{$A1xB-$A2xB}, [$tinptr, :256]!
	vst1.64		{$Z},          [$toutptr,:128]
	vshr.u64	$temp,$temp,#16

	b	.LNEON_outer
.align	4
.LNEON_outer:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	sub		$nptr,$nptr,$num,lsl#2		@ rewind $nptr
	vld1.32		{$A0-$A3},  [$aptr]!
	veor		$zero,$zero,$zero
	mov		$toutptr,sp
	vzip.16		$Bi,$zero
	sub		$inner,$num,#8
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vld1.64		{$A3xB-$A4xB},[$tinptr,:256]!
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vld1.64		{$A5xB-$A6xB},[$tinptr,:256]!
	vmlal.u32	$A3xB,$Bi,${A1}[1]

	vshl.i64	$temp,`&Dhi("$A0xB")`,#16
	veor		$zero,$zero,$zero
	vadd.u64	$temp,$temp,`&Dlo("$A0xB")`
	vld1.64		{$A7xB},[$tinptr,:128]!
	vmul.u32	$Ni,$temp,$M0

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vld1.32		{$N0-$N3}, [$nptr]!
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	$A7xB,$Bi,${A3}[1]

.LNEON_inner:
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	vld1.32		{$A0-$A3}, [$aptr]!
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	subs		$inner,$inner,#8
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vld1.64		{$A0xB},       [$tinptr, :128]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vld1.64		{$A1xB-$A2xB}, [$tinptr, :256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!

	vmlal.u32	$A0xB,$Bi,${A0}[0]
	vld1.64		{$A3xB-$A4xB}, [$tinptr, :256]!
	vmlal.u32	$A1xB,$Bi,${A0}[1]
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!
	vmlal.u32	$A2xB,$Bi,${A1}[0]
	vld1.64		{$A5xB-$A6xB}, [$tinptr, :256]!
	vmlal.u32	$A3xB,$Bi,${A1}[1]
	vld1.32		{$N0-$N3}, [$nptr]!

	vmlal.u32	$A4xB,$Bi,${A2}[0]
	vld1.64		{$A7xB},       [$tinptr, :128]!
	vmlal.u32	$A5xB,$Bi,${A2}[1]
	vmlal.u32	$A6xB,$Bi,${A3}[0]
	vmlal.u32	$A7xB,$Bi,${A3}[1]

	bne	.LNEON_inner
	vmlal.u32	$A0xB,$Ni,${N0}[0]
	add		$tinptr,sp,#16
	vmlal.u32	$A1xB,$Ni,${N0}[1]
	sub		$aptr,$aptr,$num,lsl#2		@ rewind $aptr
	vmlal.u32	$A2xB,$Ni,${N1}[0]
	vld1.64		{$Temp}, [sp,:128]
	vmlal.u32	$A3xB,$Ni,${N1}[1]
	subs		$outer,$outer,#1

	vmlal.u32	$A4xB,$Ni,${N2}[0]
	vst1.64		{$A0xB-$A1xB}, [$toutptr,:256]!
	vmlal.u32	$A5xB,$Ni,${N2}[1]
	vld1.64		{$A0xB},       [$tinptr, :128]!
	vshr.u64	$temp,$temp,#16
	vst1.64		{$A2xB-$A3xB}, [$toutptr,:256]!
	vmlal.u32	$A6xB,$Ni,${N3}[0]
	vld1.64		{$A1xB-$A2xB}, [$tinptr, :256]!
	vmlal.u32	$A7xB,$Ni,${N3}[1]
	vst1.64		{$A4xB-$A5xB}, [$toutptr,:256]!

	vadd.u64	$temp,$temp,`&Dhi("$Temp")`
	vst1.64		{$A6xB-$A7xB}, [$toutptr,:256]!
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer

	mov		$toutptr,sp
	mov		$inner,$num
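
@ carry-propagation tail: each 64-bit lane holds sums of 32x16-bit
@ products offset by 16 bits; fold them with 16-bit shifts and vzip.16
@ to emit the 32-bit words of tp[]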
.LNEON_tail:
	vadd.u64	`&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
	vld1.64		{$A3xB-$A4xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dlo("$A0xB")`,#16
	vadd.u64	`&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
	vld1.64		{$A5xB-$A6xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dhi("$A0xB")`,#16
	vld1.64		{$A7xB},       [$tinptr, :128]!
	vzip.16		`&Dlo("$A0xB")`,`&Dhi("$A0xB")`

.LNEON_tail2:
	vadd.u64	`&Dlo("$A1xB")`,`&Dlo("$A1xB")`,$temp
	vst1.32		{`&Dlo("$A0xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A1xB")`,#16
	vadd.u64	`&Dhi("$A1xB")`,`&Dhi("$A1xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A1xB")`,#16
	vzip.16		`&Dlo("$A1xB")`,`&Dhi("$A1xB")`

	vadd.u64	`&Dlo("$A2xB")`,`&Dlo("$A2xB")`,$temp
	vst1.32		{`&Dlo("$A1xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A2xB")`,#16
	vadd.u64	`&Dhi("$A2xB")`,`&Dhi("$A2xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A2xB")`,#16
	vzip.16		`&Dlo("$A2xB")`,`&Dhi("$A2xB")`

	vadd.u64	`&Dlo("$A3xB")`,`&Dlo("$A3xB")`,$temp
	vst1.32		{`&Dlo("$A2xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A3xB")`,#16
	vadd.u64	`&Dhi("$A3xB")`,`&Dhi("$A3xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A3xB")`,#16
	vzip.16		`&Dlo("$A3xB")`,`&Dhi("$A3xB")`

	vadd.u64	`&Dlo("$A4xB")`,`&Dlo("$A4xB")`,$temp
	vst1.32		{`&Dlo("$A3xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A4xB")`,#16
	vadd.u64	`&Dhi("$A4xB")`,`&Dhi("$A4xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A4xB")`,#16
	vzip.16		`&Dlo("$A4xB")`,`&Dhi("$A4xB")`

	vadd.u64	`&Dlo("$A5xB")`,`&Dlo("$A5xB")`,$temp
	vst1.32		{`&Dlo("$A4xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A5xB")`,#16
	vadd.u64	`&Dhi("$A5xB")`,`&Dhi("$A5xB")`,$temp
	vshr.u64	$temp,`&Dhi("$A5xB")`,#16
	vzip.16		`&Dlo("$A5xB")`,`&Dhi("$A5xB")`

	vadd.u64	`&Dlo("$A6xB")`,`&Dlo("$A6xB")`,$temp
	vst1.32		{`&Dlo("$A5xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A6xB")`,#16
	vadd.u64	`&Dhi("$A6xB")`,`&Dhi("$A6xB")`,$temp
	vld1.64		{$A0xB}, [$tinptr, :128]!
	vshr.u64	$temp,`&Dhi("$A6xB")`,#16
	vzip.16		`&Dlo("$A6xB")`,`&Dhi("$A6xB")`

	vadd.u64	`&Dlo("$A7xB")`,`&Dlo("$A7xB")`,$temp
	vst1.32		{`&Dlo("$A6xB")`[0]}, [$toutptr, :32]!
	vshr.u64	$temp,`&Dlo("$A7xB")`,#16
	vadd.u64	`&Dhi("$A7xB")`,`&Dhi("$A7xB")`,$temp
	vld1.64		{$A1xB-$A2xB}, [$tinptr, :256]!
	vshr.u64	$temp,`&Dhi("$A7xB")`,#16
	vzip.16		`&Dlo("$A7xB")`,`&Dhi("$A7xB")`
	subs		$inner,$inner,#8
	vst1.32		{`&Dlo("$A7xB")`[0]}, [$toutptr, :32]!

	bne	.LNEON_tail
	vst1.32	{${temp}[0]}, [$toutptr, :32]		@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2			@ rewind $nptr
	subs	$aptr,sp,#0				@ clear carry flag
	add	$bptr,sp,$num,lsl#2
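
@ back on the integer unit: subtract np[] from tp[], four words per
@ iteration, with teq preserving the borrow chain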
.LNEON_sub:
	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_sub

	ldr	r10, [$aptr]				@ load top-most bit
	veor	q0,q0,q0
	sub	r11,$bptr,sp				@ this is num*4
	veor	q1,q1,q1
	mov	$aptr,sp
	sub	$rptr,$rptr,r11				@ rewind $rptr
	mov	$nptr,$bptr				@ second 3/4th of frame
	sbcs	r10,r10,#0				@ result is carry flag
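
@ if the subtraction borrowed, copy tp[] over the difference in rp[],
@ otherwise keep the difference; wipe the temporary frame in passing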
.LNEON_copy_n_zap:
	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr,  {r8-r11}
	movcc	r8, r4
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	movcc	r11,r7
	ldmia	$aptr, {r4-r7}
	stmia	$rptr!, {r8-r11}
	sub	$aptr,$aptr,#16
	ldmia	$rptr, {r8-r11}
	movcc	r8, r4
	vst1.64	{q0-q1}, [$aptr,:256]!			@ wipe
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	movcc	r11,r7
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap

	sub	sp,ip,#96
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	bx	lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
___
}

$code.=<<___;
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
print $code;
close STDOUT;