#if defined(__arm__) && !defined(OPENSSL_NO_ASM)
# This implementation was taken from the public domain, neon2 version in
# SUPERCOP by D. J. Bernstein and Peter Schwabe.
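#
# Added annotation (not part of the original qhasm output): Poly1305
# accumulates modulo 2^130 - 5, and this "neon2" variant keeps each 130-bit
# value as five 26-bit limbs with two independent values packed per NEON q
# register, so one vector instruction advances two Poly1305 blocks at once.
# Reading the qhasm below: input_0 points at the accumulator limbs (loaded
# up front, stored back at ._end), input_1 at the caller-precomputed
# multipliers (powers of the key r), input_2 at the message, and input_3
# carries the byte count; the count of unprocessed bytes is returned in r0.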
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y0
# qhasm: reg128 y12
# qhasm: reg128 y34
# qhasm: reg128 5y12
# qhasm: reg128 5y34
# qhasm: stack128 y0_stack
# qhasm: stack128 y12_stack
# qhasm: stack128 y34_stack
# qhasm: stack128 5y12_stack
# qhasm: stack128 5y34_stack
# qhasm: reg128 z0
# qhasm: reg128 z12
# qhasm: reg128 z34
# qhasm: reg128 5z12
# qhasm: reg128 5z34
# qhasm: stack128 z0_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z34_stack
# qhasm: stack128 5z12_stack
# qhasm: stack128 5z34_stack
# qhasm: stack128 two24
# qhasm: int32 ptr
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 d01
# qhasm: reg128 d23
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: reg128 u0
# qhasm: reg128 u1
# qhasm: reg128 u2
# qhasm: reg128 u3
# qhasm: reg128 u4
# qhasm: reg128 v01
# qhasm: reg128 mid
# qhasm: reg128 v23
# qhasm: reg128 v4
# qhasm: int32 len
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global openssl_poly1305_neon2_blocks
.hidden openssl_poly1305_neon2_blocks
.type openssl_poly1305_neon2_blocks STT_FUNC
openssl_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
and sp,sp,#0xffffffe0
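# Added annotation: the prologue above saves the callee-saved NEON registers
# q4-q7, parks the old sp in r12, then carves out a 192-byte spill area and
# rounds sp down to a 32-byte boundary so the ",: 128" aligned vld1.8/vst1.8
# accesses below are valid.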
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3
# qhasm: new y0
# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!
# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8
# qhasm: new z0
# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!
# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!
# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff
# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!
# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!
# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]
# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6
# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7
# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2
# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2
# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1
# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2
# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24
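# Added annotation: at this point each 64-bit lane of mask holds 0x03ffffff
# (the 26-bit limb mask used during carry propagation) and each lane of u4
# holds 0x01000000 = 2^24, the radix-2^26 position of a 16-byte block's
# 2^128 marker bit; u4 is spilled below as the "two24" constant.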
# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2
# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2
# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4
# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
# qhasm: new two24
# qhasm: new y0_stack
# qhasm: new y12_stack
# qhasm: new y34_stack
# qhasm: new 5y12_stack
# qhasm: new 5y34_stack
# qhasm: new z0_stack
# qhasm: new z12_stack
# qhasm: new z34_stack
# qhasm: new 5z12_stack
# qhasm: new 5z34_stack
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]
# qhasm: r4 = u4
# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7
# qhasm: r0 = u4
# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16
# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32
# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48
# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96
# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112
# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128
# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64
# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80
# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144
# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160
# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes
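# Added annotation: ._mainloop2 below consumes 64 bytes (four Poly1305
# blocks) per iteration; ._mainloop then handles 32-byte groups, and ._end
# stores the five-limb accumulator back through input_0.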
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32
# qhasm: mainloop2:
._mainloop2:
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112
# qhasm: z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96
# qhasm: z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]
# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5
# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3
# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4
# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2
# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10
# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5
# qhasm: r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1
# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4
# qhasm: input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64
# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160
# qhasm: 5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12
# qhasm: d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!
# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5
# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4
# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6
# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11
# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10
# qhasm: r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6
# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4
# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6
# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11
# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144
# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1
# qhasm: d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10
# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23
# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1
# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6
# qhasm: new mid
# qhasm: 2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40
# qhasm: mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1
# qhasm: new v23
# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14
# qhasm: mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1
# qhasm: new v01
# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26
# qhasm: v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1
# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11
# qhasm: v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1
# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20
# qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7
# qhasm: 4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48
# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: 4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32
# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]
# qhasm: 4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16
# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5
# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4
# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3
# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2
# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80
# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]
# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4
# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3
# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2
# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0
# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64
# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]
# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22
# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24
# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23
# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25
# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2
# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0
# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25
# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24
# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23
# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3
# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2
# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0
# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25
# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26
# qhasm: len -= 64
# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8
# qhasm: x4 &= mask
# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6
# qhasm: 2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9
# qhasm: r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26
# qhasm: 2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9
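# Added annotation: the carry t0 out of the top limb re-enters limb 0 as
# t0 + (t0 << 2) = 5*t0, which implements the reduction 2^130 = 5
# (mod 2^130 - 5).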
# qhasm: z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: 2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2
# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32
# qhasm: below64bytes:
._below64bytes:
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto end if !unsigned>
bls ._end
# qhasm: mainloop:
._mainloop:
# qhasm: new r0
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]
# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!
# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4
# qhasm: r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1
# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3
# qhasm: r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1
# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3
# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0
# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22
# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24
# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13
# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18
# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25
# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4
# qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3
# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2
# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6
# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25
# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2
# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0
# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25
# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12
# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23
# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3
# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2
# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0
# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25
# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7
# qhasm: len -= 32
# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto mainloop if unsigned>
bhi ._mainloop
# qhasm: end:
._end:
# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!
# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!
# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]
# qhasm: len = len
# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3
# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
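
# Added annotation (not part of the original qhasm output): the second
# routine, openssl_poly1305_neon2_addmulmod, computes (x + c) * y on the
# same two-way five-limb radix-2^26 representation, with x taken from
# input_1, y from input_2 and c from input_3, as the qhasm comments below
# spell out; the result is presumably written back through input_0,
# mirroring the blocks routine above.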
  1162. # qhasm: int32 input_0
  1163. # qhasm: int32 input_1
  1164. # qhasm: int32 input_2
  1165. # qhasm: int32 input_3
  1166. # qhasm: stack32 input_4
  1167. # qhasm: stack32 input_5
  1168. # qhasm: stack32 input_6
  1169. # qhasm: stack32 input_7
  1170. # qhasm: int32 caller_r4
  1171. # qhasm: int32 caller_r5
  1172. # qhasm: int32 caller_r6
  1173. # qhasm: int32 caller_r7
  1174. # qhasm: int32 caller_r8
  1175. # qhasm: int32 caller_r9
  1176. # qhasm: int32 caller_r10
  1177. # qhasm: int32 caller_r11
  1178. # qhasm: int32 caller_r12
  1179. # qhasm: int32 caller_r14
  1180. # qhasm: reg128 caller_q4
  1181. # qhasm: reg128 caller_q5
  1182. # qhasm: reg128 caller_q6
  1183. # qhasm: reg128 caller_q7
  1184. # qhasm: reg128 r0
  1185. # qhasm: reg128 r1
  1186. # qhasm: reg128 r2
  1187. # qhasm: reg128 r3
  1188. # qhasm: reg128 r4
  1189. # qhasm: reg128 x01
  1190. # qhasm: reg128 x23
  1191. # qhasm: reg128 x4
  1192. # qhasm: reg128 y01
  1193. # qhasm: reg128 y23
  1194. # qhasm: reg128 y4
  1195. # qhasm: reg128 _5y01
  1196. # qhasm: reg128 _5y23
  1197. # qhasm: reg128 _5y4
  1198. # qhasm: reg128 c01
  1199. # qhasm: reg128 c23
  1200. # qhasm: reg128 c4
  1201. # qhasm: reg128 t0
  1202. # qhasm: reg128 t1
  1203. # qhasm: reg128 t2
  1204. # qhasm: reg128 t3
  1205. # qhasm: reg128 t4
  1206. # qhasm: reg128 mask
# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.align 2
.global openssl_poly1305_neon2_addmulmod
.hidden openssl_poly1305_neon2_addmulmod
.type openssl_poly1305_neon2_addmulmod STT_FUNC
openssl_poly1305_neon2_addmulmod:
sub sp,sp,#0
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
# asm 2: vmov.i64 >mask=q0,#0xffffffff
vmov.i64 q0,#0xffffffff
# qhasm: y01 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: 4x _5y01 = y01 << 2
# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
vshl.i32 q2,q1,#2
# qhasm: y23 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
vld1.8 {d6-d7},[r2,: 128]!
# qhasm: 4x _5y23 = y23 << 2
# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
vshl.i32 q8,q3,#2
# qhasm: y4 aligned= mem64[input_2]y4[1]
# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
vld1.8 {d18},[r2,: 64]
# qhasm: 4x _5y4 = y4 << 2
# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
vshl.i32 q10,q9,#2
# qhasm: x01 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
vld1.8 {d22-d23},[r1,: 128]!
# qhasm: 4x _5y01 += y01
# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
vadd.i32 q2,q2,q1
# qhasm: x23 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
vld1.8 {d24-d25},[r1,: 128]!
# qhasm: 4x _5y23 += y23
# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
vadd.i32 q8,q8,q3
# qhasm: 4x _5y4 += y4
# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
vadd.i32 q10,q10,q9
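# note: _5y01/_5y23/_5y4 now hold (y << 2) + y = 5*y. The factor 5 appears
# because 2^130 is congruent to 5 mod 2^130 - 5, so products that wrap past
# the top limb are folded back in scaled by 5.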
# qhasm: c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13
# qhasm: c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13
# qhasm: x4 aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6
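# note: 0xffffffff >> 6 = 0x3ffffff = 2^26 - 1 in each 64-bit lane; this is
# the limb mask used during the carry propagation below.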
# qhasm: c4 aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]
# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14
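# note: the addend c has now been folded into x limb by limb; what follows
# is a 5x5 schoolbook multiply of x by y, accumulating 64-bit product limbs
# r0..r4 with vmull/vmlal.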
# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2
# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20
# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17
# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16
# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5
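# note: in limb terms, r0 = x0*y0 + 5*(x1*y4 + x2*y3 + x3*y2 + x4*y1).
# r1..r4 below follow the same pattern: r_k is the sum of x_i*y_j over
# i+j = k, plus 5 times the terms with i+j = k+5.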
# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3
# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2
# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20
# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17
# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16
# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6
# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3
# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2
# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20
# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17
# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7
# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6
# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3
# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2
# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20
# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18
# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7
# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6
# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3
# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2
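# note: all five 64-bit accumulators r0..r4 are now formed; the remainder
# of the routine carries each limb back down to 26 bits.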
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
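# note: t0 is the carry out of r4, whose weight is 2^(5*26) = 2^130; adding
# t0 and then t0 << 2 folds 5*t0 into r0, which is exactly the
# mod-(2^130 - 5) wraparound.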
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5
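# note: the vtrn/vext shuffles above appear to repack the reduced limbs of
# the two parallel results into the 128+128+64-bit layout that the stores
# below write out and that the loads at the top of the routine expect.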
# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!
# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!
# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]
# qhasm: return
add sp,sp,#0
bx lr
#endif /* __arm__ && !OPENSSL_NO_ASM */