  1. # qhasm: int64 input_0
  2. # qhasm: int64 input_1
  3. # qhasm: int64 input_2
  4. # qhasm: int64 input_3
  5. # qhasm: int64 input_4
  6. # qhasm: int64 input_5
  7. # qhasm: stack64 input_6
  8. # qhasm: stack64 input_7
  9. # qhasm: int64 caller_r11
  10. # qhasm: int64 caller_r12
  11. # qhasm: int64 caller_r13
  12. # qhasm: int64 caller_r14
  13. # qhasm: int64 caller_r15
  14. # qhasm: int64 caller_rbx
  15. # qhasm: int64 caller_rbp
  16. # qhasm: reg256 x0
  17. # qhasm: reg256 x1
  18. # qhasm: reg256 x2
  19. # qhasm: reg256 x3
  20. # qhasm: reg256 x4
  21. # qhasm: reg256 x5
  22. # qhasm: reg256 x6
  23. # qhasm: reg256 x7
  24. # qhasm: reg256 t0
  25. # qhasm: reg256 t1
  26. # qhasm: reg256 v00
  27. # qhasm: reg256 v01
  28. # qhasm: reg256 v10
  29. # qhasm: reg256 v11
  30. # qhasm: reg256 mask0
  31. # qhasm: reg256 mask1
  32. # qhasm: reg256 mask2
  33. # qhasm: reg256 mask3
  34. # qhasm: reg256 mask4
  35. # qhasm: reg256 mask5
  36. # qhasm: enter transpose_64x256_sp_asm
  37. .p2align 5
  38. .global _PQCLEAN_MCELIECE460896F_AVX_transpose_64x256_sp_asm
  39. .global PQCLEAN_MCELIECE460896F_AVX_transpose_64x256_sp_asm
  40. _PQCLEAN_MCELIECE460896F_AVX_transpose_64x256_sp_asm:
  41. PQCLEAN_MCELIECE460896F_AVX_transpose_64x256_sp_asm:
  42. mov %rsp,%r11
  43. and $31,%r11
  44. add $0,%r11
  45. sub %r11,%rsp
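# The symbol name and the access pattern below suggest a 64x256 bit-matrix
# transpose performed in place on the buffer at input_0 (%rdi): each pass
# loads eight 256-bit rows spaced 256 bytes apart, exchanges 32-, 16- and
# 8-bit fields between pairs of them with masked shift/OR ("butterfly")
# steps, and stores them back. Two symbol names are exported (with and
# without a leading underscore) so the routine links on platforms that do
# and do not prefix C symbols. The prologue only aligns %rsp down to a
# 32-byte boundary; no stack space is reserved beyond that (add $0).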
  46. # qhasm: mask0 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK5_0 ]
  47. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK5_0,>mask0=reg256#1
  48. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK5_0,>mask0=%ymm0
  49. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK5_0(%rip),%ymm0
  50. # qhasm: mask1 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK5_1 ]
  51. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK5_1,>mask1=reg256#2
  52. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK5_1,>mask1=%ymm1
  53. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK5_1(%rip),%ymm1
  54. # qhasm: mask2 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK4_0 ]
  55. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK4_0,>mask2=reg256#3
  56. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK4_0,>mask2=%ymm2
  57. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK4_0(%rip),%ymm2
  58. # qhasm: mask3 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK4_1 ]
  59. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK4_1,>mask3=reg256#4
  60. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK4_1,>mask3=%ymm3
  61. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK4_1(%rip),%ymm3
  62. # qhasm: mask4 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK3_0 ]
  63. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK3_0,>mask4=reg256#5
  64. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK3_0,>mask4=%ymm4
  65. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK3_0(%rip),%ymm4
  66. # qhasm: mask5 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK3_1 ]
  67. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK3_1,>mask5=reg256#6
  68. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK3_1,>mask5=%ymm5
  69. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK3_1(%rip),%ymm5
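# The six masks are paired with the shift widths used below: mask0/mask1
# (MASK5_*) with the 32-bit vpsllq/vpsrlq steps, mask2/mask3 (MASK4_*)
# with the 16-bit vpslld/vpsrld steps, and mask4/mask5 (MASK3_*) with the
# 8-bit vpsllw/vpsrlw steps. From the way they are combined below
# ((x & mask0) | (y << 32), and so on), mask0, mask2 and mask4 presumably
# keep the low half of each field and mask1, mask3 and mask5 the high
# half; the constants themselves are defined elsewhere.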
  70. # qhasm: x0 = mem256[ input_0 + 0 ]
  71. # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7
  72. # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6
  73. vmovupd 0(%rdi),%ymm6
  74. # qhasm: x1 = mem256[ input_0 + 256 ]
  75. # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8
  76. # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7
  77. vmovupd 256(%rdi),%ymm7
  78. # qhasm: x2 = mem256[ input_0 + 512 ]
  79. # asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9
  80. # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8
  81. vmovupd 512(%rdi),%ymm8
  82. # qhasm: x3 = mem256[ input_0 + 768 ]
  83. # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10
  84. # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9
  85. vmovupd 768(%rdi),%ymm9
  86. # qhasm: x4 = mem256[ input_0 + 1024 ]
  87. # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11
  88. # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10
  89. vmovupd 1024(%rdi),%ymm10
  90. # qhasm: x5 = mem256[ input_0 + 1280 ]
  91. # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12
  92. # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11
  93. vmovupd 1280(%rdi),%ymm11
  94. # qhasm: x6 = mem256[ input_0 + 1536 ]
  95. # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13
  96. # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12
  97. vmovupd 1536(%rdi),%ymm12
  98. # qhasm: x7 = mem256[ input_0 + 1792 ]
  99. # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14
  100. # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13
  101. vmovupd 1792(%rdi),%ymm13
  102. # qhasm: v00 = x0 & mask0
  103. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  104. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  105. vpand %ymm6,%ymm0,%ymm14
  106. # qhasm: 4x v10 = x4 << 32
  107. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  108. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  109. vpsllq $32,%ymm10,%ymm15
  110. # qhasm: 4x v01 = x0 unsigned>> 32
  111. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  112. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  113. vpsrlq $32,%ymm6,%ymm6
  114. # qhasm: v11 = x4 & mask1
  115. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  116. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  117. vpand %ymm10,%ymm1,%ymm10
  118. # qhasm: x0 = v00 | v10
  119. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  120. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  121. vpor %ymm14,%ymm15,%ymm14
  122. # qhasm: x4 = v01 | v11
  123. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  124. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  125. vpor %ymm6,%ymm10,%ymm6
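# The six instructions above form one exchange step for the row pair
# (x0, x4). Assuming mask0 selects the low and mask1 the high 32 bits of
# each 64-bit lane, the step swaps the high half of every lane of x0 with
# the low half of the corresponding lane of x4. A per-64-bit-lane C sketch
# of the same step (mask_lo/mask_hi are illustrative names, not symbols
# from this file):
#
#     uint64_t v00 = x0 & mask_lo;   /* vpand  mask0 */
#     uint64_t v10 = x4 << 32;       /* vpsllq $32   */
#     uint64_t v01 = x0 >> 32;       /* vpsrlq $32   */
#     uint64_t v11 = x4 & mask_hi;   /* vpand  mask1 */
#     x0 = v00 | v10;                /* vpor         */
#     x4 = v01 | v11;                /* vpor         */
#
# The same pattern is applied to the pairs (x1,x5), (x2,x6) and (x3,x7)
# directly below.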
  126. # qhasm: v00 = x1 & mask0
  127. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  128. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  129. vpand %ymm7,%ymm0,%ymm10
  130. # qhasm: 4x v10 = x5 << 32
  131. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  132. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  133. vpsllq $32,%ymm11,%ymm15
  134. # qhasm: 4x v01 = x1 unsigned>> 32
  135. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  136. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  137. vpsrlq $32,%ymm7,%ymm7
  138. # qhasm: v11 = x5 & mask1
  139. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  140. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  141. vpand %ymm11,%ymm1,%ymm11
  142. # qhasm: x1 = v00 | v10
  143. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  144. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  145. vpor %ymm10,%ymm15,%ymm10
  146. # qhasm: x5 = v01 | v11
  147. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  148. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  149. vpor %ymm7,%ymm11,%ymm7
  150. # qhasm: v00 = x2 & mask0
  151. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  152. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  153. vpand %ymm8,%ymm0,%ymm11
  154. # qhasm: 4x v10 = x6 << 32
  155. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  156. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  157. vpsllq $32,%ymm12,%ymm15
  158. # qhasm: 4x v01 = x2 unsigned>> 32
  159. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  160. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  161. vpsrlq $32,%ymm8,%ymm8
  162. # qhasm: v11 = x6 & mask1
  163. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  164. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  165. vpand %ymm12,%ymm1,%ymm12
  166. # qhasm: x2 = v00 | v10
  167. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  168. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  169. vpor %ymm11,%ymm15,%ymm11
  170. # qhasm: x6 = v01 | v11
  171. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  172. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  173. vpor %ymm8,%ymm12,%ymm8
  174. # qhasm: v00 = x3 & mask0
  175. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  176. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  177. vpand %ymm9,%ymm0,%ymm12
  178. # qhasm: 4x v10 = x7 << 32
  179. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  180. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  181. vpsllq $32,%ymm13,%ymm15
  182. # qhasm: 4x v01 = x3 unsigned>> 32
  183. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  184. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  185. vpsrlq $32,%ymm9,%ymm9
  186. # qhasm: v11 = x7 & mask1
  187. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  188. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  189. vpand %ymm13,%ymm1,%ymm13
  190. # qhasm: x3 = v00 | v10
  191. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  192. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  193. vpor %ymm12,%ymm15,%ymm12
  194. # qhasm: x7 = v01 | v11
  195. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  196. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  197. vpor %ymm9,%ymm13,%ymm9
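# End of the 32-bit stage: the pairs (x0,x4), (x1,x5), (x2,x6) and
# (x3,x7) have exchanged their 32-bit halves. The next stage repeats the
# same pattern at 16-bit granularity, using mask2/mask3 with
# vpslld/vpsrld $16 on the pairs (x0,x2), (x1,x3), (x4,x6) and (x5,x7).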
  198. # qhasm: v00 = x0 & mask2
  199. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  200. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  201. vpand %ymm14,%ymm2,%ymm13
  202. # qhasm: 8x v10 = x2 << 16
  203. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  204. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  205. vpslld $16,%ymm11,%ymm15
  206. # qhasm: 8x v01 = x0 unsigned>> 16
  207. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  208. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  209. vpsrld $16,%ymm14,%ymm14
  210. # qhasm: v11 = x2 & mask3
  211. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  212. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  213. vpand %ymm11,%ymm3,%ymm11
  214. # qhasm: x0 = v00 | v10
  215. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  216. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  217. vpor %ymm13,%ymm15,%ymm13
  218. # qhasm: x2 = v01 | v11
  219. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  220. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  221. vpor %ymm14,%ymm11,%ymm11
  222. # qhasm: v00 = x1 & mask2
  223. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  224. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  225. vpand %ymm10,%ymm2,%ymm14
  226. # qhasm: 8x v10 = x3 << 16
  227. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  228. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  229. vpslld $16,%ymm12,%ymm15
  230. # qhasm: 8x v01 = x1 unsigned>> 16
  231. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  232. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  233. vpsrld $16,%ymm10,%ymm10
  234. # qhasm: v11 = x3 & mask3
  235. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  236. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  237. vpand %ymm12,%ymm3,%ymm12
  238. # qhasm: x1 = v00 | v10
  239. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  240. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  241. vpor %ymm14,%ymm15,%ymm14
  242. # qhasm: x3 = v01 | v11
  243. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  244. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  245. vpor %ymm10,%ymm12,%ymm10
  246. # qhasm: v00 = x4 & mask2
  247. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  248. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  249. vpand %ymm6,%ymm2,%ymm12
  250. # qhasm: 8x v10 = x6 << 16
  251. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  252. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  253. vpslld $16,%ymm8,%ymm15
  254. # qhasm: 8x v01 = x4 unsigned>> 16
  255. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  256. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  257. vpsrld $16,%ymm6,%ymm6
  258. # qhasm: v11 = x6 & mask3
  259. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  260. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  261. vpand %ymm8,%ymm3,%ymm8
  262. # qhasm: x4 = v00 | v10
  263. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  264. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  265. vpor %ymm12,%ymm15,%ymm12
  266. # qhasm: x6 = v01 | v11
  267. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  268. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  269. vpor %ymm6,%ymm8,%ymm6
  270. # qhasm: v00 = x5 & mask2
  271. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  272. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  273. vpand %ymm7,%ymm2,%ymm8
  274. # qhasm: 8x v10 = x7 << 16
  275. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  276. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  277. vpslld $16,%ymm9,%ymm15
  278. # qhasm: 8x v01 = x5 unsigned>> 16
  279. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  280. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  281. vpsrld $16,%ymm7,%ymm7
  282. # qhasm: v11 = x7 & mask3
  283. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  284. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  285. vpand %ymm9,%ymm3,%ymm9
  286. # qhasm: x5 = v00 | v10
  287. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  288. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  289. vpor %ymm8,%ymm15,%ymm8
  290. # qhasm: x7 = v01 | v11
  291. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  292. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  293. vpor %ymm7,%ymm9,%ymm7
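# End of the 16-bit stage. The final stage exchanges bytes, using
# mask4/mask5 with vpsllw/vpsrlw $8 on the adjacent pairs (x0,x1),
# (x2,x3), (x4,x5) and (x6,x7), before the eight rows are stored back.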
  294. # qhasm: v00 = x0 & mask4
  295. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  296. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  297. vpand %ymm13,%ymm4,%ymm9
  298. # qhasm: 16x v10 = x1 << 8
  299. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  300. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  301. vpsllw $8,%ymm14,%ymm15
  302. # qhasm: 16x v01 = x0 unsigned>> 8
  303. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  304. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  305. vpsrlw $8,%ymm13,%ymm13
  306. # qhasm: v11 = x1 & mask5
  307. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  308. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  309. vpand %ymm14,%ymm5,%ymm14
  310. # qhasm: x0 = v00 | v10
  311. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  312. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  313. vpor %ymm9,%ymm15,%ymm9
  314. # qhasm: x1 = v01 | v11
  315. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  316. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  317. vpor %ymm13,%ymm14,%ymm13
  318. # qhasm: v00 = x2 & mask4
  319. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  320. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  321. vpand %ymm11,%ymm4,%ymm14
  322. # qhasm: 16x v10 = x3 << 8
  323. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  324. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  325. vpsllw $8,%ymm10,%ymm15
  326. # qhasm: 16x v01 = x2 unsigned>> 8
  327. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  328. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  329. vpsrlw $8,%ymm11,%ymm11
  330. # qhasm: v11 = x3 & mask5
  331. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  332. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  333. vpand %ymm10,%ymm5,%ymm10
  334. # qhasm: x2 = v00 | v10
  335. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  336. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  337. vpor %ymm14,%ymm15,%ymm14
  338. # qhasm: x3 = v01 | v11
  339. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  340. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  341. vpor %ymm11,%ymm10,%ymm10
  342. # qhasm: v00 = x4 & mask4
  343. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  344. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  345. vpand %ymm12,%ymm4,%ymm11
  346. # qhasm: 16x v10 = x5 << 8
  347. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  348. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  349. vpsllw $8,%ymm8,%ymm15
  350. # qhasm: 16x v01 = x4 unsigned>> 8
  351. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  352. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  353. vpsrlw $8,%ymm12,%ymm12
  354. # qhasm: v11 = x5 & mask5
  355. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  356. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  357. vpand %ymm8,%ymm5,%ymm8
  358. # qhasm: x4 = v00 | v10
  359. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  360. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  361. vpor %ymm11,%ymm15,%ymm11
  362. # qhasm: x5 = v01 | v11
  363. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  364. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  365. vpor %ymm12,%ymm8,%ymm8
  366. # qhasm: v00 = x6 & mask4
  367. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  368. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  369. vpand %ymm6,%ymm4,%ymm12
  370. # qhasm: 16x v10 = x7 << 8
  371. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  372. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  373. vpsllw $8,%ymm7,%ymm15
  374. # qhasm: 16x v01 = x6 unsigned>> 8
  375. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  376. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  377. vpsrlw $8,%ymm6,%ymm6
  378. # qhasm: v11 = x7 & mask5
  379. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  380. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  381. vpand %ymm7,%ymm5,%ymm7
  382. # qhasm: x6 = v00 | v10
  383. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  384. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  385. vpor %ymm12,%ymm15,%ymm12
  386. # qhasm: x7 = v01 | v11
  387. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  388. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  389. vpor %ymm6,%ymm7,%ymm6
  390. # qhasm: mem256[ input_0 + 0 ] = x0
  391. # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1)
  392. # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi)
  393. vmovupd %ymm9,0(%rdi)
  394. # qhasm: mem256[ input_0 + 256 ] = x1
  395. # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1)
  396. # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi)
  397. vmovupd %ymm13,256(%rdi)
  398. # qhasm: mem256[ input_0 + 512 ] = x2
  399. # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1)
  400. # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi)
  401. vmovupd %ymm14,512(%rdi)
  402. # qhasm: mem256[ input_0 + 768 ] = x3
  403. # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1)
  404. # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi)
  405. vmovupd %ymm10,768(%rdi)
  406. # qhasm: mem256[ input_0 + 1024 ] = x4
  407. # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1)
  408. # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi)
  409. vmovupd %ymm11,1024(%rdi)
  410. # qhasm: mem256[ input_0 + 1280 ] = x5
  411. # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1)
  412. # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi)
  413. vmovupd %ymm8,1280(%rdi)
  414. # qhasm: mem256[ input_0 + 1536 ] = x6
  415. # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1)
  416. # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi)
  417. vmovupd %ymm12,1536(%rdi)
  418. # qhasm: mem256[ input_0 + 1792 ] = x7
  419. # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1)
  420. # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi)
  421. vmovupd %ymm6,1792(%rdi)
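# The eight rows at offsets 0, 256, ..., 1792 have been rewritten in
# place. The identical three-stage shuffle is now repeated with the base
# offset advanced by 32 bytes each time (32, 64, 96, ...), keeping the
# 256-byte stride between rows.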
  422. # qhasm: x0 = mem256[ input_0 + 32 ]
  423. # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7
  424. # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6
  425. vmovupd 32(%rdi),%ymm6
  426. # qhasm: x1 = mem256[ input_0 + 288 ]
  427. # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
  428. # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
  429. vmovupd 288(%rdi),%ymm7
  430. # qhasm: x2 = mem256[ input_0 + 544 ]
  431. # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9
  432. # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8
  433. vmovupd 544(%rdi),%ymm8
  434. # qhasm: x3 = mem256[ input_0 + 800 ]
  435. # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10
  436. # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9
  437. vmovupd 800(%rdi),%ymm9
  438. # qhasm: x4 = mem256[ input_0 + 1056 ]
  439. # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11
  440. # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10
  441. vmovupd 1056(%rdi),%ymm10
  442. # qhasm: x5 = mem256[ input_0 + 1312 ]
  443. # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12
  444. # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11
  445. vmovupd 1312(%rdi),%ymm11
  446. # qhasm: x6 = mem256[ input_0 + 1568 ]
  447. # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13
  448. # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12
  449. vmovupd 1568(%rdi),%ymm12
  450. # qhasm: x7 = mem256[ input_0 + 1824 ]
  451. # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14
  452. # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13
  453. vmovupd 1824(%rdi),%ymm13
  454. # qhasm: v00 = x0 & mask0
  455. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  456. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  457. vpand %ymm6,%ymm0,%ymm14
  458. # qhasm: 4x v10 = x4 << 32
  459. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  460. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  461. vpsllq $32,%ymm10,%ymm15
  462. # qhasm: 4x v01 = x0 unsigned>> 32
  463. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  464. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  465. vpsrlq $32,%ymm6,%ymm6
  466. # qhasm: v11 = x4 & mask1
  467. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  468. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  469. vpand %ymm10,%ymm1,%ymm10
  470. # qhasm: x0 = v00 | v10
  471. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  472. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  473. vpor %ymm14,%ymm15,%ymm14
  474. # qhasm: x4 = v01 | v11
  475. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  476. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  477. vpor %ymm6,%ymm10,%ymm6
  478. # qhasm: v00 = x1 & mask0
  479. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  480. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  481. vpand %ymm7,%ymm0,%ymm10
  482. # qhasm: 4x v10 = x5 << 32
  483. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  484. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  485. vpsllq $32,%ymm11,%ymm15
  486. # qhasm: 4x v01 = x1 unsigned>> 32
  487. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  488. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  489. vpsrlq $32,%ymm7,%ymm7
  490. # qhasm: v11 = x5 & mask1
  491. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  492. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  493. vpand %ymm11,%ymm1,%ymm11
  494. # qhasm: x1 = v00 | v10
  495. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  496. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  497. vpor %ymm10,%ymm15,%ymm10
  498. # qhasm: x5 = v01 | v11
  499. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  500. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  501. vpor %ymm7,%ymm11,%ymm7
  502. # qhasm: v00 = x2 & mask0
  503. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  504. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  505. vpand %ymm8,%ymm0,%ymm11
  506. # qhasm: 4x v10 = x6 << 32
  507. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  508. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  509. vpsllq $32,%ymm12,%ymm15
  510. # qhasm: 4x v01 = x2 unsigned>> 32
  511. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  512. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  513. vpsrlq $32,%ymm8,%ymm8
  514. # qhasm: v11 = x6 & mask1
  515. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  516. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  517. vpand %ymm12,%ymm1,%ymm12
  518. # qhasm: x2 = v00 | v10
  519. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  520. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  521. vpor %ymm11,%ymm15,%ymm11
  522. # qhasm: x6 = v01 | v11
  523. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  524. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  525. vpor %ymm8,%ymm12,%ymm8
  526. # qhasm: v00 = x3 & mask0
  527. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  528. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  529. vpand %ymm9,%ymm0,%ymm12
  530. # qhasm: 4x v10 = x7 << 32
  531. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  532. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  533. vpsllq $32,%ymm13,%ymm15
  534. # qhasm: 4x v01 = x3 unsigned>> 32
  535. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  536. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  537. vpsrlq $32,%ymm9,%ymm9
  538. # qhasm: v11 = x7 & mask1
  539. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  540. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  541. vpand %ymm13,%ymm1,%ymm13
  542. # qhasm: x3 = v00 | v10
  543. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  544. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  545. vpor %ymm12,%ymm15,%ymm12
  546. # qhasm: x7 = v01 | v11
  547. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  548. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  549. vpor %ymm9,%ymm13,%ymm9
  550. # qhasm: v00 = x0 & mask2
  551. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  552. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  553. vpand %ymm14,%ymm2,%ymm13
  554. # qhasm: 8x v10 = x2 << 16
  555. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  556. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  557. vpslld $16,%ymm11,%ymm15
  558. # qhasm: 8x v01 = x0 unsigned>> 16
  559. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  560. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  561. vpsrld $16,%ymm14,%ymm14
  562. # qhasm: v11 = x2 & mask3
  563. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  564. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  565. vpand %ymm11,%ymm3,%ymm11
  566. # qhasm: x0 = v00 | v10
  567. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  568. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  569. vpor %ymm13,%ymm15,%ymm13
  570. # qhasm: x2 = v01 | v11
  571. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  572. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  573. vpor %ymm14,%ymm11,%ymm11
  574. # qhasm: v00 = x1 & mask2
  575. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  576. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  577. vpand %ymm10,%ymm2,%ymm14
  578. # qhasm: 8x v10 = x3 << 16
  579. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  580. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  581. vpslld $16,%ymm12,%ymm15
  582. # qhasm: 8x v01 = x1 unsigned>> 16
  583. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  584. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  585. vpsrld $16,%ymm10,%ymm10
  586. # qhasm: v11 = x3 & mask3
  587. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  588. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  589. vpand %ymm12,%ymm3,%ymm12
  590. # qhasm: x1 = v00 | v10
  591. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  592. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  593. vpor %ymm14,%ymm15,%ymm14
  594. # qhasm: x3 = v01 | v11
  595. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  596. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  597. vpor %ymm10,%ymm12,%ymm10
  598. # qhasm: v00 = x4 & mask2
  599. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  600. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  601. vpand %ymm6,%ymm2,%ymm12
  602. # qhasm: 8x v10 = x6 << 16
  603. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  604. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  605. vpslld $16,%ymm8,%ymm15
  606. # qhasm: 8x v01 = x4 unsigned>> 16
  607. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  608. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  609. vpsrld $16,%ymm6,%ymm6
  610. # qhasm: v11 = x6 & mask3
  611. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  612. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  613. vpand %ymm8,%ymm3,%ymm8
  614. # qhasm: x4 = v00 | v10
  615. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  616. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  617. vpor %ymm12,%ymm15,%ymm12
  618. # qhasm: x6 = v01 | v11
  619. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  620. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  621. vpor %ymm6,%ymm8,%ymm6
  622. # qhasm: v00 = x5 & mask2
  623. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  624. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  625. vpand %ymm7,%ymm2,%ymm8
  626. # qhasm: 8x v10 = x7 << 16
  627. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  628. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  629. vpslld $16,%ymm9,%ymm15
  630. # qhasm: 8x v01 = x5 unsigned>> 16
  631. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  632. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  633. vpsrld $16,%ymm7,%ymm7
  634. # qhasm: v11 = x7 & mask3
  635. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  636. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  637. vpand %ymm9,%ymm3,%ymm9
  638. # qhasm: x5 = v00 | v10
  639. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  640. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  641. vpor %ymm8,%ymm15,%ymm8
  642. # qhasm: x7 = v01 | v11
  643. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  644. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  645. vpor %ymm7,%ymm9,%ymm7
  646. # qhasm: v00 = x0 & mask4
  647. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  648. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  649. vpand %ymm13,%ymm4,%ymm9
  650. # qhasm: 16x v10 = x1 << 8
  651. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  652. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  653. vpsllw $8,%ymm14,%ymm15
  654. # qhasm: 16x v01 = x0 unsigned>> 8
  655. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  656. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  657. vpsrlw $8,%ymm13,%ymm13
  658. # qhasm: v11 = x1 & mask5
  659. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  660. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  661. vpand %ymm14,%ymm5,%ymm14
  662. # qhasm: x0 = v00 | v10
  663. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  664. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  665. vpor %ymm9,%ymm15,%ymm9
  666. # qhasm: x1 = v01 | v11
  667. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  668. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  669. vpor %ymm13,%ymm14,%ymm13
  670. # qhasm: v00 = x2 & mask4
  671. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  672. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  673. vpand %ymm11,%ymm4,%ymm14
  674. # qhasm: 16x v10 = x3 << 8
  675. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  676. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  677. vpsllw $8,%ymm10,%ymm15
  678. # qhasm: 16x v01 = x2 unsigned>> 8
  679. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  680. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  681. vpsrlw $8,%ymm11,%ymm11
  682. # qhasm: v11 = x3 & mask5
  683. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  684. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  685. vpand %ymm10,%ymm5,%ymm10
  686. # qhasm: x2 = v00 | v10
  687. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  688. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  689. vpor %ymm14,%ymm15,%ymm14
  690. # qhasm: x3 = v01 | v11
  691. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  692. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  693. vpor %ymm11,%ymm10,%ymm10
  694. # qhasm: v00 = x4 & mask4
  695. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  696. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  697. vpand %ymm12,%ymm4,%ymm11
  698. # qhasm: 16x v10 = x5 << 8
  699. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  700. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  701. vpsllw $8,%ymm8,%ymm15
  702. # qhasm: 16x v01 = x4 unsigned>> 8
  703. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  704. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  705. vpsrlw $8,%ymm12,%ymm12
  706. # qhasm: v11 = x5 & mask5
  707. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  708. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  709. vpand %ymm8,%ymm5,%ymm8
  710. # qhasm: x4 = v00 | v10
  711. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  712. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  713. vpor %ymm11,%ymm15,%ymm11
  714. # qhasm: x5 = v01 | v11
  715. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  716. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  717. vpor %ymm12,%ymm8,%ymm8
  718. # qhasm: v00 = x6 & mask4
  719. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  720. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  721. vpand %ymm6,%ymm4,%ymm12
  722. # qhasm: 16x v10 = x7 << 8
  723. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  724. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  725. vpsllw $8,%ymm7,%ymm15
  726. # qhasm: 16x v01 = x6 unsigned>> 8
  727. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  728. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  729. vpsrlw $8,%ymm6,%ymm6
  730. # qhasm: v11 = x7 & mask5
  731. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  732. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  733. vpand %ymm7,%ymm5,%ymm7
  734. # qhasm: x6 = v00 | v10
  735. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  736. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  737. vpor %ymm12,%ymm15,%ymm12
  738. # qhasm: x7 = v01 | v11
  739. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  740. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  741. vpor %ymm6,%ymm7,%ymm6
  742. # qhasm: mem256[ input_0 + 32 ] = x0
  743. # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1)
  744. # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi)
  745. vmovupd %ymm9,32(%rdi)
  746. # qhasm: mem256[ input_0 + 288 ] = x1
  747. # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1)
  748. # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi)
  749. vmovupd %ymm13,288(%rdi)
  750. # qhasm: mem256[ input_0 + 544 ] = x2
  751. # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1)
  752. # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi)
  753. vmovupd %ymm14,544(%rdi)
  754. # qhasm: mem256[ input_0 + 800 ] = x3
  755. # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1)
  756. # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi)
  757. vmovupd %ymm10,800(%rdi)
  758. # qhasm: mem256[ input_0 + 1056 ] = x4
  759. # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1)
  760. # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi)
  761. vmovupd %ymm11,1056(%rdi)
  762. # qhasm: mem256[ input_0 + 1312 ] = x5
  763. # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1)
  764. # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi)
  765. vmovupd %ymm8,1312(%rdi)
  766. # qhasm: mem256[ input_0 + 1568 ] = x6
  767. # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1)
  768. # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi)
  769. vmovupd %ymm12,1568(%rdi)
  770. # qhasm: mem256[ input_0 + 1824 ] = x7
  771. # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1)
  772. # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi)
  773. vmovupd %ymm6,1824(%rdi)
  774. # qhasm: x0 = mem256[ input_0 + 64 ]
  775. # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7
  776. # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6
  777. vmovupd 64(%rdi),%ymm6
  778. # qhasm: x1 = mem256[ input_0 + 320 ]
  779. # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8
  780. # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7
  781. vmovupd 320(%rdi),%ymm7
  782. # qhasm: x2 = mem256[ input_0 + 576 ]
  783. # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9
  784. # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8
  785. vmovupd 576(%rdi),%ymm8
  786. # qhasm: x3 = mem256[ input_0 + 832 ]
  787. # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10
  788. # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9
  789. vmovupd 832(%rdi),%ymm9
  790. # qhasm: x4 = mem256[ input_0 + 1088 ]
  791. # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11
  792. # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10
  793. vmovupd 1088(%rdi),%ymm10
  794. # qhasm: x5 = mem256[ input_0 + 1344 ]
  795. # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12
  796. # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11
  797. vmovupd 1344(%rdi),%ymm11
  798. # qhasm: x6 = mem256[ input_0 + 1600 ]
  799. # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13
  800. # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12
  801. vmovupd 1600(%rdi),%ymm12
  802. # qhasm: x7 = mem256[ input_0 + 1856 ]
  803. # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14
  804. # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13
  805. vmovupd 1856(%rdi),%ymm13
  806. # qhasm: v00 = x0 & mask0
  807. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  808. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  809. vpand %ymm6,%ymm0,%ymm14
  810. # qhasm: 4x v10 = x4 << 32
  811. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  812. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  813. vpsllq $32,%ymm10,%ymm15
  814. # qhasm: 4x v01 = x0 unsigned>> 32
  815. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  816. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  817. vpsrlq $32,%ymm6,%ymm6
  818. # qhasm: v11 = x4 & mask1
  819. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  820. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  821. vpand %ymm10,%ymm1,%ymm10
  822. # qhasm: x0 = v00 | v10
  823. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  824. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  825. vpor %ymm14,%ymm15,%ymm14
  826. # qhasm: x4 = v01 | v11
  827. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  828. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  829. vpor %ymm6,%ymm10,%ymm6
  830. # qhasm: v00 = x1 & mask0
  831. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  832. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  833. vpand %ymm7,%ymm0,%ymm10
  834. # qhasm: 4x v10 = x5 << 32
  835. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  836. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  837. vpsllq $32,%ymm11,%ymm15
  838. # qhasm: 4x v01 = x1 unsigned>> 32
  839. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  840. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  841. vpsrlq $32,%ymm7,%ymm7
  842. # qhasm: v11 = x5 & mask1
  843. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  844. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  845. vpand %ymm11,%ymm1,%ymm11
  846. # qhasm: x1 = v00 | v10
  847. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  848. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  849. vpor %ymm10,%ymm15,%ymm10
  850. # qhasm: x5 = v01 | v11
  851. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  852. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  853. vpor %ymm7,%ymm11,%ymm7
  854. # qhasm: v00 = x2 & mask0
  855. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  856. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  857. vpand %ymm8,%ymm0,%ymm11
  858. # qhasm: 4x v10 = x6 << 32
  859. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  860. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  861. vpsllq $32,%ymm12,%ymm15
  862. # qhasm: 4x v01 = x2 unsigned>> 32
  863. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  864. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  865. vpsrlq $32,%ymm8,%ymm8
  866. # qhasm: v11 = x6 & mask1
  867. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  868. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  869. vpand %ymm12,%ymm1,%ymm12
  870. # qhasm: x2 = v00 | v10
  871. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  872. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  873. vpor %ymm11,%ymm15,%ymm11
  874. # qhasm: x6 = v01 | v11
  875. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  876. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  877. vpor %ymm8,%ymm12,%ymm8
  878. # qhasm: v00 = x3 & mask0
  879. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  880. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  881. vpand %ymm9,%ymm0,%ymm12
  882. # qhasm: 4x v10 = x7 << 32
  883. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  884. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  885. vpsllq $32,%ymm13,%ymm15
  886. # qhasm: 4x v01 = x3 unsigned>> 32
  887. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  888. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  889. vpsrlq $32,%ymm9,%ymm9
  890. # qhasm: v11 = x7 & mask1
  891. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  892. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  893. vpand %ymm13,%ymm1,%ymm13
  894. # qhasm: x3 = v00 | v10
  895. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  896. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  897. vpor %ymm12,%ymm15,%ymm12
  898. # qhasm: x7 = v01 | v11
  899. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  900. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  901. vpor %ymm9,%ymm13,%ymm9
  902. # qhasm: v00 = x0 & mask2
  903. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  904. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  905. vpand %ymm14,%ymm2,%ymm13
  906. # qhasm: 8x v10 = x2 << 16
  907. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  908. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  909. vpslld $16,%ymm11,%ymm15
  910. # qhasm: 8x v01 = x0 unsigned>> 16
  911. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  912. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  913. vpsrld $16,%ymm14,%ymm14
  914. # qhasm: v11 = x2 & mask3
  915. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  916. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  917. vpand %ymm11,%ymm3,%ymm11
  918. # qhasm: x0 = v00 | v10
  919. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  920. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  921. vpor %ymm13,%ymm15,%ymm13
  922. # qhasm: x2 = v01 | v11
  923. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  924. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  925. vpor %ymm14,%ymm11,%ymm11
  926. # qhasm: v00 = x1 & mask2
  927. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  928. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  929. vpand %ymm10,%ymm2,%ymm14
  930. # qhasm: 8x v10 = x3 << 16
  931. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  932. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  933. vpslld $16,%ymm12,%ymm15
  934. # qhasm: 8x v01 = x1 unsigned>> 16
  935. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  936. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  937. vpsrld $16,%ymm10,%ymm10
  938. # qhasm: v11 = x3 & mask3
  939. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  940. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  941. vpand %ymm12,%ymm3,%ymm12
  942. # qhasm: x1 = v00 | v10
  943. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  944. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  945. vpor %ymm14,%ymm15,%ymm14
  946. # qhasm: x3 = v01 | v11
  947. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  948. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  949. vpor %ymm10,%ymm12,%ymm10
  950. # qhasm: v00 = x4 & mask2
  951. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  952. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  953. vpand %ymm6,%ymm2,%ymm12
  954. # qhasm: 8x v10 = x6 << 16
  955. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  956. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  957. vpslld $16,%ymm8,%ymm15
  958. # qhasm: 8x v01 = x4 unsigned>> 16
  959. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  960. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  961. vpsrld $16,%ymm6,%ymm6
  962. # qhasm: v11 = x6 & mask3
  963. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  964. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  965. vpand %ymm8,%ymm3,%ymm8
  966. # qhasm: x4 = v00 | v10
  967. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  968. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  969. vpor %ymm12,%ymm15,%ymm12
  970. # qhasm: x6 = v01 | v11
  971. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  972. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  973. vpor %ymm6,%ymm8,%ymm6
  974. # qhasm: v00 = x5 & mask2
  975. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  976. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  977. vpand %ymm7,%ymm2,%ymm8
  978. # qhasm: 8x v10 = x7 << 16
  979. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  980. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  981. vpslld $16,%ymm9,%ymm15
  982. # qhasm: 8x v01 = x5 unsigned>> 16
  983. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  984. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  985. vpsrld $16,%ymm7,%ymm7
  986. # qhasm: v11 = x7 & mask3
  987. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  988. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  989. vpand %ymm9,%ymm3,%ymm9
  990. # qhasm: x5 = v00 | v10
  991. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  992. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  993. vpor %ymm8,%ymm15,%ymm8
  994. # qhasm: x7 = v01 | v11
  995. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  996. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  997. vpor %ymm7,%ymm9,%ymm7
  998. # qhasm: v00 = x0 & mask4
  999. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  1000. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  1001. vpand %ymm13,%ymm4,%ymm9
  1002. # qhasm: 16x v10 = x1 << 8
  1003. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  1004. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  1005. vpsllw $8,%ymm14,%ymm15
  1006. # qhasm: 16x v01 = x0 unsigned>> 8
  1007. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  1008. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  1009. vpsrlw $8,%ymm13,%ymm13
  1010. # qhasm: v11 = x1 & mask5
  1011. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  1012. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  1013. vpand %ymm14,%ymm5,%ymm14
  1014. # qhasm: x0 = v00 | v10
  1015. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  1016. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  1017. vpor %ymm9,%ymm15,%ymm9
  1018. # qhasm: x1 = v01 | v11
  1019. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  1020. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  1021. vpor %ymm13,%ymm14,%ymm13
  1022. # qhasm: v00 = x2 & mask4
  1023. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  1024. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  1025. vpand %ymm11,%ymm4,%ymm14
  1026. # qhasm: 16x v10 = x3 << 8
  1027. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  1028. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  1029. vpsllw $8,%ymm10,%ymm15
  1030. # qhasm: 16x v01 = x2 unsigned>> 8
  1031. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  1032. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  1033. vpsrlw $8,%ymm11,%ymm11
  1034. # qhasm: v11 = x3 & mask5
  1035. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  1036. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  1037. vpand %ymm10,%ymm5,%ymm10
  1038. # qhasm: x2 = v00 | v10
  1039. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  1040. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  1041. vpor %ymm14,%ymm15,%ymm14
  1042. # qhasm: x3 = v01 | v11
  1043. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  1044. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  1045. vpor %ymm11,%ymm10,%ymm10
  1046. # qhasm: v00 = x4 & mask4
  1047. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  1048. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  1049. vpand %ymm12,%ymm4,%ymm11
  1050. # qhasm: 16x v10 = x5 << 8
  1051. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  1052. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  1053. vpsllw $8,%ymm8,%ymm15
  1054. # qhasm: 16x v01 = x4 unsigned>> 8
  1055. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  1056. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  1057. vpsrlw $8,%ymm12,%ymm12
  1058. # qhasm: v11 = x5 & mask5
  1059. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  1060. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  1061. vpand %ymm8,%ymm5,%ymm8
  1062. # qhasm: x4 = v00 | v10
  1063. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  1064. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  1065. vpor %ymm11,%ymm15,%ymm11
  1066. # qhasm: x5 = v01 | v11
  1067. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  1068. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  1069. vpor %ymm12,%ymm8,%ymm8
  1070. # qhasm: v00 = x6 & mask4
  1071. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  1072. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  1073. vpand %ymm6,%ymm4,%ymm12
  1074. # qhasm: 16x v10 = x7 << 8
  1075. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  1076. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  1077. vpsllw $8,%ymm7,%ymm15
  1078. # qhasm: 16x v01 = x6 unsigned>> 8
  1079. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  1080. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  1081. vpsrlw $8,%ymm6,%ymm6
  1082. # qhasm: v11 = x7 & mask5
  1083. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  1084. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  1085. vpand %ymm7,%ymm5,%ymm7
  1086. # qhasm: x6 = v00 | v10
  1087. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  1088. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  1089. vpor %ymm12,%ymm15,%ymm12
  1090. # qhasm: x7 = v01 | v11
  1091. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  1092. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  1093. vpor %ymm6,%ymm7,%ymm6
  1094. # qhasm: mem256[ input_0 + 64 ] = x0
  1095. # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1)
  1096. # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi)
  1097. vmovupd %ymm9,64(%rdi)
  1098. # qhasm: mem256[ input_0 + 320 ] = x1
  1099. # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1)
  1100. # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi)
  1101. vmovupd %ymm13,320(%rdi)
  1102. # qhasm: mem256[ input_0 + 576 ] = x2
  1103. # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
  1104. # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
  1105. vmovupd %ymm14,576(%rdi)
  1106. # qhasm: mem256[ input_0 + 832 ] = x3
  1107. # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1)
  1108. # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi)
  1109. vmovupd %ymm10,832(%rdi)
  1110. # qhasm: mem256[ input_0 + 1088 ] = x4
  1111. # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1)
  1112. # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi)
  1113. vmovupd %ymm11,1088(%rdi)
  1114. # qhasm: mem256[ input_0 + 1344 ] = x5
  1115. # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1)
  1116. # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi)
  1117. vmovupd %ymm8,1344(%rdi)
  1118. # qhasm: mem256[ input_0 + 1600 ] = x6
  1119. # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1)
  1120. # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi)
  1121. vmovupd %ymm12,1600(%rdi)
  1122. # qhasm: mem256[ input_0 + 1856 ] = x7
  1123. # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1)
  1124. # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi)
  1125. vmovupd %ymm6,1856(%rdi)
  1126. # qhasm: x0 = mem256[ input_0 + 96 ]
  1127. # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7
  1128. # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6
  1129. vmovupd 96(%rdi),%ymm6
  1130. # qhasm: x1 = mem256[ input_0 + 352 ]
  1131. # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8
  1132. # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7
  1133. vmovupd 352(%rdi),%ymm7
  1134. # qhasm: x2 = mem256[ input_0 + 608 ]
  1135. # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9
  1136. # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8
  1137. vmovupd 608(%rdi),%ymm8
  1138. # qhasm: x3 = mem256[ input_0 + 864 ]
  1139. # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
  1140. # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
  1141. vmovupd 864(%rdi),%ymm9
  1142. # qhasm: x4 = mem256[ input_0 + 1120 ]
  1143. # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11
  1144. # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10
  1145. vmovupd 1120(%rdi),%ymm10
  1146. # qhasm: x5 = mem256[ input_0 + 1376 ]
  1147. # asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12
  1148. # asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11
  1149. vmovupd 1376(%rdi),%ymm11
  1150. # qhasm: x6 = mem256[ input_0 + 1632 ]
  1151. # asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13
  1152. # asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12
  1153. vmovupd 1632(%rdi),%ymm12
  1154. # qhasm: x7 = mem256[ input_0 + 1888 ]
  1155. # asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14
  1156. # asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13
  1157. vmovupd 1888(%rdi),%ymm13
  1158. # qhasm: v00 = x0 & mask0
  1159. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  1160. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  1161. vpand %ymm6,%ymm0,%ymm14
  1162. # qhasm: 4x v10 = x4 << 32
  1163. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  1164. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  1165. vpsllq $32,%ymm10,%ymm15
  1166. # qhasm: 4x v01 = x0 unsigned>> 32
  1167. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  1168. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  1169. vpsrlq $32,%ymm6,%ymm6
  1170. # qhasm: v11 = x4 & mask1
  1171. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  1172. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  1173. vpand %ymm10,%ymm1,%ymm10
  1174. # qhasm: x0 = v00 | v10
  1175. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  1176. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  1177. vpor %ymm14,%ymm15,%ymm14
  1178. # qhasm: x4 = v01 | v11
  1179. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  1180. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  1181. vpor %ymm6,%ymm10,%ymm6
  1182. # qhasm: v00 = x1 & mask0
  1183. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  1184. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  1185. vpand %ymm7,%ymm0,%ymm10
  1186. # qhasm: 4x v10 = x5 << 32
  1187. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  1188. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  1189. vpsllq $32,%ymm11,%ymm15
  1190. # qhasm: 4x v01 = x1 unsigned>> 32
  1191. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  1192. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  1193. vpsrlq $32,%ymm7,%ymm7
  1194. # qhasm: v11 = x5 & mask1
  1195. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  1196. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  1197. vpand %ymm11,%ymm1,%ymm11
  1198. # qhasm: x1 = v00 | v10
  1199. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  1200. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  1201. vpor %ymm10,%ymm15,%ymm10
  1202. # qhasm: x5 = v01 | v11
  1203. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  1204. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  1205. vpor %ymm7,%ymm11,%ymm7
  1206. # qhasm: v00 = x2 & mask0
  1207. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  1208. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  1209. vpand %ymm8,%ymm0,%ymm11
  1210. # qhasm: 4x v10 = x6 << 32
  1211. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  1212. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  1213. vpsllq $32,%ymm12,%ymm15
  1214. # qhasm: 4x v01 = x2 unsigned>> 32
  1215. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  1216. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  1217. vpsrlq $32,%ymm8,%ymm8
  1218. # qhasm: v11 = x6 & mask1
  1219. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  1220. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  1221. vpand %ymm12,%ymm1,%ymm12
  1222. # qhasm: x2 = v00 | v10
  1223. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  1224. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  1225. vpor %ymm11,%ymm15,%ymm11
  1226. # qhasm: x6 = v01 | v11
  1227. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  1228. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  1229. vpor %ymm8,%ymm12,%ymm8
  1230. # qhasm: v00 = x3 & mask0
  1231. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  1232. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  1233. vpand %ymm9,%ymm0,%ymm12
  1234. # qhasm: 4x v10 = x7 << 32
  1235. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  1236. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  1237. vpsllq $32,%ymm13,%ymm15
  1238. # qhasm: 4x v01 = x3 unsigned>> 32
  1239. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  1240. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  1241. vpsrlq $32,%ymm9,%ymm9
  1242. # qhasm: v11 = x7 & mask1
  1243. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  1244. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  1245. vpand %ymm13,%ymm1,%ymm13
  1246. # qhasm: x3 = v00 | v10
  1247. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  1248. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  1249. vpor %ymm12,%ymm15,%ymm12
  1250. # qhasm: x7 = v01 | v11
  1251. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  1252. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  1253. vpor %ymm9,%ymm13,%ymm9
  1254. # qhasm: v00 = x0 & mask2
  1255. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  1256. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  1257. vpand %ymm14,%ymm2,%ymm13
  1258. # qhasm: 8x v10 = x2 << 16
  1259. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  1260. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  1261. vpslld $16,%ymm11,%ymm15
  1262. # qhasm: 8x v01 = x0 unsigned>> 16
  1263. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  1264. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  1265. vpsrld $16,%ymm14,%ymm14
  1266. # qhasm: v11 = x2 & mask3
  1267. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  1268. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  1269. vpand %ymm11,%ymm3,%ymm11
  1270. # qhasm: x0 = v00 | v10
  1271. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  1272. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  1273. vpor %ymm13,%ymm15,%ymm13
  1274. # qhasm: x2 = v01 | v11
  1275. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  1276. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  1277. vpor %ymm14,%ymm11,%ymm11
  1278. # qhasm: v00 = x1 & mask2
  1279. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  1280. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  1281. vpand %ymm10,%ymm2,%ymm14
  1282. # qhasm: 8x v10 = x3 << 16
  1283. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  1284. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  1285. vpslld $16,%ymm12,%ymm15
  1286. # qhasm: 8x v01 = x1 unsigned>> 16
  1287. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  1288. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  1289. vpsrld $16,%ymm10,%ymm10
  1290. # qhasm: v11 = x3 & mask3
  1291. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  1292. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  1293. vpand %ymm12,%ymm3,%ymm12
  1294. # qhasm: x1 = v00 | v10
  1295. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  1296. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  1297. vpor %ymm14,%ymm15,%ymm14
  1298. # qhasm: x3 = v01 | v11
  1299. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  1300. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  1301. vpor %ymm10,%ymm12,%ymm10
  1302. # qhasm: v00 = x4 & mask2
  1303. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  1304. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  1305. vpand %ymm6,%ymm2,%ymm12
  1306. # qhasm: 8x v10 = x6 << 16
  1307. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  1308. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  1309. vpslld $16,%ymm8,%ymm15
  1310. # qhasm: 8x v01 = x4 unsigned>> 16
  1311. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  1312. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  1313. vpsrld $16,%ymm6,%ymm6
  1314. # qhasm: v11 = x6 & mask3
  1315. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  1316. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  1317. vpand %ymm8,%ymm3,%ymm8
  1318. # qhasm: x4 = v00 | v10
  1319. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  1320. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  1321. vpor %ymm12,%ymm15,%ymm12
  1322. # qhasm: x6 = v01 | v11
  1323. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  1324. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  1325. vpor %ymm6,%ymm8,%ymm6
  1326. # qhasm: v00 = x5 & mask2
  1327. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  1328. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  1329. vpand %ymm7,%ymm2,%ymm8
  1330. # qhasm: 8x v10 = x7 << 16
  1331. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  1332. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  1333. vpslld $16,%ymm9,%ymm15
  1334. # qhasm: 8x v01 = x5 unsigned>> 16
  1335. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  1336. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  1337. vpsrld $16,%ymm7,%ymm7
  1338. # qhasm: v11 = x7 & mask3
  1339. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  1340. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  1341. vpand %ymm9,%ymm3,%ymm9
  1342. # qhasm: x5 = v00 | v10
  1343. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  1344. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  1345. vpor %ymm8,%ymm15,%ymm8
  1346. # qhasm: x7 = v01 | v11
  1347. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  1348. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  1349. vpor %ymm7,%ymm9,%ymm7
  1350. # qhasm: v00 = x0 & mask4
  1351. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  1352. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  1353. vpand %ymm13,%ymm4,%ymm9
  1354. # qhasm: 16x v10 = x1 << 8
  1355. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  1356. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  1357. vpsllw $8,%ymm14,%ymm15
  1358. # qhasm: 16x v01 = x0 unsigned>> 8
  1359. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  1360. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  1361. vpsrlw $8,%ymm13,%ymm13
  1362. # qhasm: v11 = x1 & mask5
  1363. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  1364. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  1365. vpand %ymm14,%ymm5,%ymm14
  1366. # qhasm: x0 = v00 | v10
  1367. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  1368. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  1369. vpor %ymm9,%ymm15,%ymm9
  1370. # qhasm: x1 = v01 | v11
  1371. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  1372. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  1373. vpor %ymm13,%ymm14,%ymm13
  1374. # qhasm: v00 = x2 & mask4
  1375. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  1376. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  1377. vpand %ymm11,%ymm4,%ymm14
  1378. # qhasm: 16x v10 = x3 << 8
  1379. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  1380. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  1381. vpsllw $8,%ymm10,%ymm15
  1382. # qhasm: 16x v01 = x2 unsigned>> 8
  1383. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  1384. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  1385. vpsrlw $8,%ymm11,%ymm11
  1386. # qhasm: v11 = x3 & mask5
  1387. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  1388. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  1389. vpand %ymm10,%ymm5,%ymm10
  1390. # qhasm: x2 = v00 | v10
  1391. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  1392. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  1393. vpor %ymm14,%ymm15,%ymm14
  1394. # qhasm: x3 = v01 | v11
  1395. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  1396. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  1397. vpor %ymm11,%ymm10,%ymm10
  1398. # qhasm: v00 = x4 & mask4
  1399. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  1400. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  1401. vpand %ymm12,%ymm4,%ymm11
  1402. # qhasm: 16x v10 = x5 << 8
  1403. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  1404. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  1405. vpsllw $8,%ymm8,%ymm15
  1406. # qhasm: 16x v01 = x4 unsigned>> 8
  1407. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  1408. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  1409. vpsrlw $8,%ymm12,%ymm12
  1410. # qhasm: v11 = x5 & mask5
  1411. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  1412. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  1413. vpand %ymm8,%ymm5,%ymm8
  1414. # qhasm: x4 = v00 | v10
  1415. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  1416. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  1417. vpor %ymm11,%ymm15,%ymm11
  1418. # qhasm: x5 = v01 | v11
  1419. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  1420. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  1421. vpor %ymm12,%ymm8,%ymm8
  1422. # qhasm: v00 = x6 & mask4
  1423. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  1424. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  1425. vpand %ymm6,%ymm4,%ymm12
  1426. # qhasm: 16x v10 = x7 << 8
  1427. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  1428. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  1429. vpsllw $8,%ymm7,%ymm15
  1430. # qhasm: 16x v01 = x6 unsigned>> 8
  1431. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  1432. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  1433. vpsrlw $8,%ymm6,%ymm6
  1434. # qhasm: v11 = x7 & mask5
  1435. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  1436. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  1437. vpand %ymm7,%ymm5,%ymm7
  1438. # qhasm: x6 = v00 | v10
  1439. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  1440. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  1441. vpor %ymm12,%ymm15,%ymm12
  1442. # qhasm: x7 = v01 | v11
  1443. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  1444. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  1445. vpor %ymm6,%ymm7,%ymm6
  1446. # qhasm: mem256[ input_0 + 96 ] = x0
  1447. # asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1)
  1448. # asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi)
  1449. vmovupd %ymm9,96(%rdi)
  1450. # qhasm: mem256[ input_0 + 352 ] = x1
  1451. # asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1)
  1452. # asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi)
  1453. vmovupd %ymm13,352(%rdi)
  1454. # qhasm: mem256[ input_0 + 608 ] = x2
  1455. # asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1)
  1456. # asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi)
  1457. vmovupd %ymm14,608(%rdi)
  1458. # qhasm: mem256[ input_0 + 864 ] = x3
  1459. # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
  1460. # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
  1461. vmovupd %ymm10,864(%rdi)
  1462. # qhasm: mem256[ input_0 + 1120 ] = x4
  1463. # asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1)
  1464. # asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi)
  1465. vmovupd %ymm11,1120(%rdi)
  1466. # qhasm: mem256[ input_0 + 1376 ] = x5
  1467. # asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1)
  1468. # asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi)
  1469. vmovupd %ymm8,1376(%rdi)
  1470. # qhasm: mem256[ input_0 + 1632 ] = x6
  1471. # asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1)
  1472. # asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi)
  1473. vmovupd %ymm12,1632(%rdi)
  1474. # qhasm: mem256[ input_0 + 1888 ] = x7
  1475. # asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1)
  1476. # asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi)
  1477. vmovupd %ymm6,1888(%rdi)
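# Each load/transform/store block follows the same three-pass butterfly:
# a 32-bit swap between registers four apart ((x0,x4), (x1,x5), (x2,x6),
# (x3,x7)), then a 16-bit swap between registers two apart, then an 8-bit
# swap between adjacent registers.  As a rough sketch only (not part of the
# generated code; assumes <immintrin.h>, __m256i variables for x0..x7, and
# that mask0/mask1 select the low/high 32 bits of every 64-bit lane), one
# 32-bit step corresponds to these AVX2 intrinsics:
#
#     __m256i v00 = _mm256_and_si256(x0, mask0);   /* keep low 32 of x0        */
#     __m256i v10 = _mm256_slli_epi64(x4, 32);     /* low 32 of x4, moved up   */
#     __m256i v01 = _mm256_srli_epi64(x0, 32);     /* high 32 of x0, moved down */
#     __m256i v11 = _mm256_and_si256(x4, mask1);   /* keep high 32 of x4       */
#     x0 = _mm256_or_si256(v00, v10);
#     x4 = _mm256_or_si256(v01, v11);
#
# The 16-bit pass has the same shape with _mm256_slli_epi32/_mm256_srli_epi32
# and mask2/mask3; the 8-bit pass uses _mm256_slli_epi16/_mm256_srli_epi16
# and mask4/mask5.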
  1478. # qhasm: x0 = mem256[ input_0 + 128 ]
  1479. # asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7
  1480. # asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6
  1481. vmovupd 128(%rdi),%ymm6
  1482. # qhasm: x1 = mem256[ input_0 + 384 ]
  1483. # asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8
  1484. # asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7
  1485. vmovupd 384(%rdi),%ymm7
  1486. # qhasm: x2 = mem256[ input_0 + 640 ]
  1487. # asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9
  1488. # asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8
  1489. vmovupd 640(%rdi),%ymm8
  1490. # qhasm: x3 = mem256[ input_0 + 896 ]
  1491. # asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10
  1492. # asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9
  1493. vmovupd 896(%rdi),%ymm9
  1494. # qhasm: x4 = mem256[ input_0 + 1152 ]
  1495. # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
  1496. # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
  1497. vmovupd 1152(%rdi),%ymm10
  1498. # qhasm: x5 = mem256[ input_0 + 1408 ]
  1499. # asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12
  1500. # asm 2: vmovupd 1408(<input_0=%rdi),>x5=%ymm11
  1501. vmovupd 1408(%rdi),%ymm11
  1502. # qhasm: x6 = mem256[ input_0 + 1664 ]
  1503. # asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13
  1504. # asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12
  1505. vmovupd 1664(%rdi),%ymm12
  1506. # qhasm: x7 = mem256[ input_0 + 1920 ]
  1507. # asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14
  1508. # asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13
  1509. vmovupd 1920(%rdi),%ymm13
  1510. # qhasm: v00 = x0 & mask0
  1511. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  1512. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  1513. vpand %ymm6,%ymm0,%ymm14
  1514. # qhasm: 4x v10 = x4 << 32
  1515. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  1516. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  1517. vpsllq $32,%ymm10,%ymm15
  1518. # qhasm: 4x v01 = x0 unsigned>> 32
  1519. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  1520. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  1521. vpsrlq $32,%ymm6,%ymm6
  1522. # qhasm: v11 = x4 & mask1
  1523. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  1524. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  1525. vpand %ymm10,%ymm1,%ymm10
  1526. # qhasm: x0 = v00 | v10
  1527. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  1528. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  1529. vpor %ymm14,%ymm15,%ymm14
  1530. # qhasm: x4 = v01 | v11
  1531. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  1532. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  1533. vpor %ymm6,%ymm10,%ymm6
  1534. # qhasm: v00 = x1 & mask0
  1535. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  1536. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  1537. vpand %ymm7,%ymm0,%ymm10
  1538. # qhasm: 4x v10 = x5 << 32
  1539. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  1540. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  1541. vpsllq $32,%ymm11,%ymm15
  1542. # qhasm: 4x v01 = x1 unsigned>> 32
  1543. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  1544. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  1545. vpsrlq $32,%ymm7,%ymm7
  1546. # qhasm: v11 = x5 & mask1
  1547. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  1548. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  1549. vpand %ymm11,%ymm1,%ymm11
  1550. # qhasm: x1 = v00 | v10
  1551. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  1552. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  1553. vpor %ymm10,%ymm15,%ymm10
  1554. # qhasm: x5 = v01 | v11
  1555. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  1556. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  1557. vpor %ymm7,%ymm11,%ymm7
  1558. # qhasm: v00 = x2 & mask0
  1559. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  1560. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  1561. vpand %ymm8,%ymm0,%ymm11
  1562. # qhasm: 4x v10 = x6 << 32
  1563. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  1564. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  1565. vpsllq $32,%ymm12,%ymm15
  1566. # qhasm: 4x v01 = x2 unsigned>> 32
  1567. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  1568. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  1569. vpsrlq $32,%ymm8,%ymm8
  1570. # qhasm: v11 = x6 & mask1
  1571. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  1572. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  1573. vpand %ymm12,%ymm1,%ymm12
  1574. # qhasm: x2 = v00 | v10
  1575. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  1576. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  1577. vpor %ymm11,%ymm15,%ymm11
  1578. # qhasm: x6 = v01 | v11
  1579. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  1580. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  1581. vpor %ymm8,%ymm12,%ymm8
  1582. # qhasm: v00 = x3 & mask0
  1583. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  1584. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  1585. vpand %ymm9,%ymm0,%ymm12
  1586. # qhasm: 4x v10 = x7 << 32
  1587. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  1588. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  1589. vpsllq $32,%ymm13,%ymm15
  1590. # qhasm: 4x v01 = x3 unsigned>> 32
  1591. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  1592. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  1593. vpsrlq $32,%ymm9,%ymm9
  1594. # qhasm: v11 = x7 & mask1
  1595. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  1596. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  1597. vpand %ymm13,%ymm1,%ymm13
  1598. # qhasm: x3 = v00 | v10
  1599. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  1600. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  1601. vpor %ymm12,%ymm15,%ymm12
  1602. # qhasm: x7 = v01 | v11
  1603. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  1604. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  1605. vpor %ymm9,%ymm13,%ymm9
  1606. # qhasm: v00 = x0 & mask2
  1607. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  1608. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  1609. vpand %ymm14,%ymm2,%ymm13
  1610. # qhasm: 8x v10 = x2 << 16
  1611. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  1612. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  1613. vpslld $16,%ymm11,%ymm15
  1614. # qhasm: 8x v01 = x0 unsigned>> 16
  1615. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  1616. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  1617. vpsrld $16,%ymm14,%ymm14
  1618. # qhasm: v11 = x2 & mask3
  1619. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  1620. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  1621. vpand %ymm11,%ymm3,%ymm11
  1622. # qhasm: x0 = v00 | v10
  1623. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  1624. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  1625. vpor %ymm13,%ymm15,%ymm13
  1626. # qhasm: x2 = v01 | v11
  1627. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  1628. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  1629. vpor %ymm14,%ymm11,%ymm11
  1630. # qhasm: v00 = x1 & mask2
  1631. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  1632. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  1633. vpand %ymm10,%ymm2,%ymm14
  1634. # qhasm: 8x v10 = x3 << 16
  1635. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  1636. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  1637. vpslld $16,%ymm12,%ymm15
  1638. # qhasm: 8x v01 = x1 unsigned>> 16
  1639. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  1640. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  1641. vpsrld $16,%ymm10,%ymm10
  1642. # qhasm: v11 = x3 & mask3
  1643. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  1644. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  1645. vpand %ymm12,%ymm3,%ymm12
  1646. # qhasm: x1 = v00 | v10
  1647. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  1648. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  1649. vpor %ymm14,%ymm15,%ymm14
  1650. # qhasm: x3 = v01 | v11
  1651. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  1652. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  1653. vpor %ymm10,%ymm12,%ymm10
  1654. # qhasm: v00 = x4 & mask2
  1655. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  1656. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  1657. vpand %ymm6,%ymm2,%ymm12
  1658. # qhasm: 8x v10 = x6 << 16
  1659. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  1660. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  1661. vpslld $16,%ymm8,%ymm15
  1662. # qhasm: 8x v01 = x4 unsigned>> 16
  1663. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  1664. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  1665. vpsrld $16,%ymm6,%ymm6
  1666. # qhasm: v11 = x6 & mask3
  1667. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  1668. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  1669. vpand %ymm8,%ymm3,%ymm8
  1670. # qhasm: x4 = v00 | v10
  1671. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  1672. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  1673. vpor %ymm12,%ymm15,%ymm12
  1674. # qhasm: x6 = v01 | v11
  1675. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  1676. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  1677. vpor %ymm6,%ymm8,%ymm6
  1678. # qhasm: v00 = x5 & mask2
  1679. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  1680. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  1681. vpand %ymm7,%ymm2,%ymm8
  1682. # qhasm: 8x v10 = x7 << 16
  1683. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  1684. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  1685. vpslld $16,%ymm9,%ymm15
  1686. # qhasm: 8x v01 = x5 unsigned>> 16
  1687. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  1688. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  1689. vpsrld $16,%ymm7,%ymm7
  1690. # qhasm: v11 = x7 & mask3
  1691. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  1692. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  1693. vpand %ymm9,%ymm3,%ymm9
  1694. # qhasm: x5 = v00 | v10
  1695. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  1696. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  1697. vpor %ymm8,%ymm15,%ymm8
  1698. # qhasm: x7 = v01 | v11
  1699. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  1700. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  1701. vpor %ymm7,%ymm9,%ymm7
  1702. # qhasm: v00 = x0 & mask4
  1703. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  1704. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  1705. vpand %ymm13,%ymm4,%ymm9
  1706. # qhasm: 16x v10 = x1 << 8
  1707. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  1708. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  1709. vpsllw $8,%ymm14,%ymm15
  1710. # qhasm: 16x v01 = x0 unsigned>> 8
  1711. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  1712. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  1713. vpsrlw $8,%ymm13,%ymm13
  1714. # qhasm: v11 = x1 & mask5
  1715. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  1716. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  1717. vpand %ymm14,%ymm5,%ymm14
  1718. # qhasm: x0 = v00 | v10
  1719. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  1720. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  1721. vpor %ymm9,%ymm15,%ymm9
  1722. # qhasm: x1 = v01 | v11
  1723. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  1724. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  1725. vpor %ymm13,%ymm14,%ymm13
  1726. # qhasm: v00 = x2 & mask4
  1727. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  1728. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  1729. vpand %ymm11,%ymm4,%ymm14
  1730. # qhasm: 16x v10 = x3 << 8
  1731. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  1732. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  1733. vpsllw $8,%ymm10,%ymm15
  1734. # qhasm: 16x v01 = x2 unsigned>> 8
  1735. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  1736. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  1737. vpsrlw $8,%ymm11,%ymm11
  1738. # qhasm: v11 = x3 & mask5
  1739. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  1740. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  1741. vpand %ymm10,%ymm5,%ymm10
  1742. # qhasm: x2 = v00 | v10
  1743. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  1744. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  1745. vpor %ymm14,%ymm15,%ymm14
  1746. # qhasm: x3 = v01 | v11
  1747. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  1748. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  1749. vpor %ymm11,%ymm10,%ymm10
  1750. # qhasm: v00 = x4 & mask4
  1751. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  1752. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  1753. vpand %ymm12,%ymm4,%ymm11
  1754. # qhasm: 16x v10 = x5 << 8
  1755. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  1756. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  1757. vpsllw $8,%ymm8,%ymm15
  1758. # qhasm: 16x v01 = x4 unsigned>> 8
  1759. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  1760. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  1761. vpsrlw $8,%ymm12,%ymm12
  1762. # qhasm: v11 = x5 & mask5
  1763. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  1764. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  1765. vpand %ymm8,%ymm5,%ymm8
  1766. # qhasm: x4 = v00 | v10
  1767. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  1768. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  1769. vpor %ymm11,%ymm15,%ymm11
  1770. # qhasm: x5 = v01 | v11
  1771. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  1772. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  1773. vpor %ymm12,%ymm8,%ymm8
  1774. # qhasm: v00 = x6 & mask4
  1775. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  1776. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  1777. vpand %ymm6,%ymm4,%ymm12
  1778. # qhasm: 16x v10 = x7 << 8
  1779. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  1780. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  1781. vpsllw $8,%ymm7,%ymm15
  1782. # qhasm: 16x v01 = x6 unsigned>> 8
  1783. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  1784. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  1785. vpsrlw $8,%ymm6,%ymm6
  1786. # qhasm: v11 = x7 & mask5
  1787. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  1788. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  1789. vpand %ymm7,%ymm5,%ymm7
  1790. # qhasm: x6 = v00 | v10
  1791. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  1792. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  1793. vpor %ymm12,%ymm15,%ymm12
  1794. # qhasm: x7 = v01 | v11
  1795. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  1796. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  1797. vpor %ymm6,%ymm7,%ymm6
  1798. # qhasm: mem256[ input_0 + 128 ] = x0
  1799. # asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1)
  1800. # asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi)
  1801. vmovupd %ymm9,128(%rdi)
  1802. # qhasm: mem256[ input_0 + 384 ] = x1
  1803. # asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1)
  1804. # asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi)
  1805. vmovupd %ymm13,384(%rdi)
  1806. # qhasm: mem256[ input_0 + 640 ] = x2
  1807. # asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1)
  1808. # asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi)
  1809. vmovupd %ymm14,640(%rdi)
  1810. # qhasm: mem256[ input_0 + 896 ] = x3
  1811. # asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1)
  1812. # asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi)
  1813. vmovupd %ymm10,896(%rdi)
  1814. # qhasm: mem256[ input_0 + 1152 ] = x4
  1815. # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
  1816. # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
  1817. vmovupd %ymm11,1152(%rdi)
  1818. # qhasm: mem256[ input_0 + 1408 ] = x5
  1819. # asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1)
  1820. # asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi)
  1821. vmovupd %ymm8,1408(%rdi)
  1822. # qhasm: mem256[ input_0 + 1664 ] = x6
  1823. # asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1)
  1824. # asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi)
  1825. vmovupd %ymm12,1664(%rdi)
  1826. # qhasm: mem256[ input_0 + 1920 ] = x7
  1827. # asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1)
  1828. # asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi)
  1829. vmovupd %ymm6,1920(%rdi)
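# Memory access pattern: every block loads eight 256-bit rows that sit
# 256 bytes apart (128, 384, ..., 1920 just above; 160, 416, ..., 1952 just
# below), transforms them, and stores them back to the same offsets, so the
# transpose is performed in place on the 2048-byte buffer addressed by the
# routine's single pointer argument (input_0 / %rdi).  The base offsets seen
# in this stretch (96, 128, 160, 192) are four of what are presumably eight
# interleaved row groups.  A rough equivalent of the addressing, in
# illustrative C (placeholder names, not the generated code):
#
#     /* base r selects one interleaved row group; in is an unsigned char * */
#     __m256i x[8];
#     for (int j = 0; j < 8; j++)
#         x[j] = _mm256_loadu_si256((const __m256i *)(in + 32*r + 256*j));
#     /* ... the three swap passes shown above ... */
#     for (int j = 0; j < 8; j++)
#         _mm256_storeu_si256((__m256i *)(in + 32*r + 256*j), x[j]);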
  1830. # qhasm: x0 = mem256[ input_0 + 160 ]
  1831. # asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7
  1832. # asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6
  1833. vmovupd 160(%rdi),%ymm6
  1834. # qhasm: x1 = mem256[ input_0 + 416 ]
  1835. # asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8
  1836. # asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7
  1837. vmovupd 416(%rdi),%ymm7
  1838. # qhasm: x2 = mem256[ input_0 + 672 ]
  1839. # asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9
  1840. # asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8
  1841. vmovupd 672(%rdi),%ymm8
  1842. # qhasm: x3 = mem256[ input_0 + 928 ]
  1843. # asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10
  1844. # asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9
  1845. vmovupd 928(%rdi),%ymm9
  1846. # qhasm: x4 = mem256[ input_0 + 1184 ]
  1847. # asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11
  1848. # asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10
  1849. vmovupd 1184(%rdi),%ymm10
  1850. # qhasm: x5 = mem256[ input_0 + 1440 ]
  1851. # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
  1852. # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
  1853. vmovupd 1440(%rdi),%ymm11
  1854. # qhasm: x6 = mem256[ input_0 + 1696 ]
  1855. # asm 1: vmovupd 1696(<input_0=int64#1),>x6=reg256#13
  1856. # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12
  1857. vmovupd 1696(%rdi),%ymm12
  1858. # qhasm: x7 = mem256[ input_0 + 1952 ]
  1859. # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14
  1860. # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13
  1861. vmovupd 1952(%rdi),%ymm13
  1862. # qhasm: v00 = x0 & mask0
  1863. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  1864. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  1865. vpand %ymm6,%ymm0,%ymm14
  1866. # qhasm: 4x v10 = x4 << 32
  1867. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  1868. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  1869. vpsllq $32,%ymm10,%ymm15
  1870. # qhasm: 4x v01 = x0 unsigned>> 32
  1871. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  1872. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  1873. vpsrlq $32,%ymm6,%ymm6
  1874. # qhasm: v11 = x4 & mask1
  1875. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  1876. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  1877. vpand %ymm10,%ymm1,%ymm10
  1878. # qhasm: x0 = v00 | v10
  1879. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  1880. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  1881. vpor %ymm14,%ymm15,%ymm14
  1882. # qhasm: x4 = v01 | v11
  1883. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  1884. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  1885. vpor %ymm6,%ymm10,%ymm6
  1886. # qhasm: v00 = x1 & mask0
  1887. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  1888. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  1889. vpand %ymm7,%ymm0,%ymm10
  1890. # qhasm: 4x v10 = x5 << 32
  1891. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  1892. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  1893. vpsllq $32,%ymm11,%ymm15
  1894. # qhasm: 4x v01 = x1 unsigned>> 32
  1895. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  1896. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  1897. vpsrlq $32,%ymm7,%ymm7
  1898. # qhasm: v11 = x5 & mask1
  1899. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  1900. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  1901. vpand %ymm11,%ymm1,%ymm11
  1902. # qhasm: x1 = v00 | v10
  1903. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  1904. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  1905. vpor %ymm10,%ymm15,%ymm10
  1906. # qhasm: x5 = v01 | v11
  1907. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  1908. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  1909. vpor %ymm7,%ymm11,%ymm7
  1910. # qhasm: v00 = x2 & mask0
  1911. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  1912. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  1913. vpand %ymm8,%ymm0,%ymm11
  1914. # qhasm: 4x v10 = x6 << 32
  1915. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  1916. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  1917. vpsllq $32,%ymm12,%ymm15
  1918. # qhasm: 4x v01 = x2 unsigned>> 32
  1919. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  1920. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  1921. vpsrlq $32,%ymm8,%ymm8
  1922. # qhasm: v11 = x6 & mask1
  1923. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  1924. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  1925. vpand %ymm12,%ymm1,%ymm12
  1926. # qhasm: x2 = v00 | v10
  1927. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  1928. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  1929. vpor %ymm11,%ymm15,%ymm11
  1930. # qhasm: x6 = v01 | v11
  1931. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  1932. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  1933. vpor %ymm8,%ymm12,%ymm8
  1934. # qhasm: v00 = x3 & mask0
  1935. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  1936. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  1937. vpand %ymm9,%ymm0,%ymm12
  1938. # qhasm: 4x v10 = x7 << 32
  1939. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  1940. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  1941. vpsllq $32,%ymm13,%ymm15
  1942. # qhasm: 4x v01 = x3 unsigned>> 32
  1943. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  1944. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  1945. vpsrlq $32,%ymm9,%ymm9
  1946. # qhasm: v11 = x7 & mask1
  1947. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  1948. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  1949. vpand %ymm13,%ymm1,%ymm13
  1950. # qhasm: x3 = v00 | v10
  1951. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  1952. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  1953. vpor %ymm12,%ymm15,%ymm12
  1954. # qhasm: x7 = v01 | v11
  1955. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  1956. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  1957. vpor %ymm9,%ymm13,%ymm9
  1958. # qhasm: v00 = x0 & mask2
  1959. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  1960. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  1961. vpand %ymm14,%ymm2,%ymm13
  1962. # qhasm: 8x v10 = x2 << 16
  1963. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  1964. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  1965. vpslld $16,%ymm11,%ymm15
  1966. # qhasm: 8x v01 = x0 unsigned>> 16
  1967. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  1968. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  1969. vpsrld $16,%ymm14,%ymm14
  1970. # qhasm: v11 = x2 & mask3
  1971. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  1972. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  1973. vpand %ymm11,%ymm3,%ymm11
  1974. # qhasm: x0 = v00 | v10
  1975. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  1976. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  1977. vpor %ymm13,%ymm15,%ymm13
  1978. # qhasm: x2 = v01 | v11
  1979. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  1980. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  1981. vpor %ymm14,%ymm11,%ymm11
  1982. # qhasm: v00 = x1 & mask2
  1983. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  1984. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  1985. vpand %ymm10,%ymm2,%ymm14
  1986. # qhasm: 8x v10 = x3 << 16
  1987. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  1988. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  1989. vpslld $16,%ymm12,%ymm15
  1990. # qhasm: 8x v01 = x1 unsigned>> 16
  1991. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  1992. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  1993. vpsrld $16,%ymm10,%ymm10
  1994. # qhasm: v11 = x3 & mask3
  1995. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  1996. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  1997. vpand %ymm12,%ymm3,%ymm12
  1998. # qhasm: x1 = v00 | v10
  1999. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  2000. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  2001. vpor %ymm14,%ymm15,%ymm14
  2002. # qhasm: x3 = v01 | v11
  2003. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  2004. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  2005. vpor %ymm10,%ymm12,%ymm10
  2006. # qhasm: v00 = x4 & mask2
  2007. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  2008. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  2009. vpand %ymm6,%ymm2,%ymm12
  2010. # qhasm: 8x v10 = x6 << 16
  2011. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  2012. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  2013. vpslld $16,%ymm8,%ymm15
  2014. # qhasm: 8x v01 = x4 unsigned>> 16
  2015. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  2016. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  2017. vpsrld $16,%ymm6,%ymm6
  2018. # qhasm: v11 = x6 & mask3
  2019. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  2020. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  2021. vpand %ymm8,%ymm3,%ymm8
  2022. # qhasm: x4 = v00 | v10
  2023. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  2024. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  2025. vpor %ymm12,%ymm15,%ymm12
  2026. # qhasm: x6 = v01 | v11
  2027. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  2028. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  2029. vpor %ymm6,%ymm8,%ymm6
  2030. # qhasm: v00 = x5 & mask2
  2031. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  2032. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  2033. vpand %ymm7,%ymm2,%ymm8
  2034. # qhasm: 8x v10 = x7 << 16
  2035. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  2036. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  2037. vpslld $16,%ymm9,%ymm15
  2038. # qhasm: 8x v01 = x5 unsigned>> 16
  2039. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  2040. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  2041. vpsrld $16,%ymm7,%ymm7
  2042. # qhasm: v11 = x7 & mask3
  2043. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  2044. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  2045. vpand %ymm9,%ymm3,%ymm9
  2046. # qhasm: x5 = v00 | v10
  2047. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  2048. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  2049. vpor %ymm8,%ymm15,%ymm8
  2050. # qhasm: x7 = v01 | v11
  2051. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  2052. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  2053. vpor %ymm7,%ymm9,%ymm7
  2054. # qhasm: v00 = x0 & mask4
  2055. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  2056. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  2057. vpand %ymm13,%ymm4,%ymm9
  2058. # qhasm: 16x v10 = x1 << 8
  2059. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  2060. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  2061. vpsllw $8,%ymm14,%ymm15
  2062. # qhasm: 16x v01 = x0 unsigned>> 8
  2063. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  2064. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  2065. vpsrlw $8,%ymm13,%ymm13
  2066. # qhasm: v11 = x1 & mask5
  2067. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  2068. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  2069. vpand %ymm14,%ymm5,%ymm14
  2070. # qhasm: x0 = v00 | v10
  2071. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  2072. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  2073. vpor %ymm9,%ymm15,%ymm9
  2074. # qhasm: x1 = v01 | v11
  2075. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  2076. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  2077. vpor %ymm13,%ymm14,%ymm13
  2078. # qhasm: v00 = x2 & mask4
  2079. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  2080. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  2081. vpand %ymm11,%ymm4,%ymm14
  2082. # qhasm: 16x v10 = x3 << 8
  2083. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  2084. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  2085. vpsllw $8,%ymm10,%ymm15
  2086. # qhasm: 16x v01 = x2 unsigned>> 8
  2087. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  2088. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  2089. vpsrlw $8,%ymm11,%ymm11
  2090. # qhasm: v11 = x3 & mask5
  2091. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  2092. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  2093. vpand %ymm10,%ymm5,%ymm10
  2094. # qhasm: x2 = v00 | v10
  2095. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  2096. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  2097. vpor %ymm14,%ymm15,%ymm14
  2098. # qhasm: x3 = v01 | v11
  2099. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  2100. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  2101. vpor %ymm11,%ymm10,%ymm10
  2102. # qhasm: v00 = x4 & mask4
  2103. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  2104. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  2105. vpand %ymm12,%ymm4,%ymm11
  2106. # qhasm: 16x v10 = x5 << 8
  2107. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  2108. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  2109. vpsllw $8,%ymm8,%ymm15
  2110. # qhasm: 16x v01 = x4 unsigned>> 8
  2111. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  2112. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  2113. vpsrlw $8,%ymm12,%ymm12
  2114. # qhasm: v11 = x5 & mask5
  2115. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  2116. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  2117. vpand %ymm8,%ymm5,%ymm8
  2118. # qhasm: x4 = v00 | v10
  2119. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  2120. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  2121. vpor %ymm11,%ymm15,%ymm11
  2122. # qhasm: x5 = v01 | v11
  2123. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  2124. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  2125. vpor %ymm12,%ymm8,%ymm8
  2126. # qhasm: v00 = x6 & mask4
  2127. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  2128. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  2129. vpand %ymm6,%ymm4,%ymm12
  2130. # qhasm: 16x v10 = x7 << 8
  2131. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  2132. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  2133. vpsllw $8,%ymm7,%ymm15
  2134. # qhasm: 16x v01 = x6 unsigned>> 8
  2135. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  2136. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  2137. vpsrlw $8,%ymm6,%ymm6
  2138. # qhasm: v11 = x7 & mask5
  2139. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  2140. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  2141. vpand %ymm7,%ymm5,%ymm7
  2142. # qhasm: x6 = v00 | v10
  2143. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  2144. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  2145. vpor %ymm12,%ymm15,%ymm12
  2146. # qhasm: x7 = v01 | v11
  2147. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  2148. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  2149. vpor %ymm6,%ymm7,%ymm6
  2150. # qhasm: mem256[ input_0 + 160 ] = x0
  2151. # asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1)
  2152. # asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi)
  2153. vmovupd %ymm9,160(%rdi)
  2154. # qhasm: mem256[ input_0 + 416 ] = x1
  2155. # asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1)
  2156. # asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi)
  2157. vmovupd %ymm13,416(%rdi)
  2158. # qhasm: mem256[ input_0 + 672 ] = x2
  2159. # asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1)
  2160. # asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi)
  2161. vmovupd %ymm14,672(%rdi)
  2162. # qhasm: mem256[ input_0 + 928 ] = x3
  2163. # asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1)
  2164. # asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi)
  2165. vmovupd %ymm10,928(%rdi)
  2166. # qhasm: mem256[ input_0 + 1184 ] = x4
  2167. # asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1)
  2168. # asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi)
  2169. vmovupd %ymm11,1184(%rdi)
  2170. # qhasm: mem256[ input_0 + 1440 ] = x5
  2171. # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1)
  2172. # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi)
  2173. vmovupd %ymm8,1440(%rdi)
  2174. # qhasm: mem256[ input_0 + 1696 ] = x6
  2175. # asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1)
  2176. # asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi)
  2177. vmovupd %ymm12,1696(%rdi)
  2178. # qhasm: mem256[ input_0 + 1952 ] = x7
  2179. # asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1)
  2180. # asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi)
  2181. vmovupd %ymm6,1952(%rdi)
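# The six mask registers (mask0..mask5 in the qhasm comments) pair up with
# the three shift widths: mask0/mask1 with the 32-bit shifts, mask2/mask3
# with the 16-bit shifts, mask4/mask5 with the 8-bit shifts.  For the swaps
# above to recombine the two halves correctly, each mask presumably holds
# the following per-lane pattern (illustrative C with placeholder names,
# not the symbols referenced by the generated code):
#
#     __m256i mask0 = _mm256_set1_epi64x(0x00000000FFFFFFFF); /* low  32 of each 64 */
#     __m256i mask1 = _mm256_set1_epi64x(0xFFFFFFFF00000000); /* high 32 of each 64 */
#     __m256i mask2 = _mm256_set1_epi64x(0x0000FFFF0000FFFF); /* low  16 of each 32 */
#     __m256i mask3 = _mm256_set1_epi64x(0xFFFF0000FFFF0000); /* high 16 of each 32 */
#     __m256i mask4 = _mm256_set1_epi64x(0x00FF00FF00FF00FF); /* low   8 of each 16 */
#     __m256i mask5 = _mm256_set1_epi64x(0xFF00FF00FF00FF00); /* high  8 of each 16 */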
  2182. # qhasm: x0 = mem256[ input_0 + 192 ]
  2183. # asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7
  2184. # asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6
  2185. vmovupd 192(%rdi),%ymm6
  2186. # qhasm: x1 = mem256[ input_0 + 448 ]
  2187. # asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8
  2188. # asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7
  2189. vmovupd 448(%rdi),%ymm7
  2190. # qhasm: x2 = mem256[ input_0 + 704 ]
  2191. # asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9
  2192. # asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8
  2193. vmovupd 704(%rdi),%ymm8
  2194. # qhasm: x3 = mem256[ input_0 + 960 ]
  2195. # asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10
  2196. # asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9
  2197. vmovupd 960(%rdi),%ymm9
  2198. # qhasm: x4 = mem256[ input_0 + 1216 ]
  2199. # asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11
  2200. # asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10
  2201. vmovupd 1216(%rdi),%ymm10
  2202. # qhasm: x5 = mem256[ input_0 + 1472 ]
  2203. # asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12
  2204. # asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11
  2205. vmovupd 1472(%rdi),%ymm11
  2206. # qhasm: x6 = mem256[ input_0 + 1728 ]
  2207. # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13
  2208. # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12
  2209. vmovupd 1728(%rdi),%ymm12
  2210. # qhasm: x7 = mem256[ input_0 + 1984 ]
  2211. # asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14
  2212. # asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13
  2213. vmovupd 1984(%rdi),%ymm13
  2214. # qhasm: v00 = x0 & mask0
  2215. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  2216. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  2217. vpand %ymm6,%ymm0,%ymm14
  2218. # qhasm: 4x v10 = x4 << 32
  2219. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  2220. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  2221. vpsllq $32,%ymm10,%ymm15
  2222. # qhasm: 4x v01 = x0 unsigned>> 32
  2223. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  2224. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  2225. vpsrlq $32,%ymm6,%ymm6
  2226. # qhasm: v11 = x4 & mask1
  2227. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  2228. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  2229. vpand %ymm10,%ymm1,%ymm10
  2230. # qhasm: x0 = v00 | v10
  2231. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  2232. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  2233. vpor %ymm14,%ymm15,%ymm14
  2234. # qhasm: x4 = v01 | v11
  2235. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  2236. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  2237. vpor %ymm6,%ymm10,%ymm6
  2238. # qhasm: v00 = x1 & mask0
  2239. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  2240. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  2241. vpand %ymm7,%ymm0,%ymm10
  2242. # qhasm: 4x v10 = x5 << 32
  2243. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  2244. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  2245. vpsllq $32,%ymm11,%ymm15
  2246. # qhasm: 4x v01 = x1 unsigned>> 32
  2247. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  2248. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  2249. vpsrlq $32,%ymm7,%ymm7
  2250. # qhasm: v11 = x5 & mask1
  2251. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  2252. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  2253. vpand %ymm11,%ymm1,%ymm11
  2254. # qhasm: x1 = v00 | v10
  2255. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  2256. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  2257. vpor %ymm10,%ymm15,%ymm10
  2258. # qhasm: x5 = v01 | v11
  2259. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  2260. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  2261. vpor %ymm7,%ymm11,%ymm7
  2262. # qhasm: v00 = x2 & mask0
  2263. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  2264. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  2265. vpand %ymm8,%ymm0,%ymm11
  2266. # qhasm: 4x v10 = x6 << 32
  2267. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  2268. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  2269. vpsllq $32,%ymm12,%ymm15
  2270. # qhasm: 4x v01 = x2 unsigned>> 32
  2271. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  2272. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  2273. vpsrlq $32,%ymm8,%ymm8
  2274. # qhasm: v11 = x6 & mask1
  2275. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  2276. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  2277. vpand %ymm12,%ymm1,%ymm12
  2278. # qhasm: x2 = v00 | v10
  2279. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  2280. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  2281. vpor %ymm11,%ymm15,%ymm11
  2282. # qhasm: x6 = v01 | v11
  2283. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  2284. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  2285. vpor %ymm8,%ymm12,%ymm8
  2286. # qhasm: v00 = x3 & mask0
  2287. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  2288. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  2289. vpand %ymm9,%ymm0,%ymm12
  2290. # qhasm: 4x v10 = x7 << 32
  2291. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
  2292. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
  2293. vpsllq $32,%ymm13,%ymm15
  2294. # qhasm: 4x v01 = x3 unsigned>> 32
  2295. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  2296. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  2297. vpsrlq $32,%ymm9,%ymm9
  2298. # qhasm: v11 = x7 & mask1
  2299. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  2300. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  2301. vpand %ymm13,%ymm1,%ymm13
  2302. # qhasm: x3 = v00 | v10
  2303. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  2304. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  2305. vpor %ymm12,%ymm15,%ymm12
  2306. # qhasm: x7 = v01 | v11
  2307. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  2308. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  2309. vpor %ymm9,%ymm13,%ymm9
  2310. # qhasm: v00 = x0 & mask2
  2311. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  2312. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  2313. vpand %ymm14,%ymm2,%ymm13
  2314. # qhasm: 8x v10 = x2 << 16
  2315. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
  2316. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
  2317. vpslld $16,%ymm11,%ymm15
  2318. # qhasm: 8x v01 = x0 unsigned>> 16
  2319. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
  2320. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
  2321. vpsrld $16,%ymm14,%ymm14
  2322. # qhasm: v11 = x2 & mask3
  2323. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  2324. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  2325. vpand %ymm11,%ymm3,%ymm11
  2326. # qhasm: x0 = v00 | v10
  2327. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  2328. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  2329. vpor %ymm13,%ymm15,%ymm13
  2330. # qhasm: x2 = v01 | v11
  2331. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  2332. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  2333. vpor %ymm14,%ymm11,%ymm11
  2334. # qhasm: v00 = x1 & mask2
  2335. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  2336. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  2337. vpand %ymm10,%ymm2,%ymm14
  2338. # qhasm: 8x v10 = x3 << 16
  2339. # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
  2340. # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
  2341. vpslld $16,%ymm12,%ymm15
  2342. # qhasm: 8x v01 = x1 unsigned>> 16
  2343. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  2344. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  2345. vpsrld $16,%ymm10,%ymm10
  2346. # qhasm: v11 = x3 & mask3
  2347. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  2348. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  2349. vpand %ymm12,%ymm3,%ymm12
  2350. # qhasm: x1 = v00 | v10
  2351. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  2352. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  2353. vpor %ymm14,%ymm15,%ymm14
  2354. # qhasm: x3 = v01 | v11
  2355. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  2356. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  2357. vpor %ymm10,%ymm12,%ymm10
  2358. # qhasm: v00 = x4 & mask2
  2359. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  2360. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  2361. vpand %ymm6,%ymm2,%ymm12
  2362. # qhasm: 8x v10 = x6 << 16
  2363. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
  2364. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
  2365. vpslld $16,%ymm8,%ymm15
  2366. # qhasm: 8x v01 = x4 unsigned>> 16
  2367. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  2368. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  2369. vpsrld $16,%ymm6,%ymm6
  2370. # qhasm: v11 = x6 & mask3
  2371. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  2372. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  2373. vpand %ymm8,%ymm3,%ymm8
  2374. # qhasm: x4 = v00 | v10
  2375. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  2376. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  2377. vpor %ymm12,%ymm15,%ymm12
  2378. # qhasm: x6 = v01 | v11
  2379. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  2380. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  2381. vpor %ymm6,%ymm8,%ymm6
  2382. # qhasm: v00 = x5 & mask2
  2383. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  2384. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  2385. vpand %ymm7,%ymm2,%ymm8
  2386. # qhasm: 8x v10 = x7 << 16
  2387. # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
  2388. # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
  2389. vpslld $16,%ymm9,%ymm15
  2390. # qhasm: 8x v01 = x5 unsigned>> 16
  2391. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  2392. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  2393. vpsrld $16,%ymm7,%ymm7
  2394. # qhasm: v11 = x7 & mask3
  2395. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  2396. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  2397. vpand %ymm9,%ymm3,%ymm9
  2398. # qhasm: x5 = v00 | v10
  2399. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  2400. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  2401. vpor %ymm8,%ymm15,%ymm8
  2402. # qhasm: x7 = v01 | v11
  2403. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  2404. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  2405. vpor %ymm7,%ymm9,%ymm7
  2406. # qhasm: v00 = x0 & mask4
  2407. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  2408. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  2409. vpand %ymm13,%ymm4,%ymm9
  2410. # qhasm: 16x v10 = x1 << 8
  2411. # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
  2412. # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
  2413. vpsllw $8,%ymm14,%ymm15
  2414. # qhasm: 16x v01 = x0 unsigned>> 8
  2415. # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
  2416. # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
  2417. vpsrlw $8,%ymm13,%ymm13
  2418. # qhasm: v11 = x1 & mask5
  2419. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  2420. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  2421. vpand %ymm14,%ymm5,%ymm14
  2422. # qhasm: x0 = v00 | v10
  2423. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  2424. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  2425. vpor %ymm9,%ymm15,%ymm9
  2426. # qhasm: x1 = v01 | v11
  2427. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  2428. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  2429. vpor %ymm13,%ymm14,%ymm13
  2430. # qhasm: v00 = x2 & mask4
  2431. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  2432. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  2433. vpand %ymm11,%ymm4,%ymm14
  2434. # qhasm: 16x v10 = x3 << 8
  2435. # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
  2436. # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
  2437. vpsllw $8,%ymm10,%ymm15
  2438. # qhasm: 16x v01 = x2 unsigned>> 8
  2439. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  2440. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  2441. vpsrlw $8,%ymm11,%ymm11
  2442. # qhasm: v11 = x3 & mask5
  2443. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  2444. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  2445. vpand %ymm10,%ymm5,%ymm10
  2446. # qhasm: x2 = v00 | v10
  2447. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  2448. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  2449. vpor %ymm14,%ymm15,%ymm14
  2450. # qhasm: x3 = v01 | v11
  2451. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  2452. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  2453. vpor %ymm11,%ymm10,%ymm10
  2454. # qhasm: v00 = x4 & mask4
  2455. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  2456. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  2457. vpand %ymm12,%ymm4,%ymm11
  2458. # qhasm: 16x v10 = x5 << 8
  2459. # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
  2460. # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
  2461. vpsllw $8,%ymm8,%ymm15
  2462. # qhasm: 16x v01 = x4 unsigned>> 8
  2463. # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
  2464. # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
  2465. vpsrlw $8,%ymm12,%ymm12
  2466. # qhasm: v11 = x5 & mask5
  2467. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  2468. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  2469. vpand %ymm8,%ymm5,%ymm8
  2470. # qhasm: x4 = v00 | v10
  2471. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  2472. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  2473. vpor %ymm11,%ymm15,%ymm11
  2474. # qhasm: x5 = v01 | v11
  2475. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  2476. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  2477. vpor %ymm12,%ymm8,%ymm8
  2478. # qhasm: v00 = x6 & mask4
  2479. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  2480. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  2481. vpand %ymm6,%ymm4,%ymm12
  2482. # qhasm: 16x v10 = x7 << 8
  2483. # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
  2484. # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
  2485. vpsllw $8,%ymm7,%ymm15
  2486. # qhasm: 16x v01 = x6 unsigned>> 8
  2487. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  2488. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  2489. vpsrlw $8,%ymm6,%ymm6
  2490. # qhasm: v11 = x7 & mask5
  2491. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  2492. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  2493. vpand %ymm7,%ymm5,%ymm7
  2494. # qhasm: x6 = v00 | v10
  2495. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  2496. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  2497. vpor %ymm12,%ymm15,%ymm12
  2498. # qhasm: x7 = v01 | v11
  2499. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  2500. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  2501. vpor %ymm6,%ymm7,%ymm6
  2502. # qhasm: mem256[ input_0 + 192 ] = x0
  2503. # asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1)
  2504. # asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi)
  2505. vmovupd %ymm9,192(%rdi)
  2506. # qhasm: mem256[ input_0 + 448 ] = x1
  2507. # asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1)
  2508. # asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi)
  2509. vmovupd %ymm13,448(%rdi)
  2510. # qhasm: mem256[ input_0 + 704 ] = x2
  2511. # asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1)
  2512. # asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi)
  2513. vmovupd %ymm14,704(%rdi)
  2514. # qhasm: mem256[ input_0 + 960 ] = x3
  2515. # asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1)
  2516. # asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi)
  2517. vmovupd %ymm10,960(%rdi)
  2518. # qhasm: mem256[ input_0 + 1216 ] = x4
  2519. # asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1)
  2520. # asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi)
  2521. vmovupd %ymm11,1216(%rdi)
  2522. # qhasm: mem256[ input_0 + 1472 ] = x5
  2523. # asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1)
  2524. # asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi)
  2525. vmovupd %ymm8,1472(%rdi)
  2526. # qhasm: mem256[ input_0 + 1728 ] = x6
  2527. # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1)
  2528. # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi)
  2529. vmovupd %ymm12,1728(%rdi)
  2530. # qhasm: mem256[ input_0 + 1984 ] = x7
  2531. # asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1)
  2532. # asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi)
  2533. vmovupd %ymm6,1984(%rdi)
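# The block at offsets 192, 448, ..., 1984 is written back. The next block of
# this pass (offsets 224, 480, ..., 2016, stride 256) is loaded and run through
# the same interleaving rounds below.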
  2534. # qhasm: x0 = mem256[ input_0 + 224 ]
  2535. # asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7
  2536. # asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6
  2537. vmovupd 224(%rdi),%ymm6
  2538. # qhasm: x1 = mem256[ input_0 + 480 ]
  2539. # asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8
  2540. # asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7
  2541. vmovupd 480(%rdi),%ymm7
  2542. # qhasm: x2 = mem256[ input_0 + 736 ]
  2543. # asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9
  2544. # asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8
  2545. vmovupd 736(%rdi),%ymm8
  2546. # qhasm: x3 = mem256[ input_0 + 992 ]
  2547. # asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10
  2548. # asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9
  2549. vmovupd 992(%rdi),%ymm9
  2550. # qhasm: x4 = mem256[ input_0 + 1248 ]
  2551. # asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11
  2552. # asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10
  2553. vmovupd 1248(%rdi),%ymm10
  2554. # qhasm: x5 = mem256[ input_0 + 1504 ]
  2555. # asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12
  2556. # asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11
  2557. vmovupd 1504(%rdi),%ymm11
  2558. # qhasm: x6 = mem256[ input_0 + 1760 ]
  2559. # asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13
  2560. # asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12
  2561. vmovupd 1760(%rdi),%ymm12
  2562. # qhasm: x7 = mem256[ input_0 + 2016 ]
  2563. # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14
  2564. # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13
  2565. vmovupd 2016(%rdi),%ymm13
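# 32-bit step: exchange 32-bit halves between x_i and x_{i+4} within each
# 64-bit lane, using mask0/mask1 and 64-bit shifts by 32.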
  2566. # qhasm: v00 = x0 & mask0
  2567. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  2568. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  2569. vpand %ymm6,%ymm0,%ymm14
  2570. # qhasm: 4x v10 = x4 << 32
  2571. # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
  2572. # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
  2573. vpsllq $32,%ymm10,%ymm15
  2574. # qhasm: 4x v01 = x0 unsigned>> 32
  2575. # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
  2576. # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
  2577. vpsrlq $32,%ymm6,%ymm6
  2578. # qhasm: v11 = x4 & mask1
  2579. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  2580. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  2581. vpand %ymm10,%ymm1,%ymm10
  2582. # qhasm: x0 = v00 | v10
  2583. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  2584. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  2585. vpor %ymm14,%ymm15,%ymm14
  2586. # qhasm: x4 = v01 | v11
  2587. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  2588. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  2589. vpor %ymm6,%ymm10,%ymm6
  2590. # qhasm: v00 = x1 & mask0
  2591. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  2592. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  2593. vpand %ymm7,%ymm0,%ymm10
  2594. # qhasm: 4x v10 = x5 << 32
  2595. # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
  2596. # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
  2597. vpsllq $32,%ymm11,%ymm15
  2598. # qhasm: 4x v01 = x1 unsigned>> 32
  2599. # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
  2600. # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
  2601. vpsrlq $32,%ymm7,%ymm7
  2602. # qhasm: v11 = x5 & mask1
  2603. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  2604. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  2605. vpand %ymm11,%ymm1,%ymm11
  2606. # qhasm: x1 = v00 | v10
  2607. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  2608. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  2609. vpor %ymm10,%ymm15,%ymm10
  2610. # qhasm: x5 = v01 | v11
  2611. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  2612. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  2613. vpor %ymm7,%ymm11,%ymm7
  2614. # qhasm: v00 = x2 & mask0
  2615. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  2616. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  2617. vpand %ymm8,%ymm0,%ymm11
  2618. # qhasm: 4x v10 = x6 << 32
  2619. # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
  2620. # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
  2621. vpsllq $32,%ymm12,%ymm15
  2622. # qhasm: 4x v01 = x2 unsigned>> 32
  2623. # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
  2624. # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
  2625. vpsrlq $32,%ymm8,%ymm8
  2626. # qhasm: v11 = x6 & mask1
  2627. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  2628. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  2629. vpand %ymm12,%ymm1,%ymm12
  2630. # qhasm: x2 = v00 | v10
  2631. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  2632. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  2633. vpor %ymm11,%ymm15,%ymm11
  2634. # qhasm: x6 = v01 | v11
  2635. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  2636. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  2637. vpor %ymm8,%ymm12,%ymm8
  2638. # qhasm: v00 = x3 & mask0
  2639. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1
  2640. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0
  2641. vpand %ymm9,%ymm0,%ymm0
  2642. # qhasm: 4x v10 = x7 << 32
  2643. # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13
  2644. # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12
  2645. vpsllq $32,%ymm13,%ymm12
  2646. # qhasm: 4x v01 = x3 unsigned>> 32
  2647. # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
  2648. # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
  2649. vpsrlq $32,%ymm9,%ymm9
  2650. # qhasm: v11 = x7 & mask1
  2651. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
  2652. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
  2653. vpand %ymm13,%ymm1,%ymm1
  2654. # qhasm: x3 = v00 | v10
  2655. # asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1
  2656. # asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0
  2657. vpor %ymm0,%ymm12,%ymm0
  2658. # qhasm: x7 = v01 | v11
  2659. # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
  2660. # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
  2661. vpor %ymm9,%ymm1,%ymm1
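# 16-bit step: exchange 16-bit halves between x_i and x_{i+2} within each
# 32-bit word, using mask2/mask3 and 32-bit shifts by 16.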
  2662. # qhasm: v00 = x0 & mask2
  2663. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
  2664. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
  2665. vpand %ymm14,%ymm2,%ymm9
  2666. # qhasm: 8x v10 = x2 << 16
  2667. # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13
  2668. # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12
  2669. vpslld $16,%ymm11,%ymm12
  2670. # qhasm: 8x v01 = x0 unsigned>> 16
  2671. # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14
  2672. # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13
  2673. vpsrld $16,%ymm14,%ymm13
  2674. # qhasm: v11 = x2 & mask3
  2675. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  2676. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  2677. vpand %ymm11,%ymm3,%ymm11
  2678. # qhasm: x0 = v00 | v10
  2679. # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
  2680. # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
  2681. vpor %ymm9,%ymm12,%ymm9
  2682. # qhasm: x2 = v01 | v11
  2683. # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
  2684. # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
  2685. vpor %ymm13,%ymm11,%ymm11
  2686. # qhasm: v00 = x1 & mask2
  2687. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
  2688. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
  2689. vpand %ymm10,%ymm2,%ymm12
  2690. # qhasm: 8x v10 = x3 << 16
  2691. # asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14
  2692. # asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13
  2693. vpslld $16,%ymm0,%ymm13
  2694. # qhasm: 8x v01 = x1 unsigned>> 16
  2695. # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
  2696. # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
  2697. vpsrld $16,%ymm10,%ymm10
  2698. # qhasm: v11 = x3 & mask3
  2699. # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
  2700. # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
  2701. vpand %ymm0,%ymm3,%ymm0
  2702. # qhasm: x1 = v00 | v10
  2703. # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
  2704. # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
  2705. vpor %ymm12,%ymm13,%ymm12
  2706. # qhasm: x3 = v01 | v11
  2707. # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
  2708. # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
  2709. vpor %ymm10,%ymm0,%ymm0
  2710. # qhasm: v00 = x4 & mask2
  2711. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
  2712. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
  2713. vpand %ymm6,%ymm2,%ymm10
  2714. # qhasm: 8x v10 = x6 << 16
  2715. # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14
  2716. # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13
  2717. vpslld $16,%ymm8,%ymm13
  2718. # qhasm: 8x v01 = x4 unsigned>> 16
  2719. # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
  2720. # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
  2721. vpsrld $16,%ymm6,%ymm6
  2722. # qhasm: v11 = x6 & mask3
  2723. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  2724. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  2725. vpand %ymm8,%ymm3,%ymm8
  2726. # qhasm: x4 = v00 | v10
  2727. # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
  2728. # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
  2729. vpor %ymm10,%ymm13,%ymm10
  2730. # qhasm: x6 = v01 | v11
  2731. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  2732. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  2733. vpor %ymm6,%ymm8,%ymm6
  2734. # qhasm: v00 = x5 & mask2
  2735. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3
  2736. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2
  2737. vpand %ymm7,%ymm2,%ymm2
  2738. # qhasm: 8x v10 = x7 << 16
  2739. # asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9
  2740. # asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8
  2741. vpslld $16,%ymm1,%ymm8
  2742. # qhasm: 8x v01 = x5 unsigned>> 16
  2743. # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
  2744. # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
  2745. vpsrld $16,%ymm7,%ymm7
  2746. # qhasm: v11 = x7 & mask3
  2747. # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
  2748. # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
  2749. vpand %ymm1,%ymm3,%ymm1
  2750. # qhasm: x5 = v00 | v10
  2751. # asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3
  2752. # asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2
  2753. vpor %ymm2,%ymm8,%ymm2
  2754. # qhasm: x7 = v01 | v11
  2755. # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
  2756. # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
  2757. vpor %ymm7,%ymm1,%ymm1
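# 8-bit step: exchange bytes between adjacent registers x_i and x_{i+1}
# within each 16-bit word, using mask4/mask5 and 16-bit shifts by 8.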
  2758. # qhasm: v00 = x0 & mask4
  2759. # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
  2760. # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
  2761. vpand %ymm9,%ymm4,%ymm3
  2762. # qhasm: 16x v10 = x1 << 8
  2763. # asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8
  2764. # asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7
  2765. vpsllw $8,%ymm12,%ymm7
  2766. # qhasm: 16x v01 = x0 unsigned>> 8
  2767. # asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9
  2768. # asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8
  2769. vpsrlw $8,%ymm9,%ymm8
  2770. # qhasm: v11 = x1 & mask5
  2771. # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
  2772. # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
  2773. vpand %ymm12,%ymm5,%ymm9
  2774. # qhasm: x0 = v00 | v10
  2775. # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
  2776. # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
  2777. vpor %ymm3,%ymm7,%ymm3
  2778. # qhasm: x1 = v01 | v11
  2779. # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
  2780. # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
  2781. vpor %ymm8,%ymm9,%ymm7
  2782. # qhasm: v00 = x2 & mask4
  2783. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
  2784. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
  2785. vpand %ymm11,%ymm4,%ymm8
  2786. # qhasm: 16x v10 = x3 << 8
  2787. # asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10
  2788. # asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9
  2789. vpsllw $8,%ymm0,%ymm9
  2790. # qhasm: 16x v01 = x2 unsigned>> 8
  2791. # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
  2792. # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
  2793. vpsrlw $8,%ymm11,%ymm11
  2794. # qhasm: v11 = x3 & mask5
  2795. # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
  2796. # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
  2797. vpand %ymm0,%ymm5,%ymm0
  2798. # qhasm: x2 = v00 | v10
  2799. # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
  2800. # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
  2801. vpor %ymm8,%ymm9,%ymm8
  2802. # qhasm: x3 = v01 | v11
  2803. # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
  2804. # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
  2805. vpor %ymm11,%ymm0,%ymm0
  2806. # qhasm: v00 = x4 & mask4
  2807. # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
  2808. # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
  2809. vpand %ymm10,%ymm4,%ymm9
  2810. # qhasm: 16x v10 = x5 << 8
  2811. # asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12
  2812. # asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11
  2813. vpsllw $8,%ymm2,%ymm11
  2814. # qhasm: 16x v01 = x4 unsigned>> 8
  2815. # asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11
  2816. # asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10
  2817. vpsrlw $8,%ymm10,%ymm10
  2818. # qhasm: v11 = x5 & mask5
  2819. # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
  2820. # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
  2821. vpand %ymm2,%ymm5,%ymm2
  2822. # qhasm: x4 = v00 | v10
  2823. # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
  2824. # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
  2825. vpor %ymm9,%ymm11,%ymm9
  2826. # qhasm: x5 = v01 | v11
  2827. # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
  2828. # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
  2829. vpor %ymm10,%ymm2,%ymm2
  2830. # qhasm: v00 = x6 & mask4
  2831. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5
  2832. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4
  2833. vpand %ymm6,%ymm4,%ymm4
  2834. # qhasm: 16x v10 = x7 << 8
  2835. # asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11
  2836. # asm 2: vpsllw $8,<x7=%ymm1,>v10=%ymm10
  2837. vpsllw $8,%ymm1,%ymm10
  2838. # qhasm: 16x v01 = x6 unsigned>> 8
  2839. # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
  2840. # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
  2841. vpsrlw $8,%ymm6,%ymm6
  2842. # qhasm: v11 = x7 & mask5
  2843. # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
  2844. # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
  2845. vpand %ymm1,%ymm5,%ymm1
  2846. # qhasm: x6 = v00 | v10
  2847. # asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5
  2848. # asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4
  2849. vpor %ymm4,%ymm10,%ymm4
  2850. # qhasm: x7 = v01 | v11
  2851. # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
  2852. # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
  2853. vpor %ymm6,%ymm1,%ymm1
  2854. # qhasm: mem256[ input_0 + 224 ] = x0
  2855. # asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1)
  2856. # asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi)
  2857. vmovupd %ymm3,224(%rdi)
  2858. # qhasm: mem256[ input_0 + 480 ] = x1
  2859. # asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1)
  2860. # asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi)
  2861. vmovupd %ymm7,480(%rdi)
  2862. # qhasm: mem256[ input_0 + 736 ] = x2
  2863. # asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1)
  2864. # asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi)
  2865. vmovupd %ymm8,736(%rdi)
  2866. # qhasm: mem256[ input_0 + 992 ] = x3
  2867. # asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1)
  2868. # asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi)
  2869. vmovupd %ymm0,992(%rdi)
  2870. # qhasm: mem256[ input_0 + 1248 ] = x4
  2871. # asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1)
  2872. # asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi)
  2873. vmovupd %ymm9,1248(%rdi)
  2874. # qhasm: mem256[ input_0 + 1504 ] = x5
  2875. # asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1)
  2876. # asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi)
  2877. vmovupd %ymm2,1504(%rdi)
  2878. # qhasm: mem256[ input_0 + 1760 ] = x6
  2879. # asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1)
  2880. # asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi)
  2881. vmovupd %ymm4,1760(%rdi)
  2882. # qhasm: mem256[ input_0 + 2016 ] = x7
  2883. # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
  2884. # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
  2885. vmovupd %ymm1,2016(%rdi)
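# The 32-/16-/8-bit rounds are done and stored. mask0..mask5 are reloaded with
# the finer-grained constants (MASK2, MASK1, MASK0) for the 4-, 2- and 1-bit
# rounds, which operate on groups of eight consecutive 256-bit rows.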
  2886. # qhasm: mask0 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK2_0 ]
  2887. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK2_0,>mask0=reg256#1
  2888. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK2_0,>mask0=%ymm0
  2889. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK2_0(%rip),%ymm0
  2890. # qhasm: mask1 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK2_1 ]
  2891. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK2_1,>mask1=reg256#2
  2892. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK2_1,>mask1=%ymm1
  2893. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK2_1(%rip),%ymm1
  2894. # qhasm: mask2 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK1_0 ]
  2895. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK1_0,>mask2=reg256#3
  2896. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK1_0,>mask2=%ymm2
  2897. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK1_0(%rip),%ymm2
  2898. # qhasm: mask3 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK1_1 ]
  2899. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK1_1,>mask3=reg256#4
  2900. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK1_1,>mask3=%ymm3
  2901. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK1_1(%rip),%ymm3
  2902. # qhasm: mask4 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK0_0 ]
  2903. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK0_0,>mask4=reg256#5
  2904. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK0_0,>mask4=%ymm4
  2905. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK0_0(%rip),%ymm4
  2906. # qhasm: mask5 aligned= mem256[ PQCLEAN_MCELIECE460896F_AVX_MASK0_1 ]
  2907. # asm 1: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK0_1,>mask5=reg256#6
  2908. # asm 2: vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK0_1,>mask5=%ymm5
  2909. vmovapd PQCLEAN_MCELIECE460896F_AVX_MASK0_1(%rip),%ymm5
  2910. # qhasm: x0 = mem256[ input_0 + 0 ]
  2911. # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7
  2912. # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6
  2913. vmovupd 0(%rdi),%ymm6
  2914. # qhasm: x1 = mem256[ input_0 + 32 ]
  2915. # asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8
  2916. # asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7
  2917. vmovupd 32(%rdi),%ymm7
  2918. # qhasm: x2 = mem256[ input_0 + 64 ]
  2919. # asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9
  2920. # asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8
  2921. vmovupd 64(%rdi),%ymm8
  2922. # qhasm: x3 = mem256[ input_0 + 96 ]
  2923. # asm 1: vmovupd 96(<input_0=int64#1),>x3=reg256#10
  2924. # asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9
  2925. vmovupd 96(%rdi),%ymm9
  2926. # qhasm: x4 = mem256[ input_0 + 128 ]
  2927. # asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11
  2928. # asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10
  2929. vmovupd 128(%rdi),%ymm10
  2930. # qhasm: x5 = mem256[ input_0 + 160 ]
  2931. # asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12
  2932. # asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11
  2933. vmovupd 160(%rdi),%ymm11
  2934. # qhasm: x6 = mem256[ input_0 + 192 ]
  2935. # asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13
  2936. # asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12
  2937. vmovupd 192(%rdi),%ymm12
  2938. # qhasm: x7 = mem256[ input_0 + 224 ]
  2939. # asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14
  2940. # asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13
  2941. vmovupd 224(%rdi),%ymm13
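# 4-bit step: exchange 4-bit groups between x_i and x_{i+4}, using
# mask0/mask1 (MASK2_0/MASK2_1) and 64-bit shifts by 4.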
  2942. # qhasm: v00 = x0 & mask0
  2943. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  2944. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  2945. vpand %ymm6,%ymm0,%ymm14
  2946. # qhasm: v10 = x4 & mask0
  2947. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  2948. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  2949. vpand %ymm10,%ymm0,%ymm15
  2950. # qhasm: 4x v10 <<= 4
  2951. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  2952. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  2953. vpsllq $4,%ymm15,%ymm15
  2954. # qhasm: v01 = x0 & mask1
  2955. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  2956. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  2957. vpand %ymm6,%ymm1,%ymm6
  2958. # qhasm: v11 = x4 & mask1
  2959. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  2960. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  2961. vpand %ymm10,%ymm1,%ymm10
  2962. # qhasm: 4x v01 unsigned>>= 4
  2963. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  2964. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  2965. vpsrlq $4,%ymm6,%ymm6
  2966. # qhasm: x0 = v00 | v10
  2967. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  2968. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  2969. vpor %ymm14,%ymm15,%ymm14
  2970. # qhasm: x4 = v01 | v11
  2971. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  2972. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  2973. vpor %ymm6,%ymm10,%ymm6
  2974. # qhasm: v00 = x1 & mask0
  2975. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  2976. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  2977. vpand %ymm7,%ymm0,%ymm10
  2978. # qhasm: v10 = x5 & mask0
  2979. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  2980. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  2981. vpand %ymm11,%ymm0,%ymm15
  2982. # qhasm: 4x v10 <<= 4
  2983. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  2984. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  2985. vpsllq $4,%ymm15,%ymm15
  2986. # qhasm: v01 = x1 & mask1
  2987. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  2988. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  2989. vpand %ymm7,%ymm1,%ymm7
  2990. # qhasm: v11 = x5 & mask1
  2991. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  2992. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  2993. vpand %ymm11,%ymm1,%ymm11
  2994. # qhasm: 4x v01 unsigned>>= 4
  2995. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  2996. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  2997. vpsrlq $4,%ymm7,%ymm7
  2998. # qhasm: x1 = v00 | v10
  2999. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  3000. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  3001. vpor %ymm10,%ymm15,%ymm10
  3002. # qhasm: x5 = v01 | v11
  3003. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  3004. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  3005. vpor %ymm7,%ymm11,%ymm7
  3006. # qhasm: v00 = x2 & mask0
  3007. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  3008. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  3009. vpand %ymm8,%ymm0,%ymm11
  3010. # qhasm: v10 = x6 & mask0
  3011. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  3012. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  3013. vpand %ymm12,%ymm0,%ymm15
  3014. # qhasm: 4x v10 <<= 4
  3015. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3016. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3017. vpsllq $4,%ymm15,%ymm15
  3018. # qhasm: v01 = x2 & mask1
  3019. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  3020. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  3021. vpand %ymm8,%ymm1,%ymm8
  3022. # qhasm: v11 = x6 & mask1
  3023. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  3024. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  3025. vpand %ymm12,%ymm1,%ymm12
  3026. # qhasm: 4x v01 unsigned>>= 4
  3027. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  3028. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  3029. vpsrlq $4,%ymm8,%ymm8
  3030. # qhasm: x2 = v00 | v10
  3031. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  3032. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  3033. vpor %ymm11,%ymm15,%ymm11
  3034. # qhasm: x6 = v01 | v11
  3035. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  3036. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  3037. vpor %ymm8,%ymm12,%ymm8
  3038. # qhasm: v00 = x3 & mask0
  3039. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  3040. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  3041. vpand %ymm9,%ymm0,%ymm12
  3042. # qhasm: v10 = x7 & mask0
  3043. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  3044. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  3045. vpand %ymm13,%ymm0,%ymm15
  3046. # qhasm: 4x v10 <<= 4
  3047. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3048. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3049. vpsllq $4,%ymm15,%ymm15
  3050. # qhasm: v01 = x3 & mask1
  3051. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  3052. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  3053. vpand %ymm9,%ymm1,%ymm9
  3054. # qhasm: v11 = x7 & mask1
  3055. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  3056. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  3057. vpand %ymm13,%ymm1,%ymm13
  3058. # qhasm: 4x v01 unsigned>>= 4
  3059. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  3060. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  3061. vpsrlq $4,%ymm9,%ymm9
  3062. # qhasm: x3 = v00 | v10
  3063. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  3064. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  3065. vpor %ymm12,%ymm15,%ymm12
  3066. # qhasm: x7 = v01 | v11
  3067. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  3068. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  3069. vpor %ymm9,%ymm13,%ymm9
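# 2-bit step: exchange 2-bit groups between x_i and x_{i+2}, using
# mask2/mask3 (MASK1_0/MASK1_1) and 64-bit shifts by 2.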
  3070. # qhasm: v00 = x0 & mask2
  3071. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  3072. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  3073. vpand %ymm14,%ymm2,%ymm13
  3074. # qhasm: v10 = x2 & mask2
  3075. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  3076. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  3077. vpand %ymm11,%ymm2,%ymm15
  3078. # qhasm: 4x v10 <<= 2
  3079. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3080. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3081. vpsllq $2,%ymm15,%ymm15
  3082. # qhasm: v01 = x0 & mask3
  3083. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  3084. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  3085. vpand %ymm14,%ymm3,%ymm14
  3086. # qhasm: v11 = x2 & mask3
  3087. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  3088. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  3089. vpand %ymm11,%ymm3,%ymm11
  3090. # qhasm: 4x v01 unsigned>>= 2
  3091. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  3092. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  3093. vpsrlq $2,%ymm14,%ymm14
  3094. # qhasm: x0 = v00 | v10
  3095. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  3096. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  3097. vpor %ymm13,%ymm15,%ymm13
  3098. # qhasm: x2 = v01 | v11
  3099. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  3100. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  3101. vpor %ymm14,%ymm11,%ymm11
  3102. # qhasm: v00 = x1 & mask2
  3103. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  3104. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  3105. vpand %ymm10,%ymm2,%ymm14
  3106. # qhasm: v10 = x3 & mask2
  3107. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  3108. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  3109. vpand %ymm12,%ymm2,%ymm15
  3110. # qhasm: 4x v10 <<= 2
  3111. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3112. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3113. vpsllq $2,%ymm15,%ymm15
  3114. # qhasm: v01 = x1 & mask3
  3115. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  3116. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  3117. vpand %ymm10,%ymm3,%ymm10
  3118. # qhasm: v11 = x3 & mask3
  3119. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  3120. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  3121. vpand %ymm12,%ymm3,%ymm12
  3122. # qhasm: 4x v01 unsigned>>= 2
  3123. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  3124. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  3125. vpsrlq $2,%ymm10,%ymm10
  3126. # qhasm: x1 = v00 | v10
  3127. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  3128. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  3129. vpor %ymm14,%ymm15,%ymm14
  3130. # qhasm: x3 = v01 | v11
  3131. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  3132. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  3133. vpor %ymm10,%ymm12,%ymm10
  3134. # qhasm: v00 = x4 & mask2
  3135. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  3136. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  3137. vpand %ymm6,%ymm2,%ymm12
  3138. # qhasm: v10 = x6 & mask2
  3139. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  3140. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  3141. vpand %ymm8,%ymm2,%ymm15
  3142. # qhasm: 4x v10 <<= 2
  3143. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3144. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3145. vpsllq $2,%ymm15,%ymm15
  3146. # qhasm: v01 = x4 & mask3
  3147. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  3148. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  3149. vpand %ymm6,%ymm3,%ymm6
  3150. # qhasm: v11 = x6 & mask3
  3151. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  3152. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  3153. vpand %ymm8,%ymm3,%ymm8
  3154. # qhasm: 4x v01 unsigned>>= 2
  3155. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  3156. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  3157. vpsrlq $2,%ymm6,%ymm6
  3158. # qhasm: x4 = v00 | v10
  3159. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  3160. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  3161. vpor %ymm12,%ymm15,%ymm12
  3162. # qhasm: x6 = v01 | v11
  3163. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  3164. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  3165. vpor %ymm6,%ymm8,%ymm6
  3166. # qhasm: v00 = x5 & mask2
  3167. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  3168. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  3169. vpand %ymm7,%ymm2,%ymm8
  3170. # qhasm: v10 = x7 & mask2
  3171. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  3172. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  3173. vpand %ymm9,%ymm2,%ymm15
  3174. # qhasm: 4x v10 <<= 2
  3175. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3176. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3177. vpsllq $2,%ymm15,%ymm15
  3178. # qhasm: v01 = x5 & mask3
  3179. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  3180. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  3181. vpand %ymm7,%ymm3,%ymm7
  3182. # qhasm: v11 = x7 & mask3
  3183. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  3184. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  3185. vpand %ymm9,%ymm3,%ymm9
  3186. # qhasm: 4x v01 unsigned>>= 2
  3187. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  3188. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  3189. vpsrlq $2,%ymm7,%ymm7
  3190. # qhasm: x5 = v00 | v10
  3191. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  3192. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  3193. vpor %ymm8,%ymm15,%ymm8
  3194. # qhasm: x7 = v01 | v11
  3195. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  3196. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  3197. vpor %ymm7,%ymm9,%ymm7
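# 1-bit step: exchange single bits between adjacent registers x_i and x_{i+1},
# using mask4/mask5 (MASK0_0/MASK0_1) and 64-bit shifts by 1.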
  3198. # qhasm: v00 = x0 & mask4
  3199. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  3200. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  3201. vpand %ymm13,%ymm4,%ymm9
  3202. # qhasm: v10 = x1 & mask4
  3203. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  3204. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  3205. vpand %ymm14,%ymm4,%ymm15
  3206. # qhasm: 4x v10 <<= 1
  3207. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3208. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3209. vpsllq $1,%ymm15,%ymm15
  3210. # qhasm: v01 = x0 & mask5
  3211. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  3212. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  3213. vpand %ymm13,%ymm5,%ymm13
  3214. # qhasm: v11 = x1 & mask5
  3215. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  3216. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  3217. vpand %ymm14,%ymm5,%ymm14
  3218. # qhasm: 4x v01 unsigned>>= 1
  3219. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  3220. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  3221. vpsrlq $1,%ymm13,%ymm13
  3222. # qhasm: x0 = v00 | v10
  3223. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  3224. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  3225. vpor %ymm9,%ymm15,%ymm9
  3226. # qhasm: x1 = v01 | v11
  3227. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  3228. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  3229. vpor %ymm13,%ymm14,%ymm13
  3230. # qhasm: v00 = x2 & mask4
  3231. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  3232. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  3233. vpand %ymm11,%ymm4,%ymm14
  3234. # qhasm: v10 = x3 & mask4
  3235. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  3236. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  3237. vpand %ymm10,%ymm4,%ymm15
  3238. # qhasm: 4x v10 <<= 1
  3239. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3240. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3241. vpsllq $1,%ymm15,%ymm15
  3242. # qhasm: v01 = x2 & mask5
  3243. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  3244. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  3245. vpand %ymm11,%ymm5,%ymm11
  3246. # qhasm: v11 = x3 & mask5
  3247. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  3248. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  3249. vpand %ymm10,%ymm5,%ymm10
  3250. # qhasm: 4x v01 unsigned>>= 1
  3251. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  3252. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  3253. vpsrlq $1,%ymm11,%ymm11
  3254. # qhasm: x2 = v00 | v10
  3255. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  3256. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  3257. vpor %ymm14,%ymm15,%ymm14
  3258. # qhasm: x3 = v01 | v11
  3259. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  3260. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  3261. vpor %ymm11,%ymm10,%ymm10
  3262. # qhasm: v00 = x4 & mask4
  3263. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  3264. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  3265. vpand %ymm12,%ymm4,%ymm11
  3266. # qhasm: v10 = x5 & mask4
  3267. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  3268. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  3269. vpand %ymm8,%ymm4,%ymm15
  3270. # qhasm: 4x v10 <<= 1
  3271. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3272. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3273. vpsllq $1,%ymm15,%ymm15
  3274. # qhasm: v01 = x4 & mask5
  3275. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  3276. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  3277. vpand %ymm12,%ymm5,%ymm12
  3278. # qhasm: v11 = x5 & mask5
  3279. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  3280. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  3281. vpand %ymm8,%ymm5,%ymm8
  3282. # qhasm: 4x v01 unsigned>>= 1
  3283. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  3284. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  3285. vpsrlq $1,%ymm12,%ymm12
  3286. # qhasm: x4 = v00 | v10
  3287. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  3288. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  3289. vpor %ymm11,%ymm15,%ymm11
  3290. # qhasm: x5 = v01 | v11
  3291. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  3292. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  3293. vpor %ymm12,%ymm8,%ymm8
  3294. # qhasm: v00 = x6 & mask4
  3295. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  3296. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  3297. vpand %ymm6,%ymm4,%ymm12
  3298. # qhasm: v10 = x7 & mask4
  3299. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  3300. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  3301. vpand %ymm7,%ymm4,%ymm15
  3302. # qhasm: 4x v10 <<= 1
  3303. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3304. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3305. vpsllq $1,%ymm15,%ymm15
  3306. # qhasm: v01 = x6 & mask5
  3307. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  3308. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  3309. vpand %ymm6,%ymm5,%ymm6
  3310. # qhasm: v11 = x7 & mask5
  3311. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  3312. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  3313. vpand %ymm7,%ymm5,%ymm7
  3314. # qhasm: 4x v01 unsigned>>= 1
  3315. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  3316. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  3317. vpsrlq $1,%ymm6,%ymm6
  3318. # qhasm: x6 = v00 | v10
  3319. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  3320. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  3321. vpor %ymm12,%ymm15,%ymm12
  3322. # qhasm: x7 = v01 | v11
  3323. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  3324. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  3325. vpor %ymm6,%ymm7,%ymm6
  3326. # qhasm: mem256[ input_0 + 0 ] = x0
  3327. # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1)
  3328. # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi)
  3329. vmovupd %ymm9,0(%rdi)
  3330. # qhasm: mem256[ input_0 + 32 ] = x1
  3331. # asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1)
  3332. # asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi)
  3333. vmovupd %ymm13,32(%rdi)
  3334. # qhasm: mem256[ input_0 + 64 ] = x2
  3335. # asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1)
  3336. # asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi)
  3337. vmovupd %ymm14,64(%rdi)
  3338. # qhasm: mem256[ input_0 + 96 ] = x3
  3339. # asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1)
  3340. # asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi)
  3341. vmovupd %ymm10,96(%rdi)
  3342. # qhasm: mem256[ input_0 + 128 ] = x4
  3343. # asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1)
  3344. # asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi)
  3345. vmovupd %ymm11,128(%rdi)
  3346. # qhasm: mem256[ input_0 + 160 ] = x5
  3347. # asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1)
  3348. # asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi)
  3349. vmovupd %ymm8,160(%rdi)
  3350. # qhasm: mem256[ input_0 + 192 ] = x6
  3351. # asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1)
  3352. # asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi)
  3353. vmovupd %ymm12,192(%rdi)
  3354. # qhasm: mem256[ input_0 + 224 ] = x7
  3355. # asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1)
  3356. # asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi)
  3357. vmovupd %ymm6,224(%rdi)
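# The rows at offsets 0..224 have been through the 4-/2-/1-bit rounds and are
# stored; the same rounds are now applied to the next eight rows
# (offsets 256..480).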
  3358. # qhasm: x0 = mem256[ input_0 + 256 ]
  3359. # asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7
  3360. # asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6
  3361. vmovupd 256(%rdi),%ymm6
  3362. # qhasm: x1 = mem256[ input_0 + 288 ]
  3363. # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
  3364. # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
  3365. vmovupd 288(%rdi),%ymm7
  3366. # qhasm: x2 = mem256[ input_0 + 320 ]
  3367. # asm 1: vmovupd 320(<input_0=int64#1),>x2=reg256#9
  3368. # asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8
  3369. vmovupd 320(%rdi),%ymm8
  3370. # qhasm: x3 = mem256[ input_0 + 352 ]
  3371. # asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10
  3372. # asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9
  3373. vmovupd 352(%rdi),%ymm9
  3374. # qhasm: x4 = mem256[ input_0 + 384 ]
  3375. # asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11
  3376. # asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10
  3377. vmovupd 384(%rdi),%ymm10
  3378. # qhasm: x5 = mem256[ input_0 + 416 ]
  3379. # asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12
  3380. # asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11
  3381. vmovupd 416(%rdi),%ymm11
  3382. # qhasm: x6 = mem256[ input_0 + 448 ]
  3383. # asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13
  3384. # asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12
  3385. vmovupd 448(%rdi),%ymm12
  3386. # qhasm: x7 = mem256[ input_0 + 480 ]
  3387. # asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14
  3388. # asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13
  3389. vmovupd 480(%rdi),%ymm13
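# 4-bit step for this group (mask0/mask1, 64-bit shifts by 4), as above.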
  3390. # qhasm: v00 = x0 & mask0
  3391. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  3392. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  3393. vpand %ymm6,%ymm0,%ymm14
  3394. # qhasm: v10 = x4 & mask0
  3395. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  3396. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  3397. vpand %ymm10,%ymm0,%ymm15
  3398. # qhasm: 4x v10 <<= 4
  3399. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3400. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3401. vpsllq $4,%ymm15,%ymm15
  3402. # qhasm: v01 = x0 & mask1
  3403. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  3404. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  3405. vpand %ymm6,%ymm1,%ymm6
  3406. # qhasm: v11 = x4 & mask1
  3407. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  3408. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  3409. vpand %ymm10,%ymm1,%ymm10
  3410. # qhasm: 4x v01 unsigned>>= 4
  3411. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  3412. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  3413. vpsrlq $4,%ymm6,%ymm6
  3414. # qhasm: x0 = v00 | v10
  3415. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  3416. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  3417. vpor %ymm14,%ymm15,%ymm14
  3418. # qhasm: x4 = v01 | v11
  3419. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  3420. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  3421. vpor %ymm6,%ymm10,%ymm6
  3422. # qhasm: v00 = x1 & mask0
  3423. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  3424. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  3425. vpand %ymm7,%ymm0,%ymm10
  3426. # qhasm: v10 = x5 & mask0
  3427. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  3428. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  3429. vpand %ymm11,%ymm0,%ymm15
  3430. # qhasm: 4x v10 <<= 4
  3431. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3432. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3433. vpsllq $4,%ymm15,%ymm15
  3434. # qhasm: v01 = x1 & mask1
  3435. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  3436. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  3437. vpand %ymm7,%ymm1,%ymm7
  3438. # qhasm: v11 = x5 & mask1
  3439. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  3440. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  3441. vpand %ymm11,%ymm1,%ymm11
  3442. # qhasm: 4x v01 unsigned>>= 4
  3443. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  3444. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  3445. vpsrlq $4,%ymm7,%ymm7
  3446. # qhasm: x1 = v00 | v10
  3447. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  3448. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  3449. vpor %ymm10,%ymm15,%ymm10
  3450. # qhasm: x5 = v01 | v11
  3451. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  3452. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  3453. vpor %ymm7,%ymm11,%ymm7
  3454. # qhasm: v00 = x2 & mask0
  3455. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  3456. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  3457. vpand %ymm8,%ymm0,%ymm11
  3458. # qhasm: v10 = x6 & mask0
  3459. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  3460. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  3461. vpand %ymm12,%ymm0,%ymm15
  3462. # qhasm: 4x v10 <<= 4
  3463. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3464. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3465. vpsllq $4,%ymm15,%ymm15
  3466. # qhasm: v01 = x2 & mask1
  3467. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  3468. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  3469. vpand %ymm8,%ymm1,%ymm8
  3470. # qhasm: v11 = x6 & mask1
  3471. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  3472. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  3473. vpand %ymm12,%ymm1,%ymm12
  3474. # qhasm: 4x v01 unsigned>>= 4
  3475. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  3476. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  3477. vpsrlq $4,%ymm8,%ymm8
  3478. # qhasm: x2 = v00 | v10
  3479. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  3480. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  3481. vpor %ymm11,%ymm15,%ymm11
  3482. # qhasm: x6 = v01 | v11
  3483. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  3484. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  3485. vpor %ymm8,%ymm12,%ymm8
  3486. # qhasm: v00 = x3 & mask0
  3487. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  3488. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  3489. vpand %ymm9,%ymm0,%ymm12
  3490. # qhasm: v10 = x7 & mask0
  3491. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  3492. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  3493. vpand %ymm13,%ymm0,%ymm15
  3494. # qhasm: 4x v10 <<= 4
  3495. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3496. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3497. vpsllq $4,%ymm15,%ymm15
  3498. # qhasm: v01 = x3 & mask1
  3499. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  3500. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  3501. vpand %ymm9,%ymm1,%ymm9
  3502. # qhasm: v11 = x7 & mask1
  3503. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  3504. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  3505. vpand %ymm13,%ymm1,%ymm13
  3506. # qhasm: 4x v01 unsigned>>= 4
  3507. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  3508. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  3509. vpsrlq $4,%ymm9,%ymm9
  3510. # qhasm: x3 = v00 | v10
  3511. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  3512. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  3513. vpor %ymm12,%ymm15,%ymm12
  3514. # qhasm: x7 = v01 | v11
  3515. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  3516. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  3517. vpor %ymm9,%ymm13,%ymm9
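# 2-bit step for this group (mask2/mask3, 64-bit shifts by 2).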
  3518. # qhasm: v00 = x0 & mask2
  3519. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  3520. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  3521. vpand %ymm14,%ymm2,%ymm13
  3522. # qhasm: v10 = x2 & mask2
  3523. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  3524. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  3525. vpand %ymm11,%ymm2,%ymm15
  3526. # qhasm: 4x v10 <<= 2
  3527. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3528. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3529. vpsllq $2,%ymm15,%ymm15
  3530. # qhasm: v01 = x0 & mask3
  3531. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  3532. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  3533. vpand %ymm14,%ymm3,%ymm14
  3534. # qhasm: v11 = x2 & mask3
  3535. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  3536. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  3537. vpand %ymm11,%ymm3,%ymm11
  3538. # qhasm: 4x v01 unsigned>>= 2
  3539. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  3540. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  3541. vpsrlq $2,%ymm14,%ymm14
  3542. # qhasm: x0 = v00 | v10
  3543. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  3544. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  3545. vpor %ymm13,%ymm15,%ymm13
  3546. # qhasm: x2 = v01 | v11
  3547. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  3548. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  3549. vpor %ymm14,%ymm11,%ymm11
  3550. # qhasm: v00 = x1 & mask2
  3551. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  3552. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  3553. vpand %ymm10,%ymm2,%ymm14
  3554. # qhasm: v10 = x3 & mask2
  3555. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  3556. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  3557. vpand %ymm12,%ymm2,%ymm15
  3558. # qhasm: 4x v10 <<= 2
  3559. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3560. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3561. vpsllq $2,%ymm15,%ymm15
  3562. # qhasm: v01 = x1 & mask3
  3563. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  3564. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  3565. vpand %ymm10,%ymm3,%ymm10
  3566. # qhasm: v11 = x3 & mask3
  3567. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  3568. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  3569. vpand %ymm12,%ymm3,%ymm12
  3570. # qhasm: 4x v01 unsigned>>= 2
  3571. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  3572. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  3573. vpsrlq $2,%ymm10,%ymm10
  3574. # qhasm: x1 = v00 | v10
  3575. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  3576. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  3577. vpor %ymm14,%ymm15,%ymm14
  3578. # qhasm: x3 = v01 | v11
  3579. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  3580. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  3581. vpor %ymm10,%ymm12,%ymm10
  3582. # qhasm: v00 = x4 & mask2
  3583. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  3584. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  3585. vpand %ymm6,%ymm2,%ymm12
  3586. # qhasm: v10 = x6 & mask2
  3587. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  3588. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  3589. vpand %ymm8,%ymm2,%ymm15
  3590. # qhasm: 4x v10 <<= 2
  3591. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3592. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3593. vpsllq $2,%ymm15,%ymm15
  3594. # qhasm: v01 = x4 & mask3
  3595. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  3596. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  3597. vpand %ymm6,%ymm3,%ymm6
  3598. # qhasm: v11 = x6 & mask3
  3599. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  3600. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  3601. vpand %ymm8,%ymm3,%ymm8
  3602. # qhasm: 4x v01 unsigned>>= 2
  3603. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  3604. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  3605. vpsrlq $2,%ymm6,%ymm6
  3606. # qhasm: x4 = v00 | v10
  3607. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  3608. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  3609. vpor %ymm12,%ymm15,%ymm12
  3610. # qhasm: x6 = v01 | v11
  3611. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  3612. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  3613. vpor %ymm6,%ymm8,%ymm6
  3614. # qhasm: v00 = x5 & mask2
  3615. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  3616. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  3617. vpand %ymm7,%ymm2,%ymm8
  3618. # qhasm: v10 = x7 & mask2
  3619. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  3620. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  3621. vpand %ymm9,%ymm2,%ymm15
  3622. # qhasm: 4x v10 <<= 2
  3623. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3624. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3625. vpsllq $2,%ymm15,%ymm15
  3626. # qhasm: v01 = x5 & mask3
  3627. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  3628. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  3629. vpand %ymm7,%ymm3,%ymm7
  3630. # qhasm: v11 = x7 & mask3
  3631. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  3632. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  3633. vpand %ymm9,%ymm3,%ymm9
  3634. # qhasm: 4x v01 unsigned>>= 2
  3635. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  3636. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  3637. vpsrlq $2,%ymm7,%ymm7
  3638. # qhasm: x5 = v00 | v10
  3639. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  3640. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  3641. vpor %ymm8,%ymm15,%ymm8
  3642. # qhasm: x7 = v01 | v11
  3643. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  3644. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  3645. vpor %ymm7,%ymm9,%ymm7
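# 1-bit step for this group (mask4/mask5, 64-bit shifts by 1).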
  3646. # qhasm: v00 = x0 & mask4
  3647. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  3648. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  3649. vpand %ymm13,%ymm4,%ymm9
  3650. # qhasm: v10 = x1 & mask4
  3651. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  3652. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  3653. vpand %ymm14,%ymm4,%ymm15
  3654. # qhasm: 4x v10 <<= 1
  3655. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3656. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3657. vpsllq $1,%ymm15,%ymm15
  3658. # qhasm: v01 = x0 & mask5
  3659. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  3660. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  3661. vpand %ymm13,%ymm5,%ymm13
  3662. # qhasm: v11 = x1 & mask5
  3663. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  3664. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  3665. vpand %ymm14,%ymm5,%ymm14
  3666. # qhasm: 4x v01 unsigned>>= 1
  3667. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  3668. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  3669. vpsrlq $1,%ymm13,%ymm13
  3670. # qhasm: x0 = v00 | v10
  3671. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  3672. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  3673. vpor %ymm9,%ymm15,%ymm9
  3674. # qhasm: x1 = v01 | v11
  3675. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  3676. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  3677. vpor %ymm13,%ymm14,%ymm13
  3678. # qhasm: v00 = x2 & mask4
  3679. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  3680. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  3681. vpand %ymm11,%ymm4,%ymm14
  3682. # qhasm: v10 = x3 & mask4
  3683. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  3684. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  3685. vpand %ymm10,%ymm4,%ymm15
  3686. # qhasm: 4x v10 <<= 1
  3687. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3688. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3689. vpsllq $1,%ymm15,%ymm15
  3690. # qhasm: v01 = x2 & mask5
  3691. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  3692. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  3693. vpand %ymm11,%ymm5,%ymm11
  3694. # qhasm: v11 = x3 & mask5
  3695. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  3696. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  3697. vpand %ymm10,%ymm5,%ymm10
  3698. # qhasm: 4x v01 unsigned>>= 1
  3699. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  3700. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  3701. vpsrlq $1,%ymm11,%ymm11
  3702. # qhasm: x2 = v00 | v10
  3703. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  3704. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  3705. vpor %ymm14,%ymm15,%ymm14
  3706. # qhasm: x3 = v01 | v11
  3707. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  3708. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  3709. vpor %ymm11,%ymm10,%ymm10
  3710. # qhasm: v00 = x4 & mask4
  3711. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  3712. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  3713. vpand %ymm12,%ymm4,%ymm11
  3714. # qhasm: v10 = x5 & mask4
  3715. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  3716. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  3717. vpand %ymm8,%ymm4,%ymm15
  3718. # qhasm: 4x v10 <<= 1
  3719. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3720. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3721. vpsllq $1,%ymm15,%ymm15
  3722. # qhasm: v01 = x4 & mask5
  3723. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  3724. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  3725. vpand %ymm12,%ymm5,%ymm12
  3726. # qhasm: v11 = x5 & mask5
  3727. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  3728. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  3729. vpand %ymm8,%ymm5,%ymm8
  3730. # qhasm: 4x v01 unsigned>>= 1
  3731. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  3732. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  3733. vpsrlq $1,%ymm12,%ymm12
  3734. # qhasm: x4 = v00 | v10
  3735. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  3736. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  3737. vpor %ymm11,%ymm15,%ymm11
  3738. # qhasm: x5 = v01 | v11
  3739. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  3740. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  3741. vpor %ymm12,%ymm8,%ymm8
  3742. # qhasm: v00 = x6 & mask4
  3743. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  3744. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  3745. vpand %ymm6,%ymm4,%ymm12
  3746. # qhasm: v10 = x7 & mask4
  3747. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  3748. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  3749. vpand %ymm7,%ymm4,%ymm15
  3750. # qhasm: 4x v10 <<= 1
  3751. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  3752. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  3753. vpsllq $1,%ymm15,%ymm15
  3754. # qhasm: v01 = x6 & mask5
  3755. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  3756. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  3757. vpand %ymm6,%ymm5,%ymm6
  3758. # qhasm: v11 = x7 & mask5
  3759. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  3760. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  3761. vpand %ymm7,%ymm5,%ymm7
  3762. # qhasm: 4x v01 unsigned>>= 1
  3763. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  3764. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  3765. vpsrlq $1,%ymm6,%ymm6
  3766. # qhasm: x6 = v00 | v10
  3767. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  3768. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  3769. vpor %ymm12,%ymm15,%ymm12
  3770. # qhasm: x7 = v01 | v11
  3771. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  3772. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  3773. vpor %ymm6,%ymm7,%ymm6
  3774. # qhasm: mem256[ input_0 + 256 ] = x0
  3775. # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1)
  3776. # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi)
  3777. vmovupd %ymm9,256(%rdi)
  3778. # qhasm: mem256[ input_0 + 288 ] = x1
  3779. # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1)
  3780. # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi)
  3781. vmovupd %ymm13,288(%rdi)
  3782. # qhasm: mem256[ input_0 + 320 ] = x2
  3783. # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1)
  3784. # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi)
  3785. vmovupd %ymm14,320(%rdi)
  3786. # qhasm: mem256[ input_0 + 352 ] = x3
  3787. # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1)
  3788. # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi)
  3789. vmovupd %ymm10,352(%rdi)
  3790. # qhasm: mem256[ input_0 + 384 ] = x4
  3791. # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1)
  3792. # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi)
  3793. vmovupd %ymm11,384(%rdi)
  3794. # qhasm: mem256[ input_0 + 416 ] = x5
  3795. # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1)
  3796. # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi)
  3797. vmovupd %ymm8,416(%rdi)
  3798. # qhasm: mem256[ input_0 + 448 ] = x6
  3799. # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1)
  3800. # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi)
  3801. vmovupd %ymm12,448(%rdi)
  3802. # qhasm: mem256[ input_0 + 480 ] = x7
  3803. # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1)
  3804. # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi)
  3805. vmovupd %ymm6,480(%rdi)
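# Rows at input_0 + 256 .. input_0 + 480: the 2-bit (mask2/mask3) and 1-bit
# (mask4/mask5) masked-shift exchanges above finish this block of eight
# 256-bit rows, which is now stored back in place.  Each following block of
# eight rows below goes through the full 4-/2-/1-bit exchange sequence.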
  3806. # qhasm: x0 = mem256[ input_0 + 512 ]
  3807. # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7
  3808. # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6
  3809. vmovupd 512(%rdi),%ymm6
  3810. # qhasm: x1 = mem256[ input_0 + 544 ]
  3811. # asm 1: vmovupd 544(<input_0=int64#1),>x1=reg256#8
  3812. # asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7
  3813. vmovupd 544(%rdi),%ymm7
  3814. # qhasm: x2 = mem256[ input_0 + 576 ]
  3815. # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9
  3816. # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8
  3817. vmovupd 576(%rdi),%ymm8
  3818. # qhasm: x3 = mem256[ input_0 + 608 ]
  3819. # asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10
  3820. # asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9
  3821. vmovupd 608(%rdi),%ymm9
  3822. # qhasm: x4 = mem256[ input_0 + 640 ]
  3823. # asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11
  3824. # asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10
  3825. vmovupd 640(%rdi),%ymm10
  3826. # qhasm: x5 = mem256[ input_0 + 672 ]
  3827. # asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12
  3828. # asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11
  3829. vmovupd 672(%rdi),%ymm11
  3830. # qhasm: x6 = mem256[ input_0 + 704 ]
  3831. # asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13
  3832. # asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12
  3833. vmovupd 704(%rdi),%ymm12
  3834. # qhasm: x7 = mem256[ input_0 + 736 ]
  3835. # asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14
  3836. # asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13
  3837. vmovupd 736(%rdi),%ymm13
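# The next block (rows at input_0 + 512 .. input_0 + 736) is processed with
# the same delayed-swap idiom.  A rough C-style sketch of one exchange step,
# with illustrative names only (lo, hi, mask_lo, mask_hi, s are not
# identifiers from this source):
#
#     /* exchange s-bit groups between rows lo and hi, per 64-bit lane */
#     t_lo = (lo & mask_lo) | ((hi & mask_lo) << s);
#     t_hi = ((lo & mask_hi) >> s) | (hi & mask_hi);
#     lo = t_lo;  hi = t_hi;
#
# with s = 4, 2, 1 and (mask_lo, mask_hi) = (mask0, mask1), (mask2, mask3),
# (mask4, mask5) respectively; vpsllq/vpsrlq keep the shifts inside each
# 64-bit lane, which is what the "4x" in the qhasm comments denotes.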
  3838. # qhasm: v00 = x0 & mask0
  3839. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  3840. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  3841. vpand %ymm6,%ymm0,%ymm14
  3842. # qhasm: v10 = x4 & mask0
  3843. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  3844. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  3845. vpand %ymm10,%ymm0,%ymm15
  3846. # qhasm: 4x v10 <<= 4
  3847. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3848. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3849. vpsllq $4,%ymm15,%ymm15
  3850. # qhasm: v01 = x0 & mask1
  3851. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  3852. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  3853. vpand %ymm6,%ymm1,%ymm6
  3854. # qhasm: v11 = x4 & mask1
  3855. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  3856. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  3857. vpand %ymm10,%ymm1,%ymm10
  3858. # qhasm: 4x v01 unsigned>>= 4
  3859. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  3860. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  3861. vpsrlq $4,%ymm6,%ymm6
  3862. # qhasm: x0 = v00 | v10
  3863. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  3864. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  3865. vpor %ymm14,%ymm15,%ymm14
  3866. # qhasm: x4 = v01 | v11
  3867. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  3868. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  3869. vpor %ymm6,%ymm10,%ymm6
  3870. # qhasm: v00 = x1 & mask0
  3871. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  3872. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  3873. vpand %ymm7,%ymm0,%ymm10
  3874. # qhasm: v10 = x5 & mask0
  3875. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  3876. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  3877. vpand %ymm11,%ymm0,%ymm15
  3878. # qhasm: 4x v10 <<= 4
  3879. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3880. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3881. vpsllq $4,%ymm15,%ymm15
  3882. # qhasm: v01 = x1 & mask1
  3883. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  3884. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  3885. vpand %ymm7,%ymm1,%ymm7
  3886. # qhasm: v11 = x5 & mask1
  3887. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  3888. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  3889. vpand %ymm11,%ymm1,%ymm11
  3890. # qhasm: 4x v01 unsigned>>= 4
  3891. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  3892. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  3893. vpsrlq $4,%ymm7,%ymm7
  3894. # qhasm: x1 = v00 | v10
  3895. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  3896. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  3897. vpor %ymm10,%ymm15,%ymm10
  3898. # qhasm: x5 = v01 | v11
  3899. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  3900. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  3901. vpor %ymm7,%ymm11,%ymm7
  3902. # qhasm: v00 = x2 & mask0
  3903. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  3904. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  3905. vpand %ymm8,%ymm0,%ymm11
  3906. # qhasm: v10 = x6 & mask0
  3907. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  3908. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  3909. vpand %ymm12,%ymm0,%ymm15
  3910. # qhasm: 4x v10 <<= 4
  3911. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3912. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3913. vpsllq $4,%ymm15,%ymm15
  3914. # qhasm: v01 = x2 & mask1
  3915. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  3916. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  3917. vpand %ymm8,%ymm1,%ymm8
  3918. # qhasm: v11 = x6 & mask1
  3919. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  3920. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  3921. vpand %ymm12,%ymm1,%ymm12
  3922. # qhasm: 4x v01 unsigned>>= 4
  3923. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  3924. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  3925. vpsrlq $4,%ymm8,%ymm8
  3926. # qhasm: x2 = v00 | v10
  3927. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  3928. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  3929. vpor %ymm11,%ymm15,%ymm11
  3930. # qhasm: x6 = v01 | v11
  3931. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  3932. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  3933. vpor %ymm8,%ymm12,%ymm8
  3934. # qhasm: v00 = x3 & mask0
  3935. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  3936. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  3937. vpand %ymm9,%ymm0,%ymm12
  3938. # qhasm: v10 = x7 & mask0
  3939. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  3940. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  3941. vpand %ymm13,%ymm0,%ymm15
  3942. # qhasm: 4x v10 <<= 4
  3943. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  3944. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  3945. vpsllq $4,%ymm15,%ymm15
  3946. # qhasm: v01 = x3 & mask1
  3947. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  3948. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  3949. vpand %ymm9,%ymm1,%ymm9
  3950. # qhasm: v11 = x7 & mask1
  3951. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  3952. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  3953. vpand %ymm13,%ymm1,%ymm13
  3954. # qhasm: 4x v01 unsigned>>= 4
  3955. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  3956. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  3957. vpsrlq $4,%ymm9,%ymm9
  3958. # qhasm: x3 = v00 | v10
  3959. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  3960. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  3961. vpor %ymm12,%ymm15,%ymm12
  3962. # qhasm: x7 = v01 | v11
  3963. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  3964. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  3965. vpor %ymm9,%ymm13,%ymm9
  3966. # qhasm: v00 = x0 & mask2
  3967. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  3968. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  3969. vpand %ymm14,%ymm2,%ymm13
  3970. # qhasm: v10 = x2 & mask2
  3971. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  3972. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  3973. vpand %ymm11,%ymm2,%ymm15
  3974. # qhasm: 4x v10 <<= 2
  3975. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  3976. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  3977. vpsllq $2,%ymm15,%ymm15
  3978. # qhasm: v01 = x0 & mask3
  3979. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  3980. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  3981. vpand %ymm14,%ymm3,%ymm14
  3982. # qhasm: v11 = x2 & mask3
  3983. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  3984. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  3985. vpand %ymm11,%ymm3,%ymm11
  3986. # qhasm: 4x v01 unsigned>>= 2
  3987. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  3988. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  3989. vpsrlq $2,%ymm14,%ymm14
  3990. # qhasm: x0 = v00 | v10
  3991. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  3992. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  3993. vpor %ymm13,%ymm15,%ymm13
  3994. # qhasm: x2 = v01 | v11
  3995. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  3996. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  3997. vpor %ymm14,%ymm11,%ymm11
  3998. # qhasm: v00 = x1 & mask2
  3999. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  4000. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  4001. vpand %ymm10,%ymm2,%ymm14
  4002. # qhasm: v10 = x3 & mask2
  4003. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  4004. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  4005. vpand %ymm12,%ymm2,%ymm15
  4006. # qhasm: 4x v10 <<= 2
  4007. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4008. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4009. vpsllq $2,%ymm15,%ymm15
  4010. # qhasm: v01 = x1 & mask3
  4011. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  4012. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  4013. vpand %ymm10,%ymm3,%ymm10
  4014. # qhasm: v11 = x3 & mask3
  4015. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  4016. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  4017. vpand %ymm12,%ymm3,%ymm12
  4018. # qhasm: 4x v01 unsigned>>= 2
  4019. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  4020. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  4021. vpsrlq $2,%ymm10,%ymm10
  4022. # qhasm: x1 = v00 | v10
  4023. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  4024. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  4025. vpor %ymm14,%ymm15,%ymm14
  4026. # qhasm: x3 = v01 | v11
  4027. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  4028. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  4029. vpor %ymm10,%ymm12,%ymm10
  4030. # qhasm: v00 = x4 & mask2
  4031. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  4032. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  4033. vpand %ymm6,%ymm2,%ymm12
  4034. # qhasm: v10 = x6 & mask2
  4035. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  4036. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  4037. vpand %ymm8,%ymm2,%ymm15
  4038. # qhasm: 4x v10 <<= 2
  4039. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4040. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4041. vpsllq $2,%ymm15,%ymm15
  4042. # qhasm: v01 = x4 & mask3
  4043. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  4044. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  4045. vpand %ymm6,%ymm3,%ymm6
  4046. # qhasm: v11 = x6 & mask3
  4047. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  4048. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  4049. vpand %ymm8,%ymm3,%ymm8
  4050. # qhasm: 4x v01 unsigned>>= 2
  4051. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  4052. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  4053. vpsrlq $2,%ymm6,%ymm6
  4054. # qhasm: x4 = v00 | v10
  4055. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  4056. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  4057. vpor %ymm12,%ymm15,%ymm12
  4058. # qhasm: x6 = v01 | v11
  4059. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  4060. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  4061. vpor %ymm6,%ymm8,%ymm6
  4062. # qhasm: v00 = x5 & mask2
  4063. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  4064. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  4065. vpand %ymm7,%ymm2,%ymm8
  4066. # qhasm: v10 = x7 & mask2
  4067. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  4068. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  4069. vpand %ymm9,%ymm2,%ymm15
  4070. # qhasm: 4x v10 <<= 2
  4071. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4072. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4073. vpsllq $2,%ymm15,%ymm15
  4074. # qhasm: v01 = x5 & mask3
  4075. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  4076. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  4077. vpand %ymm7,%ymm3,%ymm7
  4078. # qhasm: v11 = x7 & mask3
  4079. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  4080. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  4081. vpand %ymm9,%ymm3,%ymm9
  4082. # qhasm: 4x v01 unsigned>>= 2
  4083. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  4084. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  4085. vpsrlq $2,%ymm7,%ymm7
  4086. # qhasm: x5 = v00 | v10
  4087. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  4088. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  4089. vpor %ymm8,%ymm15,%ymm8
  4090. # qhasm: x7 = v01 | v11
  4091. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  4092. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  4093. vpor %ymm7,%ymm9,%ymm7
  4094. # qhasm: v00 = x0 & mask4
  4095. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  4096. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  4097. vpand %ymm13,%ymm4,%ymm9
  4098. # qhasm: v10 = x1 & mask4
  4099. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  4100. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  4101. vpand %ymm14,%ymm4,%ymm15
  4102. # qhasm: 4x v10 <<= 1
  4103. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4104. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4105. vpsllq $1,%ymm15,%ymm15
  4106. # qhasm: v01 = x0 & mask5
  4107. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  4108. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  4109. vpand %ymm13,%ymm5,%ymm13
  4110. # qhasm: v11 = x1 & mask5
  4111. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  4112. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  4113. vpand %ymm14,%ymm5,%ymm14
  4114. # qhasm: 4x v01 unsigned>>= 1
  4115. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  4116. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  4117. vpsrlq $1,%ymm13,%ymm13
  4118. # qhasm: x0 = v00 | v10
  4119. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  4120. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  4121. vpor %ymm9,%ymm15,%ymm9
  4122. # qhasm: x1 = v01 | v11
  4123. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  4124. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  4125. vpor %ymm13,%ymm14,%ymm13
  4126. # qhasm: v00 = x2 & mask4
  4127. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  4128. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  4129. vpand %ymm11,%ymm4,%ymm14
  4130. # qhasm: v10 = x3 & mask4
  4131. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  4132. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  4133. vpand %ymm10,%ymm4,%ymm15
  4134. # qhasm: 4x v10 <<= 1
  4135. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4136. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4137. vpsllq $1,%ymm15,%ymm15
  4138. # qhasm: v01 = x2 & mask5
  4139. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  4140. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  4141. vpand %ymm11,%ymm5,%ymm11
  4142. # qhasm: v11 = x3 & mask5
  4143. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  4144. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  4145. vpand %ymm10,%ymm5,%ymm10
  4146. # qhasm: 4x v01 unsigned>>= 1
  4147. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  4148. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  4149. vpsrlq $1,%ymm11,%ymm11
  4150. # qhasm: x2 = v00 | v10
  4151. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  4152. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  4153. vpor %ymm14,%ymm15,%ymm14
  4154. # qhasm: x3 = v01 | v11
  4155. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  4156. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  4157. vpor %ymm11,%ymm10,%ymm10
  4158. # qhasm: v00 = x4 & mask4
  4159. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  4160. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  4161. vpand %ymm12,%ymm4,%ymm11
  4162. # qhasm: v10 = x5 & mask4
  4163. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  4164. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  4165. vpand %ymm8,%ymm4,%ymm15
  4166. # qhasm: 4x v10 <<= 1
  4167. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4168. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4169. vpsllq $1,%ymm15,%ymm15
  4170. # qhasm: v01 = x4 & mask5
  4171. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  4172. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  4173. vpand %ymm12,%ymm5,%ymm12
  4174. # qhasm: v11 = x5 & mask5
  4175. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  4176. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  4177. vpand %ymm8,%ymm5,%ymm8
  4178. # qhasm: 4x v01 unsigned>>= 1
  4179. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  4180. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  4181. vpsrlq $1,%ymm12,%ymm12
  4182. # qhasm: x4 = v00 | v10
  4183. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  4184. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  4185. vpor %ymm11,%ymm15,%ymm11
  4186. # qhasm: x5 = v01 | v11
  4187. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  4188. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  4189. vpor %ymm12,%ymm8,%ymm8
  4190. # qhasm: v00 = x6 & mask4
  4191. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  4192. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  4193. vpand %ymm6,%ymm4,%ymm12
  4194. # qhasm: v10 = x7 & mask4
  4195. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  4196. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  4197. vpand %ymm7,%ymm4,%ymm15
  4198. # qhasm: 4x v10 <<= 1
  4199. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4200. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4201. vpsllq $1,%ymm15,%ymm15
  4202. # qhasm: v01 = x6 & mask5
  4203. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  4204. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  4205. vpand %ymm6,%ymm5,%ymm6
  4206. # qhasm: v11 = x7 & mask5
  4207. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  4208. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  4209. vpand %ymm7,%ymm5,%ymm7
  4210. # qhasm: 4x v01 unsigned>>= 1
  4211. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  4212. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  4213. vpsrlq $1,%ymm6,%ymm6
  4214. # qhasm: x6 = v00 | v10
  4215. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  4216. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  4217. vpor %ymm12,%ymm15,%ymm12
  4218. # qhasm: x7 = v01 | v11
  4219. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  4220. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  4221. vpor %ymm6,%ymm7,%ymm6
  4222. # qhasm: mem256[ input_0 + 512 ] = x0
  4223. # asm 1: vmovupd <x0=reg256#10,512(<input_0=int64#1)
  4224. # asm 2: vmovupd <x0=%ymm9,512(<input_0=%rdi)
  4225. vmovupd %ymm9,512(%rdi)
  4226. # qhasm: mem256[ input_0 + 544 ] = x1
  4227. # asm 1: vmovupd <x1=reg256#14,544(<input_0=int64#1)
  4228. # asm 2: vmovupd <x1=%ymm13,544(<input_0=%rdi)
  4229. vmovupd %ymm13,544(%rdi)
  4230. # qhasm: mem256[ input_0 + 576 ] = x2
  4231. # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
  4232. # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
  4233. vmovupd %ymm14,576(%rdi)
  4234. # qhasm: mem256[ input_0 + 608 ] = x3
  4235. # asm 1: vmovupd <x3=reg256#11,608(<input_0=int64#1)
  4236. # asm 2: vmovupd <x3=%ymm10,608(<input_0=%rdi)
  4237. vmovupd %ymm10,608(%rdi)
  4238. # qhasm: mem256[ input_0 + 640 ] = x4
  4239. # asm 1: vmovupd <x4=reg256#12,640(<input_0=int64#1)
  4240. # asm 2: vmovupd <x4=%ymm11,640(<input_0=%rdi)
  4241. vmovupd %ymm11,640(%rdi)
  4242. # qhasm: mem256[ input_0 + 672 ] = x5
  4243. # asm 1: vmovupd <x5=reg256#9,672(<input_0=int64#1)
  4244. # asm 2: vmovupd <x5=%ymm8,672(<input_0=%rdi)
  4245. vmovupd %ymm8,672(%rdi)
  4246. # qhasm: mem256[ input_0 + 704 ] = x6
  4247. # asm 1: vmovupd <x6=reg256#13,704(<input_0=int64#1)
  4248. # asm 2: vmovupd <x6=%ymm12,704(<input_0=%rdi)
  4249. vmovupd %ymm12,704(%rdi)
  4250. # qhasm: mem256[ input_0 + 736 ] = x7
  4251. # asm 1: vmovupd <x7=reg256#7,736(<input_0=int64#1)
  4252. # asm 2: vmovupd <x7=%ymm6,736(<input_0=%rdi)
  4253. vmovupd %ymm6,736(%rdi)
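# Block at input_0 + 512 .. input_0 + 736 finished: exchanged at strides
# 4, 2 and 1 and written back to the same offsets.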
  4254. # qhasm: x0 = mem256[ input_0 + 768 ]
  4255. # asm 1: vmovupd 768(<input_0=int64#1),>x0=reg256#7
  4256. # asm 2: vmovupd 768(<input_0=%rdi),>x0=%ymm6
  4257. vmovupd 768(%rdi),%ymm6
  4258. # qhasm: x1 = mem256[ input_0 + 800 ]
  4259. # asm 1: vmovupd 800(<input_0=int64#1),>x1=reg256#8
  4260. # asm 2: vmovupd 800(<input_0=%rdi),>x1=%ymm7
  4261. vmovupd 800(%rdi),%ymm7
  4262. # qhasm: x2 = mem256[ input_0 + 832 ]
  4263. # asm 1: vmovupd 832(<input_0=int64#1),>x2=reg256#9
  4264. # asm 2: vmovupd 832(<input_0=%rdi),>x2=%ymm8
  4265. vmovupd 832(%rdi),%ymm8
  4266. # qhasm: x3 = mem256[ input_0 + 864 ]
  4267. # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
  4268. # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
  4269. vmovupd 864(%rdi),%ymm9
  4270. # qhasm: x4 = mem256[ input_0 + 896 ]
  4271. # asm 1: vmovupd 896(<input_0=int64#1),>x4=reg256#11
  4272. # asm 2: vmovupd 896(<input_0=%rdi),>x4=%ymm10
  4273. vmovupd 896(%rdi),%ymm10
  4274. # qhasm: x5 = mem256[ input_0 + 928 ]
  4275. # asm 1: vmovupd 928(<input_0=int64#1),>x5=reg256#12
  4276. # asm 2: vmovupd 928(<input_0=%rdi),>x5=%ymm11
  4277. vmovupd 928(%rdi),%ymm11
  4278. # qhasm: x6 = mem256[ input_0 + 960 ]
  4279. # asm 1: vmovupd 960(<input_0=int64#1),>x6=reg256#13
  4280. # asm 2: vmovupd 960(<input_0=%rdi),>x6=%ymm12
  4281. vmovupd 960(%rdi),%ymm12
  4282. # qhasm: x7 = mem256[ input_0 + 992 ]
  4283. # asm 1: vmovupd 992(<input_0=int64#1),>x7=reg256#14
  4284. # asm 2: vmovupd 992(<input_0=%rdi),>x7=%ymm13
  4285. vmovupd 992(%rdi),%ymm13
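# The block at input_0 + 768 .. input_0 + 992 now goes through the same
# three exchange passes.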
  4286. # qhasm: v00 = x0 & mask0
  4287. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  4288. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  4289. vpand %ymm6,%ymm0,%ymm14
  4290. # qhasm: v10 = x4 & mask0
  4291. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  4292. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  4293. vpand %ymm10,%ymm0,%ymm15
  4294. # qhasm: 4x v10 <<= 4
  4295. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4296. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4297. vpsllq $4,%ymm15,%ymm15
  4298. # qhasm: v01 = x0 & mask1
  4299. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  4300. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  4301. vpand %ymm6,%ymm1,%ymm6
  4302. # qhasm: v11 = x4 & mask1
  4303. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  4304. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  4305. vpand %ymm10,%ymm1,%ymm10
  4306. # qhasm: 4x v01 unsigned>>= 4
  4307. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  4308. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  4309. vpsrlq $4,%ymm6,%ymm6
  4310. # qhasm: x0 = v00 | v10
  4311. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  4312. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  4313. vpor %ymm14,%ymm15,%ymm14
  4314. # qhasm: x4 = v01 | v11
  4315. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  4316. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  4317. vpor %ymm6,%ymm10,%ymm6
  4318. # qhasm: v00 = x1 & mask0
  4319. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  4320. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  4321. vpand %ymm7,%ymm0,%ymm10
  4322. # qhasm: v10 = x5 & mask0
  4323. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  4324. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  4325. vpand %ymm11,%ymm0,%ymm15
  4326. # qhasm: 4x v10 <<= 4
  4327. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4328. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4329. vpsllq $4,%ymm15,%ymm15
  4330. # qhasm: v01 = x1 & mask1
  4331. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  4332. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  4333. vpand %ymm7,%ymm1,%ymm7
  4334. # qhasm: v11 = x5 & mask1
  4335. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  4336. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  4337. vpand %ymm11,%ymm1,%ymm11
  4338. # qhasm: 4x v01 unsigned>>= 4
  4339. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  4340. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  4341. vpsrlq $4,%ymm7,%ymm7
  4342. # qhasm: x1 = v00 | v10
  4343. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  4344. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  4345. vpor %ymm10,%ymm15,%ymm10
  4346. # qhasm: x5 = v01 | v11
  4347. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  4348. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  4349. vpor %ymm7,%ymm11,%ymm7
  4350. # qhasm: v00 = x2 & mask0
  4351. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  4352. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  4353. vpand %ymm8,%ymm0,%ymm11
  4354. # qhasm: v10 = x6 & mask0
  4355. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  4356. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  4357. vpand %ymm12,%ymm0,%ymm15
  4358. # qhasm: 4x v10 <<= 4
  4359. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4360. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4361. vpsllq $4,%ymm15,%ymm15
  4362. # qhasm: v01 = x2 & mask1
  4363. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  4364. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  4365. vpand %ymm8,%ymm1,%ymm8
  4366. # qhasm: v11 = x6 & mask1
  4367. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  4368. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  4369. vpand %ymm12,%ymm1,%ymm12
  4370. # qhasm: 4x v01 unsigned>>= 4
  4371. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  4372. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  4373. vpsrlq $4,%ymm8,%ymm8
  4374. # qhasm: x2 = v00 | v10
  4375. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  4376. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  4377. vpor %ymm11,%ymm15,%ymm11
  4378. # qhasm: x6 = v01 | v11
  4379. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  4380. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  4381. vpor %ymm8,%ymm12,%ymm8
  4382. # qhasm: v00 = x3 & mask0
  4383. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  4384. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  4385. vpand %ymm9,%ymm0,%ymm12
  4386. # qhasm: v10 = x7 & mask0
  4387. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  4388. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  4389. vpand %ymm13,%ymm0,%ymm15
  4390. # qhasm: 4x v10 <<= 4
  4391. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4392. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4393. vpsllq $4,%ymm15,%ymm15
  4394. # qhasm: v01 = x3 & mask1
  4395. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  4396. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  4397. vpand %ymm9,%ymm1,%ymm9
  4398. # qhasm: v11 = x7 & mask1
  4399. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  4400. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  4401. vpand %ymm13,%ymm1,%ymm13
  4402. # qhasm: 4x v01 unsigned>>= 4
  4403. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  4404. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  4405. vpsrlq $4,%ymm9,%ymm9
  4406. # qhasm: x3 = v00 | v10
  4407. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  4408. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  4409. vpor %ymm12,%ymm15,%ymm12
  4410. # qhasm: x7 = v01 | v11
  4411. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  4412. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  4413. vpor %ymm9,%ymm13,%ymm9
  4414. # qhasm: v00 = x0 & mask2
  4415. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  4416. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  4417. vpand %ymm14,%ymm2,%ymm13
  4418. # qhasm: v10 = x2 & mask2
  4419. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  4420. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  4421. vpand %ymm11,%ymm2,%ymm15
  4422. # qhasm: 4x v10 <<= 2
  4423. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4424. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4425. vpsllq $2,%ymm15,%ymm15
  4426. # qhasm: v01 = x0 & mask3
  4427. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  4428. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  4429. vpand %ymm14,%ymm3,%ymm14
  4430. # qhasm: v11 = x2 & mask3
  4431. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  4432. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  4433. vpand %ymm11,%ymm3,%ymm11
  4434. # qhasm: 4x v01 unsigned>>= 2
  4435. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  4436. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  4437. vpsrlq $2,%ymm14,%ymm14
  4438. # qhasm: x0 = v00 | v10
  4439. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  4440. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  4441. vpor %ymm13,%ymm15,%ymm13
  4442. # qhasm: x2 = v01 | v11
  4443. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  4444. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  4445. vpor %ymm14,%ymm11,%ymm11
  4446. # qhasm: v00 = x1 & mask2
  4447. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  4448. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  4449. vpand %ymm10,%ymm2,%ymm14
  4450. # qhasm: v10 = x3 & mask2
  4451. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  4452. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  4453. vpand %ymm12,%ymm2,%ymm15
  4454. # qhasm: 4x v10 <<= 2
  4455. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4456. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4457. vpsllq $2,%ymm15,%ymm15
  4458. # qhasm: v01 = x1 & mask3
  4459. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  4460. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  4461. vpand %ymm10,%ymm3,%ymm10
  4462. # qhasm: v11 = x3 & mask3
  4463. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  4464. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  4465. vpand %ymm12,%ymm3,%ymm12
  4466. # qhasm: 4x v01 unsigned>>= 2
  4467. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  4468. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  4469. vpsrlq $2,%ymm10,%ymm10
  4470. # qhasm: x1 = v00 | v10
  4471. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  4472. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  4473. vpor %ymm14,%ymm15,%ymm14
  4474. # qhasm: x3 = v01 | v11
  4475. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  4476. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  4477. vpor %ymm10,%ymm12,%ymm10
  4478. # qhasm: v00 = x4 & mask2
  4479. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  4480. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  4481. vpand %ymm6,%ymm2,%ymm12
  4482. # qhasm: v10 = x6 & mask2
  4483. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  4484. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  4485. vpand %ymm8,%ymm2,%ymm15
  4486. # qhasm: 4x v10 <<= 2
  4487. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4488. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4489. vpsllq $2,%ymm15,%ymm15
  4490. # qhasm: v01 = x4 & mask3
  4491. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  4492. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  4493. vpand %ymm6,%ymm3,%ymm6
  4494. # qhasm: v11 = x6 & mask3
  4495. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  4496. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  4497. vpand %ymm8,%ymm3,%ymm8
  4498. # qhasm: 4x v01 unsigned>>= 2
  4499. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  4500. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  4501. vpsrlq $2,%ymm6,%ymm6
  4502. # qhasm: x4 = v00 | v10
  4503. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  4504. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  4505. vpor %ymm12,%ymm15,%ymm12
  4506. # qhasm: x6 = v01 | v11
  4507. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  4508. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  4509. vpor %ymm6,%ymm8,%ymm6
  4510. # qhasm: v00 = x5 & mask2
  4511. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  4512. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  4513. vpand %ymm7,%ymm2,%ymm8
  4514. # qhasm: v10 = x7 & mask2
  4515. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  4516. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  4517. vpand %ymm9,%ymm2,%ymm15
  4518. # qhasm: 4x v10 <<= 2
  4519. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4520. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4521. vpsllq $2,%ymm15,%ymm15
  4522. # qhasm: v01 = x5 & mask3
  4523. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  4524. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  4525. vpand %ymm7,%ymm3,%ymm7
  4526. # qhasm: v11 = x7 & mask3
  4527. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  4528. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  4529. vpand %ymm9,%ymm3,%ymm9
  4530. # qhasm: 4x v01 unsigned>>= 2
  4531. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  4532. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  4533. vpsrlq $2,%ymm7,%ymm7
  4534. # qhasm: x5 = v00 | v10
  4535. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  4536. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  4537. vpor %ymm8,%ymm15,%ymm8
  4538. # qhasm: x7 = v01 | v11
  4539. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  4540. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  4541. vpor %ymm7,%ymm9,%ymm7
  4542. # qhasm: v00 = x0 & mask4
  4543. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  4544. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  4545. vpand %ymm13,%ymm4,%ymm9
  4546. # qhasm: v10 = x1 & mask4
  4547. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  4548. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  4549. vpand %ymm14,%ymm4,%ymm15
  4550. # qhasm: 4x v10 <<= 1
  4551. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4552. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4553. vpsllq $1,%ymm15,%ymm15
  4554. # qhasm: v01 = x0 & mask5
  4555. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  4556. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  4557. vpand %ymm13,%ymm5,%ymm13
  4558. # qhasm: v11 = x1 & mask5
  4559. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  4560. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  4561. vpand %ymm14,%ymm5,%ymm14
  4562. # qhasm: 4x v01 unsigned>>= 1
  4563. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  4564. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  4565. vpsrlq $1,%ymm13,%ymm13
  4566. # qhasm: x0 = v00 | v10
  4567. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  4568. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  4569. vpor %ymm9,%ymm15,%ymm9
  4570. # qhasm: x1 = v01 | v11
  4571. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  4572. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  4573. vpor %ymm13,%ymm14,%ymm13
  4574. # qhasm: v00 = x2 & mask4
  4575. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  4576. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  4577. vpand %ymm11,%ymm4,%ymm14
  4578. # qhasm: v10 = x3 & mask4
  4579. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  4580. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  4581. vpand %ymm10,%ymm4,%ymm15
  4582. # qhasm: 4x v10 <<= 1
  4583. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4584. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4585. vpsllq $1,%ymm15,%ymm15
  4586. # qhasm: v01 = x2 & mask5
  4587. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  4588. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  4589. vpand %ymm11,%ymm5,%ymm11
  4590. # qhasm: v11 = x3 & mask5
  4591. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  4592. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  4593. vpand %ymm10,%ymm5,%ymm10
  4594. # qhasm: 4x v01 unsigned>>= 1
  4595. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  4596. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  4597. vpsrlq $1,%ymm11,%ymm11
  4598. # qhasm: x2 = v00 | v10
  4599. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  4600. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  4601. vpor %ymm14,%ymm15,%ymm14
  4602. # qhasm: x3 = v01 | v11
  4603. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  4604. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  4605. vpor %ymm11,%ymm10,%ymm10
  4606. # qhasm: v00 = x4 & mask4
  4607. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  4608. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  4609. vpand %ymm12,%ymm4,%ymm11
  4610. # qhasm: v10 = x5 & mask4
  4611. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  4612. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  4613. vpand %ymm8,%ymm4,%ymm15
  4614. # qhasm: 4x v10 <<= 1
  4615. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4616. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4617. vpsllq $1,%ymm15,%ymm15
  4618. # qhasm: v01 = x4 & mask5
  4619. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  4620. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  4621. vpand %ymm12,%ymm5,%ymm12
  4622. # qhasm: v11 = x5 & mask5
  4623. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  4624. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  4625. vpand %ymm8,%ymm5,%ymm8
  4626. # qhasm: 4x v01 unsigned>>= 1
  4627. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  4628. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  4629. vpsrlq $1,%ymm12,%ymm12
  4630. # qhasm: x4 = v00 | v10
  4631. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  4632. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  4633. vpor %ymm11,%ymm15,%ymm11
  4634. # qhasm: x5 = v01 | v11
  4635. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  4636. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  4637. vpor %ymm12,%ymm8,%ymm8
  4638. # qhasm: v00 = x6 & mask4
  4639. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  4640. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  4641. vpand %ymm6,%ymm4,%ymm12
  4642. # qhasm: v10 = x7 & mask4
  4643. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  4644. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  4645. vpand %ymm7,%ymm4,%ymm15
  4646. # qhasm: 4x v10 <<= 1
  4647. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  4648. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  4649. vpsllq $1,%ymm15,%ymm15
  4650. # qhasm: v01 = x6 & mask5
  4651. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  4652. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  4653. vpand %ymm6,%ymm5,%ymm6
  4654. # qhasm: v11 = x7 & mask5
  4655. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  4656. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  4657. vpand %ymm7,%ymm5,%ymm7
  4658. # qhasm: 4x v01 unsigned>>= 1
  4659. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  4660. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  4661. vpsrlq $1,%ymm6,%ymm6
  4662. # qhasm: x6 = v00 | v10
  4663. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  4664. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  4665. vpor %ymm12,%ymm15,%ymm12
  4666. # qhasm: x7 = v01 | v11
  4667. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  4668. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  4669. vpor %ymm6,%ymm7,%ymm6
  4670. # qhasm: mem256[ input_0 + 768 ] = x0
  4671. # asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1)
  4672. # asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi)
  4673. vmovupd %ymm9,768(%rdi)
  4674. # qhasm: mem256[ input_0 + 800 ] = x1
  4675. # asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1)
  4676. # asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi)
  4677. vmovupd %ymm13,800(%rdi)
  4678. # qhasm: mem256[ input_0 + 832 ] = x2
  4679. # asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1)
  4680. # asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi)
  4681. vmovupd %ymm14,832(%rdi)
  4682. # qhasm: mem256[ input_0 + 864 ] = x3
  4683. # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
  4684. # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
  4685. vmovupd %ymm10,864(%rdi)
  4686. # qhasm: mem256[ input_0 + 896 ] = x4
  4687. # asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1)
  4688. # asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi)
  4689. vmovupd %ymm11,896(%rdi)
  4690. # qhasm: mem256[ input_0 + 928 ] = x5
  4691. # asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1)
  4692. # asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi)
  4693. vmovupd %ymm8,928(%rdi)
  4694. # qhasm: mem256[ input_0 + 960 ] = x6
  4695. # asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1)
  4696. # asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi)
  4697. vmovupd %ymm12,960(%rdi)
  4698. # qhasm: mem256[ input_0 + 992 ] = x7
  4699. # asm 1: vmovupd <x7=reg256#7,992(<input_0=int64#1)
  4700. # asm 2: vmovupd <x7=%ymm6,992(<input_0=%rdi)
  4701. vmovupd %ymm6,992(%rdi)
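# Block at input_0 + 768 .. input_0 + 992 written back; the base offset
# advances by 256 bytes per block (eight 32-byte rows).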
  4702. # qhasm: x0 = mem256[ input_0 + 1024 ]
  4703. # asm 1: vmovupd 1024(<input_0=int64#1),>x0=reg256#7
  4704. # asm 2: vmovupd 1024(<input_0=%rdi),>x0=%ymm6
  4705. vmovupd 1024(%rdi),%ymm6
  4706. # qhasm: x1 = mem256[ input_0 + 1056 ]
  4707. # asm 1: vmovupd 1056(<input_0=int64#1),>x1=reg256#8
  4708. # asm 2: vmovupd 1056(<input_0=%rdi),>x1=%ymm7
  4709. vmovupd 1056(%rdi),%ymm7
  4710. # qhasm: x2 = mem256[ input_0 + 1088 ]
  4711. # asm 1: vmovupd 1088(<input_0=int64#1),>x2=reg256#9
  4712. # asm 2: vmovupd 1088(<input_0=%rdi),>x2=%ymm8
  4713. vmovupd 1088(%rdi),%ymm8
  4714. # qhasm: x3 = mem256[ input_0 + 1120 ]
  4715. # asm 1: vmovupd 1120(<input_0=int64#1),>x3=reg256#10
  4716. # asm 2: vmovupd 1120(<input_0=%rdi),>x3=%ymm9
  4717. vmovupd 1120(%rdi),%ymm9
  4718. # qhasm: x4 = mem256[ input_0 + 1152 ]
  4719. # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
  4720. # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
  4721. vmovupd 1152(%rdi),%ymm10
  4722. # qhasm: x5 = mem256[ input_0 + 1184 ]
  4723. # asm 1: vmovupd 1184(<input_0=int64#1),>x5=reg256#12
  4724. # asm 2: vmovupd 1184(<input_0=%rdi),>x5=%ymm11
  4725. vmovupd 1184(%rdi),%ymm11
  4726. # qhasm: x6 = mem256[ input_0 + 1216 ]
  4727. # asm 1: vmovupd 1216(<input_0=int64#1),>x6=reg256#13
  4728. # asm 2: vmovupd 1216(<input_0=%rdi),>x6=%ymm12
  4729. vmovupd 1216(%rdi),%ymm12
  4730. # qhasm: x7 = mem256[ input_0 + 1248 ]
  4731. # asm 1: vmovupd 1248(<input_0=int64#1),>x7=reg256#14
  4732. # asm 2: vmovupd 1248(<input_0=%rdi),>x7=%ymm13
  4733. vmovupd 1248(%rdi),%ymm13
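# Block at input_0 + 1024 .. input_0 + 1248: same exchange network,
# beginning with the 4-bit (mask0/mask1) pass.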
  4734. # qhasm: v00 = x0 & mask0
  4735. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  4736. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  4737. vpand %ymm6,%ymm0,%ymm14
  4738. # qhasm: v10 = x4 & mask0
  4739. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  4740. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  4741. vpand %ymm10,%ymm0,%ymm15
  4742. # qhasm: 4x v10 <<= 4
  4743. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4744. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4745. vpsllq $4,%ymm15,%ymm15
  4746. # qhasm: v01 = x0 & mask1
  4747. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  4748. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  4749. vpand %ymm6,%ymm1,%ymm6
  4750. # qhasm: v11 = x4 & mask1
  4751. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  4752. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  4753. vpand %ymm10,%ymm1,%ymm10
  4754. # qhasm: 4x v01 unsigned>>= 4
  4755. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  4756. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  4757. vpsrlq $4,%ymm6,%ymm6
  4758. # qhasm: x0 = v00 | v10
  4759. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  4760. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  4761. vpor %ymm14,%ymm15,%ymm14
  4762. # qhasm: x4 = v01 | v11
  4763. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  4764. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  4765. vpor %ymm6,%ymm10,%ymm6
  4766. # qhasm: v00 = x1 & mask0
  4767. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  4768. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  4769. vpand %ymm7,%ymm0,%ymm10
  4770. # qhasm: v10 = x5 & mask0
  4771. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  4772. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  4773. vpand %ymm11,%ymm0,%ymm15
  4774. # qhasm: 4x v10 <<= 4
  4775. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4776. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4777. vpsllq $4,%ymm15,%ymm15
  4778. # qhasm: v01 = x1 & mask1
  4779. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  4780. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  4781. vpand %ymm7,%ymm1,%ymm7
  4782. # qhasm: v11 = x5 & mask1
  4783. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  4784. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  4785. vpand %ymm11,%ymm1,%ymm11
  4786. # qhasm: 4x v01 unsigned>>= 4
  4787. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  4788. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  4789. vpsrlq $4,%ymm7,%ymm7
  4790. # qhasm: x1 = v00 | v10
  4791. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  4792. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  4793. vpor %ymm10,%ymm15,%ymm10
  4794. # qhasm: x5 = v01 | v11
  4795. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  4796. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  4797. vpor %ymm7,%ymm11,%ymm7
  4798. # qhasm: v00 = x2 & mask0
  4799. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  4800. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  4801. vpand %ymm8,%ymm0,%ymm11
  4802. # qhasm: v10 = x6 & mask0
  4803. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  4804. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  4805. vpand %ymm12,%ymm0,%ymm15
  4806. # qhasm: 4x v10 <<= 4
  4807. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4808. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4809. vpsllq $4,%ymm15,%ymm15
  4810. # qhasm: v01 = x2 & mask1
  4811. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  4812. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  4813. vpand %ymm8,%ymm1,%ymm8
  4814. # qhasm: v11 = x6 & mask1
  4815. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  4816. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  4817. vpand %ymm12,%ymm1,%ymm12
  4818. # qhasm: 4x v01 unsigned>>= 4
  4819. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  4820. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  4821. vpsrlq $4,%ymm8,%ymm8
  4822. # qhasm: x2 = v00 | v10
  4823. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  4824. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  4825. vpor %ymm11,%ymm15,%ymm11
  4826. # qhasm: x6 = v01 | v11
  4827. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  4828. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  4829. vpor %ymm8,%ymm12,%ymm8
  4830. # qhasm: v00 = x3 & mask0
  4831. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  4832. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  4833. vpand %ymm9,%ymm0,%ymm12
  4834. # qhasm: v10 = x7 & mask0
  4835. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  4836. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  4837. vpand %ymm13,%ymm0,%ymm15
  4838. # qhasm: 4x v10 <<= 4
  4839. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  4840. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  4841. vpsllq $4,%ymm15,%ymm15
  4842. # qhasm: v01 = x3 & mask1
  4843. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  4844. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  4845. vpand %ymm9,%ymm1,%ymm9
  4846. # qhasm: v11 = x7 & mask1
  4847. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  4848. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  4849. vpand %ymm13,%ymm1,%ymm13
  4850. # qhasm: 4x v01 unsigned>>= 4
  4851. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  4852. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  4853. vpsrlq $4,%ymm9,%ymm9
  4854. # qhasm: x3 = v00 | v10
  4855. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  4856. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  4857. vpor %ymm12,%ymm15,%ymm12
  4858. # qhasm: x7 = v01 | v11
  4859. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  4860. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  4861. vpor %ymm9,%ymm13,%ymm9
  4862. # qhasm: v00 = x0 & mask2
  4863. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  4864. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  4865. vpand %ymm14,%ymm2,%ymm13
  4866. # qhasm: v10 = x2 & mask2
  4867. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  4868. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  4869. vpand %ymm11,%ymm2,%ymm15
  4870. # qhasm: 4x v10 <<= 2
  4871. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4872. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4873. vpsllq $2,%ymm15,%ymm15
  4874. # qhasm: v01 = x0 & mask3
  4875. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  4876. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  4877. vpand %ymm14,%ymm3,%ymm14
  4878. # qhasm: v11 = x2 & mask3
  4879. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  4880. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  4881. vpand %ymm11,%ymm3,%ymm11
  4882. # qhasm: 4x v01 unsigned>>= 2
  4883. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  4884. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  4885. vpsrlq $2,%ymm14,%ymm14
  4886. # qhasm: x0 = v00 | v10
  4887. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  4888. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  4889. vpor %ymm13,%ymm15,%ymm13
  4890. # qhasm: x2 = v01 | v11
  4891. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  4892. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  4893. vpor %ymm14,%ymm11,%ymm11
  4894. # qhasm: v00 = x1 & mask2
  4895. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  4896. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  4897. vpand %ymm10,%ymm2,%ymm14
  4898. # qhasm: v10 = x3 & mask2
  4899. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  4900. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  4901. vpand %ymm12,%ymm2,%ymm15
  4902. # qhasm: 4x v10 <<= 2
  4903. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4904. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4905. vpsllq $2,%ymm15,%ymm15
  4906. # qhasm: v01 = x1 & mask3
  4907. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  4908. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  4909. vpand %ymm10,%ymm3,%ymm10
  4910. # qhasm: v11 = x3 & mask3
  4911. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  4912. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  4913. vpand %ymm12,%ymm3,%ymm12
  4914. # qhasm: 4x v01 unsigned>>= 2
  4915. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  4916. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  4917. vpsrlq $2,%ymm10,%ymm10
  4918. # qhasm: x1 = v00 | v10
  4919. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  4920. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  4921. vpor %ymm14,%ymm15,%ymm14
  4922. # qhasm: x3 = v01 | v11
  4923. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  4924. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  4925. vpor %ymm10,%ymm12,%ymm10
  4926. # qhasm: v00 = x4 & mask2
  4927. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  4928. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  4929. vpand %ymm6,%ymm2,%ymm12
  4930. # qhasm: v10 = x6 & mask2
  4931. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  4932. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  4933. vpand %ymm8,%ymm2,%ymm15
  4934. # qhasm: 4x v10 <<= 2
  4935. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4936. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4937. vpsllq $2,%ymm15,%ymm15
  4938. # qhasm: v01 = x4 & mask3
  4939. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  4940. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  4941. vpand %ymm6,%ymm3,%ymm6
  4942. # qhasm: v11 = x6 & mask3
  4943. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  4944. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  4945. vpand %ymm8,%ymm3,%ymm8
  4946. # qhasm: 4x v01 unsigned>>= 2
  4947. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  4948. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  4949. vpsrlq $2,%ymm6,%ymm6
  4950. # qhasm: x4 = v00 | v10
  4951. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  4952. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  4953. vpor %ymm12,%ymm15,%ymm12
  4954. # qhasm: x6 = v01 | v11
  4955. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  4956. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  4957. vpor %ymm6,%ymm8,%ymm6
  4958. # qhasm: v00 = x5 & mask2
  4959. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  4960. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  4961. vpand %ymm7,%ymm2,%ymm8
  4962. # qhasm: v10 = x7 & mask2
  4963. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  4964. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  4965. vpand %ymm9,%ymm2,%ymm15
  4966. # qhasm: 4x v10 <<= 2
  4967. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  4968. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  4969. vpsllq $2,%ymm15,%ymm15
  4970. # qhasm: v01 = x5 & mask3
  4971. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  4972. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  4973. vpand %ymm7,%ymm3,%ymm7
  4974. # qhasm: v11 = x7 & mask3
  4975. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  4976. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  4977. vpand %ymm9,%ymm3,%ymm9
  4978. # qhasm: 4x v01 unsigned>>= 2
  4979. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  4980. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  4981. vpsrlq $2,%ymm7,%ymm7
  4982. # qhasm: x5 = v00 | v10
  4983. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  4984. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  4985. vpor %ymm8,%ymm15,%ymm8
  4986. # qhasm: x7 = v01 | v11
  4987. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  4988. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  4989. vpor %ymm7,%ymm9,%ymm7
  4990. # qhasm: v00 = x0 & mask4
  4991. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  4992. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  4993. vpand %ymm13,%ymm4,%ymm9
  4994. # qhasm: v10 = x1 & mask4
  4995. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  4996. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  4997. vpand %ymm14,%ymm4,%ymm15
  4998. # qhasm: 4x v10 <<= 1
  4999. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5000. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5001. vpsllq $1,%ymm15,%ymm15
  5002. # qhasm: v01 = x0 & mask5
  5003. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  5004. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  5005. vpand %ymm13,%ymm5,%ymm13
  5006. # qhasm: v11 = x1 & mask5
  5007. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  5008. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  5009. vpand %ymm14,%ymm5,%ymm14
  5010. # qhasm: 4x v01 unsigned>>= 1
  5011. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  5012. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  5013. vpsrlq $1,%ymm13,%ymm13
  5014. # qhasm: x0 = v00 | v10
  5015. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  5016. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  5017. vpor %ymm9,%ymm15,%ymm9
  5018. # qhasm: x1 = v01 | v11
  5019. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  5020. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  5021. vpor %ymm13,%ymm14,%ymm13
  5022. # qhasm: v00 = x2 & mask4
  5023. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  5024. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  5025. vpand %ymm11,%ymm4,%ymm14
  5026. # qhasm: v10 = x3 & mask4
  5027. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  5028. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  5029. vpand %ymm10,%ymm4,%ymm15
  5030. # qhasm: 4x v10 <<= 1
  5031. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5032. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5033. vpsllq $1,%ymm15,%ymm15
  5034. # qhasm: v01 = x2 & mask5
  5035. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  5036. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  5037. vpand %ymm11,%ymm5,%ymm11
  5038. # qhasm: v11 = x3 & mask5
  5039. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  5040. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  5041. vpand %ymm10,%ymm5,%ymm10
  5042. # qhasm: 4x v01 unsigned>>= 1
  5043. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  5044. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  5045. vpsrlq $1,%ymm11,%ymm11
  5046. # qhasm: x2 = v00 | v10
  5047. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  5048. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  5049. vpor %ymm14,%ymm15,%ymm14
  5050. # qhasm: x3 = v01 | v11
  5051. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  5052. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  5053. vpor %ymm11,%ymm10,%ymm10
  5054. # qhasm: v00 = x4 & mask4
  5055. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  5056. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  5057. vpand %ymm12,%ymm4,%ymm11
  5058. # qhasm: v10 = x5 & mask4
  5059. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  5060. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  5061. vpand %ymm8,%ymm4,%ymm15
  5062. # qhasm: 4x v10 <<= 1
  5063. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5064. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5065. vpsllq $1,%ymm15,%ymm15
  5066. # qhasm: v01 = x4 & mask5
  5067. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  5068. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  5069. vpand %ymm12,%ymm5,%ymm12
  5070. # qhasm: v11 = x5 & mask5
  5071. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  5072. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  5073. vpand %ymm8,%ymm5,%ymm8
  5074. # qhasm: 4x v01 unsigned>>= 1
  5075. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  5076. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  5077. vpsrlq $1,%ymm12,%ymm12
  5078. # qhasm: x4 = v00 | v10
  5079. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  5080. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  5081. vpor %ymm11,%ymm15,%ymm11
  5082. # qhasm: x5 = v01 | v11
  5083. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  5084. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  5085. vpor %ymm12,%ymm8,%ymm8
  5086. # qhasm: v00 = x6 & mask4
  5087. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  5088. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  5089. vpand %ymm6,%ymm4,%ymm12
  5090. # qhasm: v10 = x7 & mask4
  5091. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  5092. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  5093. vpand %ymm7,%ymm4,%ymm15
  5094. # qhasm: 4x v10 <<= 1
  5095. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5096. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5097. vpsllq $1,%ymm15,%ymm15
  5098. # qhasm: v01 = x6 & mask5
  5099. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  5100. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  5101. vpand %ymm6,%ymm5,%ymm6
  5102. # qhasm: v11 = x7 & mask5
  5103. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  5104. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  5105. vpand %ymm7,%ymm5,%ymm7
  5106. # qhasm: 4x v01 unsigned>>= 1
  5107. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  5108. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  5109. vpsrlq $1,%ymm6,%ymm6
  5110. # qhasm: x6 = v00 | v10
  5111. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  5112. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  5113. vpor %ymm12,%ymm15,%ymm12
  5114. # qhasm: x7 = v01 | v11
  5115. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  5116. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  5117. vpor %ymm6,%ymm7,%ymm6
  5118. # qhasm: mem256[ input_0 + 1024 ] = x0
  5119. # asm 1: vmovupd <x0=reg256#10,1024(<input_0=int64#1)
  5120. # asm 2: vmovupd <x0=%ymm9,1024(<input_0=%rdi)
  5121. vmovupd %ymm9,1024(%rdi)
  5122. # qhasm: mem256[ input_0 + 1056 ] = x1
  5123. # asm 1: vmovupd <x1=reg256#14,1056(<input_0=int64#1)
  5124. # asm 2: vmovupd <x1=%ymm13,1056(<input_0=%rdi)
  5125. vmovupd %ymm13,1056(%rdi)
  5126. # qhasm: mem256[ input_0 + 1088 ] = x2
  5127. # asm 1: vmovupd <x2=reg256#15,1088(<input_0=int64#1)
  5128. # asm 2: vmovupd <x2=%ymm14,1088(<input_0=%rdi)
  5129. vmovupd %ymm14,1088(%rdi)
  5130. # qhasm: mem256[ input_0 + 1120 ] = x3
  5131. # asm 1: vmovupd <x3=reg256#11,1120(<input_0=int64#1)
  5132. # asm 2: vmovupd <x3=%ymm10,1120(<input_0=%rdi)
  5133. vmovupd %ymm10,1120(%rdi)
  5134. # qhasm: mem256[ input_0 + 1152 ] = x4
  5135. # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
  5136. # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
  5137. vmovupd %ymm11,1152(%rdi)
  5138. # qhasm: mem256[ input_0 + 1184 ] = x5
  5139. # asm 1: vmovupd <x5=reg256#9,1184(<input_0=int64#1)
  5140. # asm 2: vmovupd <x5=%ymm8,1184(<input_0=%rdi)
  5141. vmovupd %ymm8,1184(%rdi)
  5142. # qhasm: mem256[ input_0 + 1216 ] = x6
  5143. # asm 1: vmovupd <x6=reg256#13,1216(<input_0=int64#1)
  5144. # asm 2: vmovupd <x6=%ymm12,1216(<input_0=%rdi)
  5145. vmovupd %ymm12,1216(%rdi)
  5146. # qhasm: mem256[ input_0 + 1248 ] = x7
  5147. # asm 1: vmovupd <x7=reg256#7,1248(<input_0=int64#1)
  5148. # asm 2: vmovupd <x7=%ymm6,1248(<input_0=%rdi)
  5149. vmovupd %ymm6,1248(%rdi)
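# The block above is one fully unrolled pass of the in-place bit-matrix
# transpose: eight 256-bit rows are loaded, combined pairwise in three
# butterfly stages, and written back to the same addresses.  The stages
# pair rows at distance 4 (mask0/mask1, shift 4), distance 2
# (mask2/mask3, shift 2) and distance 1 (mask4/mask5, shift 1), and each
# pair (a, b) is recombined as
#
#     a' = (a & m_lo) | ((b & m_lo) << s)
#     b' = ((a & m_hi) >> s) | (b & m_hi)
#
# where (m_lo, m_hi, s) is (mask0, mask1, 4), (mask2, mask3, 2) or
# (mask4, mask5, 1) depending on the stage.  For this to act as a
# transpose step, the MASK5/MASK4/MASK3 constants are expected to select
# alternating 4-, 2- and 1-bit lanes of each 64-bit word; the constants
# themselves are defined outside this file.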
  5150. # qhasm: x0 = mem256[ input_0 + 1280 ]
  5151. # asm 1: vmovupd 1280(<input_0=int64#1),>x0=reg256#7
  5152. # asm 2: vmovupd 1280(<input_0=%rdi),>x0=%ymm6
  5153. vmovupd 1280(%rdi),%ymm6
  5154. # qhasm: x1 = mem256[ input_0 + 1312 ]
  5155. # asm 1: vmovupd 1312(<input_0=int64#1),>x1=reg256#8
  5156. # asm 2: vmovupd 1312(<input_0=%rdi),>x1=%ymm7
  5157. vmovupd 1312(%rdi),%ymm7
  5158. # qhasm: x2 = mem256[ input_0 + 1344 ]
  5159. # asm 1: vmovupd 1344(<input_0=int64#1),>x2=reg256#9
  5160. # asm 2: vmovupd 1344(<input_0=%rdi),>x2=%ymm8
  5161. vmovupd 1344(%rdi),%ymm8
  5162. # qhasm: x3 = mem256[ input_0 + 1376 ]
  5163. # asm 1: vmovupd 1376(<input_0=int64#1),>x3=reg256#10
  5164. # asm 2: vmovupd 1376(<input_0=%rdi),>x3=%ymm9
  5165. vmovupd 1376(%rdi),%ymm9
  5166. # qhasm: x4 = mem256[ input_0 + 1408 ]
  5167. # asm 1: vmovupd 1408(<input_0=int64#1),>x4=reg256#11
  5168. # asm 2: vmovupd 1408(<input_0=%rdi),>x4=%ymm10
  5169. vmovupd 1408(%rdi),%ymm10
  5170. # qhasm: x5 = mem256[ input_0 + 1440 ]
  5171. # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
  5172. # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
  5173. vmovupd 1440(%rdi),%ymm11
  5174. # qhasm: x6 = mem256[ input_0 + 1472 ]
  5175. # asm 1: vmovupd 1472(<input_0=int64#1),>x6=reg256#13
  5176. # asm 2: vmovupd 1472(<input_0=%rdi),>x6=%ymm12
  5177. vmovupd 1472(%rdi),%ymm12
  5178. # qhasm: x7 = mem256[ input_0 + 1504 ]
  5179. # asm 1: vmovupd 1504(<input_0=int64#1),>x7=reg256#14
  5180. # asm 2: vmovupd 1504(<input_0=%rdi),>x7=%ymm13
  5181. vmovupd 1504(%rdi),%ymm13
  5182. # qhasm: v00 = x0 & mask0
  5183. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  5184. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  5185. vpand %ymm6,%ymm0,%ymm14
  5186. # qhasm: v10 = x4 & mask0
  5187. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  5188. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  5189. vpand %ymm10,%ymm0,%ymm15
  5190. # qhasm: 4x v10 <<= 4
  5191. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5192. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5193. vpsllq $4,%ymm15,%ymm15
  5194. # qhasm: v01 = x0 & mask1
  5195. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  5196. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  5197. vpand %ymm6,%ymm1,%ymm6
  5198. # qhasm: v11 = x4 & mask1
  5199. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  5200. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  5201. vpand %ymm10,%ymm1,%ymm10
  5202. # qhasm: 4x v01 unsigned>>= 4
  5203. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  5204. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  5205. vpsrlq $4,%ymm6,%ymm6
  5206. # qhasm: x0 = v00 | v10
  5207. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  5208. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  5209. vpor %ymm14,%ymm15,%ymm14
  5210. # qhasm: x4 = v01 | v11
  5211. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  5212. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  5213. vpor %ymm6,%ymm10,%ymm6
  5214. # qhasm: v00 = x1 & mask0
  5215. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  5216. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  5217. vpand %ymm7,%ymm0,%ymm10
  5218. # qhasm: v10 = x5 & mask0
  5219. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  5220. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  5221. vpand %ymm11,%ymm0,%ymm15
  5222. # qhasm: 4x v10 <<= 4
  5223. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5224. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5225. vpsllq $4,%ymm15,%ymm15
  5226. # qhasm: v01 = x1 & mask1
  5227. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  5228. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  5229. vpand %ymm7,%ymm1,%ymm7
  5230. # qhasm: v11 = x5 & mask1
  5231. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  5232. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  5233. vpand %ymm11,%ymm1,%ymm11
  5234. # qhasm: 4x v01 unsigned>>= 4
  5235. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  5236. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  5237. vpsrlq $4,%ymm7,%ymm7
  5238. # qhasm: x1 = v00 | v10
  5239. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  5240. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  5241. vpor %ymm10,%ymm15,%ymm10
  5242. # qhasm: x5 = v01 | v11
  5243. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  5244. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  5245. vpor %ymm7,%ymm11,%ymm7
  5246. # qhasm: v00 = x2 & mask0
  5247. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  5248. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  5249. vpand %ymm8,%ymm0,%ymm11
  5250. # qhasm: v10 = x6 & mask0
  5251. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  5252. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  5253. vpand %ymm12,%ymm0,%ymm15
  5254. # qhasm: 4x v10 <<= 4
  5255. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5256. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5257. vpsllq $4,%ymm15,%ymm15
  5258. # qhasm: v01 = x2 & mask1
  5259. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  5260. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  5261. vpand %ymm8,%ymm1,%ymm8
  5262. # qhasm: v11 = x6 & mask1
  5263. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  5264. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  5265. vpand %ymm12,%ymm1,%ymm12
  5266. # qhasm: 4x v01 unsigned>>= 4
  5267. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  5268. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  5269. vpsrlq $4,%ymm8,%ymm8
  5270. # qhasm: x2 = v00 | v10
  5271. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  5272. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  5273. vpor %ymm11,%ymm15,%ymm11
  5274. # qhasm: x6 = v01 | v11
  5275. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  5276. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  5277. vpor %ymm8,%ymm12,%ymm8
  5278. # qhasm: v00 = x3 & mask0
  5279. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  5280. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  5281. vpand %ymm9,%ymm0,%ymm12
  5282. # qhasm: v10 = x7 & mask0
  5283. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  5284. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  5285. vpand %ymm13,%ymm0,%ymm15
  5286. # qhasm: 4x v10 <<= 4
  5287. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5288. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5289. vpsllq $4,%ymm15,%ymm15
  5290. # qhasm: v01 = x3 & mask1
  5291. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  5292. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  5293. vpand %ymm9,%ymm1,%ymm9
  5294. # qhasm: v11 = x7 & mask1
  5295. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  5296. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  5297. vpand %ymm13,%ymm1,%ymm13
  5298. # qhasm: 4x v01 unsigned>>= 4
  5299. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  5300. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  5301. vpsrlq $4,%ymm9,%ymm9
  5302. # qhasm: x3 = v00 | v10
  5303. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  5304. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  5305. vpor %ymm12,%ymm15,%ymm12
  5306. # qhasm: x7 = v01 | v11
  5307. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  5308. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  5309. vpor %ymm9,%ymm13,%ymm9
  5310. # qhasm: v00 = x0 & mask2
  5311. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  5312. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  5313. vpand %ymm14,%ymm2,%ymm13
  5314. # qhasm: v10 = x2 & mask2
  5315. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  5316. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  5317. vpand %ymm11,%ymm2,%ymm15
  5318. # qhasm: 4x v10 <<= 2
  5319. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5320. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5321. vpsllq $2,%ymm15,%ymm15
  5322. # qhasm: v01 = x0 & mask3
  5323. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  5324. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  5325. vpand %ymm14,%ymm3,%ymm14
  5326. # qhasm: v11 = x2 & mask3
  5327. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  5328. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  5329. vpand %ymm11,%ymm3,%ymm11
  5330. # qhasm: 4x v01 unsigned>>= 2
  5331. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  5332. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  5333. vpsrlq $2,%ymm14,%ymm14
  5334. # qhasm: x0 = v00 | v10
  5335. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  5336. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  5337. vpor %ymm13,%ymm15,%ymm13
  5338. # qhasm: x2 = v01 | v11
  5339. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  5340. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  5341. vpor %ymm14,%ymm11,%ymm11
  5342. # qhasm: v00 = x1 & mask2
  5343. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  5344. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  5345. vpand %ymm10,%ymm2,%ymm14
  5346. # qhasm: v10 = x3 & mask2
  5347. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  5348. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  5349. vpand %ymm12,%ymm2,%ymm15
  5350. # qhasm: 4x v10 <<= 2
  5351. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5352. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5353. vpsllq $2,%ymm15,%ymm15
  5354. # qhasm: v01 = x1 & mask3
  5355. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  5356. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  5357. vpand %ymm10,%ymm3,%ymm10
  5358. # qhasm: v11 = x3 & mask3
  5359. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  5360. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  5361. vpand %ymm12,%ymm3,%ymm12
  5362. # qhasm: 4x v01 unsigned>>= 2
  5363. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  5364. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  5365. vpsrlq $2,%ymm10,%ymm10
  5366. # qhasm: x1 = v00 | v10
  5367. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  5368. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  5369. vpor %ymm14,%ymm15,%ymm14
  5370. # qhasm: x3 = v01 | v11
  5371. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  5372. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  5373. vpor %ymm10,%ymm12,%ymm10
  5374. # qhasm: v00 = x4 & mask2
  5375. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  5376. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  5377. vpand %ymm6,%ymm2,%ymm12
  5378. # qhasm: v10 = x6 & mask2
  5379. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  5380. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  5381. vpand %ymm8,%ymm2,%ymm15
  5382. # qhasm: 4x v10 <<= 2
  5383. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5384. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5385. vpsllq $2,%ymm15,%ymm15
  5386. # qhasm: v01 = x4 & mask3
  5387. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  5388. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  5389. vpand %ymm6,%ymm3,%ymm6
  5390. # qhasm: v11 = x6 & mask3
  5391. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  5392. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  5393. vpand %ymm8,%ymm3,%ymm8
  5394. # qhasm: 4x v01 unsigned>>= 2
  5395. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  5396. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  5397. vpsrlq $2,%ymm6,%ymm6
  5398. # qhasm: x4 = v00 | v10
  5399. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  5400. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  5401. vpor %ymm12,%ymm15,%ymm12
  5402. # qhasm: x6 = v01 | v11
  5403. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  5404. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  5405. vpor %ymm6,%ymm8,%ymm6
  5406. # qhasm: v00 = x5 & mask2
  5407. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  5408. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  5409. vpand %ymm7,%ymm2,%ymm8
  5410. # qhasm: v10 = x7 & mask2
  5411. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  5412. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  5413. vpand %ymm9,%ymm2,%ymm15
  5414. # qhasm: 4x v10 <<= 2
  5415. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5416. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5417. vpsllq $2,%ymm15,%ymm15
  5418. # qhasm: v01 = x5 & mask3
  5419. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  5420. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  5421. vpand %ymm7,%ymm3,%ymm7
  5422. # qhasm: v11 = x7 & mask3
  5423. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  5424. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  5425. vpand %ymm9,%ymm3,%ymm9
  5426. # qhasm: 4x v01 unsigned>>= 2
  5427. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  5428. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  5429. vpsrlq $2,%ymm7,%ymm7
  5430. # qhasm: x5 = v00 | v10
  5431. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  5432. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  5433. vpor %ymm8,%ymm15,%ymm8
  5434. # qhasm: x7 = v01 | v11
  5435. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  5436. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  5437. vpor %ymm7,%ymm9,%ymm7
  5438. # qhasm: v00 = x0 & mask4
  5439. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  5440. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  5441. vpand %ymm13,%ymm4,%ymm9
  5442. # qhasm: v10 = x1 & mask4
  5443. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  5444. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  5445. vpand %ymm14,%ymm4,%ymm15
  5446. # qhasm: 4x v10 <<= 1
  5447. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5448. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5449. vpsllq $1,%ymm15,%ymm15
  5450. # qhasm: v01 = x0 & mask5
  5451. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  5452. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  5453. vpand %ymm13,%ymm5,%ymm13
  5454. # qhasm: v11 = x1 & mask5
  5455. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  5456. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  5457. vpand %ymm14,%ymm5,%ymm14
  5458. # qhasm: 4x v01 unsigned>>= 1
  5459. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  5460. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  5461. vpsrlq $1,%ymm13,%ymm13
  5462. # qhasm: x0 = v00 | v10
  5463. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  5464. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  5465. vpor %ymm9,%ymm15,%ymm9
  5466. # qhasm: x1 = v01 | v11
  5467. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  5468. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  5469. vpor %ymm13,%ymm14,%ymm13
  5470. # qhasm: v00 = x2 & mask4
  5471. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  5472. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  5473. vpand %ymm11,%ymm4,%ymm14
  5474. # qhasm: v10 = x3 & mask4
  5475. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  5476. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  5477. vpand %ymm10,%ymm4,%ymm15
  5478. # qhasm: 4x v10 <<= 1
  5479. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5480. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5481. vpsllq $1,%ymm15,%ymm15
  5482. # qhasm: v01 = x2 & mask5
  5483. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  5484. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  5485. vpand %ymm11,%ymm5,%ymm11
  5486. # qhasm: v11 = x3 & mask5
  5487. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  5488. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  5489. vpand %ymm10,%ymm5,%ymm10
  5490. # qhasm: 4x v01 unsigned>>= 1
  5491. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  5492. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  5493. vpsrlq $1,%ymm11,%ymm11
  5494. # qhasm: x2 = v00 | v10
  5495. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  5496. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  5497. vpor %ymm14,%ymm15,%ymm14
  5498. # qhasm: x3 = v01 | v11
  5499. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  5500. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  5501. vpor %ymm11,%ymm10,%ymm10
  5502. # qhasm: v00 = x4 & mask4
  5503. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  5504. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  5505. vpand %ymm12,%ymm4,%ymm11
  5506. # qhasm: v10 = x5 & mask4
  5507. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  5508. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  5509. vpand %ymm8,%ymm4,%ymm15
  5510. # qhasm: 4x v10 <<= 1
  5511. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5512. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5513. vpsllq $1,%ymm15,%ymm15
  5514. # qhasm: v01 = x4 & mask5
  5515. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  5516. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  5517. vpand %ymm12,%ymm5,%ymm12
  5518. # qhasm: v11 = x5 & mask5
  5519. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  5520. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  5521. vpand %ymm8,%ymm5,%ymm8
  5522. # qhasm: 4x v01 unsigned>>= 1
  5523. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  5524. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  5525. vpsrlq $1,%ymm12,%ymm12
  5526. # qhasm: x4 = v00 | v10
  5527. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  5528. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  5529. vpor %ymm11,%ymm15,%ymm11
  5530. # qhasm: x5 = v01 | v11
  5531. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  5532. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  5533. vpor %ymm12,%ymm8,%ymm8
  5534. # qhasm: v00 = x6 & mask4
  5535. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  5536. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  5537. vpand %ymm6,%ymm4,%ymm12
  5538. # qhasm: v10 = x7 & mask4
  5539. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  5540. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  5541. vpand %ymm7,%ymm4,%ymm15
  5542. # qhasm: 4x v10 <<= 1
  5543. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5544. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5545. vpsllq $1,%ymm15,%ymm15
  5546. # qhasm: v01 = x6 & mask5
  5547. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  5548. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  5549. vpand %ymm6,%ymm5,%ymm6
  5550. # qhasm: v11 = x7 & mask5
  5551. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  5552. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  5553. vpand %ymm7,%ymm5,%ymm7
  5554. # qhasm: 4x v01 unsigned>>= 1
  5555. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  5556. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  5557. vpsrlq $1,%ymm6,%ymm6
  5558. # qhasm: x6 = v00 | v10
  5559. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  5560. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  5561. vpor %ymm12,%ymm15,%ymm12
  5562. # qhasm: x7 = v01 | v11
  5563. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  5564. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  5565. vpor %ymm6,%ymm7,%ymm6
  5566. # qhasm: mem256[ input_0 + 1280 ] = x0
  5567. # asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1)
  5568. # asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi)
  5569. vmovupd %ymm9,1280(%rdi)
  5570. # qhasm: mem256[ input_0 + 1312 ] = x1
  5571. # asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1)
  5572. # asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi)
  5573. vmovupd %ymm13,1312(%rdi)
  5574. # qhasm: mem256[ input_0 + 1344 ] = x2
  5575. # asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1)
  5576. # asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi)
  5577. vmovupd %ymm14,1344(%rdi)
  5578. # qhasm: mem256[ input_0 + 1376 ] = x3
  5579. # asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1)
  5580. # asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi)
  5581. vmovupd %ymm10,1376(%rdi)
  5582. # qhasm: mem256[ input_0 + 1408 ] = x4
  5583. # asm 1: vmovupd <x4=reg256#12,1408(<input_0=int64#1)
  5584. # asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi)
  5585. vmovupd %ymm11,1408(%rdi)
  5586. # qhasm: mem256[ input_0 + 1440 ] = x5
  5587. # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1)
  5588. # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi)
  5589. vmovupd %ymm8,1440(%rdi)
  5590. # qhasm: mem256[ input_0 + 1472 ] = x6
  5591. # asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1)
  5592. # asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi)
  5593. vmovupd %ymm12,1472(%rdi)
  5594. # qhasm: mem256[ input_0 + 1504 ] = x7
  5595. # asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1)
  5596. # asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi)
  5597. vmovupd %ymm6,1504(%rdi)
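# Register usage in these unrolled blocks follows a fixed pattern within
# this section: the left-shifted term v10 always lives in %ymm15
# (reg256#16), the only register used purely as scratch; v01 and v11
# overwrite the registers of the two input rows, v00 takes a currently
# free register, and the outputs inherit the registers of v00 and v01,
# so the eight rows migrate through %ymm6..%ymm14 from stage to stage
# while %ymm0..%ymm5 keep the six masks.  The "4x" in the qhasm shift
# annotations records that vpsllq/vpsrlq act on four independent 64-bit
# lanes, i.e. the transpose operates on 64-bit words within each 256-bit
# vector.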
  5598. # qhasm: x0 = mem256[ input_0 + 1536 ]
  5599. # asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7
  5600. # asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6
  5601. vmovupd 1536(%rdi),%ymm6
  5602. # qhasm: x1 = mem256[ input_0 + 1568 ]
  5603. # asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8
  5604. # asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7
  5605. vmovupd 1568(%rdi),%ymm7
  5606. # qhasm: x2 = mem256[ input_0 + 1600 ]
  5607. # asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9
  5608. # asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8
  5609. vmovupd 1600(%rdi),%ymm8
  5610. # qhasm: x3 = mem256[ input_0 + 1632 ]
  5611. # asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10
  5612. # asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9
  5613. vmovupd 1632(%rdi),%ymm9
  5614. # qhasm: x4 = mem256[ input_0 + 1664 ]
  5615. # asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11
  5616. # asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10
  5617. vmovupd 1664(%rdi),%ymm10
  5618. # qhasm: x5 = mem256[ input_0 + 1696 ]
  5619. # asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12
  5620. # asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11
  5621. vmovupd 1696(%rdi),%ymm11
  5622. # qhasm: x6 = mem256[ input_0 + 1728 ]
  5623. # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13
  5624. # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12
  5625. vmovupd 1728(%rdi),%ymm12
  5626. # qhasm: x7 = mem256[ input_0 + 1760 ]
  5627. # asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14
  5628. # asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13
  5629. vmovupd 1760(%rdi),%ymm13
  5630. # qhasm: v00 = x0 & mask0
  5631. # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
  5632. # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
  5633. vpand %ymm6,%ymm0,%ymm14
  5634. # qhasm: v10 = x4 & mask0
  5635. # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
  5636. # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
  5637. vpand %ymm10,%ymm0,%ymm15
  5638. # qhasm: 4x v10 <<= 4
  5639. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5640. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5641. vpsllq $4,%ymm15,%ymm15
  5642. # qhasm: v01 = x0 & mask1
  5643. # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
  5644. # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
  5645. vpand %ymm6,%ymm1,%ymm6
  5646. # qhasm: v11 = x4 & mask1
  5647. # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
  5648. # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
  5649. vpand %ymm10,%ymm1,%ymm10
  5650. # qhasm: 4x v01 unsigned>>= 4
  5651. # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
  5652. # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
  5653. vpsrlq $4,%ymm6,%ymm6
  5654. # qhasm: x0 = v00 | v10
  5655. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
  5656. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
  5657. vpor %ymm14,%ymm15,%ymm14
  5658. # qhasm: x4 = v01 | v11
  5659. # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
  5660. # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
  5661. vpor %ymm6,%ymm10,%ymm6
  5662. # qhasm: v00 = x1 & mask0
  5663. # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
  5664. # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
  5665. vpand %ymm7,%ymm0,%ymm10
  5666. # qhasm: v10 = x5 & mask0
  5667. # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
  5668. # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
  5669. vpand %ymm11,%ymm0,%ymm15
  5670. # qhasm: 4x v10 <<= 4
  5671. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5672. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5673. vpsllq $4,%ymm15,%ymm15
  5674. # qhasm: v01 = x1 & mask1
  5675. # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
  5676. # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
  5677. vpand %ymm7,%ymm1,%ymm7
  5678. # qhasm: v11 = x5 & mask1
  5679. # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
  5680. # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
  5681. vpand %ymm11,%ymm1,%ymm11
  5682. # qhasm: 4x v01 unsigned>>= 4
  5683. # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
  5684. # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
  5685. vpsrlq $4,%ymm7,%ymm7
  5686. # qhasm: x1 = v00 | v10
  5687. # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
  5688. # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
  5689. vpor %ymm10,%ymm15,%ymm10
  5690. # qhasm: x5 = v01 | v11
  5691. # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
  5692. # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
  5693. vpor %ymm7,%ymm11,%ymm7
  5694. # qhasm: v00 = x2 & mask0
  5695. # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
  5696. # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
  5697. vpand %ymm8,%ymm0,%ymm11
  5698. # qhasm: v10 = x6 & mask0
  5699. # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
  5700. # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
  5701. vpand %ymm12,%ymm0,%ymm15
  5702. # qhasm: 4x v10 <<= 4
  5703. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5704. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5705. vpsllq $4,%ymm15,%ymm15
  5706. # qhasm: v01 = x2 & mask1
  5707. # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
  5708. # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
  5709. vpand %ymm8,%ymm1,%ymm8
  5710. # qhasm: v11 = x6 & mask1
  5711. # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
  5712. # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
  5713. vpand %ymm12,%ymm1,%ymm12
  5714. # qhasm: 4x v01 unsigned>>= 4
  5715. # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
  5716. # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
  5717. vpsrlq $4,%ymm8,%ymm8
  5718. # qhasm: x2 = v00 | v10
  5719. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
  5720. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
  5721. vpor %ymm11,%ymm15,%ymm11
  5722. # qhasm: x6 = v01 | v11
  5723. # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
  5724. # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
  5725. vpor %ymm8,%ymm12,%ymm8
  5726. # qhasm: v00 = x3 & mask0
  5727. # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
  5728. # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
  5729. vpand %ymm9,%ymm0,%ymm12
  5730. # qhasm: v10 = x7 & mask0
  5731. # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
  5732. # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
  5733. vpand %ymm13,%ymm0,%ymm15
  5734. # qhasm: 4x v10 <<= 4
  5735. # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
  5736. # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
  5737. vpsllq $4,%ymm15,%ymm15
  5738. # qhasm: v01 = x3 & mask1
  5739. # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
  5740. # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
  5741. vpand %ymm9,%ymm1,%ymm9
  5742. # qhasm: v11 = x7 & mask1
  5743. # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
  5744. # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
  5745. vpand %ymm13,%ymm1,%ymm13
  5746. # qhasm: 4x v01 unsigned>>= 4
  5747. # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
  5748. # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
  5749. vpsrlq $4,%ymm9,%ymm9
  5750. # qhasm: x3 = v00 | v10
  5751. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
  5752. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
  5753. vpor %ymm12,%ymm15,%ymm12
  5754. # qhasm: x7 = v01 | v11
  5755. # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
  5756. # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
  5757. vpor %ymm9,%ymm13,%ymm9
  5758. # qhasm: v00 = x0 & mask2
  5759. # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
  5760. # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
  5761. vpand %ymm14,%ymm2,%ymm13
  5762. # qhasm: v10 = x2 & mask2
  5763. # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
  5764. # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
  5765. vpand %ymm11,%ymm2,%ymm15
  5766. # qhasm: 4x v10 <<= 2
  5767. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5768. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5769. vpsllq $2,%ymm15,%ymm15
  5770. # qhasm: v01 = x0 & mask3
  5771. # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
  5772. # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
  5773. vpand %ymm14,%ymm3,%ymm14
  5774. # qhasm: v11 = x2 & mask3
  5775. # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
  5776. # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
  5777. vpand %ymm11,%ymm3,%ymm11
  5778. # qhasm: 4x v01 unsigned>>= 2
  5779. # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
  5780. # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
  5781. vpsrlq $2,%ymm14,%ymm14
  5782. # qhasm: x0 = v00 | v10
  5783. # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
  5784. # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
  5785. vpor %ymm13,%ymm15,%ymm13
  5786. # qhasm: x2 = v01 | v11
  5787. # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
  5788. # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
  5789. vpor %ymm14,%ymm11,%ymm11
  5790. # qhasm: v00 = x1 & mask2
  5791. # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
  5792. # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
  5793. vpand %ymm10,%ymm2,%ymm14
  5794. # qhasm: v10 = x3 & mask2
  5795. # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
  5796. # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
  5797. vpand %ymm12,%ymm2,%ymm15
  5798. # qhasm: 4x v10 <<= 2
  5799. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5800. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5801. vpsllq $2,%ymm15,%ymm15
  5802. # qhasm: v01 = x1 & mask3
  5803. # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
  5804. # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
  5805. vpand %ymm10,%ymm3,%ymm10
  5806. # qhasm: v11 = x3 & mask3
  5807. # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
  5808. # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
  5809. vpand %ymm12,%ymm3,%ymm12
  5810. # qhasm: 4x v01 unsigned>>= 2
  5811. # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
  5812. # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
  5813. vpsrlq $2,%ymm10,%ymm10
  5814. # qhasm: x1 = v00 | v10
  5815. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
  5816. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
  5817. vpor %ymm14,%ymm15,%ymm14
  5818. # qhasm: x3 = v01 | v11
  5819. # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
  5820. # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
  5821. vpor %ymm10,%ymm12,%ymm10
  5822. # qhasm: v00 = x4 & mask2
  5823. # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
  5824. # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
  5825. vpand %ymm6,%ymm2,%ymm12
  5826. # qhasm: v10 = x6 & mask2
  5827. # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
  5828. # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
  5829. vpand %ymm8,%ymm2,%ymm15
  5830. # qhasm: 4x v10 <<= 2
  5831. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5832. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5833. vpsllq $2,%ymm15,%ymm15
  5834. # qhasm: v01 = x4 & mask3
  5835. # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
  5836. # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
  5837. vpand %ymm6,%ymm3,%ymm6
  5838. # qhasm: v11 = x6 & mask3
  5839. # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
  5840. # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
  5841. vpand %ymm8,%ymm3,%ymm8
  5842. # qhasm: 4x v01 unsigned>>= 2
  5843. # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
  5844. # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
  5845. vpsrlq $2,%ymm6,%ymm6
  5846. # qhasm: x4 = v00 | v10
  5847. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
  5848. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
  5849. vpor %ymm12,%ymm15,%ymm12
  5850. # qhasm: x6 = v01 | v11
  5851. # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
  5852. # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
  5853. vpor %ymm6,%ymm8,%ymm6
  5854. # qhasm: v00 = x5 & mask2
  5855. # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
  5856. # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
  5857. vpand %ymm7,%ymm2,%ymm8
  5858. # qhasm: v10 = x7 & mask2
  5859. # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
  5860. # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
  5861. vpand %ymm9,%ymm2,%ymm15
  5862. # qhasm: 4x v10 <<= 2
  5863. # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
  5864. # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
  5865. vpsllq $2,%ymm15,%ymm15
  5866. # qhasm: v01 = x5 & mask3
  5867. # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
  5868. # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
  5869. vpand %ymm7,%ymm3,%ymm7
  5870. # qhasm: v11 = x7 & mask3
  5871. # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
  5872. # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
  5873. vpand %ymm9,%ymm3,%ymm9
  5874. # qhasm: 4x v01 unsigned>>= 2
  5875. # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
  5876. # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
  5877. vpsrlq $2,%ymm7,%ymm7
  5878. # qhasm: x5 = v00 | v10
  5879. # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
  5880. # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
  5881. vpor %ymm8,%ymm15,%ymm8
  5882. # qhasm: x7 = v01 | v11
  5883. # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
  5884. # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
  5885. vpor %ymm7,%ymm9,%ymm7
  5886. # qhasm: v00 = x0 & mask4
  5887. # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
  5888. # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
  5889. vpand %ymm13,%ymm4,%ymm9
  5890. # qhasm: v10 = x1 & mask4
  5891. # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
  5892. # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
  5893. vpand %ymm14,%ymm4,%ymm15
  5894. # qhasm: 4x v10 <<= 1
  5895. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5896. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5897. vpsllq $1,%ymm15,%ymm15
  5898. # qhasm: v01 = x0 & mask5
  5899. # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
  5900. # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
  5901. vpand %ymm13,%ymm5,%ymm13
  5902. # qhasm: v11 = x1 & mask5
  5903. # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
  5904. # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
  5905. vpand %ymm14,%ymm5,%ymm14
  5906. # qhasm: 4x v01 unsigned>>= 1
  5907. # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
  5908. # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
  5909. vpsrlq $1,%ymm13,%ymm13
  5910. # qhasm: x0 = v00 | v10
  5911. # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
  5912. # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
  5913. vpor %ymm9,%ymm15,%ymm9
  5914. # qhasm: x1 = v01 | v11
  5915. # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
  5916. # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
  5917. vpor %ymm13,%ymm14,%ymm13
  5918. # qhasm: v00 = x2 & mask4
  5919. # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
  5920. # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
  5921. vpand %ymm11,%ymm4,%ymm14
  5922. # qhasm: v10 = x3 & mask4
  5923. # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
  5924. # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
  5925. vpand %ymm10,%ymm4,%ymm15
  5926. # qhasm: 4x v10 <<= 1
  5927. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5928. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5929. vpsllq $1,%ymm15,%ymm15
  5930. # qhasm: v01 = x2 & mask5
  5931. # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
  5932. # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
  5933. vpand %ymm11,%ymm5,%ymm11
  5934. # qhasm: v11 = x3 & mask5
  5935. # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
  5936. # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
  5937. vpand %ymm10,%ymm5,%ymm10
  5938. # qhasm: 4x v01 unsigned>>= 1
  5939. # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
  5940. # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
  5941. vpsrlq $1,%ymm11,%ymm11
  5942. # qhasm: x2 = v00 | v10
  5943. # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
  5944. # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
  5945. vpor %ymm14,%ymm15,%ymm14
  5946. # qhasm: x3 = v01 | v11
  5947. # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
  5948. # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
  5949. vpor %ymm11,%ymm10,%ymm10
  5950. # qhasm: v00 = x4 & mask4
  5951. # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
  5952. # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
  5953. vpand %ymm12,%ymm4,%ymm11
  5954. # qhasm: v10 = x5 & mask4
  5955. # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
  5956. # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
  5957. vpand %ymm8,%ymm4,%ymm15
  5958. # qhasm: 4x v10 <<= 1
  5959. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5960. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5961. vpsllq $1,%ymm15,%ymm15
  5962. # qhasm: v01 = x4 & mask5
  5963. # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
  5964. # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
  5965. vpand %ymm12,%ymm5,%ymm12
  5966. # qhasm: v11 = x5 & mask5
  5967. # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
  5968. # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
  5969. vpand %ymm8,%ymm5,%ymm8
  5970. # qhasm: 4x v01 unsigned>>= 1
  5971. # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
  5972. # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
  5973. vpsrlq $1,%ymm12,%ymm12
  5974. # qhasm: x4 = v00 | v10
  5975. # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
  5976. # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
  5977. vpor %ymm11,%ymm15,%ymm11
  5978. # qhasm: x5 = v01 | v11
  5979. # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
  5980. # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
  5981. vpor %ymm12,%ymm8,%ymm8
  5982. # qhasm: v00 = x6 & mask4
  5983. # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
  5984. # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
  5985. vpand %ymm6,%ymm4,%ymm12
  5986. # qhasm: v10 = x7 & mask4
  5987. # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
  5988. # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
  5989. vpand %ymm7,%ymm4,%ymm15
  5990. # qhasm: 4x v10 <<= 1
  5991. # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
  5992. # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
  5993. vpsllq $1,%ymm15,%ymm15
  5994. # qhasm: v01 = x6 & mask5
  5995. # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
  5996. # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
  5997. vpand %ymm6,%ymm5,%ymm6
  5998. # qhasm: v11 = x7 & mask5
  5999. # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
  6000. # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
  6001. vpand %ymm7,%ymm5,%ymm7
  6002. # qhasm: 4x v01 unsigned>>= 1
  6003. # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
  6004. # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
  6005. vpsrlq $1,%ymm6,%ymm6
  6006. # qhasm: x6 = v00 | v10
  6007. # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
  6008. # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
  6009. vpor %ymm12,%ymm15,%ymm12
  6010. # qhasm: x7 = v01 | v11
  6011. # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
  6012. # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
  6013. vpor %ymm6,%ymm7,%ymm6
  6014. # qhasm: mem256[ input_0 + 1536 ] = x0
  6015. # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1)
  6016. # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi)
  6017. vmovupd %ymm9,1536(%rdi)
  6018. # qhasm: mem256[ input_0 + 1568 ] = x1
  6019. # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1)
  6020. # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi)
  6021. vmovupd %ymm13,1568(%rdi)
  6022. # qhasm: mem256[ input_0 + 1600 ] = x2
  6023. # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1)
  6024. # asm 2: vmovupd <x2=%ymm14,1600(<input_0=%rdi)
  6025. vmovupd %ymm14,1600(%rdi)
  6026. # qhasm: mem256[ input_0 + 1632 ] = x3
  6027. # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1)
  6028. # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi)
  6029. vmovupd %ymm10,1632(%rdi)
  6030. # qhasm: mem256[ input_0 + 1664 ] = x4
  6031. # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1)
  6032. # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi)
  6033. vmovupd %ymm11,1664(%rdi)
  6034. # qhasm: mem256[ input_0 + 1696 ] = x5
  6035. # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1)
  6036. # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi)
  6037. vmovupd %ymm8,1696(%rdi)
  6038. # qhasm: mem256[ input_0 + 1728 ] = x6
  6039. # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1)
  6040. # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi)
  6041. vmovupd %ymm12,1728(%rdi)
  6042. # qhasm: mem256[ input_0 + 1760 ] = x7
  6043. # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1)
  6044. # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi)
  6045. vmovupd %ymm6,1760(%rdi)
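# In this part of the routine each group of eight rows occupies eight
# consecutive 32-byte vectors, and the groups are processed back to back
# (input_0 + 1024, + 1280, + 1536, + 1792, ...), each one loaded, passed
# through the same three mask/shift stages and stored back in place
# through input_0 (%rdi).  There is no loop: the qhasm source emits the
# code for every group explicitly.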
  6046. # qhasm: x0 = mem256[ input_0 + 1792 ]
  6047. # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7
  6048. # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6
  6049. vmovupd 1792(%rdi),%ymm6
  6050. # qhasm: x1 = mem256[ input_0 + 1824 ]
  6051. # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8
  6052. # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7
  6053. vmovupd 1824(%rdi),%ymm7
  6054. # qhasm: x2 = mem256[ input_0 + 1856 ]
  6055. # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9
  6056. # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8
  6057. vmovupd 1856(%rdi),%ymm8
  6058. # qhasm: x3 = mem256[ input_0 + 1888 ]
  6059. # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10
  6060. # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9
  6061. vmovupd 1888(%rdi),%ymm9
  6062. # qhasm: x4 = mem256[ input_0 + 1920 ]
  6063. # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11
  6064. # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10
  6065. vmovupd 1920(%rdi),%ymm10
  6066. # qhasm: x5 = mem256[ input_0 + 1952 ]
  6067. # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12
  6068. # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11
  6069. vmovupd 1952(%rdi),%ymm11
  6070. # qhasm: x6 = mem256[ input_0 + 1984 ]
  6071. # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13
  6072. # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12
  6073. vmovupd 1984(%rdi),%ymm12
  6074. # qhasm: x7 = mem256[ input_0 + 2016 ]
  6075. # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14
  6076. # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13
  6077. vmovupd 2016(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0
vpand %ymm13,%ymm0,%ymm0
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1
# asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0
vpsllq $4,%ymm0,%ymm0
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
vpand %ymm13,%ymm1,%ymm1
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1
# asm 2: vpor <v00=%ymm12,<v10=%ymm0,>x3=%ymm0
vpor %ymm12,%ymm0,%ymm0
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
vpor %ymm9,%ymm1,%ymm1
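# Second exchange stage for this block: same pattern as above, but with
# mask2/mask3, a shift of 2, and rows two apart paired: (x0,x2), (x1,x3),
# (x4,x6), (x5,x7).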
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
vpand %ymm14,%ymm2,%ymm9
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12
vpand %ymm11,%ymm2,%ymm12
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13
# asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12
vpsllq $2,%ymm12,%ymm12
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13
vpand %ymm14,%ymm3,%ymm13
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13
vpsrlq $2,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
vpor %ymm9,%ymm12,%ymm9
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
vpor %ymm13,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
vpand %ymm10,%ymm2,%ymm12
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14
# asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13
vpand %ymm0,%ymm2,%ymm13
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
vpsllq $2,%ymm13,%ymm13
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
vpand %ymm0,%ymm3,%ymm0
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
vpor %ymm12,%ymm13,%ymm12
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
vpor %ymm10,%ymm0,%ymm0
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
vpand %ymm6,%ymm2,%ymm10
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13
vpand %ymm8,%ymm2,%ymm13
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
vpsllq $2,%ymm13,%ymm13
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
vpor %ymm10,%ymm13,%ymm10
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3
# asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2
vpand %ymm1,%ymm2,%ymm2
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3
# asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2
vpsllq $2,%ymm2,%ymm2
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
vpand %ymm1,%ymm3,%ymm1
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3
# asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2
vpor %ymm8,%ymm2,%ymm2
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
vpor %ymm7,%ymm1,%ymm1
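# Third and final exchange stage: mask4/mask5, a shift of 1, and adjacent
# rows paired: (x0,x1), (x2,x3), (x4,x5), (x6,x7).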
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
vpand %ymm9,%ymm4,%ymm3
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8
# asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7
vpand %ymm12,%ymm4,%ymm7
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8
# asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7
vpsllq $1,%ymm7,%ymm7
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9
# asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8
vpand %ymm9,%ymm5,%ymm8
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
vpand %ymm12,%ymm5,%ymm9
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8
vpsrlq $1,%ymm8,%ymm8
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
# asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
vpor %ymm3,%ymm7,%ymm3
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
# asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
vpor %ymm8,%ymm9,%ymm7
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
vpand %ymm11,%ymm4,%ymm8
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10
# asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9
vpand %ymm0,%ymm4,%ymm9
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10
# asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9
vpsllq $1,%ymm9,%ymm9
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0,%ymm5,%ymm0
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8,%ymm9,%ymm8
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11,%ymm0,%ymm0
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10,%ymm4,%ymm9
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12
# asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11
vpand %ymm2,%ymm4,%ymm11
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12
# asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11
vpsllq $1,%ymm11,%ymm11
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11
# asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10
vpand %ymm10,%ymm5,%ymm10
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2,%ymm5,%ymm2
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10
vpsrlq $1,%ymm10,%ymm10
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9,%ymm11,%ymm9
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10,%ymm2,%ymm2
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10
vpand %ymm6,%ymm4,%ymm10
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5
# asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4
vpand %ymm1,%ymm4,%ymm4
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5
# asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4
vpsllq $1,%ymm4,%ymm4
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1,%ymm5,%ymm1
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5
# asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4
vpor %ymm10,%ymm4,%ymm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6,%ymm1,%ymm1
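# Store the eight recombined rows back over the block they were loaded
# from (offsets 1792..2016).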
# qhasm: mem256[ input_0 + 1792 ] = x0
# asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi)
vmovupd %ymm3,1792(%rdi)
# qhasm: mem256[ input_0 + 1824 ] = x1
# asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi)
vmovupd %ymm7,1824(%rdi)
# qhasm: mem256[ input_0 + 1856 ] = x2
# asm 1: vmovupd <x2=reg256#9,1856(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi)
vmovupd %ymm8,1856(%rdi)
# qhasm: mem256[ input_0 + 1888 ] = x3
# asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi)
vmovupd %ymm0,1888(%rdi)
# qhasm: mem256[ input_0 + 1920 ] = x4
# asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi)
vmovupd %ymm9,1920(%rdi)
# qhasm: mem256[ input_0 + 1952 ] = x5
# asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi)
vmovupd %ymm2,1952(%rdi)
# qhasm: mem256[ input_0 + 1984 ] = x6
# asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi)
vmovupd %ymm4,1984(%rdi)
# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1,2016(%rdi)
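# Epilogue: undo the stack-pointer adjustment kept in %r11 and return.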
# qhasm: return
add %r11,%rsp
ret