# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg128 a0
# qhasm: reg128 a1
# qhasm: reg128 a2
# qhasm: reg128 a3
# qhasm: reg128 a4
# qhasm: reg128 a5
# qhasm: reg128 a6
# qhasm: reg128 a7
# qhasm: reg128 a8
# qhasm: reg128 a9
# qhasm: reg128 a10
# qhasm: reg128 a11
# qhasm: reg128 a12
# qhasm: reg128 b0
# qhasm: reg128 b1
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 r5
# qhasm: reg128 r6
# qhasm: reg128 r7
# qhasm: reg128 r8
# qhasm: reg128 r9
# qhasm: reg128 r10
# qhasm: reg128 r11
# qhasm: reg128 r12
# qhasm: reg128 r13
# qhasm: reg128 r14
# qhasm: reg128 r15
# qhasm: reg128 r16
# qhasm: reg128 r17
# qhasm: reg128 r18
# qhasm: reg128 r19
# qhasm: reg128 r20
# qhasm: reg128 r21
# qhasm: reg128 r22
# qhasm: reg128 r23
# qhasm: reg128 r24
# qhasm: reg128 r

# qhasm: enter vec128_mul_asm
.p2align 5
.global _PQCLEAN_MCELIECE6960119_SSE_vec128_mul_asm
.global PQCLEAN_MCELIECE6960119_SSE_vec128_mul_asm
_PQCLEAN_MCELIECE6960119_SSE_vec128_mul_asm:
PQCLEAN_MCELIECE6960119_SSE_vec128_mul_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

# b0 = mem128[ input_2 + 0 ]
movdqu 0(%rdx),%xmm0

# a12 = mem128[ input_1 + 192 ]
movdqu 192(%rsi),%xmm1

# r[12+j] = a12 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm1,%xmm2
vpand 16(%rdx),%xmm1,%xmm3
vpand 32(%rdx),%xmm1,%xmm4
vpand 48(%rdx),%xmm1,%xmm5
vpand 64(%rdx),%xmm1,%xmm6
vpand 80(%rdx),%xmm1,%xmm7
vpand 96(%rdx),%xmm1,%xmm8
vpand 112(%rdx),%xmm1,%xmm9
vpand 128(%rdx),%xmm1,%xmm10
vpand 144(%rdx),%xmm1,%xmm11
vpand 160(%rdx),%xmm1,%xmm12
vpand 176(%rdx),%xmm1,%xmm13
vpand 192(%rdx),%xmm1,%xmm1

# reduce r24: r15 ^= r24, r14 ^= r24, r12 ^= r24, r11 = r24
pxor %xmm1,%xmm5
pxor %xmm1,%xmm4
pxor %xmm1,%xmm2
movdqa %xmm1,%xmm1
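# This routine performs 128 independent GF(2^13) multiplications in
# bitsliced form: each operand is 13 xmm-sized bit-planes (coefficient
# bit k of all 128 field elements lives in plane k), so a field multiply
# becomes AND (coefficient products) plus XOR (carryless additions).
# The block above computes the a12 partial products r12..r24 and already
# folds the completed top term r24 through the field polynomial
# x^13 + x^4 + x^3 + x + 1 (r24 lands in r15, r14, r12, r11).  A scalar
# C model of the whole computation, with hypothetical names and uint64_t
# planes standing in for the 128-bit ones:
#
#   void vec_mul_model(uint64_t h[13], const uint64_t f[13],
#                      const uint64_t g[13]) {
#       uint64_t buf[25] = {0};
#       for (int i = 0; i < 13; i++)       /* carryless schoolbook */
#           for (int j = 0; j < 13; j++)
#               buf[i + j] ^= f[i] & g[j];
#       for (int t = 24; t >= 13; t--) {   /* reduce mod x^13+x^4+x^3+x+1 */
#           buf[t - 9]  ^= buf[t];         /* x^13 -> x^4 */
#           buf[t - 10] ^= buf[t];         /* x^13 -> x^3 */
#           buf[t - 12] ^= buf[t];         /* x^13 -> x   */
#           buf[t - 13] ^= buf[t];         /* x^13 -> 1   */
#       }
#       for (int i = 0; i < 13; i++) h[i] = buf[i];
#   }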
# a11 = mem128[ input_1 + 176 ]
movdqu 176(%rsi),%xmm14

# r[11+j] ^= a11 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm13

# reduce r23: r14 ^= r23, r13 ^= r23, r11 ^= r23, r10 = r23
pxor %xmm13,%xmm4
pxor %xmm13,%xmm3
pxor %xmm13,%xmm1
movdqa %xmm13,%xmm13

# a10 = mem128[ input_1 + 160 ]
movdqu 160(%rsi),%xmm14

# r[10+j] ^= a10 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm12

# reduce r22: r13 ^= r22, r12 ^= r22, r10 ^= r22, r9 = r22
pxor %xmm12,%xmm3
pxor %xmm12,%xmm2
pxor %xmm12,%xmm13
movdqa %xmm12,%xmm12

# a9 = mem128[ input_1 + 144 ]
movdqu 144(%rsi),%xmm14

# r[9+j] ^= a9 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm11

# reduce r21: r12 ^= r21, r11 ^= r21, r9 ^= r21, r8 = r21
pxor %xmm11,%xmm2
pxor %xmm11,%xmm1
pxor %xmm11,%xmm12
movdqa %xmm11,%xmm11

# a8 = mem128[ input_1 + 128 ]
movdqu 128(%rsi),%xmm14

# r[8+j] ^= a8 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm10

# reduce r20: r11 ^= r20, r10 ^= r20, r8 ^= r20, r7 = r20
pxor %xmm10,%xmm1
pxor %xmm10,%xmm13
pxor %xmm10,%xmm11
movdqa %xmm10,%xmm10

# a7 = mem128[ input_1 + 112 ]
movdqu 112(%rsi),%xmm14

# r[7+j] ^= a7 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm9

# reduce r19: r10 ^= r19, r9 ^= r19, r7 ^= r19, r6 = r19
pxor %xmm9,%xmm13
pxor %xmm9,%xmm12
pxor %xmm9,%xmm10
movdqa %xmm9,%xmm9

# a6 = mem128[ input_1 + 96 ]
movdqu 96(%rsi),%xmm14

# r[6+j] ^= a6 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm8

# reduce r18: r9 ^= r18, r8 ^= r18, r6 ^= r18, r5 = r18
pxor %xmm8,%xmm12
pxor %xmm8,%xmm11
pxor %xmm8,%xmm9
movdqa %xmm8,%xmm8

# a5 = mem128[ input_1 + 80 ]
movdqu 80(%rsi),%xmm14

# r[5+j] ^= a5 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm7

# reduce r17: r8 ^= r17, r7 ^= r17, r5 ^= r17, r4 = r17
pxor %xmm7,%xmm11
pxor %xmm7,%xmm10
pxor %xmm7,%xmm8
movdqa %xmm7,%xmm7
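# Scheduling note: limbs are processed from a12 down to a0, and the
# reduction is interleaved with the product scan.  Once limb a[i] has
# been accumulated, the top term r[i+12] can receive no further
# contributions, so it is folded immediately and its xmm register is
# renamed (the no-op movdqa above) to hold the fresh low term r[i-1].
# In the C model from the earlier comment, this ordering would be:
#
#   for (int i = 12; i >= 0; i--) {
#       for (int j = 0; j < 13; j++)
#           buf[i + j] ^= f[i] & g[j];
#       if (i > 0) {                       /* buf[i+12] is final now */
#           int t = i + 12;
#           buf[t - 9]  ^= buf[t];
#           buf[t - 10] ^= buf[t];
#           buf[t - 12] ^= buf[t];
#           buf[t - 13] ^= buf[t];
#       }
#   }
#
# This keeps all 25 partial terms within the 16 xmm registers: terms
# r[k] and r[k+13] are never live at the same time, so each pair shares
# one register.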
# a4 = mem128[ input_1 + 64 ]
movdqu 64(%rsi),%xmm14

# r[4+j] ^= a4 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm6

# reduce r16: r7 ^= r16, r6 ^= r16, r4 ^= r16, r3 = r16
pxor %xmm6,%xmm10
pxor %xmm6,%xmm9
pxor %xmm6,%xmm7
movdqa %xmm6,%xmm6

# a3 = mem128[ input_1 + 48 ]
movdqu 48(%rsi),%xmm14

# r[3+j] ^= a3 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm5

# reduce r15: r6 ^= r15, r5 ^= r15, r3 ^= r15, r2 = r15
pxor %xmm5,%xmm9
pxor %xmm5,%xmm8
pxor %xmm5,%xmm6
movdqa %xmm5,%xmm5

# a2 = mem128[ input_1 + 32 ]
movdqu 32(%rsi),%xmm14

# r[2+j] ^= a2 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm3
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm4

# reduce r14: r5 ^= r14, r4 ^= r14, r2 ^= r14, r1 = r14
pxor %xmm4,%xmm8
pxor %xmm4,%xmm7
pxor %xmm4,%xmm5
movdqa %xmm4,%xmm4

# a1 = mem128[ input_1 + 16 ]
movdqu 16(%rsi),%xmm14

# r[1+j] ^= a1 & mem128[ input_2 + 16*j ], j = 0..12
vpand %xmm0,%xmm14,%xmm15
pxor %xmm15,%xmm4
vpand 16(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm5
vpand 32(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm6
vpand 48(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm7
vpand 64(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm8
vpand 80(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm9
vpand 96(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm10
vpand 112(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm11
vpand 128(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm12
vpand 144(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm13
vpand 160(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm1
vpand 176(%rdx),%xmm14,%xmm15
pxor %xmm15,%xmm2
vpand 192(%rdx),%xmm14,%xmm14
pxor %xmm14,%xmm3

# reduce r13: r4 ^= r13, r3 ^= r13, r1 ^= r13, r0 = r13
pxor %xmm3,%xmm7
pxor %xmm3,%xmm6
pxor %xmm3,%xmm4
movdqa %xmm3,%xmm3

# a0 = mem128[ input_1 + 0 ]
movdqu 0(%rsi),%xmm14

# r[0+j] ^= a0 & mem128[ input_2 + 16*j ], j = 0..12
# (the temporary r now reuses b0's register, since b0 is dead after j = 0)
vpand %xmm0,%xmm14,%xmm0
pxor %xmm0,%xmm3
vpand 16(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm4
vpand 32(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm5
vpand 48(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm6
vpand 64(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm7
vpand 80(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm8
vpand 96(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm9
vpand 112(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm10
vpand 128(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm11
vpand 144(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm12
vpand 160(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm13
vpand 176(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm1
vpand 192(%rdx),%xmm14,%xmm0
pxor %xmm0,%xmm2
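# The listing is truncated here, before the stores of r0..r12 and the
# epilogue.  Per the SysV AMD64 convention and qhasm's input numbering,
# input_0 = %rdi (result), input_1 = %rsi (operand a), input_2 = %rdx
# (operand b); each operand is 13 consecutive 16-byte bit-planes.  A
# plausible C-side declaration, assuming PQClean's vec128 wrapper type
# (an assumption, not taken from this file):
#
#   extern void PQCLEAN_MCELIECE6960119_SSE_vec128_mul_asm(
#       vec128 *h, const vec128 *f, const vec128 *g);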