# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg128 a0
# qhasm: reg128 a1
# qhasm: reg128 a2
# qhasm: reg128 a3
# qhasm: reg128 a4
# qhasm: reg128 a5
# qhasm: reg128 a6
# qhasm: reg128 a7
# qhasm: reg128 a8
# qhasm: reg128 a9
# qhasm: reg128 a10
# qhasm: reg128 a11
# qhasm: reg128 b0
# qhasm: reg128 b1
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 r5
# qhasm: reg128 r6
# qhasm: reg128 r7
# qhasm: reg128 r8
# qhasm: reg128 r9
# qhasm: reg128 r10
# qhasm: reg128 r11
# qhasm: reg128 r12
# qhasm: reg128 r13
# qhasm: reg128 r14
# qhasm: reg128 r15
# qhasm: reg128 r16
# qhasm: reg128 r17
# qhasm: reg128 r18
# qhasm: reg128 r19
# qhasm: reg128 r20
# qhasm: reg128 r21
# qhasm: reg128 r22
# qhasm: reg128 r

# qhasm: enter vec128_mul_asm
.p2align 5
.global _PQCLEAN_MCELIECE348864F_SSE_vec128_mul_asm
.global PQCLEAN_MCELIECE348864F_SSE_vec128_mul_asm
_PQCLEAN_MCELIECE348864F_SSE_vec128_mul_asm:
PQCLEAN_MCELIECE348864F_SSE_vec128_mul_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

# input_0 = out (%rdi), input_1 = a (%rsi), input_2 = b (%rdx).
# Schoolbook product r0..r22 of the 12 bitsliced coefficients of a and b,
# folded modulo x^12 + x^3 + 1 as the rows are accumulated.

# qhasm: b0 = mem128[ input_2 + 0 ]
movdqu 0(%rdx),%xmm0

# qhasm: a11 = mem128[ input_1 + 176 ]
movdqu 176(%rsi),%xmm1
vpand %xmm0,%xmm1,%xmm2        # qhasm: r11 = a11 & b0
vpand 16(%rdx),%xmm1,%xmm3     # qhasm: r12 = a11 & mem128[input_2 + 16]
vpand 32(%rdx),%xmm1,%xmm4     # qhasm: r13 = a11 & mem128[input_2 + 32]
vpand 48(%rdx),%xmm1,%xmm5     # qhasm: r14 = a11 & mem128[input_2 + 48]
vpand 64(%rdx),%xmm1,%xmm6     # qhasm: r15 = a11 & mem128[input_2 + 64]
vpand 80(%rdx),%xmm1,%xmm7     # qhasm: r16 = a11 & mem128[input_2 + 80]
vpand 96(%rdx),%xmm1,%xmm8     # qhasm: r17 = a11 & mem128[input_2 + 96]
vpand 112(%rdx),%xmm1,%xmm9    # qhasm: r18 = a11 & mem128[input_2 + 112]
vpand 128(%rdx),%xmm1,%xmm10   # qhasm: r19 = a11 & mem128[input_2 + 128]
vpand 144(%rdx),%xmm1,%xmm11   # qhasm: r20 = a11 & mem128[input_2 + 144]
vpand 160(%rdx),%xmm1,%xmm12   # qhasm: r21 = a11 & mem128[input_2 + 160]
vpand 176(%rdx),%xmm1,%xmm1    # qhasm: r22 = a11 & mem128[input_2 + 176]
pxor %xmm1,%xmm4               # qhasm: r13 ^= r22
movdqa %xmm1,%xmm1             # qhasm: r10 = r22  (register rename; no-op)

# qhasm: a10 = mem128[ input_1 + 160 ]
movdqu 160(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a10 & b0
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r15 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r16 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r17 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r18 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r19 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r20 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm12             # qhasm: r21 ^= r
pxor %xmm12,%xmm3              # qhasm: r12 ^= r21
movdqa %xmm12,%xmm12           # qhasm: r9 = r21

# qhasm: a9 = mem128[ input_1 + 144 ]
movdqu 144(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a9 & b0
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r15 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r16 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r17 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r18 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r19 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm11             # qhasm: r20 ^= r
pxor %xmm11,%xmm2              # qhasm: r11 ^= r20
movdqa %xmm11,%xmm11           # qhasm: r8 = r20

# qhasm: a8 = mem128[ input_1 + 128 ]
movdqu 128(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a8 & b0
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r15 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r16 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r17 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r18 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm10             # qhasm: r19 ^= r
pxor %xmm10,%xmm1              # qhasm: r10 ^= r19
movdqa %xmm10,%xmm10           # qhasm: r7 = r19

# qhasm: a7 = mem128[ input_1 + 112 ]
movdqu 112(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a7 & b0
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r15 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r16 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r17 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm9              # qhasm: r18 ^= r
pxor %xmm9,%xmm12              # qhasm: r9 ^= r18
movdqa %xmm9,%xmm9             # qhasm: r6 = r18

# qhasm: a6 = mem128[ input_1 + 96 ]
movdqu 96(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a6 & b0
pxor %xmm14,%xmm9              # qhasm: r6 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r15 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r16 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm8              # qhasm: r17 ^= r
pxor %xmm8,%xmm11              # qhasm: r8 ^= r17
movdqa %xmm8,%xmm8             # qhasm: r5 = r17

# qhasm: a5 = mem128[ input_1 + 80 ]
movdqu 80(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a5 & b0
pxor %xmm14,%xmm8              # qhasm: r5 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r6 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r15 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm7              # qhasm: r16 ^= r
pxor %xmm7,%xmm10              # qhasm: r7 ^= r16
movdqa %xmm7,%xmm7             # qhasm: r4 = r16

# qhasm: a4 = mem128[ input_1 + 64 ]
movdqu 64(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a4 & b0
pxor %xmm14,%xmm7              # qhasm: r4 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r5 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r6 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r14 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm6              # qhasm: r15 ^= r
pxor %xmm6,%xmm9               # qhasm: r6 ^= r15
movdqa %xmm6,%xmm6             # qhasm: r3 = r15

# qhasm: a3 = mem128[ input_1 + 48 ]
movdqu 48(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a3 & b0
pxor %xmm14,%xmm6              # qhasm: r3 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r4 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r5 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r6 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm4              # qhasm: r13 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm5              # qhasm: r14 ^= r
pxor %xmm5,%xmm8               # qhasm: r5 ^= r14
movdqa %xmm5,%xmm5             # qhasm: r2 = r14

# qhasm: a2 = mem128[ input_1 + 32 ]
movdqu 32(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a2 & b0
pxor %xmm14,%xmm5              # qhasm: r2 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r3 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r4 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r5 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r6 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm3              # qhasm: r12 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm4              # qhasm: r13 ^= r
pxor %xmm4,%xmm7               # qhasm: r4 ^= r13
movdqa %xmm4,%xmm4             # qhasm: r1 = r13

# qhasm: a1 = mem128[ input_1 + 16 ]
movdqu 16(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm14      # qhasm: r = a1 & b0
pxor %xmm14,%xmm4              # qhasm: r1 ^= r
vpand 16(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm5              # qhasm: r2 ^= r
vpand 32(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm6              # qhasm: r3 ^= r
vpand 48(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm7              # qhasm: r4 ^= r
vpand 64(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm8              # qhasm: r5 ^= r
vpand 80(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm9              # qhasm: r6 ^= r
vpand 96(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm10             # qhasm: r7 ^= r
vpand 112(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm11             # qhasm: r8 ^= r
vpand 128(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm12             # qhasm: r9 ^= r
vpand 144(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm1              # qhasm: r10 ^= r
vpand 160(%rdx),%xmm13,%xmm14
pxor %xmm14,%xmm2              # qhasm: r11 ^= r
vpand 176(%rdx),%xmm13,%xmm13
pxor %xmm13,%xmm3              # qhasm: r12 ^= r
pxor %xmm3,%xmm6               # qhasm: r3 ^= r12
movdqa %xmm3,%xmm3             # qhasm: r0 = r12

# qhasm: a0 = mem128[ input_1 + 0 ]
movdqu 0(%rsi),%xmm13
vpand %xmm0,%xmm13,%xmm0       # qhasm: r = a0 & b0  (b0 dead; reuse %xmm0)
pxor %xmm0,%xmm3               # qhasm: r0 ^= r
vpand 16(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm4               # qhasm: r1 ^= r
vpand 32(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm5               # qhasm: r2 ^= r
vpand 48(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm6               # qhasm: r3 ^= r
vpand 64(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm7               # qhasm: r4 ^= r
vpand 80(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm8               # qhasm: r5 ^= r
vpand 96(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm9               # qhasm: r6 ^= r
vpand 112(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm10              # qhasm: r7 ^= r
vpand 128(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm11              # qhasm: r8 ^= r
vpand 144(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm12              # qhasm: r9 ^= r
vpand 160(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm1               # qhasm: r10 ^= r
vpand 176(%rdx),%xmm13,%xmm0
pxor %xmm0,%xmm2               # qhasm: r11 ^= r
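/*
  For reference, the qhasm-generated code above computes the product of two
  12-coefficient polynomials whose GF(2) coefficients are bitsliced across
  128-bit lanes, reduced modulo the field polynomial x^12 + x^3 + 1 (so each
  of the 128 bit positions carries an independent GF(2^12) multiplication).
  A minimal C sketch of the same schoolbook multiply-and-fold; the types and
  helper names below are illustrative stand-ins, not this library's API:

      #include <stdint.h>

      typedef struct { uint64_t w[2]; } vec128;         // stand-in for __m128i

      static vec128 v_and(vec128 a, vec128 b) {
          vec128 r = {{ a.w[0] & b.w[0], a.w[1] & b.w[1] }};
          return r;
      }

      static vec128 v_xor(vec128 a, vec128 b) {
          vec128 r = {{ a.w[0] ^ b.w[0], a.w[1] ^ b.w[1] }};
          return r;
      }

      static void vec128_mul_ref(vec128 *h, const vec128 *f, const vec128 *g) {
          vec128 buf[23] = {{{0, 0}}};                  // 2*12 - 1 partial coeffs
          int i, j;

          for (i = 0; i < 12; i++)                      // schoolbook rows
              for (j = 0; j < 12; j++)
                  buf[i + j] = v_xor(buf[i + j], v_and(f[i], g[j]));

          for (i = 22; i >= 12; i--) {                  // x^i = x^(i-9) + x^(i-12)
              buf[i - 9]  = v_xor(buf[i - 9],  buf[i]);
              buf[i - 12] = v_xor(buf[i - 12], buf[i]);
          }

          for (i = 0; i < 12; i++) h[i] = buf[i];
      }

  The assembly interleaves the fold steps with the schoolbook rows (e.g.
  "r13 ^= r22" immediately after r22 is formed) so that all 23 accumulators
  are never live at once; since every fold is an XOR, the result is the same.
*/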