# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: int64 b64
# qhasm: int64 synd
# qhasm: int64 addr
# qhasm: int64 c
# qhasm: int64 c_all
# qhasm: int64 row
# qhasm: int64 p
# qhasm: int64 e
# qhasm: int64 s
# qhasm: reg256 pp
# qhasm: reg256 ee
# qhasm: reg256 ss
# qhasm: int64 buf_ptr
# qhasm: stack256 buf

# qhasm: enter syndrome_asm
.p2align 5
.global _PQCLEAN_MCELIECE348864F_AVX_syndrome_asm
.global PQCLEAN_MCELIECE348864F_AVX_syndrome_asm
_PQCLEAN_MCELIECE348864F_AVX_syndrome_asm:
PQCLEAN_MCELIECE348864F_AVX_syndrome_asm:
mov %rsp,%r11
and $31,%r11
add $32,%r11
sub %r11,%rsp

# qhasm: input_1 += 260780
add $260780,%rsi

# qhasm: buf_ptr = &buf
leaq 0(%rsp),%rcx

# qhasm: row = 768
mov $768,%r8

# qhasm: loop:
._loop:

# qhasm: row -= 1
sub $1,%r8

# qhasm: ss = mem256[ input_1 + 0 ]
vmovupd 0(%rsi),%ymm0
# qhasm: ee = mem256[ input_2 + 96 ]
vmovupd 96(%rdx),%ymm1
# qhasm: ss &= ee
vpand %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 32 ]
vmovupd 32(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 128 ]
vmovupd 128(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 64 ]
vmovupd 64(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 160 ]
vmovupd 160(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 96 ]
vmovupd 96(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 192 ]
vmovupd 192(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 128 ]
vmovupd 128(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 224 ]
vmovupd 224(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 160 ]
vmovupd 160(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 256 ]
vmovupd 256(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 192 ]
vmovupd 192(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 288 ]
vmovupd 288(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 224 ]
vmovupd 224(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 320 ]
vmovupd 320(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 256 ]
vmovupd 256(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 352 ]
vmovupd 352(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: pp = mem256[ input_1 + 288 ]
vmovupd 288(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 384 ]
vmovupd 384(%rdx),%ymm2
# qhasm: pp &= ee
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
vpxor %ymm1,%ymm0,%ymm0

# qhasm: buf = ss
vmovapd %ymm0,0(%rsp)
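# The ten 256-bit AND/XOR steps above cover bytes 0..319 of the public-key
# row; the scalar loads below handle the remaining 20 bytes (offsets 320,
# 328 and 336) of the 340-byte row, and their popcounts are combined with
# the popcounts of buf to give the row's parity bit.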
# qhasm: s = *(uint64 *)(input_1 + 320)
movq 320(%rsi),%r9
# qhasm: e = *(uint64 *)(input_2 + 416)
movq 416(%rdx),%rax
# qhasm: s &= e
and %rax,%r9

# qhasm: p = *(uint64 *)(input_1 + 328)
movq 328(%rsi),%rax
# qhasm: e = *(uint64 *)(input_2 + 424)
movq 424(%rdx),%r10
# qhasm: p &= e
and %r10,%rax
# qhasm: s ^= p
xor %rax,%r9

# qhasm: p = *(uint32 *)(input_1 + 336)
movl 336(%rsi),%eax
# qhasm: e = *(uint32 *)(input_2 + 432)
movl 432(%rdx),%r10d
# qhasm: p &= e
and %r10d,%eax
# qhasm: s ^= p
xor %rax,%r9

# qhasm: c_all = count(s)
popcnt %r9, %r9

# qhasm: b64 = mem64[ buf_ptr + 0 ]
movq 0(%rcx),%rax
# qhasm: c = count(b64)
popcnt %rax, %rax
# qhasm: c_all ^= c
xor %rax,%r9

# qhasm: b64 = mem64[ buf_ptr + 8 ]
movq 8(%rcx),%rax
# qhasm: c = count(b64)
popcnt %rax, %rax
# qhasm: c_all ^= c
xor %rax,%r9

# qhasm: b64 = mem64[ buf_ptr + 16 ]
movq 16(%rcx),%rax
# qhasm: c = count(b64)
popcnt %rax, %rax
# qhasm: c_all ^= c
xor %rax,%r9

# qhasm: b64 = mem64[ buf_ptr + 24 ]
movq 24(%rcx),%rax
# qhasm: c = count(b64)
popcnt %rax, %rax
# qhasm: c_all ^= c
xor %rax,%r9

# qhasm: addr = row
mov %r8,%rax
# qhasm: (uint64) addr >>= 3
shr $3,%rax
# qhasm: addr += input_0
add %rdi,%rax

# qhasm: synd = *(uint8 *)(addr + 0)
movzbq 0(%rax),%r10
# qhasm: synd <<= 1
shl $1,%r10
# qhasm: c_all &= 1
and $1,%r9
# qhasm: synd |= c_all
or %r9,%r10
# qhasm: *(uint8 *)(addr + 0) = synd
movb %r10b,0(%rax)

# qhasm: input_1 -= 340
sub $340,%rsi

# qhasm: =? row - 0
cmp $0,%r8
# qhasm: goto loop if !=
jne ._loop

# qhasm: ss = mem256[ input_0 + 0 ]
vmovupd 0(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 0 ]
vmovupd 0(%rdx),%ymm1
# qhasm: ss ^= ee
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 0 ] = ss
vmovupd %ymm0,0(%rdi)

# qhasm: ss = mem256[ input_0 + 32 ]
vmovupd 32(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 32 ]
vmovupd 32(%rdx),%ymm1
# qhasm: ss ^= ee
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 32 ] = ss
vmovupd %ymm0,32(%rdi)

# qhasm: ss = mem256[ input_0 + 64 ]
vmovupd 64(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 64 ]
vmovupd 64(%rdx),%ymm1
# qhasm: ss ^= ee
vpxor %ymm1,%ymm0,%ymm0
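# The three 256-bit XORs above fold the first 96 bytes of the error vector
# (input_2) into the first 96 bytes of the output syndrome (input_0).
#
# Rough C sketch of what the routine computes (not part of the qhasm
# output; the names pk, e and s stand for input_1's original value,
# input_2 and input_0, and the 340-byte row size is inferred from the
# offsets and the 260780 = 767 * 340 constant used above):
#
#   for (size_t i = 768; i-- > 0; ) {
#       const unsigned char *r = pk + 340 * i;        /* one matrix row   */
#       unsigned char b = 0;
#       for (size_t j = 0; j < 340; j++) b ^= r[j] & e[96 + j];
#       b ^= b >> 4; b ^= b >> 2; b ^= b >> 1;        /* parity of r & e  */
#       s[i >> 3] = (unsigned char)((s[i >> 3] << 1) | (b & 1));
#   }
#   for (size_t j = 0; j < 96; j++) s[j] ^= e[j];     /* identity columns */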