Merge pull request #2

83467c5 Support position independent code for amd64-64-24k amd64-51-30k (vtnerd)

Branch: cmake-rewrite
Commit d11e401c4e by luigi1111
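This patch rewrites every absolute-address memory reference in the qhasm-generated amd64-51-30k (and amd64-64-24k) assembly to use RIP-relative addressing. Absolute forms such as movq symbol,%reg are encoded with a 32-bit absolute relocation (R_X86_64_32S), which the linker rejects when producing a shared object or position-independent executable; the symbol(%rip) form uses a PC-relative displacement instead and is PIC-safe. In each hunk below, the old absolute-address instruction is shown immediately before its RIP-relative replacement, while the surrounding qhasm comments are unchanged.

A minimal sketch of the pattern (the same instructions appear in the hunks below):

# before: absolute addressing, needs a 32-bit absolute relocation, not PIC-safe
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdi
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx

# after: RIP-relative addressing, assembles to a PC-relative displacement, PIC-safe
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdi
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx

The operand size, destination register, and loaded value are identical in both forms; only the way the constant's address is computed at link and load time changes.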

@ -1680,27 +1680,27 @@ cmove %rdi,%r11
# qhasm: tt0 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=int64#1
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=%rdi
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdi
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdi
# qhasm: tt1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=int64#4
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=%rcx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: tt2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=int64#5
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=%r8
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: tt3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=int64#10
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=%r12
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: tt4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=int64#11
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=%r13
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r13
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r13
# qhasm: tt0 -= tt2d0
# asm 1: sub <tt2d0=int64#2,<tt0=int64#1

@ -138,7 +138,7 @@ movq 32(%rdi),%r9
# qhasm: two51minus1 = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rax
# qhasm: two51minus19 = two51minus1
# asm 1: mov <two51minus1=int64#7,>two51minus19=int64#8

@ -692,7 +692,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.r0) << 13
# asm 1: shld $13,<r0=int64#5,<mulr01=int64#6

@ -500,7 +500,7 @@ adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: squarer01 = (squarer01.r0) << 13
# asm 1: shld $13,<r0=int64#4,<squarer01=int64#5

@ -495,7 +495,7 @@ adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: squarer01 = (squarer01.r0) << 13
# asm 1: shld $13,<r0=int64#4,<squarer01=int64#5

@ -332,7 +332,7 @@ mov %rdx,%r11
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: x0 = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>x0=int64#10
@ -357,7 +357,7 @@ mov %r8,%r12
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: x1 = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>x1=int64#11
@ -382,7 +382,7 @@ mov %r9,%r13
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: x2 = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>x2=int64#12
@ -407,7 +407,7 @@ mov %rax,%r14
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: x3 = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>x3=int64#13
@ -432,7 +432,7 @@ mov %r10,%r15
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: x4 = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>x4=int64#14
@ -532,7 +532,7 @@ mov %rdx,%r11
# qhasm: t10 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<t10=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<t10=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: rx0 = *(uint64 *) (qp + 0)
# asm 1: movq 0(<qp=int64#4),>rx0=int64#10
@ -557,7 +557,7 @@ mov %r8,%r12
# qhasm: t11 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t11=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t11=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: rx1 = *(uint64 *) (qp + 8)
# asm 1: movq 8(<qp=int64#4),>rx1=int64#11
@ -582,7 +582,7 @@ mov %r9,%r13
# qhasm: t12 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t12=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t12=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: rx2 = *(uint64 *) (qp + 16)
# asm 1: movq 16(<qp=int64#4),>rx2=int64#12
@ -607,7 +607,7 @@ mov %rax,%r14
# qhasm: t13 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t13=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t13=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: rx3 = *(uint64 *) (qp + 24)
# asm 1: movq 24(<qp=int64#4),>rx3=int64#13
@ -632,7 +632,7 @@ mov %r10,%r15
# qhasm: t14 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t14=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t14=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: rx4 = *(uint64 *) (qp + 32)
# asm 1: movq 32(<qp=int64#4),>rx4=int64#14
@ -1237,7 +1237,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#5,<mulr01=int64#6
@ -1987,7 +1987,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#5,<mulr01=int64#6
@ -2202,27 +2202,27 @@ mov %r12,%r15
# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%r8
# qhasm: rx1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: rx2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: rx3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r11
# qhasm: rx4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: ry0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3
@ -2862,7 +2862,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@ -3090,7 +3090,7 @@ imulq $19,%rdx,%rax
movq %rax,96(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: c0 = mulrax
# asm 1: mov <mulrax=int64#7,>c0=int64#5
@ -3118,7 +3118,7 @@ imulq $19,%rdx,%rax
movq %rax,104(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@ -3136,7 +3136,7 @@ adc %rdx,%r9
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@ -3154,7 +3154,7 @@ adc %rdx,%r9
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: c1 = mulrax
# asm 1: mov <mulrax=int64#7,>c1=int64#8
@ -3172,7 +3172,7 @@ mov %rdx,%r11
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: c2 = mulrax
# asm 1: mov <mulrax=int64#7,>c2=int64#10
@ -3190,7 +3190,7 @@ mov %rdx,%r13
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: c3 = mulrax
# asm 1: mov <mulrax=int64#7,>c3=int64#12
@ -3208,7 +3208,7 @@ mov %rdx,%r15
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: c4 = mulrax
# asm 1: mov <mulrax=int64#7,>c4=int64#14
@ -3226,7 +3226,7 @@ mov %rdx,%rbp
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@ -3244,7 +3244,7 @@ adc %rdx,%r11
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@ -3262,7 +3262,7 @@ adc %rdx,%r13
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@ -3280,7 +3280,7 @@ adc %rdx,%r15
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@ -3303,7 +3303,7 @@ movq 64(%rsp),%rdx
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@ -3321,7 +3321,7 @@ adc %rdx,%r9
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@ -3339,7 +3339,7 @@ adc %rdx,%r13
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@ -3357,7 +3357,7 @@ adc %rdx,%r15
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@ -3380,7 +3380,7 @@ movq 72(%rsp),%rdx
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@ -3403,7 +3403,7 @@ movq 72(%rsp),%rdx
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@ -3421,7 +3421,7 @@ adc %rdx,%r11
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@ -3439,7 +3439,7 @@ adc %rdx,%r15
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@ -3457,7 +3457,7 @@ adc %rdx,%rbp
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@ -3475,7 +3475,7 @@ adc %rdx,%r11
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@ -3493,7 +3493,7 @@ adc %rdx,%r13
movq 88(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@ -3511,7 +3511,7 @@ adc %rdx,%rbp
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@ -3529,7 +3529,7 @@ adc %rdx,%r11
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@ -3547,7 +3547,7 @@ adc %rdx,%r13
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@ -3562,7 +3562,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@ -4312,7 +4312,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#5,<mulr01=int64#6
@ -4552,27 +4552,27 @@ mov %r10,%r13
# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%r8
# qhasm: rt1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: rt2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: rt3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: rt4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: rz0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2
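The one-operand mulq instructions that multiply by the precomputed EC2D constants in the hunks above get the same treatment. mulq takes a single memory operand and computes the unsigned 128-bit product rdx:rax = rax * operand, so appending (%rip) changes only how the operand's address is formed; the arithmetic and the implicit registers are untouched. A minimal sketch, mirroring the lines above:

# before: the EC2D constant is addressed absolutely
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0          # rdx:rax = rax * EC2D0
# after: the same constant addressed relative to the instruction pointer
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)    # rdx:rax = rax * EC2D0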

@ -651,7 +651,7 @@ adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: squarer01 = (squarer01.a0) << 13
# asm 1: shld $13,<a0=int64#4,<squarer01=int64#5
@ -1226,7 +1226,7 @@ adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: squarer01 = (squarer01.b0) << 13
# asm 1: shld $13,<b0=int64#4,<squarer01=int64#5
@ -1801,7 +1801,7 @@ adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: squarer01 = (squarer01.c0) << 13
# asm 1: shld $13,<c0=int64#4,<squarer01=int64#5
@ -2041,27 +2041,27 @@ movq %r11,168(%rsp)
# qhasm: d0 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: d1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=int64#4
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=%rcx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: d2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=int64#5
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=%r8
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: d3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=int64#6
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=%r9
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: d4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: e0 = d0
# asm 1: mov <d0=int64#3,>e0=int64#8
@ -2266,27 +2266,27 @@ movq %r14,72(%rdi)
# qhasm: d0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<d0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<d0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: d1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: d2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d2=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d2=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: d3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d3=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d3=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: d4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d4=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d4=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: d0 -= b0_stack
# asm 1: subq <b0_stack=stack64#13,<d0=int64#3
@ -2341,27 +2341,27 @@ movq %rax,112(%rdi)
# qhasm: rz0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P0,<rz0=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P0,<rz0=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_4P0,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_4P0(%rip),%r10
# qhasm: rz1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz1=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz1=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r11
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234(%rip),%r11
# qhasm: rz2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz2=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz2=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234(%rip),%r12
# qhasm: rz3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz3=int64#11
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz3=%r13
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r13
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234(%rip),%r13
# qhasm: rz4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz4=int64#12
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz4=%r14
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r14
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234(%rip),%r14
# qhasm: rz0 -= c0_stack
# asm 1: subq <c0_stack=stack64#18,<rz0=int64#8
@ -2851,7 +2851,7 @@ adc %rdx,%r13
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: squarer01 = (squarer01.rx0) << 13
# asm 1: shld $13,<rx0=int64#2,<squarer01=int64#4
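The REDMASK51 loads in the hunks above fetch the radix-2^51 reduction mask (2^51 - 1, as the qhasm name two51minus1 earlier in this diff indicates) into a scratch register before the carry chain runs, so the switch to RIP-relative addressing adds no memory traffic inside the chain; the mask is still loaded once and reused from the register. A minimal illustrative sketch of how such a mask is applied to one limb, assuming the wide product is split across a low register %r8 and a high register %r9:

movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx   # rdx = 2^51 - 1
shld $13,%r8,%r9    # r9 = (r9 << 13) | (r8 >> 51): the product bits above bit 50
and %rdx,%r8        # r8 = low 51 bits of the limb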

@ -433,27 +433,27 @@ mov %rax,%r14
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: b0 += *(uint64 *) (rp + 0)
# asm 1: addq 0(<rp=int64#1),<b0=int64#8
@ -1093,7 +1093,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#4,<mulr01=int64#5
@ -1843,7 +1843,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.e0) << 13
# asm 1: shld $13,<e0=int64#4,<mulr01=int64#5
@ -2058,27 +2058,27 @@ mov %r11,%r14
# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rcx
# qhasm: e1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: e2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: e3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: e4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r11
# qhasm: h0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
@ -2718,7 +2718,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#4,<mulr01=int64#5
@ -2988,27 +2988,27 @@ mov %r12,%rbp
# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=int64#2
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=%rsi
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rsi
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rsi
# qhasm: f1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rdx
# qhasm: f2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: f3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r11
# qhasm: f4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: g0 += c0_stack
# asm 1: addq <c0_stack=stack64#18,<g0=int64#11
@ -3648,7 +3648,7 @@ adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#2,<mulr01=int64#4
@ -4398,7 +4398,7 @@ adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#2,<mulr01=int64#4
@ -5148,7 +5148,7 @@ adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#2,<mulr01=int64#4
@ -5898,7 +5898,7 @@ adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#2,<mulr01=int64#4

@ -442,27 +442,27 @@ mov %r10,%r15
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: b0 += *(uint64 *) (pp + 0)
# asm 1: addq 0(<pp=int64#2),<b0=int64#9
@ -1102,7 +1102,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#5,<mulr01=int64#6
@ -1852,7 +1852,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.e0) << 13
# asm 1: shld $13,<e0=int64#5,<mulr01=int64#6
@ -2067,27 +2067,27 @@ mov %r12,%r15
# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%r8
# qhasm: e1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: e2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: e3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r11
# qhasm: e4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: h0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
@ -2727,7 +2727,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@ -2997,27 +2997,27 @@ mov %rsi,%rbp
# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: f1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: f2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: f3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: f4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=int64#2
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=%rsi
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rsi
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rsi
# qhasm: g0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<g0=int64#11

@ -688,7 +688,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
@ -1438,7 +1438,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
@ -2188,7 +2188,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5

@ -698,7 +698,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
@ -1448,7 +1448,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
@ -2198,7 +2198,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
@ -2948,7 +2948,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#4,<mulr01=int64#5

@ -756,7 +756,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.x0) << 13
# asm 1: shld $13,<x0=int64#4,<mulr01=int64#5
@ -1506,7 +1506,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.y0) << 13
# asm 1: shld $13,<y0=int64#4,<mulr01=int64#5
@ -1721,27 +1721,27 @@ mov %r11,%r14
# qhasm: ysubx0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<ysubx0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<ysubx0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: ysubx1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: ysubx2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx2=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx2=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: ysubx3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx3=int64#11
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx3=%r13
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r13
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r13
# qhasm: ysubx4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx4=int64#12
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx4=%r14
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r14
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r14
# qhasm: x0 = stackx0
# asm 1: movq <stackx0=stack64#8,>x0=int64#13
@ -2406,7 +2406,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
@ -3156,7 +3156,7 @@ adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.t0) << 13
# asm 1: shld $13,<t0=int64#4,<mulr01=int64#5
@ -3384,7 +3384,7 @@ imulq $19,%rsi,%rax
movq %rax,96(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: t2d0 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d0=int64#2
@ -3412,7 +3412,7 @@ imulq $19,%rdx,%rax
movq %rax,104(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@ -3430,7 +3430,7 @@ adc %rdx,%rcx
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@ -3448,7 +3448,7 @@ adc %rdx,%rcx
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: t2d1 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d1=int64#5
@ -3466,7 +3466,7 @@ mov %rdx,%r9
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: t2d2 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d2=int64#8
@ -3484,7 +3484,7 @@ mov %rdx,%r11
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: t2d3 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d3=int64#10
@ -3502,7 +3502,7 @@ mov %rdx,%r13
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: t2d4 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d4=int64#12
@ -3520,7 +3520,7 @@ mov %rdx,%r15
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@ -3538,7 +3538,7 @@ adc %rdx,%r9
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@ -3556,7 +3556,7 @@ adc %rdx,%r11
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@ -3574,7 +3574,7 @@ adc %rdx,%r13
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@ -3597,7 +3597,7 @@ movq 64(%rsp),%rdx
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@ -3615,7 +3615,7 @@ adc %rdx,%rcx
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@ -3633,7 +3633,7 @@ adc %rdx,%r11
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@ -3651,7 +3651,7 @@ adc %rdx,%r13
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@ -3674,7 +3674,7 @@ movq 72(%rsp),%rdx
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@ -3697,7 +3697,7 @@ movq 72(%rsp),%rdx
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@ -3715,7 +3715,7 @@ adc %rdx,%r9
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@ -3733,7 +3733,7 @@ adc %rdx,%r13
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@ -3751,7 +3751,7 @@ adc %rdx,%r15
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@ -3769,7 +3769,7 @@ adc %rdx,%r9
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@ -3787,7 +3787,7 @@ adc %rdx,%r11
movq 88(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@ -3805,7 +3805,7 @@ adc %rdx,%r15
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@ -3823,7 +3823,7 @@ adc %rdx,%r9
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@ -3841,7 +3841,7 @@ adc %rdx,%r11
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@ -3856,7 +3856,7 @@ adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.t2d0) << 13
# asm 1: shld $13,<t2d0=int64#2,<mulr01=int64#4
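
The hunks above all make the same mechanical change to the amd64-51-30k batch code: every one-operand multiply against a precomputed 2*d limb (EC2D0..EC2D4) and every load of REDMASK51 now addresses the constant relative to %rip instead of through an absolute 32-bit displacement. A minimal standalone sketch of that instruction form follows; the label ec2d_limb and its value are placeholders, not the library's symbols.

# sketch_mulq_pic.s -- illustrative only; assemble with: as sketch_mulq_pic.s -o sketch_mulq_pic.o
.data
ec2d_limb:  .quad 0x000123456789abcd     # placeholder limb value

.text
.globl mul_by_ec2d_limb
mul_by_ec2d_limb:
        movq  %rdi, %rax                 # mulq always takes one factor in %rax
        mulq  ec2d_limb(%rip)            # %rdx:%rax = %rax * ec2d_limb, PIC-safe memory operand
        ret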

@ -352,27 +352,27 @@ mov %r10,%r15
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdx
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: b0 += *(uint64 *) (pp + 0)
# asm 1: addq 0(<pp=int64#2),<b0=int64#9
@ -1012,7 +1012,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#5,<mulr01=int64#6
@ -1762,7 +1762,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#5,<mulr01=int64#6
@ -1977,27 +1977,27 @@ mov %r12,%r15
# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%r8
# qhasm: rx1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: rx2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: rx3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r11
# qhasm: rx4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12
# qhasm: ry0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3
@ -2637,7 +2637,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@ -3387,7 +3387,7 @@ adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rsi
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#5,<mulr01=int64#6
@ -3627,27 +3627,27 @@ mov %r10,%r13
# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%r8
# qhasm: rt1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx
# qhasm: rt2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r9
# qhasm: rt3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rax
# qhasm: rt4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r10
# qhasm: rz0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2
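
Here the same substitution lands on add instructions: before each limb-wise subtraction the code biases the limbs by 2*p, and those adds now read the 2P0 / 2P1234 constants through %rip-relative operands. A small sketch under the usual 51-bit radix assumptions; two_p0 and two_p1234 are local illustrative copies, not the library's crypto_sign_ed25519_amd64_51_30k_batch_2P0 / _2P1234 symbols.

# sketch_bias_2p.s -- illustrative only
.data
two_p0:    .quad 0xFFFFFFFFFFFDA         # 2*(2^51 - 19)
two_p1234: .quad 0xFFFFFFFFFFFFE         # 2*(2^51 - 1)

.text
.globl bias_by_2p
bias_by_2p:                              # %rdi = limb0, %rsi = limb1; biased limb0 returned in %rax
        movq  %rdi, %rax
        addq  two_p0(%rip), %rax         # RIP-relative memory source instead of an absolute address
        addq  two_p1234(%rip), %rsi      # limbs 1..4 all add the same 2P1234 constant
        ret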

@ -652,7 +652,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -670,7 +670,7 @@ mov %r9,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -693,7 +693,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -716,7 +716,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
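
The amd64-64 code keeps field elements in four 64-bit limbs, so after a schoolbook multiply the limbs at 2^256 and above are folded back in by multiplying with 38 (2^256 ≡ 38 mod 2^255-19); the patch only changes how that constant is addressed. A hedged sketch of one fold step, with const_38 standing in for the library's crypto_sign_ed25519_amd64_64_38 symbol:

# sketch_fold_38.s -- illustrative only
.data
const_38:  .quad 38                      # stand-in for crypto_sign_ed25519_amd64_64_38

.text
.globl fold_by_38
fold_by_38:                              # %rdi = a high limb; result %rdx:%rax = limb * 38
        movq  %rdi, %rax
        mulq  const_38(%rip)             # same one-operand mul, now with a %rip-relative operand
        ret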

@ -426,7 +426,7 @@ adc %rdx,%rcx
mov %r11,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: squarer4 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer4=int64#2
@ -444,7 +444,7 @@ mov %r12,%rax
mov %rdx,%r11
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer5 += squarerax
# asm 1: add <squarerax=int64#7,<squarer5=int64#9
@ -467,7 +467,7 @@ mov $0,%r12
adc %rdx,%r12
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#10
@ -490,7 +490,7 @@ mov $0,%rcx
adc %rdx,%rcx
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer7 += squarerax
# asm 1: add <squarerax=int64#7,<squarer7=int64#4

@ -1208,7 +1208,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -1226,7 +1226,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -1249,7 +1249,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -1272,7 +1272,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -1890,7 +1890,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -1908,7 +1908,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -1931,7 +1931,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -1954,7 +1954,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -2742,7 +2742,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -2760,7 +2760,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -2783,7 +2783,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -2806,7 +2806,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -2941,7 +2941,7 @@ movq 56(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D0,%rax
movq crypto_sign_ed25519_amd64_64_EC2D0(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
@ -2961,7 +2961,7 @@ mov %rdx,%r14
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D1,%rax
movq crypto_sign_ed25519_amd64_64_EC2D1(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
@ -2986,7 +2986,7 @@ adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D2,%rax
movq crypto_sign_ed25519_amd64_64_EC2D2(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
@ -3011,7 +3011,7 @@ adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D3,%rax
movq crypto_sign_ed25519_amd64_64_EC2D3(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#10
@ -3036,7 +3036,7 @@ movq 64(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D0,%rax
movq crypto_sign_ed25519_amd64_64_EC2D0(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
@ -3061,7 +3061,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D1,%rax
movq crypto_sign_ed25519_amd64_64_EC2D1(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
@ -3096,7 +3096,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D2,%rax
movq crypto_sign_ed25519_amd64_64_EC2D2(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
@ -3131,7 +3131,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D3,%rax
movq crypto_sign_ed25519_amd64_64_EC2D3(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#10
@ -3166,7 +3166,7 @@ movq 72(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D0,%rax
movq crypto_sign_ed25519_amd64_64_EC2D0(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
@ -3191,7 +3191,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D1,%rax
movq crypto_sign_ed25519_amd64_64_EC2D1(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
@ -3226,7 +3226,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D2,%rax
movq crypto_sign_ed25519_amd64_64_EC2D2(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
@ -3261,7 +3261,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D3,%rax
movq crypto_sign_ed25519_amd64_64_EC2D3(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#10
@ -3296,7 +3296,7 @@ movq 80(%rsp),%r12
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D0,%rax
movq crypto_sign_ed25519_amd64_64_EC2D0(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
@ -3321,7 +3321,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D1,%rax
movq crypto_sign_ed25519_amd64_64_EC2D1(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
@ -3356,7 +3356,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D2,%rax
movq crypto_sign_ed25519_amd64_64_EC2D2(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
@ -3391,7 +3391,7 @@ adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3
# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax
movq crypto_sign_ed25519_amd64_64_EC2D3,%rax
movq crypto_sign_ed25519_amd64_64_EC2D3(%rip),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#10
@ -3424,7 +3424,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -3442,7 +3442,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -3465,7 +3465,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -3488,7 +3488,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -4106,7 +4106,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -4124,7 +4124,7 @@ mov %r9,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -4147,7 +4147,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -4170,7 +4170,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
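
In the EC2D hunks above the roles are reversed: the constant is the value loaded into %rax first (movq ..._EC2D0(%rip),%rax) and the one-operand mul then takes the other factor from a register or stack slot. A sketch of that shape, with ec2d0 as a placeholder for the library's crypto_sign_ed25519_amd64_64_EC2D0:

# sketch_load_then_mul.s -- illustrative only
.data
ec2d0:  .quad 0x0123456789abcdef         # placeholder limb value

.text
.globl mul_ec2d0_by
mul_ec2d0_by:                            # %rdi = other factor; %rdx:%rax = ec2d0 * %rdi
        movq  ec2d0(%rip), %rax          # load the constant through a PC-relative address...
        mulq  %rdi                       # ...then multiply by the factor already in a register
        ret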

@ -576,7 +576,7 @@ adc %rdx,%rcx
mov %r11,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: squarer4 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer4=int64#9
@ -594,7 +594,7 @@ mov %r12,%rax
mov %rdx,%r12
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer5 += squarerax
# asm 1: add <squarerax=int64#7,<squarer5=int64#10
@ -617,7 +617,7 @@ mov $0,%r13
adc %rdx,%r13
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#11
@ -640,7 +640,7 @@ mov $0,%rcx
adc %rdx,%rcx
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer7 += squarerax
# asm 1: add <squarerax=int64#7,<squarer7=int64#4
@ -1043,7 +1043,7 @@ adc %rdx,%rcx
mov %r11,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: squarer4 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer4=int64#9
@ -1061,7 +1061,7 @@ mov %r12,%rax
mov %rdx,%r12
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer5 += squarerax
# asm 1: add <squarerax=int64#7,<squarer5=int64#10
@ -1084,7 +1084,7 @@ mov $0,%r13
adc %rdx,%r13
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#11
@ -1107,7 +1107,7 @@ mov $0,%rcx
adc %rdx,%rcx
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer7 += squarerax
# asm 1: add <squarerax=int64#7,<squarer7=int64#4
@ -1510,7 +1510,7 @@ adc %rdx,%rcx
mov %r11,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: squarer4 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer4=int64#9
@ -1528,7 +1528,7 @@ mov %r12,%rax
mov %rdx,%r12
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer5 += squarerax
# asm 1: add <squarerax=int64#7,<squarer5=int64#10
@ -1551,7 +1551,7 @@ mov $0,%r13
adc %rdx,%r13
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#11
@ -1574,7 +1574,7 @@ mov $0,%rcx
adc %rdx,%rcx
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer7 += squarerax
# asm 1: add <squarerax=int64#7,<squarer7=int64#4
@ -2632,7 +2632,7 @@ adc %rdx,%rsi
mov %r10,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: squarer4 = squarerax
# asm 1: mov <squarerax=int64#7,>squarer4=int64#8
@ -2650,7 +2650,7 @@ mov %r11,%rax
mov %rdx,%r11
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer5 += squarerax
# asm 1: add <squarerax=int64#7,<squarer5=int64#9
@ -2673,7 +2673,7 @@ mov $0,%r12
adc %rdx,%r12
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer6 += squarerax
# asm 1: add <squarerax=int64#7,<squarer6=int64#10
@ -2696,7 +2696,7 @@ mov $0,%rsi
adc %rdx,%rsi
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? squarer7 += squarerax
# asm 1: add <squarerax=int64#7,<squarer7=int64#2

@ -1061,7 +1061,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -1079,7 +1079,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -1102,7 +1102,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -1125,7 +1125,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -1743,7 +1743,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -1761,7 +1761,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -1784,7 +1784,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -1807,7 +1807,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -2595,7 +2595,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -2613,7 +2613,7 @@ mov %r8,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -2636,7 +2636,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -2659,7 +2659,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
@ -3532,7 +3532,7 @@ adc %rdx,%r9
mov %rsi,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -3550,7 +3550,7 @@ mov %rcx,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -3573,7 +3573,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -3596,7 +3596,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
@ -4214,7 +4214,7 @@ adc %rdx,%r9
mov %rsi,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -4232,7 +4232,7 @@ mov %rcx,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -4255,7 +4255,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -4278,7 +4278,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
@ -4896,7 +4896,7 @@ adc %rdx,%r9
mov %rsi,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -4914,7 +4914,7 @@ mov %rcx,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -4937,7 +4937,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -4960,7 +4960,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
@ -5578,7 +5578,7 @@ adc %rdx,%r9
mov %rsi,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -5596,7 +5596,7 @@ mov %rcx,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -5619,7 +5619,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -5642,7 +5642,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6

@ -1070,7 +1070,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -1088,7 +1088,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -1111,7 +1111,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -1134,7 +1134,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -1752,7 +1752,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -1770,7 +1770,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -1793,7 +1793,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -1816,7 +1816,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -2604,7 +2604,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -2622,7 +2622,7 @@ mov %r9,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -2645,7 +2645,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -2668,7 +2668,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8

@ -659,7 +659,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -677,7 +677,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -700,7 +700,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -723,7 +723,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -1341,7 +1341,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -1359,7 +1359,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -1382,7 +1382,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -1405,7 +1405,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -2023,7 +2023,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -2041,7 +2041,7 @@ mov %r8,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -2064,7 +2064,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -2087,7 +2087,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6

@ -667,7 +667,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -685,7 +685,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -708,7 +708,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -731,7 +731,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -1349,7 +1349,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -1367,7 +1367,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -1390,7 +1390,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -1413,7 +1413,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -2031,7 +2031,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
@ -2049,7 +2049,7 @@ mov %r8,%rax
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
@ -2072,7 +2072,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
@ -2095,7 +2095,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
@ -2713,7 +2713,7 @@ adc %rdx,%r10
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -2731,7 +2731,7 @@ mov %r8,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -2754,7 +2754,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -2777,7 +2777,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6

@ -998,7 +998,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -1016,7 +1016,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -1039,7 +1039,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -1062,7 +1062,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -1680,7 +1680,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -1698,7 +1698,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -1721,7 +1721,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -1744,7 +1744,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -2532,7 +2532,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#5
@ -2550,7 +2550,7 @@ mov %r9,%rax
mov %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#6
@ -2573,7 +2573,7 @@ mov $0,%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#8
@ -2596,7 +2596,7 @@ mov $0,%r11
adc %rdx,%r11
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#9
@ -3214,7 +3214,7 @@ adc %rdx,%r11
mov %r8,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
@ -3232,7 +3232,7 @@ mov %r9,%rax
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
@ -3255,7 +3255,7 @@ mov $0,%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
@ -3278,7 +3278,7 @@ mov $0,%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38
mulq crypto_sign_ed25519_amd64_64_38(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
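
Every hunk in this pull request is an instance of one substitution, so a single side-by-side sketch sums it up: the absolute form encodes the symbol's address as a 32-bit sign-extended displacement (an R_X86_64_32S relocation, which the linker rejects when producing a shared object), while the %rip-relative form only needs a PC-relative displacement (R_X86_64_PC32) and therefore stays position independent. redmask51 below is a local illustrative copy of the 51-bit mask, not the library's symbol.

# sketch_pic_vs_abs.s -- illustrative only
.data
redmask51:  .quad 0x7FFFFFFFFFFFF        # 2^51 - 1

.text
.globl load_mask
load_mask:
        # movq  redmask51, %rax          # old form: absolute displacement, breaks shared-object builds
        movq  redmask51(%rip), %rax      # new form: PC-relative, position independent
        ret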
