diff --git a/circom/src/input_user.rs b/circom/src/input_user.rs index 7f63975a0..eb6a54be3 100644 --- a/circom/src/input_user.rs +++ b/circom/src/input_user.rs @@ -322,6 +322,7 @@ mod input_processing { || prime_value == "grumpkin" || prime_value == "pallas" || prime_value == "vesta" + || prime_value == "secq256k1" { Ok(String::from(matches.value_of("prime").unwrap())) } @@ -498,7 +499,7 @@ mod input_processing { .takes_value(true) .default_value("bn128") .display_order(300) - .help("To choose the prime number to use to generate the circuit. Receives the name of the curve (bn128, bls12381, goldilocks, grumpkin, pallas, vesta)"), + .help("To choose the prime number to use to generate the circuit. Receives the name of the curve (bn128, bls12381, goldilocks, grumpkin, pallas, secq256k1, vesta)"), ) .get_matches() } diff --git a/code_producers/src/c_elements/secq256k1/fr.asm b/code_producers/src/c_elements/secq256k1/fr.asm new file mode 100644 index 000000000..5d1736da6 --- /dev/null +++ b/code_producers/src/c_elements/secq256k1/fr.asm @@ -0,0 +1,8794 @@ + + + global Fr_copy + global Fr_copyn + global Fr_add + global Fr_sub + global Fr_neg + global Fr_mul + global Fr_square + global Fr_band + global Fr_bor + global Fr_bxor + global Fr_bnot + global Fr_shl + global Fr_shr + global Fr_eq + global Fr_neq + global Fr_lt + global Fr_gt + global Fr_leq + global Fr_geq + global Fr_land + global Fr_lor + global Fr_lnot + global Fr_toNormal + global Fr_toLongNormal + global Fr_toMontgomery + global Fr_toInt + global Fr_isTrue + global Fr_q + global Fr_R3 + + global Fr_rawCopy + global Fr_rawZero + global Fr_rawSwap + global Fr_rawAdd + global Fr_rawSub + global Fr_rawNeg + global Fr_rawMMul + global Fr_rawMSquare + global Fr_rawToMontgomery + global Fr_rawFromMontgomery + global Fr_rawIsEq + global Fr_rawIsZero + global Fr_rawq + global Fr_rawR3 + + extern Fr_fail + DEFAULT REL + + section .text + + + + + + + + + + + + + + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; copy 
+;;;;;;;;;;;;;;;;;;;;;;
+; Copies a full field element: the type word plus the 4 data limbs
+; (5 quadwords in total).
+; Params:
+;   rsi <= the src
+;   rdi <= the dest
+;
+; Modified registers:
+;   rax
+;;;;;;;;;;;;;;;;;;;;;;;
+Fr_copy:
+
+    mov rax, [rsi + 0]
+    mov [rdi + 0], rax
+
+    mov rax, [rsi + 8]
+    mov [rdi + 8], rax
+
+    mov rax, [rsi + 16]
+    mov [rdi + 16], rax
+
+    mov rax, [rsi + 24]
+    mov [rdi + 24], rax
+
+    mov rax, [rsi + 32]
+    mov [rdi + 32], rax
+
+    ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;
+; rawCopy
+;;;;;;;;;;;;;;;;;;;;;;
+; Copies only the 4 raw data limbs (no type word).
+; Params:
+;   rsi <= the src
+;   rdi <= the dest
+;
+; Modified registers:
+;   rax
+;;;;;;;;;;;;;;;;;;;;;;;
+Fr_rawCopy:
+
+    mov rax, [rsi + 0]
+    mov [rdi + 0], rax
+
+    mov rax, [rsi + 8]
+    mov [rdi + 8], rax
+
+    mov rax, [rsi + 16]
+    mov [rdi + 16], rax
+
+    mov rax, [rsi + 24]
+    mov [rdi + 24], rax
+
+    ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;
+; rawZero
+;;;;;;;;;;;;;;;;;;;;;;
+; Sets the 4 raw data limbs to zero.
+; Params:
+;   rdi <= the dest
+;
+; Modified registers:
+;   rax
+;;;;;;;;;;;;;;;;;;;;;;;
+Fr_rawZero:
+    xor rax, rax
+
+    mov [rdi + 0], rax
+
+    mov [rdi + 8], rax
+
+    mov [rdi + 16], rax
+
+    mov [rdi + 24], rax
+
+    ret
+
+;;;;;;;;;;;;;;;;;;;;;;
+; rawSwap
+;;;;;;;;;;;;;;;;;;;;;;
+; Swaps the 4 raw data limbs of two elements, limb by limb.
+; Params:
+;   rdi <= a
+;   rsi <= b
+;
+; Modified registers:
+;   rax, rcx
+;;;;;;;;;;;;;;;;;;;;;;;
+Fr_rawSwap:
+
+    mov rax, [rsi + 0]
+    mov rcx, [rdi + 0]
+    mov [rdi + 0], rax
+    mov [rsi + 0], rcx      ; fixed: was rbx (never loaded), which corrupted b and lost a's limb
+
+    mov rax, [rsi + 8]
+    mov rcx, [rdi + 8]
+    mov [rdi + 8], rax
+    mov [rsi + 8], rcx      ; fixed: was rbx
+
+    mov rax, [rsi + 16]
+    mov rcx, [rdi + 16]
+    mov [rdi + 16], rax
+    mov [rsi + 16], rcx     ; fixed: was rbx
+
+    mov rax, [rsi + 24]
+    mov rcx, [rdi + 24]
+    mov [rdi + 24], rax
+    mov [rsi + 24], rcx     ; fixed: was rbx
+
+    ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;
+; copy an array of integers
+;;;;;;;;;;;;;;;;;;;;;;
+; Copies rdx field elements (5 quadwords each: type word + 4 limbs)
+; from src to dest.
+; Params:
+;   rsi <= the src
+;   rdi <= the dest
+;   rdx <= number of field elements to copy
+;
+; Modified registers:
+;   rax, rcx, rdx, r8, r9   ; mul clobbers rdx; rsi/rdi are saved and restored
+;;;;;;;;;;;;;;;;;;;;;;;
+Fr_copyn:
+Fr_copyn_loop:
+    mov r8, rsi
+    mov r9, rdi
+    mov rax, 5              ; quadwords per element
+    mul rdx                 ; rax = 5 * count (clobbers rdx)
+    mov rcx, rax
+    cld
+    rep movsq
+    mov rsi, r8
+    mov rdi, r9
+    ret
+
+;;;;;;;;;;;;;;;;;;;;;;
+; 
rawCopyS2L +;;;;;;;;;;;;;;;;;;;;;; +; Convert a 64 bit integer to a long format field element +; Params: +; rsi <= the integer +; rdi <= Pointer to the overwritted element +; +; Nidified registers: +; rax +;;;;;;;;;;;;;;;;;;;;;;; + +rawCopyS2L: + mov al, 0x80 + shl rax, 56 + mov [rdi], rax ; set the result to LONG normal + + cmp rsi, 0 + js u64toLong_adjust_neg + + mov [rdi + 8], rsi + xor rax, rax + + mov [rdi + 16], rax + + mov [rdi + 24], rax + + mov [rdi + 32], rax + + ret + +u64toLong_adjust_neg: + add rsi, [q] ; Set the first digit + mov [rdi + 8], rsi ; + + mov rsi, -1 ; all ones + + mov rax, rsi ; Add to q + adc rax, [q + 8 ] + mov [rdi + 16], rax + + mov rax, rsi ; Add to q + adc rax, [q + 16 ] + mov [rdi + 24], rax + + mov rax, rsi ; Add to q + adc rax, [q + 24 ] + mov [rdi + 32], rax + + ret + +;;;;;;;;;;;;;;;;;;;;;; +; toInt +;;;;;;;;;;;;;;;;;;;;;; +; Convert a 64 bit integer to a long format field element +; Params: +; rsi <= Pointer to the element +; Returs: +; rax <= The value +;;;;;;;;;;;;;;;;;;;;;;; +Fr_toInt: + mov rax, [rdi] + bt rax, 63 + jc Fr_long + movsx rax, eax + ret + +Fr_long: + push rbp + push rsi + push rdx + mov rbp, rsp + bt rax, 62 + jnc Fr_longNormal +Fr_longMontgomery: + + sub rsp, 40 + push rsi + mov rsi, rdi + mov rdi, rsp + call Fr_toNormal + pop rsi + + +Fr_longNormal: + mov rax, [rdi + 8] + mov rcx, rax + shr rcx, 31 + jnz Fr_longNeg + + mov rcx, [rdi + 16] + test rcx, rcx + jnz Fr_longNeg + + mov rcx, [rdi + 24] + test rcx, rcx + jnz Fr_longNeg + + mov rcx, [rdi + 32] + test rcx, rcx + jnz Fr_longNeg + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +Fr_longNeg: + mov rax, [rdi + 8] + sub rax, [q] + jnc Fr_longErr + + mov rcx, [rdi + 16] + sbb rcx, [q + 8] + jnc Fr_longErr + + mov rcx, [rdi + 24] + sbb rcx, [q + 16] + jnc Fr_longErr + + mov rcx, [rdi + 32] + sbb rcx, [q + 24] + jnc Fr_longErr + + mov rcx, rax + sar rcx, 31 + add rcx, 1 + jnz Fr_longErr + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +Fr_longErr: + 
push rdi + mov rdi, 0 + call Fr_fail + pop rdi + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +Fr_rawMMul: + push r15 + push r14 + push r13 + push r12 + mov rcx,rdx + mov r9,[ np ] + xor r10,r10 + +; FirstLoop + mov rdx,[rsi + 0] + mulx rax,r11,[rcx] + mulx r8,r12,[rcx +8] + adcx r12,rax + mulx rax,r13,[rcx +16] + adcx r13,r8 + mulx r8,r14,[rcx +24] + adcx r14,rax + mov r15,r10 + adcx r15,r8 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +; FirstLoop + mov rdx,[rsi + 8] + mov r15,r10 + mulx r8,rax,[rcx +0] + adcx r11,rax + adox r12,r8 + mulx r8,rax,[rcx +8] + adcx r12,rax + adox r13,r8 + mulx r8,rax,[rcx +16] + adcx r13,rax + adox r14,r8 + mulx r8,rax,[rcx +24] + adcx r14,rax + adox r15,r8 + adcx r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +; FirstLoop + mov rdx,[rsi + 16] + mov r15,r10 + mulx r8,rax,[rcx +0] + adcx r11,rax + adox r12,r8 + mulx r8,rax,[rcx +8] + adcx r12,rax + adox r13,r8 + mulx r8,rax,[rcx +16] + adcx r13,rax + adox r14,r8 + mulx r8,rax,[rcx +24] + adcx r14,rax + adox r15,r8 + adcx r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +; FirstLoop + mov rdx,[rsi + 24] + mov r15,r10 + mulx r8,rax,[rcx +0] + adcx r11,rax + adox r12,r8 + mulx r8,rax,[rcx +8] + adcx r12,rax + adox r13,r8 + mulx r8,rax,[rcx +16] + adcx r13,rax + adox r14,r8 
+ mulx r8,rax,[rcx +24] + adcx r14,rax + adox r15,r8 + adcx r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +;comparison + cmp r14,[q + 24] + jc Fr_rawMMul_done + jnz Fr_rawMMul_sq + cmp r13,[q + 16] + jc Fr_rawMMul_done + jnz Fr_rawMMul_sq + cmp r12,[q + 8] + jc Fr_rawMMul_done + jnz Fr_rawMMul_sq + cmp r11,[q + 0] + jc Fr_rawMMul_done + jnz Fr_rawMMul_sq +Fr_rawMMul_sq: + sub r11,[q +0] + sbb r12,[q +8] + sbb r13,[q +16] + sbb r14,[q +24] +Fr_rawMMul_done: + mov [rdi + 0],r11 + mov [rdi + 8],r12 + mov [rdi + 16],r13 + mov [rdi + 24],r14 + pop r12 + pop r13 + pop r14 + pop r15 + ret +Fr_rawMSquare: + push r15 + push r14 + push r13 + push r12 + mov rcx,rdx + mov r9,[ np ] + xor r10,r10 + +; FirstLoop + mov rdx,[rsi + 0] + mulx rax,r11,rdx + mulx r8,r12,[rsi +8] + adcx r12,rax + mulx rax,r13,[rsi +16] + adcx r13,r8 + mulx r8,r14,[rsi +24] + adcx r14,rax + mov r15,r10 + adcx r15,r8 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +; FirstLoop + mov rdx,[rsi + 8] + mov r15,r10 + mulx r8,rax,[rsi +0] + adcx r11,rax + adox r12,r8 + mulx r8,rax,[rsi +8] + adcx r12,rax + adox r13,r8 + mulx r8,rax,[rsi +16] + adcx r13,rax + adox r14,r8 + mulx r8,rax,[rsi +24] + adcx r14,rax + adox r15,r8 + adcx r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +; FirstLoop + mov rdx,[rsi + 16] 
+ mov r15,r10 + mulx r8,rax,[rsi +0] + adcx r11,rax + adox r12,r8 + mulx r8,rax,[rsi +8] + adcx r12,rax + adox r13,r8 + mulx r8,rax,[rsi +16] + adcx r13,rax + adox r14,r8 + mulx r8,rax,[rsi +24] + adcx r14,rax + adox r15,r8 + adcx r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +; FirstLoop + mov rdx,[rsi + 24] + mov r15,r10 + mulx r8,rax,[rsi +0] + adcx r11,rax + adox r12,r8 + mulx r8,rax,[rsi +8] + adcx r12,rax + adox r13,r8 + mulx r8,rax,[rsi +16] + adcx r13,rax + adox r14,r8 + mulx r8,rax,[rsi +24] + adcx r14,rax + adox r15,r8 + adcx r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +;comparison + cmp r14,[q + 24] + jc Fr_rawMSquare_done + jnz Fr_rawMSquare_sq + cmp r13,[q + 16] + jc Fr_rawMSquare_done + jnz Fr_rawMSquare_sq + cmp r12,[q + 8] + jc Fr_rawMSquare_done + jnz Fr_rawMSquare_sq + cmp r11,[q + 0] + jc Fr_rawMSquare_done + jnz Fr_rawMSquare_sq +Fr_rawMSquare_sq: + sub r11,[q +0] + sbb r12,[q +8] + sbb r13,[q +16] + sbb r14,[q +24] +Fr_rawMSquare_done: + mov [rdi + 0],r11 + mov [rdi + 8],r12 + mov [rdi + 16],r13 + mov [rdi + 24],r14 + pop r12 + pop r13 + pop r14 + pop r15 + ret +Fr_rawMMul1: + push r15 + push r14 + push r13 + push r12 + mov rcx,rdx + mov r9,[ np ] + xor r10,r10 + +; FirstLoop + mov rdx,rcx + mulx rax,r11,[rsi] + mulx r8,r12,[rsi +8] + adcx r12,rax + mulx rax,r13,[rsi +16] + adcx r13,r8 + mulx r8,r14,[rsi +24] + adcx r14,rax + mov r15,r10 + adcx r15,r8 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + 
adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +;comparison + cmp r14,[q + 24] + jc Fr_rawMMul1_done + jnz Fr_rawMMul1_sq + cmp r13,[q + 16] + jc Fr_rawMMul1_done + jnz Fr_rawMMul1_sq + cmp r12,[q + 8] + jc Fr_rawMMul1_done + jnz Fr_rawMMul1_sq + cmp r11,[q + 0] + jc Fr_rawMMul1_done + jnz Fr_rawMMul1_sq +Fr_rawMMul1_sq: + sub r11,[q +0] + sbb r12,[q +8] + sbb r13,[q +16] + sbb r14,[q +24] +Fr_rawMMul1_done: + mov [rdi + 0],r11 + mov [rdi + 8],r12 + mov [rdi + 16],r13 + mov [rdi + 24],r14 + pop r12 + pop r13 + pop r14 + pop r15 + ret +Fr_rawFromMontgomery: + push r15 + push r14 + push r13 + push r12 + mov rcx,rdx + mov r9,[ np ] + xor r10,r10 + +; FirstLoop + mov r11,[rsi +0] + mov r12,[rsi +8] + mov r13,[rsi +16] + mov r14,[rsi +24] + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov 
r14,r10 + adcx r14,rax + adox r14,r15 + + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + + mov r15,r10 +; SecondLoop + mov rdx,r9 + mulx rax,rdx,r11 + mulx r8,rax,[q] + adcx rax,r11 + mulx rax,r11,[q +8] + adcx r11,r8 + adox r11,r12 + mulx r8,r12,[q +16] + adcx r12,rax + adox r12,r13 + mulx rax,r13,[q +24] + adcx r13,r8 + adox r13,r14 + mov r14,r10 + adcx r14,rax + adox r14,r15 + +;comparison + cmp r14,[q + 24] + jc Fr_rawFromMontgomery_done + jnz Fr_rawFromMontgomery_sq + cmp r13,[q + 16] + jc Fr_rawFromMontgomery_done + jnz Fr_rawFromMontgomery_sq + cmp r12,[q + 8] + jc Fr_rawFromMontgomery_done + jnz Fr_rawFromMontgomery_sq + cmp r11,[q + 0] + jc Fr_rawFromMontgomery_done + jnz Fr_rawFromMontgomery_sq +Fr_rawFromMontgomery_sq: + sub r11,[q +0] + sbb r12,[q +8] + sbb r13,[q +16] + sbb r14,[q +24] +Fr_rawFromMontgomery_done: + mov [rdi + 0],r11 + mov [rdi + 8],r12 + mov [rdi + 16],r13 + mov [rdi + 24],r14 + pop r12 + pop r13 + pop r14 + pop r15 + ret + +;;;;;;;;;;;;;;;;;;;;;; +; rawToMontgomery +;;;;;;;;;;;;;;;;;;;;;; +; Convert a number to Montgomery +; rdi <= Pointer destination element +; rsi <= Pointer to src element +;;;;;;;;;;;;;;;;;;;; +Fr_rawToMontgomery: + push rdx + lea rdx, [R2] + call Fr_rawMMul + pop rdx + ret + +;;;;;;;;;;;;;;;;;;;;;; +; toMontgomery +;;;;;;;;;;;;;;;;;;;;;; +; Convert a number to Montgomery +; rdi <= Destination +; rdi <= Pointer element to convert +; Modified registers: +; r8, r9, 10, r11, rax, rcx 
+;;;;;;;;;;;;;;;;;;;; +Fr_toMontgomery: + mov rax, [rsi] + bt rax, 62 ; check if montgomery + jc toMontgomery_doNothing + bt rax, 63 + jc toMontgomeryLong + +toMontgomeryShort: + movsx rdx, eax + mov [rdi], rdx + add rdi, 8 + lea rsi, [R2] + cmp rdx, 0 + js negMontgomeryShort +posMontgomeryShort: + call Fr_rawMMul1 + sub rdi, 8 + mov r11b, 0x40 + shl r11d, 24 + mov [rdi+4], r11d + ret + +negMontgomeryShort: + neg rdx ; Do the multiplication positive and then negate the result. + call Fr_rawMMul1 + mov rsi, rdi + call rawNegL + sub rdi, 8 + mov r11b, 0x40 + shl r11d, 24 + mov [rdi+4], r11d + ret + + +toMontgomeryLong: + mov [rdi], rax + add rdi, 8 + add rsi, 8 + lea rdx, [R2] + call Fr_rawMMul + sub rsi, 8 + sub rdi, 8 + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + ret + + +toMontgomery_doNothing: + call Fr_copy + ret + +;;;;;;;;;;;;;;;;;;;;;; +; toNormal +;;;;;;;;;;;;;;;;;;;;;; +; Convert a number from Montgomery +; rdi <= Destination +; rsi <= Pointer element to convert +; Modified registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;; +Fr_toNormal: + mov rax, [rsi] + bt rax, 62 ; check if montgomery + jnc toNormal_doNothing + bt rax, 63 ; if short, it means it's converted + jnc toNormal_doNothing + +toNormalLong: + add rdi, 8 + add rsi, 8 + call Fr_rawFromMontgomery + sub rsi, 8 + sub rdi, 8 + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + ret + +toNormal_doNothing: + call Fr_copy + ret + +;;;;;;;;;;;;;;;;;;;;;; +; toLongNormal +;;;;;;;;;;;;;;;;;;;;;; +; Convert a number to long normal +; rdi <= Destination +; rsi <= Pointer element to convert +; Modified registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;; +Fr_toLongNormal: + mov rax, [rsi] + bt rax, 63 ; check if long + jnc toLongNormal_fromShort + bt rax, 62 ; check if montgomery + jc toLongNormal_fromMontgomery + call Fr_copy ; It is already long + ret + +toLongNormal_fromMontgomery: + add rdi, 8 + add rsi, 8 + call Fr_rawFromMontgomery + sub rsi, 8 + sub rdi, 8 + mov r11b, 0x80 + 
shl r11d, 24 + mov [rdi+4], r11d + ret + +toLongNormal_fromShort: + mov r8, rsi ; save rsi + movsx rsi, eax + call rawCopyS2L + mov rsi, r8 ; recover rsi + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + ret + + + + + + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; add +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_add: + push rbp + push rsi + push rdx + mov rbp, rsp + mov rax, [rsi] + mov rcx, [rdx] + bt rax, 63 ; Check if is short first operand + jc add_l1 + bt rcx, 63 ; Check if is short second operand + jc add_s1l2 + +add_s1s2: ; Both operands are short + + xor rdx, rdx + mov edx, eax + add edx, ecx + jo add_manageOverflow ; rsi already is the 64bits result + + mov [rdi], rdx ; not necessary to adjust so just save and return + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +add_manageOverflow: ; Do the operation in 64 bits + push rsi + movsx rsi, eax + movsx rdx, ecx + add rsi, rdx + call rawCopyS2L + pop rsi + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +add_l1: + bt rcx, 63 ; Check if is short second operand + jc add_l1l2 + +;;;;;;;; +add_l1s2: + bt rax, 62 ; check if montgomery first + jc add_l1ms2 +add_l1ns2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rsi, 8 + movsx rdx, ecx + add rdi, 8 + cmp rdx, 0 + + jns tmp_1 + neg rdx + call rawSubLS + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret +tmp_1: + call rawAddLS + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +add_l1ms2: + bt rcx, 62 ; check if montgomery second + jc add_l1ms2m +add_l1ms2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toMontgomery + mov rdx, rdi + pop rdi + pop rsi + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call 
rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +add_l1ms2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;; +add_s1l2: + bt rcx, 62 ; check if montgomery second + jc add_s1l2m +add_s1l2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + lea rsi, [rdx + 8] + movsx rdx, eax + add rdi, 8 + cmp rdx, 0 + + jns tmp_2 + neg rdx + call rawSubLS + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret +tmp_2: + call rawAddLS + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +add_s1l2m: + bt rax, 62 ; check if montgomery first + jc add_s1ml2m +add_s1nl2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toMontgomery + mov rsi, rdi + pop rdi + pop rdx + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +add_s1ml2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +;;;; +add_l1l2: + bt rax, 62 ; check if montgomery first + jc add_l1ml2 +add_l1nl2: + bt rcx, 62 ; check if montgomery second + jc add_l1nl2m +add_l1nl2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +add_l1nl2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toMontgomery + mov rsi, rdi + pop rdi + pop rdx + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + 
+ +add_l1ml2: + bt rcx, 62 ; check if montgomery seconf + jc add_l1ml2m +add_l1ml2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toMontgomery + mov rdx, rdi + pop rdi + pop rsi + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +add_l1ml2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawAddLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +;;;;;;;;;;;;;;;;;;;;;; +; rawAddLL +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of type long +; Params: +; rsi <= Pointer to the long data of element 1 +; rdx <= Pointer to the long data of element 2 +; rdi <= Pointer to the long data of result +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +rawAddLL: +Fr_rawAdd: + ; Add component by component with carry + + mov rax, [rsi + 0] + add rax, [rdx + 0] + mov [rdi + 0], rax + + mov rax, [rsi + 8] + adc rax, [rdx + 8] + mov [rdi + 8], rax + + mov rax, [rsi + 16] + adc rax, [rdx + 16] + mov [rdi + 16], rax + + mov rax, [rsi + 24] + adc rax, [rdx + 24] + mov [rdi + 24], rax + + jc rawAddLL_sq ; if overflow, substract q + + ; Compare with q + + + cmp rax, [q + 24] + jc rawAddLL_done ; q is bigget so done. + jnz rawAddLL_sq ; q is lower + + + mov rax, [rdi + 16] + + cmp rax, [q + 16] + jc rawAddLL_done ; q is bigget so done. + jnz rawAddLL_sq ; q is lower + + + mov rax, [rdi + 8] + + cmp rax, [q + 8] + jc rawAddLL_done ; q is bigget so done. + jnz rawAddLL_sq ; q is lower + + + mov rax, [rdi + 0] + + cmp rax, [q + 0] + jc rawAddLL_done ; q is bigget so done. 
+ jnz rawAddLL_sq ; q is lower + + ; If equal substract q +rawAddLL_sq: + + mov rax, [q + 0] + sub [rdi + 0], rax + + mov rax, [q + 8] + sbb [rdi + 8], rax + + mov rax, [q + 16] + sbb [rdi + 16], rax + + mov rax, [q + 24] + sbb [rdi + 24], rax + +rawAddLL_done: + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; rawAddLS +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of type long +; Params: +; rdi <= Pointer to the long data of result +; rsi <= Pointer to the long data of element 1 +; rdx <= Value to be added +;;;;;;;;;;;;;;;;;;;;;; +rawAddLS: + ; Add component by component with carry + + add rdx, [rsi] + mov [rdi] ,rdx + + mov rdx, 0 + adc rdx, [rsi + 8] + mov [rdi + 8], rdx + + mov rdx, 0 + adc rdx, [rsi + 16] + mov [rdi + 16], rdx + + mov rdx, 0 + adc rdx, [rsi + 24] + mov [rdi + 24], rdx + + jc rawAddLS_sq ; if overflow, substract q + + ; Compare with q + + mov rax, [rdi + 24] + cmp rax, [q + 24] + jc rawAddLS_done ; q is bigget so done. + jnz rawAddLS_sq ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 16] + jc rawAddLS_done ; q is bigget so done. + jnz rawAddLS_sq ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 8] + jc rawAddLS_done ; q is bigget so done. + jnz rawAddLS_sq ; q is lower + + mov rax, [rdi + 0] + cmp rax, [q + 0] + jc rawAddLS_done ; q is bigget so done. 
+ jnz rawAddLS_sq ; q is lower + + ; If equal substract q +rawAddLS_sq: + + mov rax, [q + 0] + sub [rdi + 0], rax + + mov rax, [q + 8] + sbb [rdi + 8], rax + + mov rax, [q + 16] + sbb [rdi + 16], rax + + mov rax, [q + 24] + sbb [rdi + 24], rax + +rawAddLS_done: + ret + + + + + + + + + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; sub +;;;;;;;;;;;;;;;;;;;;;; +; Substracts two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_sub: + push rbp + push rsi + push rdx + mov rbp, rsp + mov rax, [rsi] + mov rcx, [rdx] + bt rax, 63 ; Check if is long first operand + jc sub_l1 + bt rcx, 63 ; Check if is long second operand + jc sub_s1l2 + +sub_s1s2: ; Both operands are short + + xor rdx, rdx + mov edx, eax + sub edx, ecx + jo sub_manageOverflow ; rsi already is the 64bits result + + mov [rdi], rdx ; not necessary to adjust so just save and return + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +sub_manageOverflow: ; Do the operation in 64 bits + push rsi + movsx rsi, eax + movsx rdx, ecx + sub rsi, rdx + call rawCopyS2L + pop rsi + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +sub_l1: + bt rcx, 63 ; Check if is short second operand + jc sub_l1l2 + +;;;;;;;; +sub_l1s2: + bt rax, 62 ; check if montgomery first + jc sub_l1ms2 +sub_l1ns2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rsi, 8 + movsx rdx, ecx + add rdi, 8 + cmp rdx, 0 + + jns tmp_3 + neg rdx + call rawAddLS + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret +tmp_3: + call rawSubLS + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_l1ms2: + bt rcx, 62 ; check if montgomery second + jc sub_l1ms2m +sub_l1ms2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toMontgomery + mov rdx, rdi + pop rdi + 
pop rsi + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_l1ms2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;; +sub_s1l2: + bt rcx, 62 ; check if montgomery first + jc sub_s1l2m +sub_s1l2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + cmp eax, 0 + + js tmp_4 + + ; First Operand is positive + push rsi + add rdi, 8 + movsx rsi, eax + add rdx, 8 + call rawSubSL + sub rdi, 8 + pop rsi + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_4: ; First operand is negative + push rsi + lea rsi, [rdx + 8] + movsx rdx, eax + add rdi, 8 + neg rdx + call rawNegLS + sub rdi, 8 + pop rsi + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_s1l2m: + bt rax, 62 ; check if montgomery second + jc sub_s1ml2m +sub_s1nl2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toMontgomery + mov rsi, rdi + pop rdi + pop rdx + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_s1ml2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +;;;; +sub_l1l2: + bt rax, 62 ; check if montgomery first + jc sub_l1ml2 +sub_l1nl2: + bt rcx, 62 ; check if montgomery second + jc sub_l1nl2m +sub_l1nl2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_l1nl2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call 
Fr_toMontgomery + mov rsi, rdi + pop rdi + pop rdx + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_l1ml2: + bt rcx, 62 ; check if montgomery seconf + jc sub_l1ml2m +sub_l1ml2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toMontgomery + mov rdx, rdi + pop rdi + pop rsi + + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +sub_l1ml2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call rawSubLL + sub rdi, 8 + sub rsi, 8 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; rawSubLS +;;;;;;;;;;;;;;;;;;;;;; +; Substracts a short element from the long element +; Params: +; rdi <= Pointer to the long data of result +; rsi <= Pointer to the long data of element 1 where will be substracted +; rdx <= Value to be substracted +; [rdi] = [rsi] - rdx +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +rawSubLS: + ; Substract first digit + + mov rax, [rsi] + sub rax, rdx + mov [rdi] ,rax + mov rdx, 0 + + mov rax, [rsi + 8] + sbb rax, rdx + mov [rdi + 8], rax + + mov rax, [rsi + 16] + sbb rax, rdx + mov [rdi + 16], rax + + mov rax, [rsi + 24] + sbb rax, rdx + mov [rdi + 24], rax + + jnc rawSubLS_done ; if overflow, add q + + ; Add q +rawSubLS_aq: + + mov rax, [q + 0] + add [rdi + 0], rax + + mov rax, [q + 8] + adc [rdi + 8], rax + + mov rax, [q + 16] + adc [rdi + 16], rax + + mov rax, [q + 24] + adc [rdi + 24], rax + +rawSubLS_done: + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; rawSubSL +;;;;;;;;;;;;;;;;;;;;;; +; Substracts a long element from a short element +; Params: +; rdi <= Pointer to the long data of result +; rsi <= Value from where will bo substracted +; rdx <= Pointer to long of the value to be substracted +; +; [rdi] = 
rsi - [rdx] +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +rawSubSL: + ; Substract first digit + sub rsi, [rdx] + mov [rdi] ,rsi + + + mov rax, 0 + sbb rax, [rdx + 8] + mov [rdi + 8], rax + + mov rax, 0 + sbb rax, [rdx + 16] + mov [rdi + 16], rax + + mov rax, 0 + sbb rax, [rdx + 24] + mov [rdi + 24], rax + + jnc rawSubSL_done ; if overflow, add q + + ; Add q +rawSubSL_aq: + + mov rax, [q + 0] + add [rdi + 0], rax + + mov rax, [q + 8] + adc [rdi + 8], rax + + mov rax, [q + 16] + adc [rdi + 16], rax + + mov rax, [q + 24] + adc [rdi + 24], rax + +rawSubSL_done: + ret + +;;;;;;;;;;;;;;;;;;;;;; +; rawSubLL +;;;;;;;;;;;;;;;;;;;;;; +; Substracts a long element from a short element +; Params: +; rdi <= Pointer to the long data of result +; rsi <= Pointer to long from where substracted +; rdx <= Pointer to long of the value to be substracted +; +; [rdi] = [rsi] - [rdx] +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +rawSubLL: +Fr_rawSub: + ; Substract first digit + + mov rax, [rsi + 0] + sub rax, [rdx + 0] + mov [rdi + 0], rax + + mov rax, [rsi + 8] + sbb rax, [rdx + 8] + mov [rdi + 8], rax + + mov rax, [rsi + 16] + sbb rax, [rdx + 16] + mov [rdi + 16], rax + + mov rax, [rsi + 24] + sbb rax, [rdx + 24] + mov [rdi + 24], rax + + jnc rawSubLL_done ; if overflow, add q + + ; Add q +rawSubLL_aq: + + mov rax, [q + 0] + add [rdi + 0], rax + + mov rax, [q + 8] + adc [rdi + 8], rax + + mov rax, [q + 16] + adc [rdi + 16], rax + + mov rax, [q + 24] + adc [rdi + 24], rax + +rawSubLL_done: + ret + +;;;;;;;;;;;;;;;;;;;;;; +; rawNegLS +;;;;;;;;;;;;;;;;;;;;;; +; Substracts a long element and a short element form 0 +; Params: +; rdi <= Pointer to the long data of result +; rsi <= Pointer to long from where substracted +; rdx <= short value to be substracted too +; +; [rdi] = -[rsi] - rdx +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +rawNegLS: + mov rax, [q] + sub rax, rdx + mov [rdi], rax + + mov rax, [q + 8 ] + sbb rax, 0 + mov [rdi + 8], rax + + mov rax, [q + 16 ] 
+ sbb rax, 0 + mov [rdi + 16], rax + + mov rax, [q + 24 ] + sbb rax, 0 + mov [rdi + 24], rax + + setc dl + + + mov rax, [rdi + 0 ] + sub rax, [rsi + 0] + mov [rdi + 0], rax + + mov rax, [rdi + 8 ] + sbb rax, [rsi + 8] + mov [rdi + 8], rax + + mov rax, [rdi + 16 ] + sbb rax, [rsi + 16] + mov [rdi + 16], rax + + mov rax, [rdi + 24 ] + sbb rax, [rsi + 24] + mov [rdi + 24], rax + + + setc dh + or dl, dh + jz rawNegSL_done + + ; it is a negative value, so add q + + mov rax, [q + 0] + add [rdi + 0], rax + + mov rax, [q + 8] + adc [rdi + 8], rax + + mov rax, [q + 16] + adc [rdi + 16], rax + + mov rax, [q + 24] + adc [rdi + 24], rax + + +rawNegSL_done: + ret + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; neg +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element to be negated +; rdi <= Pointer to result +; [rdi] = -[rsi] +;;;;;;;;;;;;;;;;;;;;;; +Fr_neg: + mov rax, [rsi] + bt rax, 63 ; Check if is short first operand + jc neg_l + +neg_s: ; Operand is short + + neg eax + jo neg_manageOverflow ; Check if overflow. 
(0x80000000 is the only case) + + mov [rdi], rax ; not necessary to adjust so just save and return + ret + +neg_manageOverflow: ; Do the operation in 64 bits + push rsi + movsx rsi, eax + neg rsi + call rawCopyS2L + pop rsi + ret + + + +neg_l: + mov [rdi], rax ; Copy the type + + add rdi, 8 + add rsi, 8 + call rawNegL + sub rdi, 8 + sub rsi, 8 + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; rawNeg +;;;;;;;;;;;;;;;;;;;;;; +; Negates a value +; Params: +; rdi <= Pointer to the long data of result +; rsi <= Pointer to the long data of element 1 +; +; [rdi] = - [rsi] +;;;;;;;;;;;;;;;;;;;;;; +rawNegL: +Fr_rawNeg: + ; Compare is zero + + xor rax, rax + + cmp [rsi + 0], rax + jnz doNegate + + cmp [rsi + 8], rax + jnz doNegate + + cmp [rsi + 16], rax + jnz doNegate + + cmp [rsi + 24], rax + jnz doNegate + + ; it's zero so just set to zero + + mov [rdi + 0], rax + + mov [rdi + 8], rax + + mov [rdi + 16], rax + + mov [rdi + 24], rax + + ret +doNegate: + + mov rax, [q + 0] + sub rax, [rsi + 0] + mov [rdi + 0], rax + + mov rax, [q + 8] + sbb rax, [rsi + 8] + mov [rdi + 8], rax + + mov rax, [q + 16] + sbb rax, [rsi + 16] + mov [rdi + 16], rax + + mov rax, [q + 24] + sbb rax, [rsi + 24] + mov [rdi + 24], rax + + ret + + + + + + + + + + + + + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; square +;;;;;;;;;;;;;;;;;;;;;; +; Squares a field element +; Params: +; rsi <= Pointer to element 1 +; rdi <= Pointer to result +; [rdi] = [rsi] * [rsi] +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_square: + mov r8, [rsi] + bt r8, 63 ; Check if is short first operand + jc square_l1 + +square_s1: ; Both operands are short + + xor rax, rax + mov eax, r8d + imul eax + jo square_manageOverflow ; rsi already is the 64bits result + + mov [rdi], rax ; not necessary to adjust so just save and return + +square_manageOverflow: ; Do the operation in 64 bits + push rsi + movsx rax, r8d + imul rax + mov rsi, rax + call rawCopyS2L + pop rsi + + ret + +square_l1: + bt r8, 62 ; check if 
montgomery first + jc square_l1m +square_l1n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + call Fr_rawMSquare + sub rdi, 8 + sub rsi, 8 + + + push rsi + add rdi, 8 + mov rsi, rdi + lea rdx, [R3] + call Fr_rawMMul + sub rdi, 8 + pop rsi + + ret + +square_l1m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + call Fr_rawMSquare + sub rdi, 8 + sub rsi, 8 + + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; mul +;;;;;;;;;;;;;;;;;;;;;; +; Multiplies two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; [rdi] = [rsi] * [rdi] +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_mul: + mov r8, [rsi] + mov r9, [rdx] + bt r8, 63 ; Check if is short first operand + jc mul_l1 + bt r9, 63 ; Check if is short second operand + jc mul_s1l2 + +mul_s1s2: ; Both operands are short + + xor rax, rax + mov eax, r8d + imul r9d + jo mul_manageOverflow ; rsi already is the 64bits result + + mov [rdi], rax ; not necessary to adjust so just save and return + +mul_manageOverflow: ; Do the operation in 64 bits + push rsi + movsx rax, r8d + movsx rcx, r9d + imul rcx + mov rsi, rax + call rawCopyS2L + pop rsi + + ret + +mul_l1: + bt r9, 63 ; Check if is short second operand + jc mul_l1l2 + +;;;;;;;; +mul_l1s2: + bt r8, 62 ; check if montgomery first + jc mul_l1ms2 +mul_l1ns2: + bt r9, 62 ; check if montgomery first + jc mul_l1ns2m +mul_l1ns2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + push rsi + add rsi, 8 + movsx rdx, r9d + add rdi, 8 + cmp rdx, 0 + + jns tmp_5 + neg rdx + call Fr_rawMMul1 + mov rsi, rdi + call rawNegL + sub rdi, 8 + pop rsi + + jmp tmp_6 +tmp_5: + call Fr_rawMMul1 + sub rdi, 8 + pop rsi +tmp_6: + + + + push rsi + add rdi, 8 + mov rsi, rdi + lea rdx, [R3] + call Fr_rawMMul + sub rdi, 8 + pop rsi + + ret + + +mul_l1ns2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add 
rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + + +mul_l1ms2: + bt r9, 62 ; check if montgomery second + jc mul_l1ms2m +mul_l1ms2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + push rsi + add rsi, 8 + movsx rdx, r9d + add rdi, 8 + cmp rdx, 0 + + jns tmp_7 + neg rdx + call Fr_rawMMul1 + mov rsi, rdi + call rawNegL + sub rdi, 8 + pop rsi + + jmp tmp_8 +tmp_7: + call Fr_rawMMul1 + sub rdi, 8 + pop rsi +tmp_8: + + + ret + +mul_l1ms2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + + +;;;;;;;; +mul_s1l2: + bt r8, 62 ; check if montgomery first + jc mul_s1ml2 +mul_s1nl2: + bt r9, 62 ; check if montgomery first + jc mul_s1nl2m +mul_s1nl2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + push rsi + lea rsi, [rdx + 8] + movsx rdx, r8d + add rdi, 8 + cmp rdx, 0 + + jns tmp_9 + neg rdx + call Fr_rawMMul1 + mov rsi, rdi + call rawNegL + sub rdi, 8 + pop rsi + + jmp tmp_10 +tmp_9: + call Fr_rawMMul1 + sub rdi, 8 + pop rsi +tmp_10: + + + + push rsi + add rdi, 8 + mov rsi, rdi + lea rdx, [R3] + call Fr_rawMMul + sub rdi, 8 + pop rsi + + ret + +mul_s1nl2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + push rsi + lea rsi, [rdx + 8] + movsx rdx, r8d + add rdi, 8 + cmp rdx, 0 + + jns tmp_11 + neg rdx + call Fr_rawMMul1 + mov rsi, rdi + call rawNegL + sub rdi, 8 + pop rsi + + jmp tmp_12 +tmp_11: + call Fr_rawMMul1 + sub rdi, 8 + pop rsi +tmp_12: + + + ret + +mul_s1ml2: + bt r9, 62 ; check if montgomery first + jc mul_s1ml2m +mul_s1ml2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + +mul_s1ml2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + +;;;; +mul_l1l2: + bt r8, 62 ; check if montgomery first + jc mul_l1ml2 +mul_l1nl2: + bt r9, 62 ; check if 
montgomery second + jc mul_l1nl2m +mul_l1nl2n: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + + push rsi + add rdi, 8 + mov rsi, rdi + lea rdx, [R3] + call Fr_rawMMul + sub rdi, 8 + pop rsi + + ret + +mul_l1nl2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + +mul_l1ml2: + bt r9, 62 ; check if montgomery seconf + jc mul_l1ml2m +mul_l1ml2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + +mul_l1ml2m: + mov r11b, 0xC0 + shl r11d, 24 + mov [rdi+4], r11d + + add rdi, 8 + add rsi, 8 + add rdx, 8 + call Fr_rawMMul + sub rdi, 8 + sub rsi, 8 + + ret + + + + + + + + + + + + + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; band +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_band: + push rbp + push rsi + push rdx + mov rbp, rsp + mov rax, [rsi] + mov rcx, [rdx] + bt rax, 63 ; Check if is short first operand + jc and_l1 + bt rcx, 63 ; Check if is short second operand + jc and_s1l2 + +and_s1s2: + + cmp eax, 0 + + js tmp_13 + + cmp ecx, 0 + js tmp_13 + xor rdx, rdx ; both ops are positive so do the op and return + mov edx, eax + and edx, ecx + mov [rdi], rdx ; not necessary to adjust so just save and return + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_13: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + + + mov rax, [rsi + 8] + and 
rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_15 ; q is bigget so done. + jnz tmp_14 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_15 ; q is bigget so done. + jnz tmp_14 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_15 ; q is bigget so done. + jnz tmp_14 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_15 ; q is bigget so done. + jnz tmp_14 ; q is lower + + ; If equal substract q +tmp_14: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_15: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + + +and_l1: + bt rcx, 63 ; Check if is short second operand + jc and_l1l2 + + +and_l1s2: + bt rax, 62 ; check if montgomery first + jc and_l1ms2 +and_l1ns2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov rcx, [rdx] + cmp ecx, 0 + + js tmp_16 + movsx rax, ecx + and rax, [rsi +8] + mov [rdi+8], rax + + xor rax, rax + and rax, [rsi + 16]; + + mov [rdi + 16 ], rax; + + xor rax, rax + and rax, [rsi + 24]; + + mov [rdi + 24 ], rax; + + xor rax, rax + and rax, [rsi + 32]; + + and rax, [lboMask] ; + + mov [rdi + 32 ], rax; + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_18 ; q is bigget so done. + jnz tmp_17 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_18 ; q is bigget so done. + jnz tmp_17 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_18 ; q is bigget so done. + jnz tmp_17 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_18 ; q is bigget so done. 
+ jnz tmp_17 ; q is lower + + ; If equal substract q +tmp_17: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_18: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_16: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_20 ; q is bigget so done. + jnz tmp_19 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_20 ; q is bigget so done. + jnz tmp_19 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_20 ; q is bigget so done. + jnz tmp_19 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_20 ; q is bigget so done. 
+ jnz tmp_19 ; q is lower + + ; If equal substract q +tmp_19: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_20: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +and_l1ms2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov rcx, [rdx] + cmp ecx, 0 + + js tmp_21 + movsx rax, ecx + and rax, [rsi +8] + mov [rdi+8], rax + + xor rax, rax + and rax, [rsi + 16]; + + mov [rdi + 16 ], rax; + + xor rax, rax + and rax, [rsi + 24]; + + mov [rdi + 24 ], rax; + + xor rax, rax + and rax, [rsi + 32]; + + and rax, [lboMask] ; + + mov [rdi + 32 ], rax; + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_23 ; q is bigget so done. + jnz tmp_22 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_23 ; q is bigget so done. + jnz tmp_22 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_23 ; q is bigget so done. + jnz tmp_22 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_23 ; q is bigget so done. 
+ jnz tmp_22 ; q is lower + + ; If equal substract q +tmp_22: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_23: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_21: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_25 ; q is bigget so done. + jnz tmp_24 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_25 ; q is bigget so done. + jnz tmp_24 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_25 ; q is bigget so done. + jnz tmp_24 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_25 ; q is bigget so done. 
+ jnz tmp_24 ; q is lower + + ; If equal substract q +tmp_24: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_25: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +and_s1l2: + bt rcx, 62 ; check if montgomery first + jc and_s1l2m +and_s1l2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov eax, [rsi] + cmp eax, 0 + + js tmp_26 + and rax, [rdx +8] + mov [rdi+8], rax + + xor rax, rax + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + xor rax, rax + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + xor rax, rax + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_28 ; q is bigget so done. + jnz tmp_27 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_28 ; q is bigget so done. + jnz tmp_27 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_28 ; q is bigget so done. + jnz tmp_27 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_28 ; q is bigget so done. 
+ jnz tmp_27 ; q is lower + + ; If equal substract q +tmp_27: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_28: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_26: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_30 ; q is bigget so done. + jnz tmp_29 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_30 ; q is bigget so done. + jnz tmp_29 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_30 ; q is bigget so done. + jnz tmp_29 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_30 ; q is bigget so done. 
+ jnz tmp_29 ; q is lower + + ; If equal substract q +tmp_29: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_30: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +and_s1l2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov eax, [rsi] + cmp eax, 0 + + js tmp_31 + and rax, [rdx +8] + mov [rdi+8], rax + + xor rax, rax + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + xor rax, rax + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + xor rax, rax + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_33 ; q is bigget so done. + jnz tmp_32 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_33 ; q is bigget so done. + jnz tmp_32 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_33 ; q is bigget so done. + jnz tmp_32 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_33 ; q is bigget so done. 
+ jnz tmp_32 ; q is lower + + ; If equal substract q +tmp_32: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_33: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_31: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_35 ; q is bigget so done. + jnz tmp_34 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_35 ; q is bigget so done. + jnz tmp_34 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_35 ; q is bigget so done. + jnz tmp_34 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_35 ; q is bigget so done. 
+ jnz tmp_34 ; q is lower + + ; If equal substract q +tmp_34: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_35: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +and_l1l2: + bt rax, 62 ; check if montgomery first + jc and_l1ml2 + bt rcx, 62 ; check if montgomery first + jc and_l1nl2m +and_l1nl2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_37 ; q is bigget so done. + jnz tmp_36 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_37 ; q is bigget so done. + jnz tmp_36 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_37 ; q is bigget so done. + jnz tmp_36 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_37 ; q is bigget so done. 
+ jnz tmp_36 ; q is lower + + ; If equal substract q +tmp_36: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_37: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +and_l1nl2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_39 ; q is bigget so done. + jnz tmp_38 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_39 ; q is bigget so done. + jnz tmp_38 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_39 ; q is bigget so done. + jnz tmp_38 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_39 ; q is bigget so done. 
+ jnz tmp_38 ; q is lower + + ; If equal substract q +tmp_38: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_39: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +and_l1ml2: + bt rcx, 62 ; check if montgomery first + jc and_l1ml2m +and_l1ml2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_41 ; q is bigget so done. + jnz tmp_40 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_41 ; q is bigget so done. + jnz tmp_40 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_41 ; q is bigget so done. + jnz tmp_40 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_41 ; q is bigget so done. 
+ jnz tmp_40 ; q is lower + + ; If equal substract q +tmp_40: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_41: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +and_l1ml2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + + mov rax, [rsi + 8] + and rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + and rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + and rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + and rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_43 ; q is bigget so done. + jnz tmp_42 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_43 ; q is bigget so done. + jnz tmp_42 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_43 ; q is bigget so done. + jnz tmp_42 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_43 ; q is bigget so done. 
+ jnz tmp_42 ; q is lower + + ; If equal substract q +tmp_42: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_43: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; bor +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_bor: + push rbp + push rsi + push rdx + mov rbp, rsp + mov rax, [rsi] + mov rcx, [rdx] + bt rax, 63 ; Check if is short first operand + jc or_l1 + bt rcx, 63 ; Check if is short second operand + jc or_s1l2 + +or_s1s2: + + cmp eax, 0 + + js tmp_44 + + cmp ecx, 0 + js tmp_44 + xor rdx, rdx ; both ops are positive so do the op and return + mov edx, eax + or edx, ecx + mov [rdi], rdx ; not necessary to adjust so just save and return + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_44: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_46 ; q is bigget so done. + jnz tmp_45 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_46 ; q is bigget so done. 
+ jnz tmp_45 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_46 ; q is bigget so done. + jnz tmp_45 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_46 ; q is bigget so done. + jnz tmp_45 ; q is lower + + ; If equal substract q +tmp_45: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_46: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + + +or_l1: + bt rcx, 63 ; Check if is short second operand + jc or_l1l2 + + +or_l1s2: + bt rax, 62 ; check if montgomery first + jc or_l1ms2 +or_l1ns2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov rcx, [rdx] + cmp ecx, 0 + + js tmp_47 + movsx rax, ecx + or rax, [rsi +8] + mov [rdi+8], rax + + xor rax, rax + or rax, [rsi + 16]; + + mov [rdi + 16 ], rax; + + xor rax, rax + or rax, [rsi + 24]; + + mov [rdi + 24 ], rax; + + xor rax, rax + or rax, [rsi + 32]; + + and rax, [lboMask] ; + + mov [rdi + 32 ], rax; + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_49 ; q is bigget so done. + jnz tmp_48 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_49 ; q is bigget so done. + jnz tmp_48 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_49 ; q is bigget so done. + jnz tmp_48 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_49 ; q is bigget so done. 
+ jnz tmp_48 ; q is lower + + ; If equal substract q +tmp_48: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_49: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_47: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_51 ; q is bigget so done. + jnz tmp_50 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_51 ; q is bigget so done. + jnz tmp_50 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_51 ; q is bigget so done. + jnz tmp_50 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_51 ; q is bigget so done. 
+ jnz tmp_50 ; q is lower + + ; If equal substract q +tmp_50: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_51: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +or_l1ms2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov rcx, [rdx] + cmp ecx, 0 + + js tmp_52 + movsx rax, ecx + or rax, [rsi +8] + mov [rdi+8], rax + + xor rax, rax + or rax, [rsi + 16]; + + mov [rdi + 16 ], rax; + + xor rax, rax + or rax, [rsi + 24]; + + mov [rdi + 24 ], rax; + + xor rax, rax + or rax, [rsi + 32]; + + and rax, [lboMask] ; + + mov [rdi + 32 ], rax; + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_54 ; q is bigget so done. + jnz tmp_53 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_54 ; q is bigget so done. + jnz tmp_53 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_54 ; q is bigget so done. + jnz tmp_53 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_54 ; q is bigget so done. 
+ jnz tmp_53 ; q is lower + + ; If equal substract q +tmp_53: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_54: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_52: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_56 ; q is bigget so done. + jnz tmp_55 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_56 ; q is bigget so done. + jnz tmp_55 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_56 ; q is bigget so done. + jnz tmp_55 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_56 ; q is bigget so done. 
+ jnz tmp_55 ; q is lower + + ; If equal substract q +tmp_55: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_56: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +or_s1l2: + bt rcx, 62 ; check if montgomery first + jc or_s1l2m +or_s1l2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov eax, [rsi] + cmp eax, 0 + + js tmp_57 + or rax, [rdx +8] + mov [rdi+8], rax + + xor rax, rax + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + xor rax, rax + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + xor rax, rax + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_59 ; q is bigget so done. + jnz tmp_58 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_59 ; q is bigget so done. + jnz tmp_58 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_59 ; q is bigget so done. + jnz tmp_58 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_59 ; q is bigget so done. 
+ jnz tmp_58 ; q is lower + + ; If equal substract q +tmp_58: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_59: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_57: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_61 ; q is bigget so done. + jnz tmp_60 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_61 ; q is bigget so done. + jnz tmp_60 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_61 ; q is bigget so done. + jnz tmp_60 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_61 ; q is bigget so done. 
+ jnz tmp_60 ; q is lower + + ; If equal substract q +tmp_60: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_61: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +or_s1l2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov eax, [rsi] + cmp eax, 0 + + js tmp_62 + or rax, [rdx +8] + mov [rdi+8], rax + + xor rax, rax + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + xor rax, rax + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + xor rax, rax + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_64 ; q is bigget so done. + jnz tmp_63 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_64 ; q is bigget so done. + jnz tmp_63 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_64 ; q is bigget so done. + jnz tmp_63 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_64 ; q is bigget so done. 
+ jnz tmp_63 ; q is lower + + ; If equal substract q +tmp_63: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_64: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_62: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_66 ; q is bigget so done. + jnz tmp_65 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_66 ; q is bigget so done. + jnz tmp_65 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_66 ; q is bigget so done. + jnz tmp_65 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_66 ; q is bigget so done. 
+ jnz tmp_65 ; q is lower + + ; If equal substract q +tmp_65: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_66: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +or_l1l2: + bt rax, 62 ; check if montgomery first + jc or_l1ml2 + bt rcx, 62 ; check if montgomery first + jc or_l1nl2m +or_l1nl2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_68 ; q is bigget so done. + jnz tmp_67 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_68 ; q is bigget so done. + jnz tmp_67 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_68 ; q is bigget so done. + jnz tmp_67 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_68 ; q is bigget so done. 
+ jnz tmp_67 ; q is lower + + ; If equal substract q +tmp_67: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_68: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +or_l1nl2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_70 ; q is bigget so done. + jnz tmp_69 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_70 ; q is bigget so done. + jnz tmp_69 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_70 ; q is bigget so done. + jnz tmp_69 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_70 ; q is bigget so done. 
+ jnz tmp_69 ; q is lower + + ; If equal substract q +tmp_69: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_70: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +or_l1ml2: + bt rcx, 62 ; check if montgomery first + jc or_l1ml2m +or_l1ml2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_72 ; q is bigget so done. + jnz tmp_71 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_72 ; q is bigget so done. + jnz tmp_71 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_72 ; q is bigget so done. + jnz tmp_71 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_72 ; q is bigget so done. 
+ jnz tmp_71 ; q is lower + + ; If equal substract q +tmp_71: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_72: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +or_l1ml2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + + mov rax, [rsi + 8] + or rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + or rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + or rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + or rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_74 ; q is bigget so done. + jnz tmp_73 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_74 ; q is bigget so done. + jnz tmp_73 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_74 ; q is bigget so done. + jnz tmp_73 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_74 ; q is bigget so done. 
+ jnz tmp_73 ; q is lower + + ; If equal substract q +tmp_73: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_74: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; bxor +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_bxor: + push rbp + push rsi + push rdx + mov rbp, rsp + mov rax, [rsi] + mov rcx, [rdx] + bt rax, 63 ; Check if is short first operand + jc xor_l1 + bt rcx, 63 ; Check if is short second operand + jc xor_s1l2 + +xor_s1s2: + + cmp eax, 0 + + js tmp_75 + + cmp ecx, 0 + js tmp_75 + xor rdx, rdx ; both ops are positive so do the op and return + mov edx, eax + xor edx, ecx + mov [rdi], rdx ; not necessary to adjust so just save and return + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_75: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_77 ; q is bigget so done. + jnz tmp_76 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_77 ; q is bigget so done. 
+ jnz tmp_76 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_77 ; q is bigget so done. + jnz tmp_76 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_77 ; q is bigget so done. + jnz tmp_76 ; q is lower + + ; If equal substract q +tmp_76: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_77: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + + +xor_l1: + bt rcx, 63 ; Check if is short second operand + jc xor_l1l2 + + +xor_l1s2: + bt rax, 62 ; check if montgomery first + jc xor_l1ms2 +xor_l1ns2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov rcx, [rdx] + cmp ecx, 0 + + js tmp_78 + movsx rax, ecx + xor rax, [rsi +8] + mov [rdi+8], rax + + xor rax, rax + xor rax, [rsi + 16]; + + mov [rdi + 16 ], rax; + + xor rax, rax + xor rax, [rsi + 24]; + + mov [rdi + 24 ], rax; + + xor rax, rax + xor rax, [rsi + 32]; + + and rax, [lboMask] ; + + mov [rdi + 32 ], rax; + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_80 ; q is bigget so done. + jnz tmp_79 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_80 ; q is bigget so done. + jnz tmp_79 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_80 ; q is bigget so done. + jnz tmp_79 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_80 ; q is bigget so done. 
+ jnz tmp_79 ; q is lower + + ; If equal substract q +tmp_79: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_80: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_78: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_82 ; q is bigget so done. + jnz tmp_81 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_82 ; q is bigget so done. + jnz tmp_81 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_82 ; q is bigget so done. + jnz tmp_81 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_82 ; q is bigget so done. 
+ jnz tmp_81 ; q is lower + + ; If equal substract q +tmp_81: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_82: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +xor_l1ms2: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov rcx, [rdx] + cmp ecx, 0 + + js tmp_83 + movsx rax, ecx + xor rax, [rsi +8] + mov [rdi+8], rax + + xor rax, rax + xor rax, [rsi + 16]; + + mov [rdi + 16 ], rax; + + xor rax, rax + xor rax, [rsi + 24]; + + mov [rdi + 24 ], rax; + + xor rax, rax + xor rax, [rsi + 32]; + + and rax, [lboMask] ; + + mov [rdi + 32 ], rax; + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_85 ; q is bigget so done. + jnz tmp_84 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_85 ; q is bigget so done. + jnz tmp_84 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_85 ; q is bigget so done. + jnz tmp_84 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_85 ; q is bigget so done. 
+ jnz tmp_84 ; q is lower + + ; If equal substract q +tmp_84: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_85: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_83: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_87 ; q is bigget so done. + jnz tmp_86 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_87 ; q is bigget so done. + jnz tmp_86 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_87 ; q is bigget so done. + jnz tmp_86 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_87 ; q is bigget so done. 
+ jnz tmp_86 ; q is lower + + ; If equal substract q +tmp_86: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_87: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +xor_s1l2: + bt rcx, 62 ; check if montgomery first + jc xor_s1l2m +xor_s1l2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov eax, [rsi] + cmp eax, 0 + + js tmp_88 + xor rax, [rdx +8] + mov [rdi+8], rax + + xor rax, rax + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + xor rax, rax + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + xor rax, rax + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_90 ; q is bigget so done. + jnz tmp_89 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_90 ; q is bigget so done. + jnz tmp_89 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_90 ; q is bigget so done. + jnz tmp_89 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_90 ; q is bigget so done. 
+ jnz tmp_89 ; q is lower + + ; If equal substract q +tmp_89: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_90: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_88: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_92 ; q is bigget so done. + jnz tmp_91 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_92 ; q is bigget so done. + jnz tmp_91 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_92 ; q is bigget so done. + jnz tmp_91 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_92 ; q is bigget so done. 
+ jnz tmp_91 ; q is lower + + ; If equal substract q +tmp_91: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_92: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +xor_s1l2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov eax, [rsi] + cmp eax, 0 + + js tmp_93 + xor rax, [rdx +8] + mov [rdi+8], rax + + xor rax, rax + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + xor rax, rax + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + xor rax, rax + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_95 ; q is bigget so done. + jnz tmp_94 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_95 ; q is bigget so done. + jnz tmp_94 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_95 ; q is bigget so done. + jnz tmp_94 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_95 ; q is bigget so done. 
+ jnz tmp_94 ; q is lower + + ; If equal substract q +tmp_94: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_95: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +tmp_93: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_97 ; q is bigget so done. + jnz tmp_96 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_97 ; q is bigget so done. + jnz tmp_96 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_97 ; q is bigget so done. + jnz tmp_96 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_97 ; q is bigget so done. 
+ jnz tmp_96 ; q is lower + + ; If equal substract q +tmp_96: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_97: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + + +xor_l1l2: + bt rax, 62 ; check if montgomery first + jc xor_l1ml2 + bt rcx, 62 ; check if montgomery first + jc xor_l1nl2m +xor_l1nl2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_99 ; q is bigget so done. + jnz tmp_98 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_99 ; q is bigget so done. + jnz tmp_98 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_99 ; q is bigget so done. + jnz tmp_98 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_99 ; q is bigget so done. 
+ jnz tmp_98 ; q is lower + + ; If equal substract q +tmp_98: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_99: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +xor_l1nl2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_101 ; q is bigget so done. + jnz tmp_100 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_101 ; q is bigget so done. + jnz tmp_100 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_101 ; q is bigget so done. + jnz tmp_100 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_101 ; q is bigget so done. 
+ jnz tmp_100 ; q is lower + + ; If equal substract q +tmp_100: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_101: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +xor_l1ml2: + bt rcx, 62 ; check if montgomery first + jc xor_l1ml2m +xor_l1ml2n: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_103 ; q is bigget so done. + jnz tmp_102 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_103 ; q is bigget so done. + jnz tmp_102 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_103 ; q is bigget so done. + jnz tmp_102 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_103 ; q is bigget so done. 
+ jnz tmp_102 ; q is lower + + ; If equal substract q +tmp_102: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_103: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +xor_l1ml2m: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + + + mov rax, [rsi + 8] + xor rax, [rdx + 8] + + mov [rdi + 8 ], rax + + mov rax, [rsi + 16] + xor rax, [rdx + 16] + + mov [rdi + 16 ], rax + + mov rax, [rsi + 24] + xor rax, [rdx + 24] + + mov [rdi + 24 ], rax + + mov rax, [rsi + 32] + xor rax, [rdx + 32] + + and rax, [lboMask] + + mov [rdi + 32 ], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_105 ; q is bigget so done. + jnz tmp_104 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_105 ; q is bigget so done. + jnz tmp_104 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_105 ; q is bigget so done. + jnz tmp_104 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_105 ; q is bigget so done. 
+ jnz tmp_104 ; q is lower + + ; If equal substract q +tmp_104: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_105: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +;;;;;;;;;;;;;;;;;;;;;; +; bnot +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_bnot: + push rbp + push rsi + push rdx + mov rbp, rsp + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + + mov rax, [rsi] + bt rax, 63 ; Check if is long operand + jc bnot_l1 +bnot_s: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp bnot_l1n + +bnot_l1: + bt rax, 62 ; check if montgomery first + jnc bnot_l1n + +bnot_l1m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + +bnot_l1n: + + mov rax, [rsi + 8] + not rax + + mov [rdi + 8], rax + + mov rax, [rsi + 16] + not rax + + mov [rdi + 16], rax + + mov rax, [rsi + 24] + not rax + + mov [rdi + 24], rax + + mov rax, [rsi + 32] + not rax + + and rax, [lboMask] + + mov [rdi + 32], rax + + + + + + ; Compare with q + + mov rax, [rdi + 32] + cmp rax, [q + 24] + jc tmp_107 ; q is bigget so done. + jnz tmp_106 ; q is lower + + mov rax, [rdi + 24] + cmp rax, [q + 16] + jc tmp_107 ; q is bigget so done. + jnz tmp_106 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 8] + jc tmp_107 ; q is bigget so done. + jnz tmp_106 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 0] + jc tmp_107 ; q is bigget so done. 
+ jnz tmp_106 ; q is lower + + ; If equal substract q +tmp_106: + + mov rax, [q + 0] + sub [rdi + 8], rax + + mov rax, [q + 8] + sbb [rdi + 16], rax + + mov rax, [q + 16] + sbb [rdi + 24], rax + + mov rax, [q + 24] + sbb [rdi + 32], rax + +tmp_107: + + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + + +;;;;;;;;;;;;;;;;;;;;;; +; rawShr +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= how much is shifted +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +rawShr: + cmp rdx, 0 + je Fr_rawCopy + + cmp rdx, 255 + jae Fr_rawZero + +rawShr_nz: + mov r8, rdx + shr r8,6 + mov rcx, rdx + and rcx, 0x3F + jz rawShr_aligned + mov ch, 64 + sub ch, cl + + mov r9, 1 + rol cx, 8 + shl r9, cl + rol cx, 8 + sub r9, 1 + mov r10, r9 + not r10 + + + cmp r8, 3 + jae rawShr_if2_0 + + mov rax, [rsi + r8*8 + 0 ] + shr rax, cl + and rax, r9 + mov r11, [rsi + r8*8 + 8 ] + rol cx, 8 + shl r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + mov [rdi + 0], rax + + jmp rawShr_endif_0 +rawShr_if2_0: + jne rawShr_else_0 + + mov rax, [rsi + r8*8 + 0 ] + shr rax, cl + and rax, r9 + mov [rdi + 0], rax + + jmp rawShr_endif_0 +rawShr_else_0: + xor rax, rax + mov [rdi + 0], rax +rawShr_endif_0: + + cmp r8, 2 + jae rawShr_if2_1 + + mov rax, [rsi + r8*8 + 8 ] + shr rax, cl + and rax, r9 + mov r11, [rsi + r8*8 + 16 ] + rol cx, 8 + shl r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + mov [rdi + 8], rax + + jmp rawShr_endif_1 +rawShr_if2_1: + jne rawShr_else_1 + + mov rax, [rsi + r8*8 + 8 ] + shr rax, cl + and rax, r9 + mov [rdi + 8], rax + + jmp rawShr_endif_1 +rawShr_else_1: + xor rax, rax + mov [rdi + 8], rax +rawShr_endif_1: + + cmp r8, 1 + jae rawShr_if2_2 + + mov rax, [rsi + r8*8 + 16 ] + shr rax, cl + and rax, r9 + mov r11, [rsi + r8*8 + 24 ] + rol cx, 8 + shl r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + mov [rdi + 16], rax + + jmp rawShr_endif_2 +rawShr_if2_2: + jne 
rawShr_else_2 + + mov rax, [rsi + r8*8 + 16 ] + shr rax, cl + and rax, r9 + mov [rdi + 16], rax + + jmp rawShr_endif_2 +rawShr_else_2: + xor rax, rax + mov [rdi + 16], rax +rawShr_endif_2: + + cmp r8, 0 + jae rawShr_if2_3 + + mov rax, [rsi + r8*8 + 24 ] + shr rax, cl + and rax, r9 + mov r11, [rsi + r8*8 + 32 ] + rol cx, 8 + shl r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + mov [rdi + 24], rax + + jmp rawShr_endif_3 +rawShr_if2_3: + jne rawShr_else_3 + + mov rax, [rsi + r8*8 + 24 ] + shr rax, cl + and rax, r9 + mov [rdi + 24], rax + + jmp rawShr_endif_3 +rawShr_else_3: + xor rax, rax + mov [rdi + 24], rax +rawShr_endif_3: + + + ret + +rawShr_aligned: + + cmp r8, 3 + ja rawShr_if3_0 + mov rax, [rsi + r8*8 + 0 ] + mov [rdi + 0], rax + jmp rawShr_endif3_0 +rawShr_if3_0: + xor rax, rax + mov [rdi + 0], rax +rawShr_endif3_0: + + cmp r8, 2 + ja rawShr_if3_1 + mov rax, [rsi + r8*8 + 8 ] + mov [rdi + 8], rax + jmp rawShr_endif3_1 +rawShr_if3_1: + xor rax, rax + mov [rdi + 8], rax +rawShr_endif3_1: + + cmp r8, 1 + ja rawShr_if3_2 + mov rax, [rsi + r8*8 + 16 ] + mov [rdi + 16], rax + jmp rawShr_endif3_2 +rawShr_if3_2: + xor rax, rax + mov [rdi + 16], rax +rawShr_endif3_2: + + cmp r8, 0 + ja rawShr_if3_3 + mov rax, [rsi + r8*8 + 24 ] + mov [rdi + 24], rax + jmp rawShr_endif3_3 +rawShr_if3_3: + xor rax, rax + mov [rdi + 24], rax +rawShr_endif3_3: + + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; rawShl +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= how much is shifted +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +rawShl: + cmp rdx, 0 + je Fr_rawCopy + + cmp rdx, 255 + jae Fr_rawZero + + mov r8, rdx + shr r8,6 + mov rcx, rdx + and rcx, 0x3F + jz rawShl_aligned + mov ch, 64 + sub ch, cl + + + mov r10, 1 + shl r10, cl + sub r10, 1 + mov r9, r10 + not r9 + + mov rdx, rsi + mov rax, r8 + shl rax, 3 + sub rdx, rax + + + cmp r8, 3 + jae rawShl_if2_3 + + mov rax, [rdx + 24 
] + shl rax, cl + and rax, r9 + mov r11, [rdx + 16 ] + rol cx, 8 + shr r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + + and rax, [lboMask] + + + mov [rdi + 24], rax + + jmp rawShl_endif_3 +rawShl_if2_3: + jne rawShl_else_3 + + mov rax, [rdx + 24 ] + shl rax, cl + and rax, r9 + + and rax, [lboMask] + + + mov [rdi + 24], rax + + jmp rawShl_endif_3 +rawShl_else_3: + xor rax, rax + mov [rdi + 24], rax +rawShl_endif_3: + + cmp r8, 2 + jae rawShl_if2_2 + + mov rax, [rdx + 16 ] + shl rax, cl + and rax, r9 + mov r11, [rdx + 8 ] + rol cx, 8 + shr r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + + + mov [rdi + 16], rax + + jmp rawShl_endif_2 +rawShl_if2_2: + jne rawShl_else_2 + + mov rax, [rdx + 16 ] + shl rax, cl + and rax, r9 + + + mov [rdi + 16], rax + + jmp rawShl_endif_2 +rawShl_else_2: + xor rax, rax + mov [rdi + 16], rax +rawShl_endif_2: + + cmp r8, 1 + jae rawShl_if2_1 + + mov rax, [rdx + 8 ] + shl rax, cl + and rax, r9 + mov r11, [rdx + 0 ] + rol cx, 8 + shr r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + + + mov [rdi + 8], rax + + jmp rawShl_endif_1 +rawShl_if2_1: + jne rawShl_else_1 + + mov rax, [rdx + 8 ] + shl rax, cl + and rax, r9 + + + mov [rdi + 8], rax + + jmp rawShl_endif_1 +rawShl_else_1: + xor rax, rax + mov [rdi + 8], rax +rawShl_endif_1: + + cmp r8, 0 + jae rawShl_if2_0 + + mov rax, [rdx + 0 ] + shl rax, cl + and rax, r9 + mov r11, [rdx + -8 ] + rol cx, 8 + shr r11, cl + rol cx, 8 + and r11, r10 + or rax, r11 + + + mov [rdi + 0], rax + + jmp rawShl_endif_0 +rawShl_if2_0: + jne rawShl_else_0 + + mov rax, [rdx + 0 ] + shl rax, cl + and rax, r9 + + + mov [rdi + 0], rax + + jmp rawShl_endif_0 +rawShl_else_0: + xor rax, rax + mov [rdi + 0], rax +rawShl_endif_0: + + + + + + + ; Compare with q + + mov rax, [rdi + 24] + cmp rax, [q + 24] + jc tmp_109 ; q is bigget so done. + jnz tmp_108 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 16] + jc tmp_109 ; q is bigget so done. 
+ jnz tmp_108 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 8] + jc tmp_109 ; q is bigget so done. + jnz tmp_108 ; q is lower + + mov rax, [rdi + 0] + cmp rax, [q + 0] + jc tmp_109 ; q is bigget so done. + jnz tmp_108 ; q is lower + + ; If equal substract q +tmp_108: + + mov rax, [q + 0] + sub [rdi + 0], rax + + mov rax, [q + 8] + sbb [rdi + 8], rax + + mov rax, [q + 16] + sbb [rdi + 16], rax + + mov rax, [q + 24] + sbb [rdi + 24], rax + +tmp_109: + + ret; + +rawShl_aligned: + mov rdx, rsi + mov rax, r8 + shl rax, 3 + sub rdx, rax + + + cmp r8, 3 + ja rawShl_if3_3 + mov rax, [rdx + 24 ] + + and rax, [lboMask] + + mov [rdi + 24], rax + jmp rawShl_endif3_3 +rawShl_if3_3: + xor rax, rax + mov [rdi + 24], rax +rawShl_endif3_3: + + cmp r8, 2 + ja rawShl_if3_2 + mov rax, [rdx + 16 ] + + mov [rdi + 16], rax + jmp rawShl_endif3_2 +rawShl_if3_2: + xor rax, rax + mov [rdi + 16], rax +rawShl_endif3_2: + + cmp r8, 1 + ja rawShl_if3_1 + mov rax, [rdx + 8 ] + + mov [rdi + 8], rax + jmp rawShl_endif3_1 +rawShl_if3_1: + xor rax, rax + mov [rdi + 8], rax +rawShl_endif3_1: + + cmp r8, 0 + ja rawShl_if3_0 + mov rax, [rdx + 0 ] + + mov [rdi + 0], rax + jmp rawShl_endif3_0 +rawShl_if3_0: + xor rax, rax + mov [rdi + 0], rax +rawShl_endif3_0: + + + + + + ; Compare with q + + mov rax, [rdi + 24] + cmp rax, [q + 24] + jc tmp_111 ; q is bigget so done. + jnz tmp_110 ; q is lower + + mov rax, [rdi + 16] + cmp rax, [q + 16] + jc tmp_111 ; q is bigget so done. + jnz tmp_110 ; q is lower + + mov rax, [rdi + 8] + cmp rax, [q + 8] + jc tmp_111 ; q is bigget so done. + jnz tmp_110 ; q is lower + + mov rax, [rdi + 0] + cmp rax, [q + 0] + jc tmp_111 ; q is bigget so done. 
+ jnz tmp_110 ; q is lower + + ; If equal substract q +tmp_110: + + mov rax, [q + 0] + sub [rdi + 0], rax + + mov rax, [q + 8] + sbb [rdi + 8], rax + + mov rax, [q + 16] + sbb [rdi + 16], rax + + mov rax, [q + 24] + sbb [rdi + 24], rax + +tmp_111: + + ret + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; shr +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_shr: + push rbp + push rsi + push rdi + push rdx + mov rbp, rsp + + + + + + + mov rcx, [rdx] + bt rcx, 63 ; Check if is short second operand + jnc tmp_112 + + ; long 2 + bt rcx, 62 ; Check if is montgomery second operand + jnc tmp_113 + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + +tmp_113: + mov rcx, [rdx + 8] + cmp rcx, 255 + jae tmp_114 + xor rax, rax + + cmp [rdx + 16], rax + jnz tmp_114 + + cmp [rdx + 24], rax + jnz tmp_114 + + cmp [rdx + 32], rax + jnz tmp_114 + + mov rdx, rcx + jmp do_shr + +tmp_114: + mov rcx, [q] + sub rcx, [rdx+8] + cmp rcx, 255 + jae setzero + mov rax, [q] + sub rax, [rdx+8] + + mov rax, [q+ 8] + sbb rax, [rdx + 16] + jnz setzero + + mov rax, [q+ 16] + sbb rax, [rdx + 24] + jnz setzero + + mov rax, [q+ 24] + sbb rax, [rdx + 32] + jnz setzero + + mov rdx, rcx + jmp do_shl + +tmp_112: + cmp ecx, 0 + jl tmp_115 + cmp ecx, 255 + jae setzero + movsx rdx, ecx + jmp do_shr +tmp_115: + neg ecx + cmp ecx, 255 + jae setzero + movsx rdx, ecx + jmp do_shl + + + + +;;;;;;;;;;;;;;;;;;;;;; +; shl +;;;;;;;;;;;;;;;;;;;;;; +; Adds two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result +; Modified Registers: +; r8, r9, 10, r11, rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_shl: + push rbp + push rsi + push rdi + push rdx + mov rbp, rsp + + + + + + mov rcx, [rdx] + bt rcx, 63 ; 
Check if is short second operand + jnc tmp_116 + + ; long 2 + bt rcx, 62 ; Check if is montgomery second operand + jnc tmp_117 + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + +tmp_117: + mov rcx, [rdx + 8] + cmp rcx, 255 + jae tmp_118 + xor rax, rax + + cmp [rdx + 16], rax + jnz tmp_118 + + cmp [rdx + 24], rax + jnz tmp_118 + + cmp [rdx + 32], rax + jnz tmp_118 + + mov rdx, rcx + jmp do_shl + +tmp_118: + mov rcx, [q] + sub rcx, [rdx+8] + cmp rcx, 255 + jae setzero + mov rax, [q] + sub rax, [rdx+8] + + mov rax, [q+ 8] + sbb rax, [rdx + 16] + jnz setzero + + mov rax, [q+ 16] + sbb rax, [rdx + 24] + jnz setzero + + mov rax, [q+ 24] + sbb rax, [rdx + 32] + jnz setzero + + mov rdx, rcx + jmp do_shr + +tmp_116: + cmp ecx, 0 + jl tmp_119 + cmp ecx, 255 + jae setzero + movsx rdx, ecx + jmp do_shl +tmp_119: + neg ecx + cmp ecx, 255 + jae setzero + movsx rdx, ecx + jmp do_shr + + + +;;;;;;;;;; +;;; doShl +;;;;;;;;;; +do_shl: + mov rcx, [rsi] + bt rcx, 63 ; Check if is short second operand + jc do_shll +do_shls: + + movsx rax, ecx + cmp rax, 0 + jz setzero; + jl do_shlcl + + cmp rdx, 31 + jae do_shlcl + + mov cl, dl + shl rax, cl + mov rcx, rax + shr rcx, 31 + jnz do_shlcl + mov [rdi], rax + mov rsp, rbp + pop rdx + pop rdi + pop rsi + pop rbp + ret + +do_shlcl: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp do_shlln + +do_shll: + bt rcx, 62 ; Check if is short second operand + jnc do_shlln + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + +do_shlln: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + add rdi, 8 + add rsi, 8 + call rawShl + mov rsp, rbp + pop rdx + pop rdi + pop rsi + pop rbp + ret + + +;;;;;;;;;; +;;; doShr +;;;;;;;;;; +do_shr: + mov rcx, [rsi] + bt rcx, 63 ; Check if is short second operand + jc do_shrl +do_shrs: 
+ movsx rax, ecx + cmp rax, 0 + jz setzero; + jl do_shrcl + + cmp rdx, 31 + jae setzero + + mov cl, dl + shr rax, cl + mov [rdi], rax + mov rsp, rbp + pop rdx + pop rdi + pop rsi + pop rbp + ret + +do_shrcl: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + +do_shrl: + bt rcx, 62 ; Check if is short second operand + jnc do_shrln + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + +do_shrln: + mov r11b, 0x80 + shl r11d, 24 + mov [rdi+4], r11d + add rdi, 8 + add rsi, 8 + call rawShr + mov rsp, rbp + pop rdx + pop rdi + pop rsi + pop rbp + ret + +setzero: + xor rax, rax + mov [rdi], rax + mov rsp, rbp + pop rdx + pop rdi + pop rsi + pop rbp + ret + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; rgt - Raw Greater Than +;;;;;;;;;;;;;;;;;;;;;; +; returns in ax 1 id *rsi > *rdx +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rax <= Return 1 or 0 +; Modified Registers: +; r8, r9, rax +;;;;;;;;;;;;;;;;;;;;;; +Fr_rgt: + push rbp + push rsi + push rdx + mov rbp, rsp + mov r8, [rsi] + mov r9, [rdx] + bt r8, 63 ; Check if is short first operand + jc rgt_l1 + bt r9, 63 ; Check if is short second operand + jc rgt_s1l2 + +rgt_s1s2: ; Both operands are short + cmp r8d, r9d + jg rgt_ret1 + jmp rgt_ret0 + + +rgt_l1: + bt r9, 63 ; Check if is short second operand + jc rgt_l1l2 + +;;;;;;;; +rgt_l1s2: + bt r8, 62 ; check if montgomery first + jc rgt_l1ms2 +rgt_l1ns2: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rgtL1L2 + +rgt_l1ms2: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp rgtL1L2 + + +;;;;;;;; 
+rgt_s1l2: + bt r9, 62 ; check if montgomery second + jc rgt_s1l2m +rgt_s1l2n: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp rgtL1L2 + +rgt_s1l2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rgtL1L2 + +;;;; +rgt_l1l2: + bt r8, 62 ; check if montgomery first + jc rgt_l1ml2 +rgt_l1nl2: + bt r9, 62 ; check if montgomery second + jc rgt_l1nl2m +rgt_l1nl2n: + jmp rgtL1L2 + +rgt_l1nl2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rgtL1L2 + +rgt_l1ml2: + bt r9, 62 ; check if montgomery second + jc rgt_l1ml2m +rgt_l1ml2n: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp rgtL1L2 + +rgt_l1ml2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rgtL1L2 + + +;;;;;; +; rgtL1L2 +;;;;;; + +rgtL1L2: + + + mov rax, [rsi + 32] + cmp [half + 24], rax ; comare with (q-1)/2 + jc rgtl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jnz rgtl1l2_p1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rsi + 24] + cmp [half + 16], rax ; comare with (q-1)/2 + jc rgtl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jnz rgtl1l2_p1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rsi + 16] + cmp [half + 8], rax ; comare with (q-1)/2 + jc rgtl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jnz rgtl1l2_p1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rsi + 8] + cmp [half + 0], rax ; comare with (q-1)/2 + jc 
rgtl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jmp rgtl1l2_p1 + + + +rgtl1l2_p1: + + + mov rax, [rdx + 32] + cmp [half + 24], rax ; comare with (q-1)/2 + jc rgt_ret1 ; half e1-e2 is neg => e1 < e2 + + jnz rgtRawL1L2 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 24] + cmp [half + 16], rax ; comare with (q-1)/2 + jc rgt_ret1 ; half e1-e2 is neg => e1 < e2 + + jnz rgtRawL1L2 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 16] + cmp [half + 8], rax ; comare with (q-1)/2 + jc rgt_ret1 ; half e1-e2 is neg => e1 < e2 + + jnz rgtRawL1L2 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 8] + cmp [half + 0], rax ; comare with (q-1)/2 + jc rgt_ret1 ; half e1-e2 is neg => e1 < e2 + + jmp rgtRawL1L2 + + + + +rgtl1l2_n1: + + + mov rax, [rdx + 32] + cmp [half + 24], rax ; comare with (q-1)/2 + jc rgtRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jnz rgt_ret0 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 24] + cmp [half + 16], rax ; comare with (q-1)/2 + jc rgtRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jnz rgt_ret0 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 16] + cmp [half + 8], rax ; comare with (q-1)/2 + jc rgtRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jnz rgt_ret0 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 8] + cmp [half + 0], rax ; comare with (q-1)/2 + jc rgtRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jmp rgt_ret0 + + + + + +rgtRawL1L2: + + mov rax, [rsi + 32] + cmp [rdx + 32], rax ; comare with (q-1)/2 + jc rgt_ret1 ; rsi 1st > 2nd + + jnz rgt_ret0 + + + mov rax, [rsi + 24] + cmp [rdx + 24], rax ; comare with (q-1)/2 + jc rgt_ret1 ; rsi 1st > 2nd + + jnz rgt_ret0 + + + mov rax, [rsi + 16] + cmp [rdx + 16], rax ; comare with (q-1)/2 + jc rgt_ret1 ; rsi 1st > 2nd + + jnz rgt_ret0 + + + mov rax, [rsi + 8] + cmp [rdx + 8], rax ; comare with (q-1)/2 + jc rgt_ret1 ; rsi 1st > 2nd + + + +rgt_ret0: + xor rax, rax + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret +rgt_ret1: + mov rax, 1 + mov rsp, 
rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; rlt - Raw Less Than +;;;;;;;;;;;;;;;;;;;;;; +; returns in ax 1 id *rsi > *rdx +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rax <= Return 1 or 0 +; Modified Registers: +; r8, r9, rax +;;;;;;;;;;;;;;;;;;;;;; +Fr_rlt: + push rbp + push rsi + push rdx + mov rbp, rsp + mov r8, [rsi] + mov r9, [rdx] + bt r8, 63 ; Check if is short first operand + jc rlt_l1 + bt r9, 63 ; Check if is short second operand + jc rlt_s1l2 + +rlt_s1s2: ; Both operands are short + cmp r8d, r9d + jl rlt_ret1 + jmp rlt_ret0 + + +rlt_l1: + bt r9, 63 ; Check if is short second operand + jc rlt_l1l2 + +;;;;;;;; +rlt_l1s2: + bt r8, 62 ; check if montgomery first + jc rlt_l1ms2 +rlt_l1ns2: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rltL1L2 + +rlt_l1ms2: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp rltL1L2 + + +;;;;;;;; +rlt_s1l2: + bt r9, 62 ; check if montgomery second + jc rlt_s1l2m +rlt_s1l2n: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp rltL1L2 + +rlt_s1l2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rltL1L2 + +;;;; +rlt_l1l2: + bt r8, 62 ; check if montgomery first + jc rlt_l1ml2 +rlt_l1nl2: + bt r9, 62 ; check if montgomery second + jc rlt_l1nl2m +rlt_l1nl2n: + jmp rltL1L2 + +rlt_l1nl2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push 
r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rltL1L2 + +rlt_l1ml2: + bt r9, 62 ; check if montgomery second + jc rlt_l1ml2m +rlt_l1ml2n: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp rltL1L2 + +rlt_l1ml2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toNormal + mov rsi, rdi + pop rdi + pop rdx + + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp rltL1L2 + + +;;;;;; +; rltL1L2 +;;;;;; + +rltL1L2: + + + mov rax, [rsi + 32] + cmp [half + 24], rax ; comare with (q-1)/2 + jc rltl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jnz rltl1l2_p1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rsi + 24] + cmp [half + 16], rax ; comare with (q-1)/2 + jc rltl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jnz rltl1l2_p1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rsi + 16] + cmp [half + 8], rax ; comare with (q-1)/2 + jc rltl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jnz rltl1l2_p1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rsi + 8] + cmp [half + 0], rax ; comare with (q-1)/2 + jc rltl1l2_n1 ; half e1-e2 is neg => e1 < e2 + + jmp rltl1l2_p1 + + + +rltl1l2_p1: + + + mov rax, [rdx + 32] + cmp [half + 24], rax ; comare with (q-1)/2 + jc rlt_ret0 ; half e1-e2 is neg => e1 < e2 + + jnz rltRawL1L2 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 24] + cmp [half + 16], rax ; comare with (q-1)/2 + jc rlt_ret0 ; half e1-e2 is neg => e1 < e2 + + jnz rltRawL1L2 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 16] + cmp [half + 8], rax ; comare with (q-1)/2 + jc rlt_ret0 ; half e1-e2 is neg => e1 < e2 + + jnz rltRawL1L2 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 8] + cmp [half + 0], rax ; comare with (q-1)/2 + jc rlt_ret0 ; half e1-e2 is neg => e1 < e2 + + jmp rltRawL1L2 + + + + +rltl1l2_n1: + + + mov rax, [rdx 
+ 32] + cmp [half + 24], rax ; comare with (q-1)/2 + jc rltRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jnz rlt_ret1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 24] + cmp [half + 16], rax ; comare with (q-1)/2 + jc rltRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jnz rlt_ret1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 16] + cmp [half + 8], rax ; comare with (q-1)/2 + jc rltRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jnz rlt_ret1 ; half>rax => e1 -e2 is pos => e1 > e2 + + + mov rax, [rdx + 8] + cmp [half + 0], rax ; comare with (q-1)/2 + jc rltRawL1L2 ; half e1-e2 is neg => e1 < e2 + + jmp rlt_ret1 + + + + + +rltRawL1L2: + + mov rax, [rsi + 32] + cmp [rdx + 32], rax ; comare with (q-1)/2 + jc rlt_ret0 ; rsi 1st > 2nd + jnz rlt_ret1 + + mov rax, [rsi + 24] + cmp [rdx + 24], rax ; comare with (q-1)/2 + jc rlt_ret0 ; rsi 1st > 2nd + jnz rlt_ret1 + + mov rax, [rsi + 16] + cmp [rdx + 16], rax ; comare with (q-1)/2 + jc rlt_ret0 ; rsi 1st > 2nd + jnz rlt_ret1 + + mov rax, [rsi + 8] + cmp [rdx + 8], rax ; comare with (q-1)/2 + jc rlt_ret0 ; rsi 1st > 2nd + jnz rlt_ret1 + + +rlt_ret0: + xor rax, rax + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret +rlt_ret1: + mov rax, 1 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; req - Raw Eq +;;;;;;;;;;;;;;;;;;;;;; +; returns in ax 1 id *rsi == *rdx +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rax <= Return 1 or 0 +; Modified Registers: +; r8, r9, rax +;;;;;;;;;;;;;;;;;;;;;; +Fr_req: + push rbp + push rsi + push rdx + mov rbp, rsp + mov r8, [rsi] + mov r9, [rdx] + bt r8, 63 ; Check if is short first operand + jc req_l1 + bt r9, 63 ; Check if is short second operand + jc req_s1l2 + +req_s1s2: ; Both operands are short + cmp r8d, r9d + je req_ret1 + jmp req_ret0 + + +req_l1: + bt r9, 63 ; Check if is short second operand + jc req_l1l2 + +;;;;;;;; +req_l1s2: + bt r8, 62 ; check if montgomery first + jc req_l1ms2 +req_l1ns2: + + mov r8, rdi + 
sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toLongNormal + mov rdx, rdi + pop rdi + pop rsi + + jmp reqL1L2 + +req_l1ms2: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toMontgomery + mov rdx, rdi + pop rdi + pop rsi + + jmp reqL1L2 + + +;;;;;;;; +req_s1l2: + bt r9, 62 ; check if montgomery second + jc req_s1l2m +req_s1l2n: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toLongNormal + mov rsi, rdi + pop rdi + pop rdx + + jmp reqL1L2 + +req_s1l2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toMontgomery + mov rsi, rdi + pop rdi + pop rdx + + jmp reqL1L2 + +;;;; +req_l1l2: + bt r8, 62 ; check if montgomery first + jc req_l1ml2 +req_l1nl2: + bt r9, 62 ; check if montgomery second + jc req_l1nl2m +req_l1nl2n: + jmp reqL1L2 + +req_l1nl2m: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rdx + push r8 + call Fr_toMontgomery + mov rsi, rdi + pop rdi + pop rdx + + jmp reqL1L2 + +req_l1ml2: + bt r9, 62 ; check if montgomery second + jc req_l1ml2m +req_l1ml2n: + + mov r8, rdi + sub rsp, 40 + mov rdi, rsp + push rsi + mov rsi, rdx + push r8 + call Fr_toMontgomery + mov rdx, rdi + pop rdi + pop rsi + + jmp reqL1L2 + +req_l1ml2m: + jmp reqL1L2 + + +;;;;;; +; eqL1L2 +;;;;;; + +reqL1L2: + + mov rax, [rsi + 8] + cmp [rdx + 8], rax + jne req_ret0 ; rsi 1st > 2nd + + mov rax, [rsi + 16] + cmp [rdx + 16], rax + jne req_ret0 ; rsi 1st > 2nd + + mov rax, [rsi + 24] + cmp [rdx + 24], rax + jne req_ret0 ; rsi 1st > 2nd + + mov rax, [rsi + 32] + cmp [rdx + 32], rax + jne req_ret0 ; rsi 1st > 2nd + + +req_ret1: + mov rax, 1 + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + +req_ret0: + xor rax, rax + mov rsp, rbp + pop rdx + pop rsi + pop rbp + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; gt +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result can be zero 
or one. +; Modified Registers: +; rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_gt: + call Fr_rgt + mov [rdi], rax + ret + +;;;;;;;;;;;;;;;;;;;;;; +; lt +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result can be zero or one. +; Modified Registers: +; rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_lt: + call Fr_rlt + mov [rdi], rax + ret + +;;;;;;;;;;;;;;;;;;;;;; +; eq +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result can be zero or one. +; Modified Registers: +; rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_eq: + call Fr_req + mov [rdi], rax + ret + +;;;;;;;;;;;;;;;;;;;;;; +; neq +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result can be zero or one. +; Modified Registers: +; rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_neq: + call Fr_req + xor rax, 1 + mov [rdi], rax + ret + +;;;;;;;;;;;;;;;;;;;;;; +; geq +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result can be zero or one. +; Modified Registers: +; rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_geq: + call Fr_rlt + xor rax, 1 + mov [rdi], rax + ret + +;;;;;;;;;;;;;;;;;;;;;; +; leq +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result can be zero or one. 
+; Modified Registers: +; rax, rcx +;;;;;;;;;;;;;;;;;;;;;; +Fr_leq: + call Fr_rgt + xor rax, 1 + mov [rdi], rax + ret + + + +;;;;;;;;;;;;;;;;;;;;;; +; rawIsEq +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rdi <= Pointer to element 1 +; rsi <= Pointer to element 2 +; Returns +; ax <= 1 if are equal 0, otherwise +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +Fr_rawIsEq: + + mov rax, [rsi + 0] + cmp [rdi + 0], rax + jne rawIsEq_ret0 + + mov rax, [rsi + 8] + cmp [rdi + 8], rax + jne rawIsEq_ret0 + + mov rax, [rsi + 16] + cmp [rdi + 16], rax + jne rawIsEq_ret0 + + mov rax, [rsi + 24] + cmp [rdi + 24], rax + jne rawIsEq_ret0 + +rawIsEq_ret1: + mov rax, 1 + ret + +rawIsEq_ret0: + xor rax, rax + ret + +;;;;;;;;;;;;;;;;;;;;;; +; rawIsZero +;;;;;;;;;;;;;;;;;;;;;; +; Compares two elements of any kind +; Params: +; rdi <= Pointer to element 1 +; Returns +; ax <= 1 if is 0, otherwise +; Modified Registers: +; rax +;;;;;;;;;;;;;;;;;;;;;; +Fr_rawIsZero: + + cmp qword [rdi + 0], $0 + jne rawIsZero_ret0 + + cmp qword [rdi + 8], $0 + jne rawIsZero_ret0 + + cmp qword [rdi + 16], $0 + jne rawIsZero_ret0 + + cmp qword [rdi + 24], $0 + jne rawIsZero_ret0 + + +rawIsZero_ret1: + mov rax, 1 + ret + +rawIsZero_ret0: + xor rax, rax + ret + + + + + + + + + + + +;;;;;;;;;;;;;;;;;;;;;; +; land +;;;;;;;;;;;;;;;;;;;;;; +; Logical and between two elements +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result zero or one +; Modified Registers: +; rax, rcx, r8 +;;;;;;;;;;;;;;;;;;;;;; +Fr_land: + + + + + + + mov rax, [rsi] + bt rax, 63 + jc tmp_120 + + test eax, eax + jz retZero_122 + jmp retOne_121 + +tmp_120: + + mov rax, [rsi + 8] + test rax, rax + jnz retOne_121 + + mov rax, [rsi + 16] + test rax, rax + jnz retOne_121 + + mov rax, [rsi + 24] + test rax, rax + jnz retOne_121 + + mov rax, [rsi + 32] + test rax, rax + jnz retOne_121 + + +retZero_122: + mov qword r8, 0 + jmp done_123 + +retOne_121: + mov qword r8, 1 + 
+done_123: + + + + + + + + mov rax, [rdx] + bt rax, 63 + jc tmp_124 + + test eax, eax + jz retZero_126 + jmp retOne_125 + +tmp_124: + + mov rax, [rdx + 8] + test rax, rax + jnz retOne_125 + + mov rax, [rdx + 16] + test rax, rax + jnz retOne_125 + + mov rax, [rdx + 24] + test rax, rax + jnz retOne_125 + + mov rax, [rdx + 32] + test rax, rax + jnz retOne_125 + + +retZero_126: + mov qword rcx, 0 + jmp done_127 + +retOne_125: + mov qword rcx, 1 + +done_127: + + and rcx, r8 + mov [rdi], rcx + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; lor +;;;;;;;;;;;;;;;;;;;;;; +; Logical or between two elements +; Params: +; rsi <= Pointer to element 1 +; rdx <= Pointer to element 2 +; rdi <= Pointer to result zero or one +; Modified Registers: +; rax, rcx, r8 +;;;;;;;;;;;;;;;;;;;;;; +Fr_lor: + + + + + + + mov rax, [rsi] + bt rax, 63 + jc tmp_128 + + test eax, eax + jz retZero_130 + jmp retOne_129 + +tmp_128: + + mov rax, [rsi + 8] + test rax, rax + jnz retOne_129 + + mov rax, [rsi + 16] + test rax, rax + jnz retOne_129 + + mov rax, [rsi + 24] + test rax, rax + jnz retOne_129 + + mov rax, [rsi + 32] + test rax, rax + jnz retOne_129 + + +retZero_130: + mov qword r8, 0 + jmp done_131 + +retOne_129: + mov qword r8, 1 + +done_131: + + + + + + + + mov rax, [rdx] + bt rax, 63 + jc tmp_132 + + test eax, eax + jz retZero_134 + jmp retOne_133 + +tmp_132: + + mov rax, [rdx + 8] + test rax, rax + jnz retOne_133 + + mov rax, [rdx + 16] + test rax, rax + jnz retOne_133 + + mov rax, [rdx + 24] + test rax, rax + jnz retOne_133 + + mov rax, [rdx + 32] + test rax, rax + jnz retOne_133 + + +retZero_134: + mov qword rcx, 0 + jmp done_135 + +retOne_133: + mov qword rcx, 1 + +done_135: + + or rcx, r8 + mov [rdi], rcx + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; lnot +;;;;;;;;;;;;;;;;;;;;;; +; Do the logical not of an element +; Params: +; rsi <= Pointer to element to be tested +; rdi <= Pointer to result one if element1 is zero and zero otherwise +; Modified Registers: +; rax, rax, r8 +;;;;;;;;;;;;;;;;;;;;;; +Fr_lnot: + 
+ + + + + + mov rax, [rsi] + bt rax, 63 + jc tmp_136 + + test eax, eax + jz retZero_138 + jmp retOne_137 + +tmp_136: + + mov rax, [rsi + 8] + test rax, rax + jnz retOne_137 + + mov rax, [rsi + 16] + test rax, rax + jnz retOne_137 + + mov rax, [rsi + 24] + test rax, rax + jnz retOne_137 + + mov rax, [rsi + 32] + test rax, rax + jnz retOne_137 + + +retZero_138: + mov qword rcx, 0 + jmp done_139 + +retOne_137: + mov qword rcx, 1 + +done_139: + + test rcx, rcx + + jz lnot_retOne +lnot_retZero: + mov qword [rdi], 0 + ret +lnot_retOne: + mov qword [rdi], 1 + ret + + +;;;;;;;;;;;;;;;;;;;;;; +; isTrue +;;;;;;;;;;;;;;;;;;;;;; +; Convert a 64 bit integer to a long format field element +; Params: +; rsi <= Pointer to the element +; Returs: +; rax <= 1 if true 0 if false +;;;;;;;;;;;;;;;;;;;;;;; +Fr_isTrue: + + + + + + + mov rax, [rdi] + bt rax, 63 + jc tmp_140 + + test eax, eax + jz retZero_142 + jmp retOne_141 + +tmp_140: + + mov rax, [rdi + 8] + test rax, rax + jnz retOne_141 + + mov rax, [rdi + 16] + test rax, rax + jnz retOne_141 + + mov rax, [rdi + 24] + test rax, rax + jnz retOne_141 + + mov rax, [rdi + 32] + test rax, rax + jnz retOne_141 + + +retZero_142: + mov qword rax, 0 + jmp done_143 + +retOne_141: + mov qword rax, 1 + +done_143: + + ret + + + + + + section .data +Fr_q: + dd 0 + dd 0x80000000 +Fr_rawq: +q dq 0x992d30ed00000001,0x224698fc094cf91b,0x0000000000000000,0x4000000000000000 +half dq 0xcc96987680000000,0x11234c7e04a67c8d,0x0000000000000000,0x2000000000000000 +R2 dq 0x8c78ecb30000000f,0xd7d30dbd8b0de0e7,0x7797a99bc3c95d18,0x096d41af7b9cb714 +Fr_R3: + dd 0 + dd 0x80000000 +Fr_rawR3: +R3 dq 0xf185a5993a9e10f9,0xf6a68f3b6ac5b1d1,0xdf8d1014353fd42c,0x2ae309222d2d9910 +lboMask dq 0x7fffffffffffffff +np dq 0x992d30ecffffffff + diff --git a/code_producers/src/c_elements/secq256k1/fr.cpp b/code_producers/src/c_elements/secq256k1/fr.cpp new file mode 100644 index 000000000..14864de1c --- /dev/null +++ b/code_producers/src/c_elements/secq256k1/fr.cpp @@ -0,0 +1,321 
@@ +#include "fr.hpp" +#include +#include +#include +#include +#include + + +static mpz_t q; +static mpz_t zero; +static mpz_t one; +static mpz_t mask; +static size_t nBits; +static bool initialized = false; + + +void Fr_toMpz(mpz_t r, PFrElement pE) { + FrElement tmp; + Fr_toNormal(&tmp, pE); + if (!(tmp.type & Fr_LONG)) { + mpz_set_si(r, tmp.shortVal); + if (tmp.shortVal<0) { + mpz_add(r, r, q); + } + } else { + mpz_import(r, Fr_N64, -1, 8, -1, 0, (const void *)tmp.longVal); + } +} + +void Fr_fromMpz(PFrElement pE, mpz_t v) { + if (mpz_fits_sint_p(v)) { + pE->type = Fr_SHORT; + pE->shortVal = mpz_get_si(v); + } else { + pE->type = Fr_LONG; + for (int i=0; ilongVal[i] = 0; + mpz_export((void *)(pE->longVal), NULL, -1, 8, -1, 0, v); + } +} + + +bool Fr_init() { + if (initialized) return false; + initialized = true; + mpz_init(q); + mpz_import(q, Fr_N64, -1, 8, -1, 0, (const void *)Fr_q.longVal); + mpz_init_set_ui(zero, 0); + mpz_init_set_ui(one, 1); + nBits = mpz_sizeinbase (q, 2); + mpz_init(mask); + mpz_mul_2exp(mask, one, nBits); + mpz_sub(mask, mask, one); + return true; +} + +void Fr_str2element(PFrElement pE, char const *s, uint base) { + mpz_t mr; + mpz_init_set_str(mr, s, base); + mpz_fdiv_r(mr, mr, q); + Fr_fromMpz(pE, mr); + mpz_clear(mr); +} + +char *Fr_element2str(PFrElement pE) { + FrElement tmp; + mpz_t r; + if (!(pE->type & Fr_LONG)) { + if (pE->shortVal>=0) { + char *r = new char[32]; + sprintf(r, "%d", pE->shortVal); + return r; + } else { + mpz_init_set_si(r, pE->shortVal); + mpz_add(r, r, q); + } + } else { + Fr_toNormal(&tmp, pE); + mpz_init(r); + mpz_import(r, Fr_N64, -1, 8, -1, 0, (const void *)tmp.longVal); + } + char *res = mpz_get_str (0, 10, r); + mpz_clear(r); + return res; +} + +void Fr_idiv(PFrElement r, PFrElement a, PFrElement b) { + mpz_t ma; + mpz_t mb; + mpz_t mr; + mpz_init(ma); + mpz_init(mb); + mpz_init(mr); + + Fr_toMpz(ma, a); + // char *s1 = mpz_get_str (0, 10, ma); + // printf("s1 %s\n", s1); + Fr_toMpz(mb, b); + // char *s2 
= mpz_get_str (0, 10, mb); + // printf("s2 %s\n", s2); + mpz_fdiv_q(mr, ma, mb); + // char *sr = mpz_get_str (0, 10, mr); + // printf("r %s\n", sr); + Fr_fromMpz(r, mr); + + mpz_clear(ma); + mpz_clear(mb); + mpz_clear(mr); +} + +void Fr_mod(PFrElement r, PFrElement a, PFrElement b) { + mpz_t ma; + mpz_t mb; + mpz_t mr; + mpz_init(ma); + mpz_init(mb); + mpz_init(mr); + + Fr_toMpz(ma, a); + Fr_toMpz(mb, b); + mpz_fdiv_r(mr, ma, mb); + Fr_fromMpz(r, mr); + + mpz_clear(ma); + mpz_clear(mb); + mpz_clear(mr); +} + +void Fr_pow(PFrElement r, PFrElement a, PFrElement b) { + mpz_t ma; + mpz_t mb; + mpz_t mr; + mpz_init(ma); + mpz_init(mb); + mpz_init(mr); + + Fr_toMpz(ma, a); + Fr_toMpz(mb, b); + mpz_powm(mr, ma, mb, q); + Fr_fromMpz(r, mr); + + mpz_clear(ma); + mpz_clear(mb); + mpz_clear(mr); +} + +void Fr_inv(PFrElement r, PFrElement a) { + mpz_t ma; + mpz_t mr; + mpz_init(ma); + mpz_init(mr); + + Fr_toMpz(ma, a); + mpz_invert(mr, ma, q); + Fr_fromMpz(r, mr); + mpz_clear(ma); + mpz_clear(mr); +} + +void Fr_div(PFrElement r, PFrElement a, PFrElement b) { + FrElement tmp; + Fr_inv(&tmp, b); + Fr_mul(r, a, &tmp); +} + +void Fr_fail() { + assert(false); +} + + +RawFr::RawFr() { + Fr_init(); + set(fZero, 0); + set(fOne, 1); + neg(fNegOne, fOne); +} + +RawFr::~RawFr() { +} + +void RawFr::fromString(Element &r, const std::string &s, uint32_t radix) { + mpz_t mr; + mpz_init_set_str(mr, s.c_str(), radix); + mpz_fdiv_r(mr, mr, q); + for (int i=0; i>3] & (1 << (p & 0x7))) +void RawFr::exp(Element &r, const Element &base, uint8_t* scalar, unsigned int scalarSize) { + bool oneFound = false; + Element copyBase; + copy(copyBase, base); + for (int i=scalarSize*8-1; i>=0; i--) { + if (!oneFound) { + if ( !BIT_IS_SET(scalar, i) ) continue; + copy(r, copyBase); + oneFound = true; + continue; + } + square(r, r); + if ( BIT_IS_SET(scalar, i) ) { + mul(r, r, copyBase); + } + } + if (!oneFound) { + copy(r, fOne); + } +} + +void RawFr::toMpz(mpz_t r, const Element &a) { + Element tmp; + 
Fr_rawFromMontgomery(tmp.v, a.v); + mpz_import(r, Fr_N64, -1, 8, -1, 0, (const void *)tmp.v); +} + +void RawFr::fromMpz(Element &r, const mpz_t a) { + for (int i=0; i +#include +#include + +#ifdef __APPLE__ +#include // typedef unsigned int uint; +#endif // __APPLE__ + +#define Fr_N64 4 +#define Fr_SHORT 0x00000000 +#define Fr_LONG 0x80000000 +#define Fr_LONGMONTGOMERY 0xC0000000 +typedef uint64_t FrRawElement[Fr_N64]; +typedef struct __attribute__((__packed__)) { + int32_t shortVal; + uint32_t type; + FrRawElement longVal; +} FrElement; +typedef FrElement *PFrElement; +extern FrElement Fr_q; +extern FrElement Fr_R3; +extern FrRawElement Fr_rawq; +extern FrRawElement Fr_rawR3; + +extern "C" void Fr_copy(PFrElement r, PFrElement a); +extern "C" void Fr_copyn(PFrElement r, PFrElement a, int n); +extern "C" void Fr_add(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_sub(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_neg(PFrElement r, PFrElement a); +extern "C" void Fr_mul(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_square(PFrElement r, PFrElement a); +extern "C" void Fr_band(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_bor(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_bxor(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_bnot(PFrElement r, PFrElement a); +extern "C" void Fr_shl(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_shr(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_eq(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_neq(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_lt(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_gt(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_leq(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_geq(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_land(PFrElement r, PFrElement a, PFrElement b); +extern "C" void 
Fr_lor(PFrElement r, PFrElement a, PFrElement b); +extern "C" void Fr_lnot(PFrElement r, PFrElement a); +extern "C" void Fr_toNormal(PFrElement r, PFrElement a); +extern "C" void Fr_toLongNormal(PFrElement r, PFrElement a); +extern "C" void Fr_toMontgomery(PFrElement r, PFrElement a); + +extern "C" int Fr_isTrue(PFrElement pE); +extern "C" int Fr_toInt(PFrElement pE); + +extern "C" void Fr_rawCopy(FrRawElement pRawResult, const FrRawElement pRawA); +extern "C" void Fr_rawSwap(FrRawElement pRawResult, FrRawElement pRawA); +extern "C" void Fr_rawAdd(FrRawElement pRawResult, const FrRawElement pRawA, const FrRawElement pRawB); +extern "C" void Fr_rawSub(FrRawElement pRawResult, const FrRawElement pRawA, const FrRawElement pRawB); +extern "C" void Fr_rawNeg(FrRawElement pRawResult, const FrRawElement pRawA); +extern "C" void Fr_rawMMul(FrRawElement pRawResult, const FrRawElement pRawA, const FrRawElement pRawB); +extern "C" void Fr_rawMSquare(FrRawElement pRawResult, const FrRawElement pRawA); +extern "C" void Fr_rawMMul1(FrRawElement pRawResult, const FrRawElement pRawA, uint64_t pRawB); +extern "C" void Fr_rawToMontgomery(FrRawElement pRawResult, const FrRawElement &pRawA); +extern "C" void Fr_rawFromMontgomery(FrRawElement pRawResult, const FrRawElement &pRawA); +extern "C" int Fr_rawIsEq(const FrRawElement pRawA, const FrRawElement pRawB); +extern "C" int Fr_rawIsZero(const FrRawElement pRawB); + +extern "C" void Fr_fail(); + + +// Pending functions to convert + +void Fr_str2element(PFrElement pE, char const*s, uint base); +char *Fr_element2str(PFrElement pE); +void Fr_idiv(PFrElement r, PFrElement a, PFrElement b); +void Fr_mod(PFrElement r, PFrElement a, PFrElement b); +void Fr_inv(PFrElement r, PFrElement a); +void Fr_div(PFrElement r, PFrElement a, PFrElement b); +void Fr_pow(PFrElement r, PFrElement a, PFrElement b); + +class RawFr { + +public: + const static int N64 = Fr_N64; + const static int MaxBits = 255; + + + struct Element { + FrRawElement v; + }; + 
+private: + Element fZero; + Element fOne; + Element fNegOne; + +public: + + RawFr(); + ~RawFr(); + + const Element &zero() { return fZero; }; + const Element &one() { return fOne; }; + const Element &negOne() { return fNegOne; }; + Element set(int value); + void set(Element &r, int value); + + void fromString(Element &r, const std::string &n, uint32_t radix = 10); + std::string toString(const Element &a, uint32_t radix = 10); + + void inline copy(Element &r, const Element &a) { Fr_rawCopy(r.v, a.v); }; + void inline swap(Element &a, Element &b) { Fr_rawSwap(a.v, b.v); }; + void inline add(Element &r, const Element &a, const Element &b) { Fr_rawAdd(r.v, a.v, b.v); }; + void inline sub(Element &r, const Element &a, const Element &b) { Fr_rawSub(r.v, a.v, b.v); }; + void inline mul(Element &r, const Element &a, const Element &b) { Fr_rawMMul(r.v, a.v, b.v); }; + + Element inline add(const Element &a, const Element &b) { Element r; Fr_rawAdd(r.v, a.v, b.v); return r;}; + Element inline sub(const Element &a, const Element &b) { Element r; Fr_rawSub(r.v, a.v, b.v); return r;}; + Element inline mul(const Element &a, const Element &b) { Element r; Fr_rawMMul(r.v, a.v, b.v); return r;}; + + Element inline neg(const Element &a) { Element r; Fr_rawNeg(r.v, a.v); return r; }; + Element inline square(const Element &a) { Element r; Fr_rawMSquare(r.v, a.v); return r; }; + + Element inline add(int a, const Element &b) { return add(set(a), b);}; + Element inline sub(int a, const Element &b) { return sub(set(a), b);}; + Element inline mul(int a, const Element &b) { return mul(set(a), b);}; + + Element inline add(const Element &a, int b) { return add(a, set(b));}; + Element inline sub(const Element &a, int b) { return sub(a, set(b));}; + Element inline mul(const Element &a, int b) { return mul(a, set(b));}; + + void inline mul1(Element &r, const Element &a, uint64_t b) { Fr_rawMMul1(r.v, a.v, b); }; + void inline neg(Element &r, const Element &a) { Fr_rawNeg(r.v, a.v); }; + void 
inline square(Element &r, const Element &a) { Fr_rawMSquare(r.v, a.v); }; + void inv(Element &r, const Element &a); + void div(Element &r, const Element &a, const Element &b); + void exp(Element &r, const Element &base, uint8_t* scalar, unsigned int scalarSize); + + void inline toMontgomery(Element &r, const Element &a) { Fr_rawToMontgomery(r.v, a.v); }; + void inline fromMontgomery(Element &r, const Element &a) { Fr_rawFromMontgomery(r.v, a.v); }; + int inline eq(const Element &a, const Element &b) { return Fr_rawIsEq(a.v, b.v); }; + int inline isZero(const Element &a) { return Fr_rawIsZero(a.v); }; + + void toMpz(mpz_t r, const Element &a); + void fromMpz(Element &a, const mpz_t r); + + int toRprBE(const Element &element, uint8_t *data, int bytes); + int fromRprBE(Element &element, const uint8_t *data, int bytes); + + int bytes ( void ) { return Fr_N64 * 8; }; + + void fromUI(Element &r, unsigned long int v); + + static RawFr field; + +}; + + +#endif // __FR_H + + + diff --git a/code_producers/src/wasm_elements/secq256k1/fr-code.wat b/code_producers/src/wasm_elements/secq256k1/fr-code.wat new file mode 100644 index 000000000..3c4f938ad --- /dev/null +++ b/code_producers/src/wasm_elements/secq256k1/fr-code.wat @@ -0,0 +1,12214 @@ + (func $Fr_int_copy (type $_sig_i32i32) + (param $px i32) + (param $pr i32) + get_local $pr + get_local $px + i64.load + i64.store + get_local $pr + get_local $px + i64.load offset=8 + i64.store offset=8 + get_local $pr + get_local $px + i64.load offset=16 + i64.store offset=16 + get_local $pr + get_local $px + i64.load offset=24 + i64.store offset=24 + ) + (func $Fr_int_zero (type $_sig_i32) + (param $pr i32) + get_local $pr + i64.const 0 + i64.store + get_local $pr + i64.const 0 + i64.store offset=8 + get_local $pr + i64.const 0 + i64.store offset=16 + get_local $pr + i64.const 0 + i64.store offset=24 + ) + (func $Fr_int_isZero (type $_sig_i32ri32) + (param $px i32) + (result i32) + get_local $px + i64.load offset=24 + i64.eqz + if + 
get_local $px + i64.load offset=16 + i64.eqz + if + get_local $px + i64.load offset=8 + i64.eqz + if + get_local $px + i64.load + i64.eqz + return + else + i32.const 0 + return + end + else + i32.const 0 + return + end + else + i32.const 0 + return + end + i32.const 0 + return + ) + (func $Fr_int_one (type $_sig_i32) + (param $pr i32) + get_local $pr + i64.const 1 + i64.store + get_local $pr + i64.const 0 + i64.store offset=8 + get_local $pr + i64.const 0 + i64.store offset=16 + get_local $pr + i64.const 0 + i64.store offset=24 + ) + (func $Fr_int_eq (type $_sig_i32i32ri32) + (param $px i32) + (param $py i32) + (result i32) + get_local $px + i64.load offset=24 + get_local $py + i64.load offset=24 + i64.eq + if + get_local $px + i64.load offset=16 + get_local $py + i64.load offset=16 + i64.eq + if + get_local $px + i64.load offset=8 + get_local $py + i64.load offset=8 + i64.eq + if + get_local $px + i64.load + get_local $py + i64.load + i64.eq + return + else + i32.const 0 + return + end + else + i32.const 0 + return + end + else + i32.const 0 + return + end + i32.const 0 + return + ) + (func $Fr_int_gt (type $_sig_i32i32ri32) + (param $px i32) + (param $py i32) + (result i32) + get_local $px + i64.load offset=24 + get_local $py + i64.load offset=24 + i64.lt_u + if + i32.const 0 + return + else + get_local $px + i64.load offset=24 + get_local $py + i64.load offset=24 + i64.gt_u + if + i32.const 1 + return + else + get_local $px + i64.load offset=16 + get_local $py + i64.load offset=16 + i64.lt_u + if + i32.const 0 + return + else + get_local $px + i64.load offset=16 + get_local $py + i64.load offset=16 + i64.gt_u + if + i32.const 1 + return + else + get_local $px + i64.load offset=8 + get_local $py + i64.load offset=8 + i64.lt_u + if + i32.const 0 + return + else + get_local $px + i64.load offset=8 + get_local $py + i64.load offset=8 + i64.gt_u + if + i32.const 1 + return + else + get_local $px + i64.load + get_local $py + i64.load + i64.gt_u + return + end + end + 
end + end + end + end + i32.const 0 + return + ) + (func $Fr_int_gte (type $_sig_i32i32ri32) + (param $px i32) + (param $py i32) + (result i32) + get_local $px + i64.load offset=24 + get_local $py + i64.load offset=24 + i64.lt_u + if + i32.const 0 + return + else + get_local $px + i64.load offset=24 + get_local $py + i64.load offset=24 + i64.gt_u + if + i32.const 1 + return + else + get_local $px + i64.load offset=16 + get_local $py + i64.load offset=16 + i64.lt_u + if + i32.const 0 + return + else + get_local $px + i64.load offset=16 + get_local $py + i64.load offset=16 + i64.gt_u + if + i32.const 1 + return + else + get_local $px + i64.load offset=8 + get_local $py + i64.load offset=8 + i64.lt_u + if + i32.const 0 + return + else + get_local $px + i64.load offset=8 + get_local $py + i64.load offset=8 + i64.gt_u + if + i32.const 1 + return + else + get_local $px + i64.load + get_local $py + i64.load + i64.ge_u + return + end + end + end + end + end + end + i32.const 0 + return + ) + (func $Fr_int_add (type $_sig_i32i32i32ri32) + (param $x i32) + (param $y i32) + (param $r i32) + (result i32) + (local $c i64) + get_local $x + i64.load32_u + get_local $y + i64.load32_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 + get_local $x + i64.load32_u offset=4 + get_local $y + i64.load32_u offset=4 + i64.add + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=4 + get_local $x + i64.load32_u offset=8 + get_local $y + i64.load32_u offset=8 + i64.add + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=8 + get_local $x + i64.load32_u offset=12 + get_local $y + i64.load32_u offset=12 + i64.add + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=12 + get_local $x + i64.load32_u offset=16 + get_local $y + i64.load32_u offset=16 + i64.add + get_local $c + i64.const 
32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=16 + get_local $x + i64.load32_u offset=20 + get_local $y + i64.load32_u offset=20 + i64.add + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=20 + get_local $x + i64.load32_u offset=24 + get_local $y + i64.load32_u offset=24 + i64.add + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=24 + get_local $x + i64.load32_u offset=28 + get_local $y + i64.load32_u offset=28 + i64.add + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $r + get_local $c + i64.store32 offset=28 + get_local $c + i64.const 32 + i64.shr_u + i32.wrap/i64 + ) + (func $Fr_int_sub (type $_sig_i32i32i32ri32) + (param $x i32) + (param $y i32) + (param $r i32) + (result i32) + (local $c i64) + get_local $x + i64.load32_u + get_local $y + i64.load32_u + i64.sub + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 + get_local $x + i64.load32_u offset=4 + get_local $y + i64.load32_u offset=4 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=4 + get_local $x + i64.load32_u offset=8 + get_local $y + i64.load32_u offset=8 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=8 + get_local $x + i64.load32_u offset=12 + get_local $y + i64.load32_u offset=12 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=12 + get_local $x + i64.load32_u offset=16 + get_local $y + i64.load32_u offset=16 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + 
i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=16 + get_local $x + i64.load32_u offset=20 + get_local $y + i64.load32_u offset=20 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=20 + get_local $x + i64.load32_u offset=24 + get_local $y + i64.load32_u offset=24 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=24 + get_local $x + i64.load32_u offset=28 + get_local $y + i64.load32_u offset=28 + i64.sub + get_local $c + i64.const 32 + i64.shr_s + i64.add + set_local $c + get_local $r + get_local $c + i64.const 0xFFFFFFFF + i64.and + i64.store32 offset=28 + get_local $c + i64.const 32 + i64.shr_s + i32.wrap/i64 + ) + (func $Fr_int_mul (type $_sig_i32i32i32) + (param $x i32) + (param $y i32) + (param $r i32) + (local $c0 i64) + (local $c1 i64) + (local $x0 i64) + (local $y0 i64) + (local $x1 i64) + (local $y1 i64) + (local $x2 i64) + (local $y2 i64) + (local $x3 i64) + (local $y3 i64) + (local $x4 i64) + (local $y4 i64) + (local $x5 i64) + (local $y5 i64) + (local $x6 i64) + (local $y6 i64) + (local $x7 i64) + (local $y7 i64) + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u + tee_local $x0 + get_local $y + i64.load32_u + tee_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=4 + tee_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=4 + tee_local $x1 + get_local $y0 + 
i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=4 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=8 + tee_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=8 + tee_local $x2 + get_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=8 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=12 + tee_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=12 + tee_local $x3 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=12 + get_local $c0 + i64.const 
32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=16 + tee_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=16 + tee_local $x4 + get_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=16 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=20 + tee_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y2 + i64.mul + i64.add + set_local $c1 + 
get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=20 + tee_local $x5 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=20 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=24 + tee_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=24 + tee_local $x6 + 
get_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=24 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=28 + tee_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=28 + tee_local $x7 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=28 
+ get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=32 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + 
get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=36 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=40 + get_local 
$c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=44 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=48 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y6 + i64.mul + i64.add + 
set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=52 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=56 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=60 + ) + (func $Fr_int_square (type $_sig_i32i32) + (param $x i32) + (param $r i32) + (local $c0 i64) + (local $c1 i64) + (local $c0_old i64) + (local $c1_old i64) + (local $x0 i64) + (local $x1 i64) + (local $x2 i64) + (local $x3 i64) + (local $x4 i64) + (local $x5 i64) + (local $x6 i64) + (local $x7 i64) + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u + tee_local $x0 + get_local $x0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=4 + tee_local $x1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u 
+ i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=4 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=8 + tee_local $x2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=8 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=12 + tee_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + 
i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=12 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=16 + tee_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=16 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 
+ get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=20 + tee_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=20 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=24 + tee_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and 
+ i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=24 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=28 + tee_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local 
$c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=28 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=32 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + 
get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=36 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + 
get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=40 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=44 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + 
get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=48 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=52 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $r + get_local $c0 + 
i64.store32 offset=56 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + get_local $r + get_local $c0_old + i64.store32 offset=60 + ) + (func $Fr_int_squareOld (type $_sig_i32i32) + (param $x i32) + (param $r i32) + get_local $x + get_local $x + get_local $r + call $Fr_int_mul + ) + (func $Fr_int__mul1 (type $_sig_i32i64i32) + (param $px i32) + (param $y i64) + (param $pr i32) + (local $c i64) + get_local $px + i64.load32_u align=1 + get_local $y + i64.mul + set_local $c + get_local $pr + get_local $c + i64.store32 align=1 + get_local $px + i64.load32_u offset=4 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=4 align=1 + get_local $px + i64.load32_u offset=8 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=8 align=1 + get_local $px + i64.load32_u offset=12 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=12 align=1 + get_local $px + i64.load32_u offset=16 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=16 align=1 + get_local $px + i64.load32_u offset=20 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=20 align=1 + get_local $px + i64.load32_u offset=24 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=24 align=1 + get_local $px + i64.load32_u offset=28 align=1 + get_local $y + i64.mul + get_local $c + i64.const 32 + i64.shr_u + i64.add + set_local $c + get_local $pr + get_local $c + i64.store32 offset=28 
align=1 + ) + (func $Fr_int__add1 (type $_sig_i32i64) + (param $x i32) + (param $y i64) + (local $c i64) + (local $px i32) + get_local $x + set_local $px + get_local $px + i64.load32_u align=1 + get_local $y + i64.add + set_local $c + get_local $px + get_local $c + i64.store32 align=1 + get_local $c + i64.const 32 + i64.shr_u + set_local $c + block + loop + get_local $c + i64.eqz + br_if 1 + get_local $px + i32.const 4 + i32.add + set_local $px + get_local $px + i64.load32_u align=1 + get_local $c + i64.add + set_local $c + get_local $px + get_local $c + i64.store32 align=1 + get_local $c + i64.const 32 + i64.shr_u + set_local $c + br 0 + end + end + ) + (func $Fr_int_div (type $_sig_i32i32i32i32) + (param $x i32) + (param $y i32) + (param $c i32) + (param $r i32) + (local $rr i32) + (local $cc i32) + (local $eX i32) + (local $eY i32) + (local $sy i64) + (local $sx i64) + (local $ec i32) + get_local $c + if + get_local $c + set_local $cc + else + i32.const 192 + set_local $cc + end + get_local $r + if + get_local $r + set_local $rr + else + i32.const 224 + set_local $rr + end + get_local $x + get_local $rr + call $Fr_int_copy + get_local $y + i32.const 160 + call $Fr_int_copy + get_local $cc + call $Fr_int_zero + i32.const 256 + call $Fr_int_zero + i32.const 31 + set_local $eX + i32.const 31 + set_local $eY + block + loop + i32.const 160 + get_local $eY + i32.add + i32.load8_u + get_local $eY + i32.const 3 + i32.eq + i32.or + br_if 1 + get_local $eY + i32.const 1 + i32.sub + set_local $eY + br 0 + end + end + i32.const 160 + get_local $eY + i32.add + i32.const 3 + i32.sub + i64.load32_u align=1 + i64.const 1 + i64.add + set_local $sy + get_local $sy + i64.const 1 + i64.eq + if + i64.const 0 + i64.const 0 + i64.div_u + drop + end + block + loop + block + loop + get_local $rr + get_local $eX + i32.add + i32.load8_u + get_local $eX + i32.const 7 + i32.eq + i32.or + br_if 1 + get_local $eX + i32.const 1 + i32.sub + set_local $eX + br 0 + end + end + get_local $rr + 
get_local $eX + i32.add + i32.const 7 + i32.sub + i64.load align=1 + set_local $sx + get_local $sx + get_local $sy + i64.div_u + set_local $sx + get_local $eX + get_local $eY + i32.sub + i32.const 4 + i32.sub + set_local $ec + block + loop + get_local $sx + i64.const 0xFFFFFFFF00000000 + i64.and + i64.eqz + get_local $ec + i32.const 0 + i32.ge_s + i32.and + br_if 1 + get_local $sx + i64.const 8 + i64.shr_u + set_local $sx + get_local $ec + i32.const 1 + i32.add + set_local $ec + br 0 + end + end + get_local $sx + i64.eqz + if + get_local $rr + i32.const 160 + call $Fr_int_gte + i32.eqz + br_if 2 + i64.const 1 + set_local $sx + i32.const 0 + set_local $ec + end + i32.const 160 + get_local $sx + i32.const 288 + call $Fr_int__mul1 + get_local $rr + i32.const 288 + get_local $ec + i32.sub + get_local $rr + call $Fr_int_sub + drop + get_local $cc + get_local $ec + i32.add + get_local $sx + call $Fr_int__add1 + br 0 + end + end + ) + (func $Fr_int_inverseMod (type $_sig_i32i32i32) + (param $px i32) + (param $pm i32) + (param $pr i32) + (local $t i32) + (local $newt i32) + (local $r i32) + (local $qq i32) + (local $qr i32) + (local $newr i32) + (local $swp i32) + (local $x i32) + (local $signt i32) + (local $signnewt i32) + (local $signx i32) + i32.const 320 + set_local $t + i32.const 320 + call $Fr_int_zero + i32.const 0 + set_local $signt + i32.const 352 + set_local $r + get_local $pm + i32.const 352 + call $Fr_int_copy + i32.const 384 + set_local $newt + i32.const 384 + call $Fr_int_one + i32.const 0 + set_local $signnewt + i32.const 416 + set_local $newr + get_local $px + i32.const 416 + call $Fr_int_copy + i32.const 448 + set_local $qq + i32.const 480 + set_local $qr + i32.const 576 + set_local $x + block + loop + get_local $newr + call $Fr_int_isZero + br_if 1 + get_local $r + get_local $newr + get_local $qq + get_local $qr + call $Fr_int_div + get_local $qq + get_local $newt + i32.const 512 + call $Fr_int_mul + get_local $signt + if + get_local $signnewt + if + 
i32.const 512 + get_local $t + call $Fr_int_gte + if + i32.const 512 + get_local $t + get_local $x + call $Fr_int_sub + drop + i32.const 0 + set_local $signx + else + get_local $t + i32.const 512 + get_local $x + call $Fr_int_sub + drop + i32.const 1 + set_local $signx + end + else + i32.const 512 + get_local $t + get_local $x + call $Fr_int_add + drop + i32.const 1 + set_local $signx + end + else + get_local $signnewt + if + i32.const 512 + get_local $t + get_local $x + call $Fr_int_add + drop + i32.const 0 + set_local $signx + else + get_local $t + i32.const 512 + call $Fr_int_gte + if + get_local $t + i32.const 512 + get_local $x + call $Fr_int_sub + drop + i32.const 0 + set_local $signx + else + i32.const 512 + get_local $t + get_local $x + call $Fr_int_sub + drop + i32.const 1 + set_local $signx + end + end + end + get_local $t + set_local $swp + get_local $newt + set_local $t + get_local $x + set_local $newt + get_local $swp + set_local $x + get_local $signnewt + set_local $signt + get_local $signx + set_local $signnewt + get_local $r + set_local $swp + get_local $newr + set_local $r + get_local $qr + set_local $newr + get_local $swp + set_local $qr + br 0 + end + end + get_local $signt + if + get_local $pm + get_local $t + get_local $pr + call $Fr_int_sub + drop + else + get_local $t + get_local $pr + call $Fr_int_copy + end + ) + (func $Fr_F1m_add (type $_sig_i32i32i32) + (param $x i32) + (param $y i32) + (param $r i32) + get_local $x + get_local $y + get_local $r + call $Fr_int_add + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_sub + drop + else + get_local $r + i32.const 608 + call $Fr_int_gte + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_sub + drop + end + end + ) + (func $Fr_F1m_sub (type $_sig_i32i32i32) + (param $x i32) + (param $y i32) + (param $r i32) + get_local $x + get_local $y + get_local $r + call $Fr_int_sub + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_add + drop + end + ) + (func 
$Fr_F1m_neg (type $_sig_i32i32) + (param $x i32) + (param $r i32) + i32.const 768 + get_local $x + get_local $r + call $Fr_F1m_sub + ) + (func $Fr_F1m_mReduct (type $_sig_i32i32) + (param $t i32) + (param $r i32) + (local $np32 i64) + (local $c i64) + (local $m i64) + i64.const 4294967295 + set_local $np32 + i64.const 0 + set_local $c + get_local $t + i64.load32_u + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 + get_local $t + i64.load32_u offset=4 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=4 + get_local $t + i64.load32_u offset=8 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=8 + get_local $t + i64.load32_u offset=12 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=12 + get_local $t + i64.load32_u offset=16 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=16 + get_local $t + i64.load32_u offset=20 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=20 + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + 
set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=4 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=4 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=4 + get_local $t + i64.load32_u offset=8 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=8 + get_local $t + i64.load32_u offset=12 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=12 + get_local $t + i64.load32_u offset=16 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=16 + get_local $t + i64.load32_u offset=20 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=20 + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + 
i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=32 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=4 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=8 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=8 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=8 + get_local $t + i64.load32_u offset=12 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=12 + get_local $t + i64.load32_u offset=16 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=16 + get_local $t + i64.load32_u offset=20 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=20 + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + 
get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=32 + get_local $t + i64.load32_u offset=36 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=36 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=8 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=12 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=12 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=12 + get_local $t + i64.load32_u offset=16 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=16 + get_local $t + i64.load32_u offset=20 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=20 + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + 
i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=32 + get_local $t + i64.load32_u offset=36 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=36 + get_local $t + i64.load32_u offset=40 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=40 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=12 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=16 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=16 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=16 + get_local $t + i64.load32_u offset=20 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=20 + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local 
$c + i64.store32 offset=32 + get_local $t + i64.load32_u offset=36 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=36 + get_local $t + i64.load32_u offset=40 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=40 + get_local $t + i64.load32_u offset=44 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=44 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=16 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=20 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=20 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=20 + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=32 + get_local $t + i64.load32_u offset=36 + get_local $c + i64.const 32 + i64.shr_u + i64.add + 
i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=36 + get_local $t + i64.load32_u offset=40 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=40 + get_local $t + i64.load32_u offset=44 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=44 + get_local $t + i64.load32_u offset=48 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=48 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=20 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=24 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=24 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=24 + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=32 + get_local $t + i64.load32_u offset=36 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + 
i64.store32 offset=36 + get_local $t + i64.load32_u offset=40 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=40 + get_local $t + i64.load32_u offset=44 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=44 + get_local $t + i64.load32_u offset=48 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=48 + get_local $t + i64.load32_u offset=52 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=52 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=24 + i64.const 0 + set_local $c + get_local $t + i64.load32_u offset=28 + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m + get_local $t + i64.load32_u offset=28 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=28 + get_local $t + i64.load32_u offset=32 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=4 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=32 + get_local $t + i64.load32_u offset=36 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=8 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=36 + get_local $t + i64.load32_u offset=40 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 
608 + i64.load32_u offset=12 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=40 + get_local $t + i64.load32_u offset=44 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=16 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=44 + get_local $t + i64.load32_u offset=48 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=20 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=48 + get_local $t + i64.load32_u offset=52 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=24 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=52 + get_local $t + i64.load32_u offset=56 + get_local $c + i64.const 32 + i64.shr_u + i64.add + i32.const 608 + i64.load32_u offset=28 + get_local $m + i64.mul + i64.add + set_local $c + get_local $t + get_local $c + i64.store32 offset=56 + i32.const 992 + get_local $c + i64.const 32 + i64.shr_u + i64.store32 offset=28 + i32.const 992 + get_local $t + i32.const 32 + i32.add + get_local $r + call $Fr_F1m_add + ) + (func $Fr_F1m_mul (type $_sig_i32i32i32) + (param $x i32) + (param $y i32) + (param $r i32) + (local $c0 i64) + (local $c1 i64) + (local $np32 i64) + (local $x0 i64) + (local $y0 i64) + (local $m0 i64) + (local $q0 i64) + (local $x1 i64) + (local $y1 i64) + (local $m1 i64) + (local $q1 i64) + (local $x2 i64) + (local $y2 i64) + (local $m2 i64) + (local $q2 i64) + (local $x3 i64) + (local $y3 i64) + (local $m3 i64) + (local $q3 i64) + (local $x4 i64) + (local $y4 i64) + (local $m4 i64) + (local $q4 i64) + (local $x5 i64) + (local $y5 i64) + (local $m5 i64) + (local $q5 i64) + (local $x6 i64) + (local $y6 i64) + (local $m6 i64) + (local $q6 i64) + (local $x7 i64) + (local $y7 i64) + (local $m7 i64) + (local $q7 i64) + i64.const 
4294967295 + set_local $np32 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u + tee_local $x0 + get_local $y + i64.load32_u + tee_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m0 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=608 + tee_local $q0 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=4 + tee_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=4 + tee_local $x1 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=612 + tee_local $q1 + get_local $m0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m1 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=8 + tee_local $y2 + i64.mul + i64.add + set_local $c0 + get_local 
$c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=8 + tee_local $x2 + get_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=616 + tee_local $q2 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m2 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=12 + tee_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + 
get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=12 + tee_local $x3 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=620 + tee_local $q3 + get_local $m0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m3 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=16 + tee_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y1 
+ i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=16 + tee_local $x4 + get_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=624 + tee_local $q4 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m4 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=20 + tee_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + 
i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=20 + tee_local $x5 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=628 + tee_local $q5 + get_local $m0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + 
get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m5 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=24 + tee_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=24 + tee_local $x6 + get_local $y0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + 
i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=632 + tee_local $q6 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m6 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $y + i64.load32_u offset=28 + tee_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y5 + 
i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u offset=28 + tee_local $x7 + get_local $y0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + 
i64.const 4294967295 + i64.and + get_local $q5 + get_local $m2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m1 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=636 + tee_local $q7 + get_local $m0 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m7 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u 
+ i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + 
get_local $x2 + get_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 
4294967295 + i64.and + get_local $q6 + get_local $m3 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m2 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=4 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m5 + i64.mul + 
i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=8 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m5 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 
32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m4 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=12 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=16 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $y7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y6 + i64.mul + i64.add + set_local 
$c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m7 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $c1 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m6 + i64.mul + i64.add + set_local $c1 + get_local $c0 + get_local $c1 + i64.const 32 + i64.shr_u + i64.add + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=20 + get_local $c0 + i64.const 32 + i64.shr_u + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $y7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=24 + get_local $c1 + i64.const 32 + i64.shr_u + set_local $c0 + get_local $r + get_local $c1 + i64.store32 offset=28 + get_local $c0 + i32.wrap/i64 + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_sub + drop + else + get_local $r + i32.const 608 + call $Fr_int_gte + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_sub + drop + end + end + ) + (func $Fr_F1m_square (type $_sig_i32i32) + (param $x i32) + (param $r i32) + (local $c0 i64) + (local $c1 i64) + (local $c0_old i64) + (local $c1_old i64) + (local $np32 i64) + (local $x0 i64) + (local $m0 i64) + (local $q0 i64) + (local $x1 i64) + (local $m1 i64) + (local $q1 i64) + (local $x2 i64) + (local $m2 i64) + (local $q2 i64) + (local $x3 i64) + (local $m3 i64) + (local $q3 i64) + (local $x4 i64) + (local $m4 i64) + (local $q4 i64) + (local $x5 i64) + (local $m5 i64) + (local $q5 i64) + (local $x6 i64) + (local $m6 i64) + (local $q6 i64) + 
(local $x7 i64) + (local $m7 i64) + (local $q7 i64) + i64.const 4294967295 + set_local $np32 + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x + i64.load32_u + tee_local $x0 + get_local $x0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m0 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=608 + tee_local $q0 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=4 + tee_local $x1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=612 + tee_local $q1 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 
+ i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=8 + tee_local $x2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=616 + tee_local $q2 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m2 + get_local $c0 
+ i64.const 4294967295 + i64.and + get_local $q0 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=12 + tee_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=620 + tee_local $q3 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + 
set_local $m3 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=16 + tee_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local 
$c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=624 + tee_local $q4 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m4 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=20 + tee_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m4 + i64.mul + i64.add + 
set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=628 + tee_local $q5 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m5 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=24 + tee_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x4 + i64.mul + i64.add + set_local $c0 
+ get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=632 + tee_local $q6 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + 
i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m6 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x0 + get_local $x + i64.load32_u offset=28 + tee_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 
4294967295 + i64.and + get_local $q2 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i32.const 0 + i64.load32_u offset=636 + tee_local $q7 + get_local $m0 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $np32 + i64.mul + i64.const 0xFFFFFFFF + i64.and + set_local $m7 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q0 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x1 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x6 + 
i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q1 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + 
i64.const 4294967295 + i64.and + get_local $q6 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m1 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x2 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q2 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + 
i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m2 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=4 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x3 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $x5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + 
set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q3 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m3 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=8 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x4 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + 
i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q4 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m4 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=12 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x5 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $x6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + 
i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q5 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m5 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=16 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x6 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q6 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m6 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 
offset=20 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + i64.const 0 + set_local $c0 + i64.const 0 + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + i64.const 1 + i64.shl + set_local $c0 + get_local $c1 + i64.const 1 + i64.shl + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $x7 + get_local $x7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $c0_old + i64.const 4294967295 + i64.and + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + get_local $c1_old + i64.add + set_local $c1 + get_local $c0 + i64.const 4294967295 + i64.and + get_local $q7 + get_local $m7 + i64.mul + i64.add + set_local $c0 + get_local $c1 + get_local $c0 + i64.const 32 + i64.shr_u + i64.add + set_local $c1 + get_local $r + get_local $c0 + i64.store32 offset=24 + get_local $c1 + set_local $c0_old + get_local $c0_old + i64.const 32 + i64.shr_u + set_local $c1_old + get_local $r + get_local $c0_old + i64.store32 offset=28 + get_local $c1_old + i32.wrap/i64 + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_sub + drop + else + get_local $r + i32.const 608 + call $Fr_int_gte + if + get_local $r + i32.const 608 + get_local $r + call $Fr_int_sub + drop + end + end + ) + (func $Fr_F1m_squareOld (type $_sig_i32i32) + (param $x i32) + (param $r i32) + get_local $x + get_local $x + get_local $r + call $Fr_F1m_mul + ) + (func $Fr_F1m_toMontgomery (type $_sig_i32i32) + (param $x i32) + (param $r i32) + get_local $x + i32.const 672 + get_local $r + call $Fr_F1m_mul + ) + (func $Fr_F1m_fromMontgomery (type $_sig_i32i32) + (param $x i32) + (param $r i32) + get_local $x + i32.const 1504 + call $Fr_int_copy + i32.const 1536 + call $Fr_int_zero + i32.const 1504 + get_local 
$r + call $Fr_F1m_mReduct + ) + (func $Fr_F1m_isNegative (type $_sig_i32ri32) + (param $x i32) + (result i32) + get_local $x + i32.const 1568 + call $Fr_F1m_fromMontgomery + i32.const 1568 + i32.load + i32.const 1 + i32.and + ) + (func $Fr_F1m_inverse (type $_sig_i32i32) + (param $x i32) + (param $r i32) + get_local $x + get_local $r + call $Fr_F1m_fromMontgomery + get_local $r + i32.const 608 + get_local $r + call $Fr_int_inverseMod + get_local $r + get_local $r + call $Fr_F1m_toMontgomery + ) + (func $Fr_F1m_one (type $_sig_i32) + (param $pr i32) + i32.const 736 + get_local $pr + call $Fr_int_copy + ) + (func $Fr_F1m_load (type $_sig_i32i32i32) + (param $scalar i32) + (param $scalarLen i32) + (param $r i32) + (local $p i32) + (local $l i32) + (local $i i32) + (local $j i32) + get_local $r + call $Fr_int_zero + i32.const 32 + set_local $i + get_local $scalar + set_local $p + block + loop + get_local $i + get_local $scalarLen + i32.gt_u + br_if 1 + get_local $i + i32.const 32 + i32.eq + if + i32.const 1600 + call $Fr_F1m_one + else + i32.const 1600 + i32.const 672 + i32.const 1600 + call $Fr_F1m_mul + end + get_local $p + i32.const 1600 + i32.const 1632 + call $Fr_F1m_mul + get_local $r + i32.const 1632 + get_local $r + call $Fr_F1m_add + get_local $p + i32.const 32 + i32.add + set_local $p + get_local $i + i32.const 32 + i32.add + set_local $i + br 0 + end + end + get_local $scalarLen + i32.const 32 + i32.rem_u + set_local $l + get_local $l + i32.eqz + if + return + end + i32.const 1632 + call $Fr_int_zero + i32.const 0 + set_local $j + block + loop + get_local $j + get_local $l + i32.eq + br_if 1 + get_local $j + get_local $p + i32.load8_u + i32.store8 offset=1632 + get_local $p + i32.const 1 + i32.add + set_local $p + get_local $j + i32.const 1 + i32.add + set_local $j + br 0 + end + end + get_local $i + i32.const 32 + i32.eq + if + i32.const 1600 + call $Fr_F1m_one + else + i32.const 1600 + i32.const 672 + i32.const 1600 + call $Fr_F1m_mul + end + i32.const 
1632 + i32.const 1600 + i32.const 1632 + call $Fr_F1m_mul + get_local $r + i32.const 1632 + get_local $r + call $Fr_F1m_add + ) + (func $Fr_F1m_timesScalar (type $_sig_i32i32i32i32) + (param $x i32) + (param $scalar i32) + (param $scalarLen i32) + (param $r i32) + get_local $scalar + get_local $scalarLen + i32.const 1664 + call $Fr_F1m_load + i32.const 1664 + i32.const 1664 + call $Fr_F1m_toMontgomery + get_local $x + i32.const 1664 + get_local $r + call $Fr_F1m_mul + ) + (func $Fr_F1m_exp (type $_sig_i32i32i32i32) + (param $base i32) + (param $scalar i32) + (param $scalarLength i32) + (param $r i32) + (local $i i32) + (local $b i32) + get_local $base + i32.const 1696 + call $Fr_int_copy + get_local $r + call $Fr_F1m_one + get_local $scalarLength + set_local $i + block + loop + get_local $i + i32.const 1 + i32.sub + set_local $i + get_local $scalar + get_local $i + i32.add + i32.load8_u + set_local $b + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 128 + i32.ge_u + if + get_local $b + i32.const 128 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 64 + i32.ge_u + if + get_local $b + i32.const 64 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 32 + i32.ge_u + if + get_local $b + i32.const 32 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 16 + i32.ge_u + if + get_local $b + i32.const 16 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 8 + i32.ge_u + if + get_local $b + i32.const 8 + i32.sub + set_local $b + i32.const 1696 + get_local $r 
+ get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 4 + i32.ge_u + if + get_local $b + i32.const 4 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 2 + i32.ge_u + if + get_local $b + i32.const 2 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $r + get_local $r + call $Fr_F1m_square + get_local $b + i32.const 1 + i32.ge_u + if + get_local $b + i32.const 1 + i32.sub + set_local $b + i32.const 1696 + get_local $r + get_local $r + call $Fr_F1m_mul + end + get_local $i + i32.eqz + br_if 1 + br 0 + end + end + ) + (func $Fr_F1m_sqrt (type $_sig_i32i32) + (param $n i32) + (param $r i32) + (local $m i32) + (local $i i32) + (local $j i32) + get_local $n + call $Fr_int_isZero + if + get_local $r + call $Fr_int_zero + return + end + i32.const 32 + set_local $m + i32.const 928 + i32.const 1728 + call $Fr_int_copy + get_local $n + i32.const 896 + i32.const 32 + i32.const 1760 + call $Fr_F1m_exp + get_local $n + i32.const 960 + i32.const 32 + i32.const 1792 + call $Fr_F1m_exp + block + loop + i32.const 1760 + i32.const 736 + call $Fr_int_eq + br_if 1 + i32.const 1760 + i32.const 1824 + call $Fr_F1m_square + i32.const 1 + set_local $i + block + loop + i32.const 1824 + i32.const 736 + call $Fr_int_eq + br_if 1 + i32.const 1824 + i32.const 1824 + call $Fr_F1m_square + get_local $i + i32.const 1 + i32.add + set_local $i + br 0 + end + end + i32.const 1728 + i32.const 1856 + call $Fr_int_copy + get_local $m + get_local $i + i32.sub + i32.const 1 + i32.sub + set_local $j + block + loop + get_local $j + i32.eqz + br_if 1 + i32.const 1856 + i32.const 1856 + call $Fr_F1m_square + get_local $j + i32.const 1 + i32.sub + set_local $j + br 0 + end + end + get_local $i + set_local $m + i32.const 1856 + i32.const 1728 + call $Fr_F1m_square + 
i32.const 1760 + i32.const 1728 + i32.const 1760 + call $Fr_F1m_mul + i32.const 1792 + i32.const 1856 + i32.const 1792 + call $Fr_F1m_mul + br 0 + end + end + i32.const 1792 + call $Fr_F1m_isNegative + if + i32.const 1792 + get_local $r + call $Fr_F1m_neg + else + i32.const 1792 + get_local $r + call $Fr_int_copy + end + ) + (func $Fr_F1m_isSquare (type $_sig_i32ri32) + (param $n i32) + (result i32) + get_local $n + call $Fr_int_isZero + if + i32.const 1 + return + end + get_local $n + i32.const 800 + i32.const 32 + i32.const 1888 + call $Fr_F1m_exp + i32.const 1888 + i32.const 736 + call $Fr_int_eq + ) + (func $Fr_copy (type $_sig_i32i32) + (param $pr i32) + (param $px i32) + get_local $pr + get_local $px + i64.load + i64.store + get_local $pr + get_local $px + i64.load offset=8 + i64.store offset=8 + get_local $pr + get_local $px + i64.load offset=16 + i64.store offset=16 + get_local $pr + get_local $px + i64.load offset=24 + i64.store offset=24 + get_local $pr + get_local $px + i64.load offset=32 + i64.store offset=32 + ) + (func $Fr_copyn (type $_sig_i32i32i32) + (param $pr i32) + (param $px i32) + (param $n i32) + (local $s i32) + (local $d i32) + (local $slast i32) + get_local $px + set_local $s + get_local $pr + set_local $d + get_local $s + get_local $n + i32.const 40 + i32.mul + i32.add + set_local $slast + block + loop + get_local $s + get_local $slast + i32.eq + br_if 1 + get_local $d + get_local $s + i64.load + i64.store + get_local $d + i32.const 8 + i32.add + set_local $d + get_local $s + i32.const 8 + i32.add + set_local $s + br 0 + end + end + ) + (func $Fr_isTrue (type $_sig_i32ri32) + (param $px i32) + (result i32) + get_local $px + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $px + i32.const 8 + i32.add + call $Fr_int_isZero + i32.eqz + return + end + get_local $px + i32.load + i32.const 0 + i32.ne + ) + (func $Fr_rawCopyS2L (type $_sig_i32i64) + (param $pR i32) + (param $v i64) + get_local $v + i64.const 0 + i64.gt_s + if + 
get_local $pR + get_local $v + i64.store + get_local $pR + i64.const 0 + i64.store offset=8 + get_local $pR + i64.const 0 + i64.store offset=16 + get_local $pR + i64.const 0 + i64.store offset=24 + else + i64.const 0 + get_local $v + i64.sub + set_local $v + get_local $pR + get_local $v + i64.store + get_local $pR + i64.const 0 + i64.store offset=8 + get_local $pR + i64.const 0 + i64.store offset=16 + get_local $pR + i64.const 0 + i64.store offset=24 + get_local $pR + get_local $pR + call $Fr_F1m_neg + end + ) + (func $Fr_toMontgomery (type $_sig_i32) + (param $pR i32) + get_local $pR + i32.load8_u offset=7 + i32.const 64 + i32.and + if + return + else + get_local $pR + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_toMontgomery + else + get_local $pR + i32.const 8 + i32.add + get_local $pR + i64.load32_s + call $Fr_rawCopyS2L + get_local $pR + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_toMontgomery + get_local $pR + i32.const 1073741824 + i32.store offset=4 + end + end + ) + (func $Fr_toNormal (type $_sig_i32) + (param $pR i32) + get_local $pR + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_fromMontgomery + end + end + ) + (func $Fr_toLongNormal (type $_sig_i32) + (param $pR i32) + get_local $pR + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pR + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_fromMontgomery + end + else + get_local $pR + i32.const 8 + i32.add + 
get_local $pR + i64.load32_s + call $Fr_rawCopyS2L + get_local $pR + i32.const -2147483648 + i32.store offset=4 + end + ) + (func $Fr_isNegative (type $_sig_i32ri32) + (param $pA i32) + (result i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + i32.const 1920 + call $Fr_int_gt + return + end + get_local $pA + i32.load + i32.const 0 + i32.lt_s + ) + (func $Fr_neg (type $_sig_i32i32) + (param $pR i32) + (param $pA i32) + (local $r i64) + (local $overflow i64) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -1073741824 + i32.store offset=4 + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_neg + else + i64.const 0 + get_local $pA + i64.load32_s + i64.sub + set_local $r + get_local $r + i64.const 31 + i64.shr_s + set_local $overflow + get_local $overflow + i64.eqz + get_local $overflow + i64.const 1 + i64.add + i64.eqz + i32.or + if + get_local $pR + get_local $r + i64.store32 + get_local $pR + i32.const 0 + i32.store offset=4 + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $r + call $Fr_rawCopyS2L + end + end + ) + (func $Fr_getLsb32 (type $_sig_i32ri32) + (param $pA i32) + (result i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + call $Fr_toNormal + get_local $pA + i32.load offset=8 + return + else + get_local $pA + i32.load + return + end + i32.const 0 + ) + (func $Fr_toInt (type $_sig_i32ri32) + (param $pA i32) + (result i32) + get_local $pA + call $Fr_isNegative + if + i32.const 8 + get_local $pA + call $Fr_neg + i32.const 0 + i32.const 8 + call $Fr_getLsb32 + i32.sub + return + else + get_local $pA + call $Fr_getLsb32 
+ return + end + i32.const 0 + ) + (func $Fr_add (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + (local $r i64) + (local $overflow i64) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + else + get_local $pB + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + end + end + else + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + i32.const 16 + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const 8 + i32.add + i32.const 16 + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add 
+ end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + i32.const 16 + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + i32.const 16 + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_add + end + else + get_local $pA + i64.load32_s + get_local $pB + i64.load32_s + i64.add + set_local $r + get_local $r + i64.const 31 + i64.shr_s + set_local $overflow + get_local $overflow + i64.eqz + get_local $overflow + i64.const 1 + i64.add + i64.eqz + i32.or + if + get_local $pR + get_local $r + i64.store32 + get_local $pR + i32.const 0 + i32.store offset=4 + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $r + call $Fr_rawCopyS2L + end + end + end + ) + (func $Fr_sub (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + (local $r i64) + (local $overflow i64) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + else + get_local $pB + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + 
i32.const 8 + i32.add + call $Fr_F1m_sub + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + end + end + else + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + i32.const 16 + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const 8 + i32.add + i32.const 16 + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + i32.const 16 + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + i32.const 16 + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_sub + end + else + get_local $pA + i64.load32_s + get_local $pB + i64.load32_s + i64.sub + set_local $r + get_local $r + i64.const 31 + i64.shr_s + set_local $overflow + get_local $overflow + i64.eqz + 
get_local $overflow + i64.const 1 + i64.add + i64.eqz + i32.or + if + get_local $pR + get_local $r + i64.store32 + get_local $pR + i32.const 0 + i32.store offset=4 + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $r + call $Fr_rawCopyS2L + end + end + end + ) + (func $Fr_eqR (type $_sig_i32i32ri32) + (param $pA i32) + (param $pB i32) + (result i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + else + get_local $pA + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + else + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store 
offset=4 + end + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + else + get_local $pA + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + else + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_eq + if + i32.const 1 + return + else + i32.const 0 + return + end + end + end + else + get_local $pA + i32.load + get_local $pB + i32.load + i32.eq + if + i32.const 1 + return + else + i32.const 0 + return + end + end + end + i32.const 0 + ) + (func $Fr_gtR (type $_sig_i32i32ri32) + (param $pA i32) + (param $pB i32) + (result i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + call $Fr_toNormal + get_local $pA + call $Fr_isNegative + if + get_local $pB + call $Fr_isNegative + if + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_gt + if + i32.const 1 + return + else + i32.const 0 + return + end + else + i32.const 0 + return + end + else + get_local $pB + call $Fr_isNegative + if + i32.const 1 + return + else + get_local $pA + 
i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_gt + if + i32.const 1 + return + else + i32.const 0 + return + end + end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + call $Fr_toNormal + get_local $pA + call $Fr_isNegative + if + get_local $pB + call $Fr_isNegative + if + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_gt + if + i32.const 1 + return + else + i32.const 0 + return + end + else + i32.const 0 + return + end + else + get_local $pB + call $Fr_isNegative + if + i32.const 1 + return + else + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + call $Fr_int_gt + if + i32.const 1 + return + else + i32.const 0 + return + end + end + end + else + get_local $pA + i32.load + get_local $pB + i32.load + i32.gt_s + if + i32.const 1 + return + else + i32.const 0 + return + end + end + end + i32.const 0 + ) + (func $Fr_eq (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + get_local $pB + call $Fr_eqR + if + get_local $pR + i64.const 1 + i64.store + else + get_local $pR + i64.const 0 + i64.store + end + ) + (func $Fr_neq (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + get_local $pB + call $Fr_eqR + if + get_local $pR + i64.const 0 + i64.store + else + get_local $pR + i64.const 1 + i64.store + end + ) + (func $Fr_gt (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + get_local $pB + call $Fr_eqR + if + get_local $pR + i64.const 0 + i64.store + else + get_local $pA + get_local $pB + call $Fr_gtR + if + get_local $pR + 
i64.const 1 + i64.store + else + get_local $pR + i64.const 0 + i64.store + end + end + ) + (func $Fr_geq (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + get_local $pB + call $Fr_eqR + if + get_local $pR + i64.const 1 + i64.store + else + get_local $pA + get_local $pB + call $Fr_gtR + if + get_local $pR + i64.const 1 + i64.store + else + get_local $pR + i64.const 0 + i64.store + end + end + ) + (func $Fr_lt (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + get_local $pB + call $Fr_eqR + if + get_local $pR + i64.const 0 + i64.store + else + get_local $pA + get_local $pB + call $Fr_gtR + if + get_local $pR + i64.const 0 + i64.store + else + get_local $pR + i64.const 1 + i64.store + end + end + ) + (func $Fr_leq (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + get_local $pB + call $Fr_eqR + if + get_local $pR + i64.const 1 + i64.store + else + get_local $pA + get_local $pB + call $Fr_gtR + if + get_local $pR + i64.const 0 + i64.store + else + get_local $pR + i64.const 1 + i64.store + end + end + ) + (func $Fr_mul (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + (local $r i64) + (local $overflow i64) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + end + else + get_local $pB + 
i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + else + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + i32.const 704 + get_local $pR + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + end + end + else + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pB + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + else + get_local $pB + call $Fr_toMontgomery + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + end + end + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pB + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pA + call $Fr_toMontgomery + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + else + get_local $pA + call $Fr_toMontgomery + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + end + else + get_local $pA + i64.load32_s + get_local $pB + i64.load32_s + i64.mul + set_local $r + get_local $r + i64.const 31 + i64.shr_s + set_local $overflow + get_local $overflow + i64.eqz + 
get_local $overflow + i64.const 1 + i64.add + i64.eqz + i32.or + if + get_local $pR + get_local $r + i64.store32 + get_local $pR + i32.const 0 + i32.store offset=4 + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $r + call $Fr_rawCopyS2L + end + end + end + ) + (func $Fr_idiv (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + i32.const 16 + call $Fr_int_div + ) + (func $Fr_mod (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + i32.const 16 + get_local 
$pR + i32.const 8 + i32.add + call $Fr_int_div + ) + (func $Fr_inv (type $_sig_i32i32) + (param $pR i32) + (param $pA i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + i32.const 8 + i32.add + i32.const 608 + get_local $pR + i32.const 8 + i32.add + call $Fr_int_inverseMod + get_local $pA + i32.load8_u offset=7 + i32.const 64 + i32.and + if + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + i32.const 704 + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_mul + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + end + ) + (func $Fr_div (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + (local $r i64) + (local $overflow i64) + get_local $pR + get_local $pB + call $Fr_inv + get_local $pR + get_local $pR + get_local $pA + call $Fr_mul + ) + (func $Fr_pow (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + call $Fr_toMontgomery + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pR + i32.const -1073741824 + i32.store offset=4 + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + i32.const 32 + get_local $pR + i32.const 8 + i32.add + call $Fr_F1m_exp + ) + (func $Fr_fixedShl (type $_sig_i64i64ri64) + (param $a i64) + (param $b i64) + (result i64) + get_local $b + i64.const 64 + i64.ge_u + if + i64.const 0 + return + end + get_local $a + get_local $b + i64.shl + ) + (func $Fr_fixedShr (type $_sig_i64i64ri64) + (param $a i64) + (param $b i64) + (result i64) + get_local $b 
+ i64.const 64 + i64.ge_u + if + i64.const 0 + return + end + get_local $a + get_local $b + i64.shr_u + ) + (func $Fr_rawgetchunk (type $_sig_i32i32ri64) + (param $pA i32) + (param $i i32) + (result i64) + get_local $i + i32.const 4 + i32.lt_u + if + get_local $pA + get_local $i + i32.const 8 + i32.mul + i32.add + i64.load + return + end + i64.const 0 + ) + (func $Fr_rawshll (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $n i32) + (local $oWords1 i32) + (local $oBits1 i64) + (local $oWords2 i32) + (local $oBits2 i64) + (local $i i32) + i32.const 0 + get_local $n + i32.const 6 + i32.shr_u + i32.sub + set_local $oWords1 + get_local $oWords1 + i32.const 1 + i32.sub + set_local $oWords2 + get_local $n + i64.extend_u/i32 + i64.const 63 + i64.and + set_local $oBits1 + i64.const 64 + get_local $oBits1 + i64.sub + set_local $oBits2 + i32.const 0 + set_local $i + block + loop + get_local $i + i32.const 4 + i32.eq + br_if 1 + get_local $pR + get_local $i + i32.const 8 + i32.mul + i32.add + get_local $pA + get_local $oWords1 + get_local $i + i32.add + call $Fr_rawgetchunk + get_local $oBits1 + call $Fr_fixedShl + get_local $pA + get_local $oWords2 + get_local $i + i32.add + call $Fr_rawgetchunk + get_local $oBits2 + call $Fr_fixedShr + i64.or + i64.store + get_local $i + i32.const 1 + i32.add + set_local $i + br 0 + end + end + ) + (func $Fr_rawshrl (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $n i32) + (local $oWords1 i32) + (local $oBits1 i64) + (local $oWords2 i32) + (local $oBits2 i64) + (local $i i32) + get_local $n + i32.const 6 + i32.shr_u + set_local $oWords1 + get_local $oWords1 + i32.const 1 + i32.add + set_local $oWords2 + get_local $n + i64.extend_u/i32 + i64.const 63 + i64.and + set_local $oBits1 + i64.const 64 + get_local $oBits1 + i64.sub + set_local $oBits2 + i32.const 0 + set_local $i + block + loop + get_local $i + i32.const 4 + i32.eq + br_if 1 + get_local $pR + get_local $i + i32.const 8 + i32.mul + i32.add + 
get_local $pA + get_local $oWords1 + get_local $i + i32.add + call $Fr_rawgetchunk + get_local $oBits1 + call $Fr_fixedShr + get_local $pA + get_local $oWords2 + get_local $i + i32.add + call $Fr_rawgetchunk + get_local $oBits2 + call $Fr_fixedShl + i64.or + i64.store + get_local $i + i32.const 1 + i32.add + set_local $i + br 0 + end + end + ) + (func $Fr_adjustBinResult (type $_sig_i32) + (param $pA i32) + get_local $pA + get_local $pA + i64.load offset=32 + i64.const 9223372036854775807 + i64.and + i64.store offset=32 + get_local $pA + i32.const 8 + i32.add + i32.const 608 + call $Fr_int_gte + if + get_local $pA + i32.const 8 + i32.add + i32.const 608 + get_local $pA + i32.const 8 + i32.add + call $Fr_int_sub + drop + end + ) + (func $Fr_rawshl (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $n i32) + (local $r i64) + (local $overflow i64) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + call $Fr_toNormal + get_local $pR + i32.const 8 + i32.add + get_local $pA + i32.const 8 + i32.add + get_local $n + call $Fr_rawshll + get_local $pR + call $Fr_adjustBinResult + get_local $pR + i32.const -2147483648 + i32.store offset=4 + else + get_local $pA + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pR + i32.const 8 + i32.add + get_local $pA + i32.const 8 + i32.add + get_local $n + call $Fr_rawshll + get_local $pR + call $Fr_adjustBinResult + get_local $pR + i32.const -2147483648 + i32.store offset=4 + else + get_local $n + i32.const 30 + i32.gt_u + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + 
get_local $pR + i32.const 8 + i32.add + get_local $pA + i32.const 8 + i32.add + get_local $n + call $Fr_rawshll + get_local $pR + call $Fr_adjustBinResult + get_local $pR + i32.const -2147483648 + i32.store offset=4 + else + get_local $pA + i64.load32_s + get_local $n + i64.extend_u/i32 + i64.shl + set_local $r + get_local $r + i64.const 31 + i64.shr_s + set_local $overflow + get_local $overflow + i64.eqz + get_local $overflow + i64.const 1 + i64.add + i64.eqz + i32.or + if + get_local $pR + get_local $r + i64.store32 + get_local $pR + i32.const 0 + i32.store offset=4 + else + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + i32.const 8 + i32.add + get_local $r + call $Fr_rawCopyS2L + end + end + end + end + ) + (func $Fr_rawshr (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $n i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + call $Fr_toNormal + get_local $pR + i32.const 8 + i32.add + get_local $pA + i32.const 8 + i32.add + get_local $n + call $Fr_rawshrl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + else + get_local $pA + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pR + i32.const 8 + i32.add + get_local $pA + i32.const 8 + i32.add + get_local $n + call $Fr_rawshrl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + else + get_local $n + i32.const 32 + i32.lt_u + if + get_local $pR + get_local $pA + i32.load + get_local $n + i32.shr_u + i32.store + else + get_local $pR + i32.const 0 + i32.store + end + get_local $pR + i32.const 0 + i32.store offset=4 + end + end + ) + (func $Fr_shl (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pB + call $Fr_isNegative + if + i32.const 48 + 
get_local $pB + call $Fr_neg + i32.const 8 + i32.const 48 + i32.const 88 + call $Fr_lt + i32.const 8 + i32.load + if + get_local $pR + get_local $pA + i32.const 48 + call $Fr_toInt + call $Fr_rawshr + else + get_local $pR + call $Fr_int_zero + end + else + i32.const 8 + get_local $pB + i32.const 88 + call $Fr_lt + i32.const 8 + i32.load + if + get_local $pR + get_local $pA + get_local $pB + call $Fr_toInt + call $Fr_rawshl + else + get_local $pR + call $Fr_int_zero + end + end + ) + (func $Fr_shr (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pB + call $Fr_isNegative + if + i32.const 48 + get_local $pB + call $Fr_neg + i32.const 8 + i32.const 48 + i32.const 88 + call $Fr_lt + i32.const 8 + i32.load + if + get_local $pR + get_local $pA + i32.const 48 + call $Fr_toInt + call $Fr_rawshl + else + get_local $pR + call $Fr_int_zero + end + else + i32.const 8 + get_local $pB + i32.const 88 + call $Fr_lt + i32.const 8 + i32.load + if + get_local $pR + get_local $pA + get_local $pB + call $Fr_toInt + call $Fr_rawshr + else + get_local $pR + call $Fr_int_zero + end + end + ) + (func $Fr_rawbandl (type $_sig_i32i32i32) + (param $pA i32) + (param $pB i32) + (param $pR i32) + get_local $pR + get_local $pA + i64.load + get_local $pB + i64.load + i64.and + i64.store + get_local $pR + get_local $pA + i64.load offset=8 + get_local $pB + i64.load offset=8 + i64.and + i64.store offset=8 + get_local $pR + get_local $pA + i64.load offset=16 + get_local $pB + i64.load offset=16 + i64.and + i64.store offset=16 + get_local $pR + get_local $pA + i64.load offset=24 + get_local $pB + i64.load offset=24 + i64.and + i64.store offset=24 + ) + (func $Fr_band (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + 
call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbandl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pA + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbandl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L 
+ get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbandl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pB + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbandl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pR + get_local $pA + i32.load + get_local $pB + i32.load + i32.and + i32.store + get_local $pR + i32.const 0 + i32.store offset=4 + end + end + end + end + ) + (func $Fr_rawborl (type $_sig_i32i32i32) + (param $pA i32) + (param $pB i32) + (param $pR i32) + get_local $pR + get_local $pA + i64.load + get_local $pB + i64.load + i64.or + i64.store + get_local $pR + get_local $pA + i64.load offset=8 + get_local $pB + i64.load offset=8 + i64.or + i64.store offset=8 + get_local $pR + get_local $pA + i64.load offset=16 + get_local $pB + i64.load offset=16 + i64.or + i64.store offset=16 + get_local $pR + get_local $pA + i64.load offset=24 + get_local $pB + i64.load offset=24 + i64.or + i64.store offset=24 + ) + (func $Fr_bor (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + 
(param $pB i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawborl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pA + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawborl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const 
-2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawborl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pB + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawborl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pR + get_local $pA + i32.load + get_local $pB + i32.load + i32.or + i32.store + get_local $pR + i32.const 0 + i32.store offset=4 + end + end + end + end + ) + (func $Fr_rawbxorl (type $_sig_i32i32i32) + (param $pA i32) + (param $pB i32) + (param $pR i32) + get_local $pR + get_local $pA + i64.load + get_local $pB + i64.load + i64.xor + i64.store + get_local $pR + get_local $pA + i64.load offset=8 + get_local $pB + i64.load offset=8 + i64.xor + i64.store offset=8 + get_local $pR + get_local $pA + i64.load offset=16 + get_local $pB + i64.load 
offset=16 + i64.xor + i64.store offset=16 + get_local $pR + get_local $pA + i64.load offset=24 + get_local $pB + i64.load offset=24 + i64.xor + i64.store offset=24 + ) + (func $Fr_bxor (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbxorl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pA + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbxorl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pB + i32.load8_u 
offset=7 + i32.const 128 + i32.and + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbxorl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pB + call $Fr_isNegative + if + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pB + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pB + i32.const 8 + i32.add + get_local $pB + i64.load32_s + call $Fr_rawCopyS2L + get_local $pB + i32.const -2147483648 + i32.store offset=4 + end + get_local $pB + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pB + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbxorl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + else + get_local $pR + get_local $pA + i32.load + get_local $pB + i32.load + i32.xor + i32.store + get_local $pR + i32.const 0 + i32.store offset=4 + end + end + end + end + ) + (func $Fr_rawbnotl (type $_sig_i32i32) + (param $pA i32) + (param $pR i32) + get_local $pR + get_local $pA + i64.load + i64.const -1 + i64.xor + 
i64.store + get_local $pR + get_local $pA + i64.load offset=8 + i64.const -1 + i64.xor + i64.store offset=8 + get_local $pR + get_local $pA + i64.load offset=16 + i64.const -1 + i64.xor + i64.store offset=16 + get_local $pR + get_local $pA + i64.load offset=24 + i64.const -1 + i64.xor + i64.store offset=24 + ) + (func $Fr_bnot (type $_sig_i32i32) + (param $pR i32) + (param $pA i32) + get_local $pA + i32.load8_u offset=7 + i32.const 128 + i32.and + if + else + get_local $pA + i32.const 8 + i32.add + get_local $pA + i64.load32_s + call $Fr_rawCopyS2L + get_local $pA + i32.const -2147483648 + i32.store offset=4 + end + get_local $pA + call $Fr_toNormal + get_local $pA + i32.const 8 + i32.add + get_local $pR + i32.const 8 + i32.add + call $Fr_rawbnotl + get_local $pR + i32.const -2147483648 + i32.store offset=4 + get_local $pR + call $Fr_adjustBinResult + ) + (func $Fr_land (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + call $Fr_isTrue + get_local $pB + call $Fr_isTrue + i32.and + if + get_local $pR + i64.const 1 + i64.store + else + get_local $pR + i64.const 0 + i64.store + end + ) + (func $Fr_lor (type $_sig_i32i32i32) + (param $pR i32) + (param $pA i32) + (param $pB i32) + get_local $pA + call $Fr_isTrue + get_local $pB + call $Fr_isTrue + i32.or + if + get_local $pR + i64.const 1 + i64.store + else + get_local $pR + i64.const 0 + i64.store + end + ) + (func $Fr_lnot (type $_sig_i32i32) + (param $pR i32) + (param $pA i32) + get_local $pA + call $Fr_isTrue + if + get_local $pR + i64.const 0 + i64.store + else + get_local $pR + i64.const 1 + i64.store + end + ) \ No newline at end of file diff --git a/code_producers/src/wasm_elements/secq256k1/fr-data.wat b/code_producers/src/wasm_elements/secq256k1/fr-data.wat new file mode 100644 index 000000000..deb55612a --- /dev/null +++ b/code_producers/src/wasm_elements/secq256k1/fr-data.wat @@ -0,0 +1,16 @@ +(data (i32.const 0) "\a0\07\00\00") +(data (i32.const 88) 
"\00\01\00\00\00\00\00\00") +(data (i32.const 128) "\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 608) "/\fc\ff\ff\fe\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff") +(data (i32.const 640) "\d1\03\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 672) "\a1\90\0e\00\a2\07\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 704) "q\f6\957\e3\b1+\00s\0b\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 736) "\d1\03\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 768) "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 800) "\17\fe\ff\7f\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f") +(data (i32.const 832) "\18\fe\ff\7f\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f") +(data (i32.const 864) "s\0b\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00") +(data (i32.const 896) "\17\fe\ff\7f\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f") +(data (i32.const 928) "^\f8\ff\ff\fd\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff") +(data (i32.const 960) "\0c\ff\ff\bf\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff?") +(data (i32.const 1920) "\17\fe\ff\7f\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f") \ No newline at end of file diff --git a/code_producers/src/wasm_elements/secq256k1/fr-types.wat b/code_producers/src/wasm_elements/secq256k1/fr-types.wat new file mode 100644 index 000000000..fdcbccce6 --- /dev/null +++ 
b/code_producers/src/wasm_elements/secq256k1/fr-types.wat @@ -0,0 +1,11 @@ + (type $_sig_i32i32 (func (param i32 i32))) + (type $_sig_i32 (func (param i32))) + (type $_sig_i32ri32 (func (param i32) (result i32))) + (type $_sig_i32i32ri32 (func (param i32 i32) (result i32))) + (type $_sig_i32i32i32ri32 (func (param i32 i32 i32) (result i32))) + (type $_sig_i32i32i32 (func (param i32 i32 i32))) + (type $_sig_i32i64i32 (func (param i32 i64 i32))) + (type $_sig_i32i64 (func (param i32 i64))) + (type $_sig_i32i32i32i32 (func (param i32 i32 i32 i32))) + (type $_sig_i64i64ri64 (func (param i64 i64) (result i64))) + (type $_sig_i32i32ri64 (func (param i32 i32) (result i64))) \ No newline at end of file diff --git a/code_producers/src/wasm_elements/wasm_code_generator.rs b/code_producers/src/wasm_elements/wasm_code_generator.rs index 7277066ad..080c9baba 100644 --- a/code_producers/src/wasm_elements/wasm_code_generator.rs +++ b/code_producers/src/wasm_elements/wasm_code_generator.rs @@ -3,6 +3,7 @@ use num_bigint_dig::BigInt; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; +use std::slice::SliceIndex; pub fn wasm_hexa(nbytes: usize, num: &BigInt) -> String { let inbytes = num.to_str_radix(16).to_string(); @@ -226,7 +227,9 @@ pub fn get_initial_size_of_memory(producer: &WASMProducer) -> usize { //------------------- generate all kinds of Data ------------------ -pub fn generate_hash_map(signal_name_list: &Vec<(String, usize, usize)>) -> Vec<(u64, usize, usize)> { +pub fn generate_hash_map( + signal_name_list: &Vec<(String, usize, usize)>, +) -> Vec<(u64, usize, usize)> { assert!(signal_name_list.len() <= 256); let len = 256; let mut hash_map = vec![(0, 0, 0); len]; @@ -560,7 +563,7 @@ pub fn generate_data_list(producer: &WASMProducer) -> Vec { wdata.push(format!( "(data (i32.const {}) \"{}\")", producer.get_raw_prime_start(), - wasm_hexa(producer.get_size_32_bit()*4, &p) + wasm_hexa(producer.get_size_32_bit() * 4, &p) )); wdata.push(format!( "(data 
(i32.const {}) \"{}\")", @@ -579,7 +582,12 @@ pub fn generate_data_list(producer: &WASMProducer) -> Vec { producer.get_witness_signal_id_list_start(), s )); - wdata.push(format!("(data (i32.const {}) \"{}{}\")",producer.get_signal_memory_start(),"\\00\\00\\00\\00\\00\\00\\00\\80",wasm_hexa(producer.get_size_32_bit()*4, &BigInt::from(1)))); //setting 'one' as long normal 1 + wdata.push(format!( + "(data (i32.const {}) \"{}{}\")", + producer.get_signal_memory_start(), + "\\00\\00\\00\\00\\00\\00\\00\\80", + wasm_hexa(producer.get_size_32_bit() * 4, &BigInt::from(1)) + )); //setting 'one' as long normal 1 wdata.push(format!( "(data (i32.const {}) \"{}\")", producer.get_template_instance_to_io_signal_start(), @@ -608,7 +616,7 @@ pub fn generate_data_list(producer: &WASMProducer) -> Vec { wdata.push(format!( "(data (i32.const {}) \"{}\\00\")", m + i * producer.get_size_of_message_in_bytes(), - &ml[i][..producer.get_size_of_message_in_bytes()-1] + &ml[i][..producer.get_size_of_message_in_bytes() - 1] )); } } @@ -625,7 +633,7 @@ pub fn generate_data_list(producer: &WASMProducer) -> Vec { wdata.push(format!( "(data (i32.const {}) \"{}\\00\")", s + i * producer.get_size_of_message_in_bytes(), - &st[i][..producer.get_size_of_message_in_bytes()-1] + &st[i][..producer.get_size_of_message_in_bytes() - 1] )); } } @@ -833,17 +841,17 @@ pub fn init_generator(producer: &WASMProducer) -> Vec { // instructions.push(store32(None)); instructions.push(set_constant(&next_to_one.to_string())); let funcname = format!("${}_create", producer.get_main_header()); - instructions.push(call(&funcname)); + instructions.push(call(&funcname)); instructions.push(drop()); if producer.get_number_of_main_inputs() == 0 { - instructions.push(set_constant(&producer.get_component_tree_start().to_string())); - let funcname = format!("${}_run", producer.get_main_header()); - instructions.push(call(&funcname)); - instructions.push(tee_local(producer.get_merror_tag())); - instructions.push(add_if()); - 
instructions.push(get_local("$merror")); - instructions.push(call("$exceptionHandler")); - instructions.push(add_end()); + instructions.push(set_constant(&producer.get_component_tree_start().to_string())); + let funcname = format!("${}_run", producer.get_main_header()); + instructions.push(call(&funcname)); + instructions.push(tee_local(producer.get_merror_tag())); + instructions.push(add_if()); + instructions.push(get_local("$merror")); + instructions.push(call("$exceptionHandler")); + instructions.push(add_end()); } instructions.push(")".to_string()); instructions @@ -970,7 +978,7 @@ pub fn set_input_signal_generator(producer: &WASMProducer) -> Vec Vec Vec Vec { instructions.push(shl32()); instructions.push(add32()); // address of the witness in the witness list instructions.push(load32(None)); // number of the signal in the signal Memory - instructions.push(set_constant(&format!("{}",producer.get_size_32_bit()*4+8)));//40 + instructions.push(set_constant(&format!("{}", producer.get_size_32_bit() * 4 + 8))); //40 instructions.push(mul32()); instructions.push(set_constant(&producer.get_signal_memory_start().to_string())); instructions.push(add32()); // address of the signal in the signal Memory @@ -1559,15 +1567,16 @@ fn get_file_instructions(name: &str) -> Vec { pub fn fr_types(prime: &String) -> Vec { let mut instructions = vec![]; - let file = match prime.as_ref(){ + let file = match prime.as_ref() { "bn128" => include_str!("bn128/fr-types.wat"), "bls12381" => include_str!("bls12381/fr-types.wat"), "goldilocks" => include_str!("goldilocks/fr-types.wat"), "grumpkin" => include_str!("grumpkin/fr-types.wat"), "pallas" => include_str!("pallas/fr-types.wat"), "vesta" => include_str!("vesta/fr-types.wat"), + "secq256k1" => include_str!("secq256k1/fr-types.wat"), _ => unreachable!(), - }; + }; for line in file.lines() { instructions.push(line.to_string()); } @@ -1576,15 +1585,16 @@ pub fn fr_types(prime: &String) -> Vec { pub fn fr_data(prime: &String) -> Vec { let 
mut instructions = vec![]; - let file = match prime.as_ref(){ + let file = match prime.as_ref() { "bn128" => include_str!("bn128/fr-data.wat"), "bls12381" => include_str!("bls12381/fr-data.wat"), "goldilocks" => include_str!("goldilocks/fr-data.wat"), "grumpkin" => include_str!("grumpkin/fr-data.wat"), "pallas" => include_str!("pallas/fr-data.wat"), "vesta" => include_str!("vesta/fr-data.wat"), + "secq256k1" => include_str!("secq256k1/fr-data.wat"), _ => unreachable!(), - }; + }; for line in file.lines() { instructions.push(line.to_string()); } @@ -1592,15 +1602,16 @@ pub fn fr_data(prime: &String) -> Vec { } pub fn fr_code(prime: &String) -> Vec { let mut instructions = vec![]; - let file = match prime.as_ref(){ + let file = match prime.as_ref() { "bn128" => include_str!("bn128/fr-code.wat"), "bls12381" => include_str!("bls12381/fr-code.wat"), "goldilocks" => include_str!("goldilocks/fr-code.wat"), "grumpkin" => include_str!("grumpkin/fr-code.wat"), "pallas" => include_str!("pallas/fr-code.wat"), "vesta" => include_str!("vesta/fr-code.wat"), + "secq256k1" => include_str!("secq256k1/fr-code.wat"), _ => unreachable!(), - }; + }; for line in file.lines() { instructions.push(line.to_string()); } @@ -1628,7 +1639,7 @@ pub fn generate_utils_js_file(js_folder: &PathBuf) -> std::io::Result<()> { pub fn generate_generate_witness_js_file(js_folder: &PathBuf) -> std::io::Result<()> { use std::io::BufWriter; - let mut file_path = js_folder.clone(); + let mut file_path = js_folder.clone(); file_path.push("generate_witness"); file_path.set_extension("js"); let file_name = file_path.to_str().unwrap(); @@ -1645,7 +1656,7 @@ pub fn generate_generate_witness_js_file(js_folder: &PathBuf) -> std::io::Result pub fn generate_witness_calculator_js_file(js_folder: &PathBuf) -> std::io::Result<()> { use std::io::BufWriter; - let mut file_path = js_folder.clone(); + let mut file_path = js_folder.clone(); file_path.push("witness_calculator"); file_path.set_extension("js"); let file_name = 
file_path.to_str().unwrap(); @@ -1790,7 +1801,7 @@ mod tests { code_aux = build_log_message_generator(&producer); code.append(&mut code_aux); - + //code_aux = main_sample_generator(&producer); //code.append(&mut code_aux); diff --git a/compiler/src/circuit_design/build.rs b/compiler/src/circuit_design/build.rs index ce3dac392..f6a042597 100644 --- a/compiler/src/circuit_design/build.rs +++ b/compiler/src/circuit_design/build.rs @@ -3,7 +3,9 @@ use crate::circuit_design::function::FunctionCodeInfo; use crate::circuit_design::template::TemplateCodeInfo; use crate::hir::very_concrete_program::*; use crate::intermediate_representation::translate; -use crate::intermediate_representation::translate::{CodeInfo, FieldTracker, TemplateDB, ParallelClusters}; +use crate::intermediate_representation::translate::{ + CodeInfo, FieldTracker, TemplateDB, ParallelClusters, +}; use code_producers::c_elements::*; use code_producers::wasm_elements::*; use program_structure::file_definition::FileLibrary; @@ -25,8 +27,7 @@ fn build_template_instances( c_info: &CircuitInfo, ti: Vec, mut field_tracker: FieldTracker, -) -> (FieldTracker, HashMap) { - +) -> (FieldTracker, HashMap) { fn compute_jump(lengths: &Vec, indexes: &[usize]) -> usize { let mut jump = 0; let mut full_length = lengths.iter().fold(1, |p, c| p * (*c)); @@ -55,31 +56,34 @@ fn build_template_instances( cmp_to_type.insert(name, xtype); } circuit.wasm_producer.message_list.push(msg); - circuit.c_producer.has_parallelism |= template.is_parallel || template.is_parallel_component; + circuit.c_producer.has_parallelism |= + template.is_parallel || template.is_parallel_component; let mut component_to_parallel: HashMap = HashMap::new(); - for trigger in &template.triggers{ - match component_to_parallel.get_mut(&trigger.component_name){ + for trigger in &template.triggers { + match component_to_parallel.get_mut(&trigger.component_name) { Some(parallel_info) => { - 
parallel_info.positions_to_parallel.insert(trigger.indexed_with.clone(), trigger.is_parallel); - if parallel_info.uniform_parallel_value.is_some(){ - if parallel_info.uniform_parallel_value.unwrap() != trigger.is_parallel{ + parallel_info + .positions_to_parallel + .insert(trigger.indexed_with.clone(), trigger.is_parallel); + if parallel_info.uniform_parallel_value.is_some() { + if parallel_info.uniform_parallel_value.unwrap() != trigger.is_parallel { parallel_info.uniform_parallel_value = None; } } - }, + } None => { let mut positions_to_parallel = BTreeMap::new(); - positions_to_parallel.insert(trigger.indexed_with.clone(), trigger.is_parallel); + positions_to_parallel.insert(trigger.indexed_with.clone(), trigger.is_parallel); let new_parallel_info = ParallelClusters { positions_to_parallel, uniform_parallel_value: Some(trigger.is_parallel), }; component_to_parallel.insert(trigger.component_name.clone(), new_parallel_info); - }, + } } } - + let code_info = CodeInfo { cmp_to_type, field_tracker, @@ -96,7 +100,7 @@ fn build_template_instances( fresh_cmp_id: cmp_id, components: template.components, template_database: &c_info.template_database, - string_table : string_table, + string_table: string_table, signals_to_tags: template.signals_to_tags, }; let mut template_info = TemplateCodeInfo { @@ -133,7 +137,7 @@ fn build_function_instances( c_info: &CircuitInfo, instances: Vec, mut field_tracker: FieldTracker, - mut string_table : HashMap + mut string_table: HashMap, ) -> (FieldTracker, HashMap, HashMap) { let mut function_to_arena_size = HashMap::new(); for instance in instances { @@ -160,7 +164,7 @@ fn build_function_instances( cmp_to_type: HashMap::with_capacity(0), component_to_parallel: HashMap::with_capacity(0), template_database: &c_info.template_database, - string_table : string_table, + string_table: string_table, signals_to_tags: BTreeMap::new(), }; let mut function_info = FunctionCodeInfo { @@ -184,7 +188,12 @@ fn build_function_instances( } // WASM 
producer builder -fn initialize_wasm_producer(vcp: &VCP, database: &TemplateDB, wat_flag:bool, version: &str) -> WASMProducer { +fn initialize_wasm_producer( + vcp: &VCP, + database: &TemplateDB, + wat_flag: bool, + version: &str, +) -> WASMProducer { use program_structure::utils::constants::UsefulConstants; let initial_node = vcp.get_main_id(); let prime = UsefulConstants::new(&vcp.prime).get_p().clone(); @@ -194,18 +203,20 @@ fn initialize_wasm_producer(vcp: &VCP, database: &TemplateDB, wat_flag:bool, ver producer.main_signal_offset = 1; producer.prime = prime.to_str_radix(10); producer.prime_str = vcp.prime.clone(); - producer.fr_memory_size = match vcp.prime.as_str(){ + producer.fr_memory_size = match vcp.prime.as_str() { "goldilocks" => 412, "bn128" => 1948, "bls12381" => 1948, "grumpkin" => 1948, "pallas" => 1948, "vesta" => 1948, - _ => unreachable!() + "secq256k1" => 1948, + _ => unreachable!(), }; //producer.fr_memory_size = 412 if goldilocks and 1948 for bn128 and bls12381 // for each created component we store three u32, for each son we store a u32 in its father - producer.size_of_component_tree = stats.all_created_components * 3 + stats.all_needed_subcomponents_indexes; + producer.size_of_component_tree = + stats.all_created_components * 3 + stats.all_needed_subcomponents_indexes; producer.total_number_of_signals = stats.all_signals + 1; producer.size_32_bit = prime.bits() / 32 + if prime.bits() % 32 != 0 { 1 } else { 0 }; producer.size_32_shift = 0; @@ -225,7 +236,8 @@ fn initialize_wasm_producer(vcp: &VCP, database: &TemplateDB, wat_flag:bool, ver producer.template_instance_list = build_template_list(vcp); producer.field_tracking.clear(); producer.wat_flag = wat_flag; - (producer.major_version, producer.minor_version, producer.patch_version) = get_number_version(version); + (producer.major_version, producer.minor_version, producer.patch_version) = + get_number_version(version); producer } @@ -239,7 +251,8 @@ fn initialize_c_producer(vcp: &VCP, 
database: &TemplateDB, version: &str) -> CPr producer.main_signal_offset = 1; producer.prime = prime.to_str_radix(10); producer.prime_str = vcp.prime.clone(); - producer.size_of_component_tree = stats.all_created_components * 3 + stats.all_needed_subcomponents_indexes; + producer.size_of_component_tree = + stats.all_created_components * 3 + stats.all_needed_subcomponents_indexes; producer.total_number_of_signals = stats.all_signals + 1; producer.size_32_bit = prime.bits() / 32 + if prime.bits() % 32 != 0 { 1 } else { 0 }; producer.size_32_shift = 0; @@ -254,11 +267,12 @@ fn initialize_c_producer(vcp: &VCP, database: &TemplateDB, version: &str) -> CPr producer.signals_in_witness = producer.witness_to_signal_list.len(); producer.number_of_main_inputs = vcp.templates[initial_node].number_of_inputs; producer.number_of_main_outputs = vcp.templates[initial_node].number_of_outputs; - producer.main_input_list = main_input_list(&vcp.templates[initial_node]); + producer.main_input_list = main_input_list(&vcp.templates[initial_node]); producer.io_map = build_io_map(vcp, database); producer.template_instance_list = build_template_list_parallel(vcp); producer.field_tracking.clear(); - (producer.major_version, producer.minor_version, producer.patch_version) = get_number_version(version); + (producer.major_version, producer.minor_version, producer.patch_version) = + get_number_version(version); producer } @@ -284,8 +298,8 @@ fn build_template_list(vcp: &VCP) -> TemplateList { fn build_template_list_parallel(vcp: &VCP) -> TemplateListParallel { let mut tmp_list = TemplateListParallel::new(); for instance in &vcp.templates { - tmp_list.push(InfoParallel{ - name: instance.template_header.clone(), + tmp_list.push(InfoParallel { + name: instance.template_header.clone(), is_parallel: instance.is_parallel || instance.is_parallel_component, is_not_parallel: !instance.is_parallel && instance.is_not_parallel_component, }); @@ -365,7 +379,8 @@ pub fn build_circuit(vcp: VCP, flag: 
CompilationFlags, version: &str) -> Circuit } let template_database = TemplateDB::build(&vcp.templates); let mut circuit = Circuit::default(); - circuit.wasm_producer = initialize_wasm_producer(&vcp, &template_database, flag.wat_flag, version); + circuit.wasm_producer = + initialize_wasm_producer(&vcp, &template_database, flag.wat_flag, version); circuit.c_producer = initialize_c_producer(&vcp, &template_database, version); let field_tracker = FieldTracker::new(); @@ -377,8 +392,13 @@ pub fn build_circuit(vcp: VCP, flag: CompilationFlags, version: &str) -> Circuit let (field_tracker, string_table) = build_template_instances(&mut circuit, &circuit_info, vcp.templates, field_tracker); - let (field_tracker, function_to_arena_size, table_string_to_usize) = - build_function_instances(&mut circuit, &circuit_info, vcp.functions, field_tracker,string_table); + let (field_tracker, function_to_arena_size, table_string_to_usize) = build_function_instances( + &mut circuit, + &circuit_info, + vcp.functions, + field_tracker, + string_table, + ); let table_usize_to_string = create_table_usize_to_string(table_string_to_usize); circuit.wasm_producer.set_string_table(table_usize_to_string.clone()); @@ -398,12 +418,12 @@ pub fn build_circuit(vcp: VCP, flag: CompilationFlags, version: &str) -> Circuit circuit } -pub fn create_table_usize_to_string( string_table : HashMap) -> Vec { +pub fn create_table_usize_to_string(string_table: HashMap) -> Vec { let size = string_table.len(); - let mut table_usize_to_string = vec![String::new(); size]; + let mut table_usize_to_string = vec![String::new(); size]; for (string, us) in string_table { table_usize_to_string[us] = string; - } + } table_usize_to_string } diff --git a/mkdocs/docs/getting-started/compilation-options.md b/mkdocs/docs/getting-started/compilation-options.md index 5280f34e0..d4668cef3 100644 --- a/mkdocs/docs/getting-started/compilation-options.md +++ b/mkdocs/docs/getting-started/compilation-options.md @@ -1,6 +1,6 @@ If we use 
the command ```circom --help```, we can see all the options and flags that we can use during the compilation. -```console +```console USAGE: circom [FLAGS] [OPTIONS] [--] [input] @@ -24,7 +24,7 @@ FLAGS: OPTIONS: -o, --output Path to the directory where the output will be written [default: .] -p, --prime To choose the prime number to use to generate the circuit. Receives the - name of the curve (bn128, bls12381, goldilocks, grumpkin, pallas, vesta) [default: bn128] + name of the curve (bn128, bls12381, goldilocks, grumpkin, pallas, secq256k1, vesta) [default: bn128] -l ... Adds directory to library search path --O2round Maximum number of rounds of the simplification process @@ -34,37 +34,39 @@ ARGS: In the following, we explain these options. +##### Flags and options related to the compiler's output -#####Flags and options related to the compiler's output * Flag ```--r1cs``` outputs the constraints in R1CS format. * Flag ```--sym``` outputs the witness in sym format. * Flag ```--wasm``` produces a WebAssembly program that receives the private and public inputs and generates the circuit witness. * Flag ```-c / --c``` produces a C++ program that receives the private and public inputs and generates the circuit witness. * Flag ```--wat``` compiles the circuit to wat. * Flag ```--json``` outputs the R1CS system in JSON format. -* Option ```-o / --output ``` allows to indicate the path to the directory where the output will be written. By default the path is ```.```. +* Option ```-o / --output ``` allows to indicate the path to the directory where the output will be written. By default the path is ```.```. -#####Flags and options related to the constraint generation process -* Flag ```--verbose``` shows logs with known values at compilation time during the constraint generation process. +##### Flags and options related to the constraint generation process + +* Flag ```--verbose``` shows logs with known values at compilation time during the constraint generation process. 
* Flag ```--inspect``` does an additional check over the R1CS system produced. (see [--inspect](../circom-language/code-quality/inspect)). * Flag ```--use_old_simplification_heuristics``` allows to use an old heuristics of the optimization algorithm. However, it is not recommended since the new heuristics has produced better results in practice. +##### Flags and options related to the R1CS optimization -#####Flags and options related to the R1CS optimization In the following, we explain the different optimizations that we can apply to the final R1CS during the constraint generation phase. * Flag ```--O0``` does not apply any kind of simplification. -* Flag ```--O1``` removes two kinds of simple constraints: a) ```signal = K```, being K is a constant in $F_p$ and b) ```signal1 = signal2```, which usually appears when linking components inputs and outputs. +* Flag ```--O1``` removes two kinds of simple constraints: a) ```signal = K```, being K is a constant in $F_p$ and b) ```signal1 = signal2```, which usually appears when linking components inputs and outputs. * Flag ```--O2``` applies Gauss elimination to remove as many linear constraints as possible. After applying the substitutions discovered by the algorithm, non-linear constraints may become linear. Thus, the Gauss elimination is applied during several rounds until no more linear constraints are discovered. -* Option ```--O2round ``` is similar to ```--O2```but it limits the maximum number of rounds applied during the optimization. In ``````, user needs to indicate the number of rounds. +* Option ```--O2round ``` is similar to ```--O2```but it limits the maximum number of rounds applied during the optimization. In ``````, user needs to indicate the number of rounds. Only one of these flags/options must be used during the compilation. -#####Other flags and options -* Option ```-p, --prime ``` allows the user indicate which prime must be used during the compilation. 
Currently, it admits six different primes: bn128, bls12381, goldilock, grumpkin, pallas and vesta. If not indicated, the default prime is bn128. +##### Other flags and options + +* Option ```-p, --prime ``` allows the user to indicate which prime must be used during the compilation. Currently, it admits seven different primes: bn128, bls12381, goldilocks, grumpkin, pallas, secq256k1, and vesta. If not indicated, the default prime is bn128. * Option ```-l ``` adds the provided directory in ``````to the library search path. It is possible to add as much ```-l ``` as needed, but only one directory per option. diff --git a/program_structure/src/utils/constants.rs b/program_structure/src/utils/constants.rs index 42ae5f289..f4daa9f15 100644 --- a/program_structure/src/utils/constants.rs +++ b/program_structure/src/utils/constants.rs @@ -2,13 +2,17 @@ use num_bigint::BigInt; const P_BN128: &str = "21888242871839275222246405745257275088548364400416034343698204186575808495617"; -const P_BLS12381: &str = +const P_BLS12381: &str = "52435875175126190479447740508185965837690552500527637822603658699938581184513"; -const P_GOLDILOCKS: &str = - "18446744069414584321"; -const P_GRUMPKIN: &str = "21888242871839275222246405745257275088696311157297823662689037894645226208583"; -const P_PALLAS: &str = "28948022309329048855892746252171976963363056481941560715954676764349967630337"; -const P_VESTA : &str = "28948022309329048855892746252171976963363056481941647379679742748393362948097"; +const P_GOLDILOCKS: &str = "18446744069414584321"; +const P_GRUMPKIN: &str = + "21888242871839275222246405745257275088696311157297823662689037894645226208583"; +const P_PALLAS: &str = + "28948022309329048855892746252171976963363056481941560715954676764349967630337"; +const P_VESTA: &str = + "28948022309329048855892746252171976963363056481941647379679742748393362948097"; +const P_SECQ256K1: &str = + "115792089237316195423570985008687907853269984665640564039457584007908834671663"; //const P_STR: &str =
"21888242871839275222246405745257275088548364400416034343698204186575808495617"; pub struct UsefulConstants { @@ -23,17 +27,29 @@ impl Clone for UsefulConstants { impl UsefulConstants { pub fn new(possible_prime: &String) -> UsefulConstants { - let prime_to_use = if possible_prime.eq("bn128") {P_BN128} - else if possible_prime.eq("bls12381") { P_BLS12381} - else if possible_prime.eq("goldilocks") { P_GOLDILOCKS} - else if possible_prime.eq("grumpkin") { P_GRUMPKIN} - else if possible_prime.eq("pallas") { P_PALLAS} - else if possible_prime.eq("vesta") { P_VESTA} - else {unreachable!()}; + let prime_to_use = if possible_prime.eq("bn128") { + P_BN128 + } else if possible_prime.eq("bls12381") { + P_BLS12381 + } else if possible_prime.eq("goldilocks") { + P_GOLDILOCKS + } else if possible_prime.eq("grumpkin") { + P_GRUMPKIN + } else if possible_prime.eq("pallas") { + P_PALLAS + } else if possible_prime.eq("vesta") { + P_VESTA + } else if possible_prime.eq("secq256k1") { + P_SECQ256K1 + } else { + unreachable!() + }; - UsefulConstants { p: BigInt::parse_bytes(prime_to_use.as_bytes(), 10).expect("can not parse p") } + UsefulConstants { + p: BigInt::parse_bytes(prime_to_use.as_bytes(), 10).expect("can not parse p"), + } } - + pub fn get_p(&self) -> &BigInt { &self.p }