From df867e4348f83e8d5115bd18572c9735f2f4819d Mon Sep 17 00:00:00 2001
From: codygunton
Date: Tue, 2 May 2023 14:21:28 +0000
Subject: [PATCH] Generalize scalar multiplication to Grumpkin

Copy and paste scalar_multiplication.
reduce_buckets_simple compiles, fails expectations
Paste-n-go starter code from old Lagrange work
Set up test.
Baby Grumpkin transcript io.
Fix pathing
Quick and dirty SRS (no checksum)
Add notes on transcript format.
Hide srs processor for now.
Progress: executable to generate SRS; test passes
Add error handling to script
Add todo
Progress: match endianness of BN txpt
Yay reduce_buckets_simple also passes
reduce_buckets also passes!
try to docker
Add todo
All tests pass with size 2^20 SRS
Add disabled test.
Make bin runnable from build/
Bump SRS size to see green check
Start: move scalar muls up a level
Templatize: everything builds and links
Add TODO(#473)'s
Prep to templatize test
Templatize; test next up.
Template test suite.
Update srs gen scripts.
Fix WASM build (?)
Cleanup
---
 cpp/.clangd | 2 -
 .../Dockerfile.x86_64-linux-clang-assert | 1 +
 cpp/src/CMakeLists.txt | 1 +
 .../benchmark/pippenger_bench/main.cpp | 7 +-
 .../dsl/acir_format/ecdsa_secp256k1.test.cpp | 11 +-
 .../dsl/acir_proofs/acir_proofs.cpp | 4 +-
 .../barretenberg/ecc/curves/bn254/bn254.hpp | 19 +
 .../bn254/scalar_multiplication/c_bind.cpp | 13 +-
 .../bn254/scalar_multiplication/pippenger.cpp | 44 --
 .../scalar_multiplication.hpp | 155 -----
 .../ecc/curves/grumpkin/grumpkin.hpp | 16 +-
 .../scalar_multiplication/pippenger.cpp | 51 ++
 .../scalar_multiplication/pippenger.hpp | 16 +-
 .../scalar_multiplication/process_buckets.cpp | 0
 .../scalar_multiplication/process_buckets.hpp | 0
 .../scalar_multiplication/runtime_states.cpp | 54 +-
 .../scalar_multiplication/runtime_states.hpp | 31 +-
 .../scalar_multiplication.cpp | 398 ++++++++----
 .../scalar_multiplication.hpp | 272 ++++
 .../scalar_multiplication.test.cpp | 596 +++++++++++------
 .../ecc/curves/secp256k1/secp256k1.hpp | 11 +
 .../ecc/curves/secp256r1/secp256r1.hpp | 11 +
 cpp/src/barretenberg/ecc/groups/wnaf.hpp | 2 +-
 .../grumpkin_srs_gen/CMakeLists.txt | 11 +
 .../grumpkin_srs_gen/grumpkin_srs_gen.cpp | 37 ++
 .../barretenberg/honk/pcs/commitment_key.hpp | 17 +-
 cpp/src/barretenberg/honk/pcs/ipa/ipa.hpp | 23 +-
 .../honk/proof_system/ultra_verifier.cpp | 3 +-
 .../honk/proof_system/verifier.cpp | 1 -
 .../proofs/join_split/c_bind.cpp | 2 +-
 .../plonk/composer/composer_base.cpp | 9 +-
 .../turbo_plonk_composer_helper.cpp | 1 -
 .../plonk/composer/standard_composer.cpp | 1 -
 .../plonk/composer/turbo_composer.cpp | 1 -
 .../plonk/composer/ultra_composer.cpp | 1 -
 .../plonk/proof_system/prover/prover.cpp | 2 +-
 .../proof_system/proving_key/proving_key.hpp | 9 +-
 .../plonk/proof_system/verifier/verifier.cpp | 9 +-
 .../proof_system/verifier/verifier.test.cpp | 12 +-
 .../permutation_widget_impl.hpp | 4 +-
 .../random_widgets/plookup_widget_impl.hpp | 2 +-
 .../polynomials/polynomials.bench.cpp | 49 +-
 .../turbo_circuit_constructor.cpp | 1 -
 .../proof_system/work_queue/work_queue.cpp | 7 +-
 cpp/src/barretenberg/srs/CMakeLists.txt | 2 +-
 cpp/src/barretenberg/srs/io.cpp | 334 ----------
 cpp/src/barretenberg/srs/io.hpp | 396 +++++++++++-
 cpp/src/barretenberg/srs/io.test.cpp | 2 +-
 .../reference_string/env_reference_string.hpp | 5 +-
 .../file_reference_string.cpp | 2 +-
 .../file_reference_string.hpp | 5 +-
 .../reference_string/mem_reference_string.cpp | 2 +-
 .../reference_string/mem_reference_string.hpp | 2 -
 .../pippenger_reference_string.hpp | 12 +-
cpp/srs_db/grumpkin/monomial/README.md | 16 + 55 files changed, 1616 insertions(+), 1079 deletions(-) create mode 100644 cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp delete mode 100644 cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.cpp delete mode 100644 cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp create mode 100644 cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.cpp rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/pippenger.hpp (72%) rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/process_buckets.cpp (100%) rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/process_buckets.hpp (100%) rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/runtime_states.cpp (76%) rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/runtime_states.hpp (65%) rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/scalar_multiplication.cpp (69%) create mode 100644 cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp rename cpp/src/barretenberg/ecc/curves/{bn254 => }/scalar_multiplication/scalar_multiplication.test.cpp (54%) create mode 100644 cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt create mode 100644 cpp/src/barretenberg/grumpkin_srs_gen/grumpkin_srs_gen.cpp delete mode 100644 cpp/src/barretenberg/srs/io.cpp create mode 100644 cpp/srs_db/grumpkin/monomial/README.md diff --git a/cpp/.clangd b/cpp/.clangd index 599f23163a..06f5d0d059 100644 --- a/cpp/.clangd +++ b/cpp/.clangd @@ -59,8 +59,6 @@ Diagnostics: - readability-function-cognitive-complexity # It is often nicer to not be explicit - google-explicit-constructor - CheckOptions: - - cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor: True --- # this divider is necessary # Disable some checks for Google Test/Bench diff --git a/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert b/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert index d03fa89cdf..fa457af236 100644 --- a/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert +++ b/cpp/dockerfiles/Dockerfile.x86_64-linux-clang-assert @@ -16,6 +16,7 @@ WORKDIR /usr/src/barretenberg/cpp COPY . . # Build everything to ensure everything builds. All tests will be run from the result of this build. 
RUN cmake --preset default -DCMAKE_BUILD_TYPE=RelWithAssert -DCI=ON && cmake --build --preset default +RUN cd build && ./bin/grumpkin_srs_gen 1048576 && cd ../ FROM alpine:3.17 RUN apk update && apk add curl openmp diff --git a/cpp/src/CMakeLists.txt b/cpp/src/CMakeLists.txt index 7f12367c67..e1fb6b4314 100644 --- a/cpp/src/CMakeLists.txt +++ b/cpp/src/CMakeLists.txt @@ -54,6 +54,7 @@ add_subdirectory(barretenberg/join_split_example) add_subdirectory(barretenberg/dsl) add_subdirectory(barretenberg/serialize) add_subdirectory(barretenberg/solidity_helpers) +add_subdirectory(barretenberg/grumpkin_srs_gen) if(BENCHMARKS) add_subdirectory(barretenberg/benchmark) diff --git a/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp b/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp index 40f2b15284..72325f78b3 100644 --- a/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp +++ b/cpp/src/barretenberg/benchmark/pippenger_bench/main.cpp @@ -1,7 +1,8 @@ #include #include "barretenberg/common/assert.hpp" #include -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/srs/reference_string/file_reference_string.hpp" #include "barretenberg/polynomials/polynomial_arithmetic.hpp" @@ -63,9 +64,9 @@ const auto init = []() { int pippenger() { - scalar_multiplication::pippenger_runtime_state state(NUM_POINTS); + scalar_multiplication::pippenger_runtime_state state(NUM_POINTS); std::chrono::steady_clock::time_point time_start = std::chrono::steady_clock::now(); - g1::element result = scalar_multiplication::pippenger_unsafe( + g1::element result = scalar_multiplication::pippenger_unsafe( &scalars[0], reference_string->get_monomial_points(), NUM_POINTS, state); std::chrono::steady_clock::time_point time_end = std::chrono::steady_clock::now(); std::chrono::microseconds diff = std::chrono::duration_cast(time_end - time_start); diff --git a/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp b/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp index b8d8574440..55950cfcff 100644 --- a/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp +++ b/cpp/src/barretenberg/dsl/acir_format/ecdsa_secp256k1.test.cpp @@ -7,7 +7,7 @@ #include #include -using curve = proof_system::plonk::stdlib::secp256k1; +using curve_ct = proof_system::plonk::stdlib::secp256k1; size_t generate_ecdsa_constraint(acir_format::EcdsaSecp256k1Constraint& ecdsa_constraint, std::vector& witness_values) @@ -20,12 +20,13 @@ size_t generate_ecdsa_constraint(acir_format::EcdsaSecp256k1Constraint& ecdsa_co std::copy(message_string.begin(), message_string.end(), std::back_inserter(message_buffer)); auto hashed_message = sha256::sha256(message_buffer); - crypto::ecdsa::key_pair account; - account.private_key = curve::fr::random_element(); - account.public_key = curve::g1::one * account.private_key; + crypto::ecdsa::key_pair account; + account.private_key = curve_ct::fr::random_element(); + account.public_key = curve_ct::g1::one * account.private_key; crypto::ecdsa::signature signature = - crypto::ecdsa::construct_signature(message_string, account); + crypto::ecdsa::construct_signature(message_string, + account); uint256_t pub_x_value = account.public_key.x; uint256_t pub_y_value = account.public_key.y; diff --git a/cpp/src/barretenberg/dsl/acir_proofs/acir_proofs.cpp b/cpp/src/barretenberg/dsl/acir_proofs/acir_proofs.cpp index 
2010c592fb..a987a66d28 100644 --- a/cpp/src/barretenberg/dsl/acir_proofs/acir_proofs.cpp +++ b/cpp/src/barretenberg/dsl/acir_proofs/acir_proofs.cpp @@ -72,7 +72,7 @@ size_t init_verification_key(void* pippenger, uint8_t const* g2x, uint8_t const* auto proving_key = std::make_shared(std::move(pk_data), crs); auto crs_factory = std::make_unique( - reinterpret_cast(pippenger), g2x); + reinterpret_cast*>(pippenger), g2x); proving_key->reference_string = crs_factory->get_prover_crs(proving_key->circuit_size); acir_format::Composer composer(proving_key, nullptr); @@ -108,7 +108,7 @@ size_t new_proof(void* pippenger, auto witness = from_buffer>(witness_buf); auto crs_factory = std::make_unique( - reinterpret_cast(pippenger), g2x); + reinterpret_cast*>(pippenger), g2x); proving_key->reference_string = crs_factory->get_prover_crs(proving_key->circuit_size); acir_format::Composer composer(proving_key, nullptr); diff --git a/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp b/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp new file mode 100644 index 0000000000..5ecc714f44 --- /dev/null +++ b/cpp/src/barretenberg/ecc/curves/bn254/bn254.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "../bn254/fr.hpp" +#include "../bn254/fq.hpp" +#include "../bn254/fq2.hpp" +#include "../bn254/g1.hpp" +#include "../bn254/g2.hpp" + +namespace curve { +class BN254 { + public: + using ScalarField = barretenberg::fr; + using BaseField = barretenberg::fq; + using Group = typename barretenberg::g1; + using Element = typename Group::element; + using AffineElement = typename Group::affine_element; + using G2AffineElement = typename barretenberg::g2::affine_element; + using G2BaseField = typename barretenberg::fq2; +}; +} // namespace curve \ No newline at end of file diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/c_bind.cpp b/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/c_bind.cpp index 66803cb5d5..74275eb514 100644 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/c_bind.cpp +++ b/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/c_bind.cpp @@ -1,5 +1,6 @@ -#include "scalar_multiplication.hpp" -#include "pippenger.hpp" +#include "../bn254.hpp" +#include "../../scalar_multiplication/scalar_multiplication.hpp" +#include "../../scalar_multiplication/pippenger.hpp" #include "barretenberg/common/mem.hpp" using namespace barretenberg; @@ -21,19 +22,19 @@ WASM_EXPORT void bbfree(void* ptr) WASM_EXPORT void* new_pippenger(uint8_t* points, size_t num_points) { - auto ptr = new scalar_multiplication::Pippenger(points, num_points); + auto ptr = new scalar_multiplication::Pippenger(points, num_points); return ptr; } WASM_EXPORT void delete_pippenger(void* pippenger) { - delete reinterpret_cast(pippenger); + delete reinterpret_cast*>(pippenger); } WASM_EXPORT void pippenger_unsafe(void* pippenger_ptr, void* scalars_ptr, size_t from, size_t range, void* result_ptr) { - scalar_multiplication::pippenger_runtime_state state(range); - auto pippenger = reinterpret_cast(pippenger_ptr); + scalar_multiplication::pippenger_runtime_state state(range); + auto pippenger = reinterpret_cast*>(pippenger_ptr); auto scalars = reinterpret_cast(scalars_ptr); auto result = reinterpret_cast(result_ptr); *result = pippenger->pippenger_unsafe(scalars, from, range); diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.cpp b/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.cpp deleted file mode 100644 index cb8f93a6c3..0000000000 --- 
a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.cpp +++ /dev/null @@ -1,44 +0,0 @@ -#include "pippenger.hpp" -#include "barretenberg/srs/io.hpp" -namespace barretenberg { -namespace scalar_multiplication { - -Pippenger::Pippenger(g1::affine_element* points, size_t num_points) - : monomials_(points) - , num_points_(num_points) -{ - io::byteswap(&monomials_[0], num_points * 64); - scalar_multiplication::generate_pippenger_point_table(monomials_, monomials_, num_points); -} - -Pippenger::Pippenger(uint8_t const* points, size_t num_points) - : num_points_(num_points) -{ - monomials_ = point_table_alloc(num_points); - - barretenberg::io::read_g1_elements_from_buffer(&monomials_[0], (char*)points, num_points * 64); - barretenberg::scalar_multiplication::generate_pippenger_point_table(monomials_, monomials_, num_points); -} - -Pippenger::Pippenger(std::string const& path, size_t num_points) - : num_points_(num_points) -{ - monomials_ = point_table_alloc(num_points); - - barretenberg::io::read_transcript_g1(monomials_, num_points, path); - barretenberg::scalar_multiplication::generate_pippenger_point_table(monomials_, monomials_, num_points); -} - -g1::element Pippenger::pippenger_unsafe(fr* scalars, size_t from, size_t range) -{ - scalar_multiplication::pippenger_runtime_state state(range); - return scalar_multiplication::pippenger_unsafe(scalars, monomials_ + from * 2, range, state); -} - -Pippenger::~Pippenger() -{ - free(monomials_); -} - -} // namespace scalar_multiplication -} // namespace barretenberg diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp b/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp deleted file mode 100644 index 36613a47a9..0000000000 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp +++ /dev/null @@ -1,155 +0,0 @@ -#pragma once - -#include "../fr.hpp" -#include "../g1.hpp" -#include "./runtime_states.hpp" -#include -#include - -namespace barretenberg { -namespace scalar_multiplication { - -constexpr size_t get_num_buckets(const size_t num_points) -{ - const size_t bits_per_bucket = get_optimal_bucket_width(num_points / 2); - return 1UL << bits_per_bucket; -} - -/** - * pointers that describe how to add points into buckets, for the pippenger algorithm. - * `wnaf_table` is an unrolled two-dimensional array, with each inner array being of size `n`, - * where `n` is the number of points being multiplied. The second dimension size is defined by - * the number of pippenger rounds (fixed for a given `n`, see `get_num_rounds`) - * - * An entry of `wnaf_table` contains the following three pieces of information: - * 1: the point index that we're working on. This is stored in the high 32 bits - * 2: the bucket index that we're adding the point into. This is stored in the low 31 bits - * 3: the sign of the point we're adding (i.e. do we actually need to subtract). This is stored in the 32nd bit. - * - * We pack this information into a 64 bit unsigned integer, so that we can more efficiently sort our wnaf entries. - * For a given round, we want to sort our wnaf entries in increasing bucket index order. - * - * This is so that we can efficiently use multiple threads to execute the pippenger algorithm. - * For a given round, a given point's bucket index will be uniformly randomly distributed, - * assuming the inputs are from a zero-knowledge proof. 
This is because the scalar multiplier will be uniformly randomly - *distributed, and the bucket indices are derived from the scalar multiplier. - * - * This means that, if we were to iterate over all of our points in order, and add each point into its associated - *bucket, we would be accessing all of our buckets in a completely random pattern. - * - * Aside from memory latency problems this incurs, this makes the naive algorithm unsuitable for multithreading - we - *cannot assign a thread a tranche of points, because each thread will be adding points into the same set of buckets, - *triggering race conditions. We do not want to manage the overhead of thread locks for each bucket; the process of - *adding a point into a bucket takes, on average, only 400 CPU cycles, so the slowdown of managing mutex locks would add - *considerable overhead. - * - * The solution is to sort the buckets. If the buckets are sorted, we can assign a tranche of buckets to individual - *threads, safe in the knowledge that there will be no race conditions, with one condition. A thread's starting bucket - *may be equal to the previous thread's end bucket, so we need to ensure that each thread works on a local array of - *buckets. This adds little overhead (for 2^20 points, we have 32,768 buckets. With 8 threads, the amount of bucket - *overlap is ~16 buckets, so we could incur 16 extra 'additions' in pippenger's bucket concatenation phase, but this is - *an insignificant contribution). - * - * The alternative approach (the one we used to use) is to slice up all of the points being multiplied amongst all - *available threads, and run the complete pippenger algorithm for each thread. This is suboptimal, because the - *complexity of pippenger is O(n / logn) point additions, and a sequence of smaller pippenger calls will have a smaller - *`n`. - * - * This is the motivation for multi-threading the actual Pippenger algorithm. In addition, the above approach performs - *extremely poorly for GPUs, where the number of threads can be as high as 2^10 (for a multi-scalar-multiplication of - *2^20 points, this doubles the number of pippenger rounds per thread) - * - * To give concrete numbers, the difference between calling pippenger on 2^20 points, and calling pippenger 8 times on - *2^17 points, is 5-10%. Which means that, for 8 threads, we need to ensure that our sorting algorithm adds less than 5% - *to the total runtime of pippenger. Given a single cache miss per point would increase the run-time by 25%, this is not - *much room to work with! - * - * However, a radix sort, combined with the fact that the total number of buckets is quite small (2^16 at most), seems - *to be fast enough. Benchmarks indicate (i7-8650U, 8 threads) that, for 2^20 points, the total runtime is <1200ms and - *of that, the radix sort consumes 58ms (4.8%) - * - * One advantage of sorting by bucket order vs point order, is that a 'bucket' is 96 bytes large (sizeof(g1::element), - *buckets have z-coordinates). Points, on the other hand, are 64 bytes large (affine points, no z-coordinate). This - *makes fetching random point locations in memory more efficient than fetching random bucket locations, as each point - *occupies a single cache line. Using __builtin_prefetch to recover the point just before it's needed, seems to improve - *the runtime of pippenger by 10-20%. 
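As a point of reference, the bit layout described above can be captured in a small packing/unpacking sketch. The struct and helper names below are illustrative only (they are not part of this patch); the shifts match the `>> 32ULL`, `>> 31ULL` and `& 0x7fffffff` accesses used throughout scalar_multiplication.cpp.

#include <cstdint>

struct ScheduleEntry {
    uint32_t point_index;  // which point to add (high 32 bits)
    uint32_t bucket_index; // destination bucket (low 31 bits)
    bool negate;           // wnaf sign: subtract instead of add (bit 31)
};

inline uint64_t pack_schedule_entry(const ScheduleEntry& e)
{
    return (static_cast<uint64_t>(e.point_index) << 32) | (static_cast<uint64_t>(e.negate) << 31) |
           static_cast<uint64_t>(e.bucket_index & 0x7fffffffU);
}

inline ScheduleEntry unpack_schedule_entry(uint64_t schedule)
{
    return { static_cast<uint32_t>(schedule >> 32ULL),
             static_cast<uint32_t>(schedule & 0x7fffffffULL),
             ((schedule >> 31ULL) & 1ULL) == 1ULL };
}

Keeping all three fields in a single 64-bit word is what lets the per-round radix sort over bucket indices stay cheap.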
- * - * Finally, `skew_table` tracks whether a scalar multplier is even or odd - * (if it's even, we need to subtract the point from the total result, - * because our windowed non-adjacent form values can only be odd) - * - **/ - -struct multiplication_thread_state { - g1::element* buckets; - const uint64_t* point_schedule; -}; - -void compute_wnaf_states(uint64_t* point_schedule, - bool* input_skew_table, - uint64_t* round_counts, - const fr* scalars, - const size_t num_initial_points); - -void generate_pippenger_point_table(g1::affine_element* points, g1::affine_element* table, size_t num_points); - -void organize_buckets(uint64_t* point_schedule, const uint64_t* round_counts, const size_t num_points); - -inline void count_bits(uint32_t* bucket_counts, - uint32_t* bit_offsets, - const uint32_t num_buckets, - const size_t num_bits) -{ - for (size_t i = 0; i < num_buckets; ++i) { - const uint32_t count = bucket_counts[i]; - for (uint32_t j = 0; j < num_bits; ++j) { - bit_offsets[j + 1] += (count & (1U << j)); - } - } - bit_offsets[0] = 0; - for (size_t i = 2; i < num_bits + 1; ++i) { - bit_offsets[i] += bit_offsets[i - 1]; - } -} - -uint32_t construct_addition_chains(affine_product_runtime_state& state, bool empty_bucket_counts = true); - -void add_affine_points(g1::affine_element* points, const size_t num_points, fq* scratch_space); -void add_affine_points_with_edge_cases(g1::affine_element* points, const size_t num_points, fq* scratch_space); - -void evaluate_addition_chains(affine_product_runtime_state& state, - const size_t max_bucket_bits, - bool handle_edge_cases); - -g1::element pippenger_internal(g1::affine_element* points, - fr* scalars, - const size_t num_initial_points, - pippenger_runtime_state& state, - bool handle_edge_cases); - -g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, - g1::affine_element* points, - const size_t num_points, - bool handle_edge_cases = false); - -g1::affine_element* reduce_buckets(affine_product_runtime_state& state, - bool first_round = true, - bool handle_edge_cases = false); - -g1::element pippenger(fr* scalars, - g1::affine_element* points, - const size_t num_points, - pippenger_runtime_state& state, - bool handle_edge_cases = true); - -g1::element pippenger_unsafe(fr* scalars, - g1::affine_element* points, - const size_t num_initial_points, - pippenger_runtime_state& state); -g1::element pippenger_without_endomorphism_basis_points(fr* scalars, - g1::affine_element* points, - const size_t num_initial_points, - pippenger_runtime_state& state); - -} // namespace scalar_multiplication -} // namespace barretenberg diff --git a/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp b/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp index 918969d6ba..cd575a16d6 100644 --- a/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp +++ b/cpp/src/barretenberg/ecc/curves/grumpkin/grumpkin.hpp @@ -16,7 +16,7 @@ struct GrumpkinG1Params { static constexpr bool can_hash_to_curve = true; static constexpr bool small_elements = true; static constexpr bool has_a = false; - // have checked in grumpkin.test_b that b is Montgomery form of -17 + // have checked in grumpkin.test_b that b is Montgomery form of -17 static constexpr barretenberg::fr b{ 0xdd7056026000005a, 0x223fa97acb319311, 0xcc388229877910c0, 0x34394632b724eaa }; @@ -31,4 +31,16 @@ struct GrumpkinG1Params { typedef barretenberg::group g1; g1::affine_element get_generator(const size_t generator_index); -} // namespace grumpkin \ No newline at end of file + +}; // namespace grumpkin + 
+namespace curve { +class Grumpkin { + public: + using ScalarField = barretenberg::fq; + using BaseField = barretenberg::fr; + using Group = typename grumpkin::g1; + using Element = typename Group::element; + using AffineElement = typename Group::affine_element; +}; +} // namespace curve \ No newline at end of file diff --git a/cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.cpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.cpp new file mode 100644 index 0000000000..a32df73e92 --- /dev/null +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.cpp @@ -0,0 +1,51 @@ +#include "pippenger.hpp" +#include "barretenberg/srs/io.hpp" +namespace barretenberg { +namespace scalar_multiplication { + +template +Pippenger::Pippenger(typename Curve::AffineElement* points, size_t num_points) + : monomials_(points) + , num_points_(num_points) +{ + srs::IO::byteswap(&monomials_[0], num_points * 64); + scalar_multiplication::generate_pippenger_point_table(monomials_, monomials_, num_points); +} + +template +Pippenger::Pippenger(uint8_t const* points, size_t num_points) + : num_points_(num_points) +{ + monomials_ = point_table_alloc(num_points); + + srs::IO::read_affine_elements_from_buffer(&monomials_[0], (char*)points, num_points * 64); + scalar_multiplication::generate_pippenger_point_table(monomials_, monomials_, num_points); +} + +template +Pippenger::Pippenger(std::string const& path, size_t num_points) + : num_points_(num_points) +{ + monomials_ = point_table_alloc(num_points); + + srs::IO::read_transcript_g1(monomials_, num_points, path); + scalar_multiplication::generate_pippenger_point_table(monomials_, monomials_, num_points); +} + +template +typename Curve::Element Pippenger::pippenger_unsafe(ScalarField* scalars, size_t from, size_t range) +{ + scalar_multiplication::pippenger_runtime_state state(range); + return scalar_multiplication::pippenger_unsafe(scalars, monomials_ + from * 2, range, state); +} + +template Pippenger::~Pippenger() +{ + free(monomials_); +} + +template class Pippenger; +template class Pippenger; + +} // namespace scalar_multiplication +} // namespace barretenberg diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.hpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.hpp similarity index 72% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.hpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.hpp index 48a2c133f6..1dc2132851 100644 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.hpp +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/pippenger.hpp @@ -32,14 +32,17 @@ template inline T* point_table_alloc(size_t num_points) return (T*)aligned_alloc(64, point_table_buf_size(num_points)); } -class Pippenger { +template class Pippenger { public: + using ScalarField = typename Curve::ScalarField; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; /** * Expects points to be buffer of size as per point_table_size(). * It expects the crs to start at points[1], and it fills in affine_one at points[0]. * The crs undergoes a byteswap, and then the point table is generated. 
*/ - Pippenger(g1::affine_element* points, size_t num_points); + Pippenger(AffineElement* points, size_t num_points); Pippenger(uint8_t const* points, size_t num_points); @@ -47,16 +50,19 @@ class Pippenger { ~Pippenger(); - g1::element pippenger_unsafe(fr* scalars, size_t from, size_t range); + Element pippenger_unsafe(ScalarField* scalars, size_t from, size_t range); - g1::affine_element* get_point_table() const { return monomials_; } + AffineElement* get_point_table() const { return monomials_; } size_t get_num_points() const { return num_points_; } private: - g1::affine_element* monomials_; + AffineElement* monomials_; size_t num_points_; }; +extern template class Pippenger; +extern template class Pippenger; + } // namespace scalar_multiplication } // namespace barretenberg diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/process_buckets.cpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/process_buckets.cpp similarity index 100% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/process_buckets.cpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/process_buckets.cpp diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/process_buckets.hpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/process_buckets.hpp similarity index 100% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/process_buckets.hpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/process_buckets.hpp diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.cpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/runtime_states.cpp similarity index 76% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.cpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/runtime_states.cpp index 6e8aa5ccd8..9e0ba1fc03 100644 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.cpp +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/runtime_states.cpp @@ -11,13 +11,16 @@ namespace barretenberg { namespace scalar_multiplication { -pippenger_runtime_state::pippenger_runtime_state(const size_t num_initial_points) +template pippenger_runtime_state::pippenger_runtime_state(const size_t num_initial_points) { + using Fq = typename Curve::BaseField; + using AffineElement = typename Curve::AffineElement; + constexpr size_t MAX_NUM_ROUNDS = 256; num_points = num_initial_points * 2; const size_t num_points_floor = static_cast(1ULL << (numeric::get_msb(num_points))); const size_t num_buckets = static_cast( - 1U << barretenberg::scalar_multiplication::get_optimal_bucket_width(static_cast(num_initial_points))); + 1U << scalar_multiplication::get_optimal_bucket_width(static_cast(num_initial_points))); #ifndef NO_MULTITHREADING const size_t num_threads = max_threads::compute_num_threads(); #else @@ -25,15 +28,15 @@ pippenger_runtime_state::pippenger_runtime_state(const size_t num_initial_points #endif const size_t prefetch_overflow = 16 * num_threads; const size_t num_rounds = - static_cast(barretenberg::scalar_multiplication::get_num_rounds(static_cast(num_points_floor))); + static_cast(scalar_multiplication::get_num_rounds(static_cast(num_points_floor))); point_schedule = (uint64_t*)(aligned_alloc( 64, (static_cast(num_points) * num_rounds + prefetch_overflow) * sizeof(uint64_t))); skew_table = (bool*)(aligned_alloc(64, pad(static_cast(num_points) * sizeof(bool), 64))); - point_pairs_1 = 
(g1::affine_element*)(aligned_alloc( - 64, (static_cast(num_points) * 2 + (num_threads * 16)) * sizeof(g1::affine_element))); - point_pairs_2 = (g1::affine_element*)(aligned_alloc( - 64, (static_cast(num_points) * 2 + (num_threads * 16)) * sizeof(g1::affine_element))); - scratch_space = (fq*)(aligned_alloc(64, static_cast(num_points) * sizeof(g1::affine_element))); + point_pairs_1 = (AffineElement*)(aligned_alloc( + 64, (static_cast(num_points) * 2 + (num_threads * 16)) * sizeof(AffineElement))); + point_pairs_2 = (AffineElement*)(aligned_alloc( + 64, (static_cast(num_points) * 2 + (num_threads * 16)) * sizeof(AffineElement))); + scratch_space = (Fq*)(aligned_alloc(64, static_cast(num_points) * sizeof(AffineElement))); bucket_counts = (uint32_t*)(aligned_alloc(64, num_threads * num_buckets * sizeof(uint32_t))); bit_counts = (uint32_t*)(aligned_alloc(64, num_threads * num_buckets * sizeof(uint32_t))); bucket_empty_status = (bool*)(aligned_alloc(64, num_threads * num_buckets * sizeof(bool))); @@ -45,13 +48,9 @@ pippenger_runtime_state::pippenger_runtime_state(const size_t num_initial_points #endif for (size_t i = 0; i < num_threads; ++i) { const size_t thread_offset = i * points_per_thread; - memset((void*)(point_pairs_1 + thread_offset + (i * 16)), - 0, - (points_per_thread + 16) * sizeof(g1::affine_element)); - memset((void*)(point_pairs_2 + thread_offset + (i * 16)), - 0, - (points_per_thread + 16) * sizeof(g1::affine_element)); - memset((void*)(scratch_space + thread_offset), 0, (points_per_thread) * sizeof(fq)); + memset((void*)(point_pairs_1 + thread_offset + (i * 16)), 0, (points_per_thread + 16) * sizeof(AffineElement)); + memset((void*)(point_pairs_2 + thread_offset + (i * 16)), 0, (points_per_thread + 16) * sizeof(AffineElement)); + memset((void*)(scratch_space + thread_offset), 0, (points_per_thread) * sizeof(Fq)); for (size_t j = 0; j < num_rounds; ++j) { const size_t round_offset = (j * static_cast(num_points)); memset((void*)(point_schedule + round_offset + thread_offset), 0, points_per_thread * sizeof(uint64_t)); @@ -65,7 +64,7 @@ pippenger_runtime_state::pippenger_runtime_state(const size_t num_initial_points memset((void*)round_counts, 0, MAX_NUM_ROUNDS * sizeof(uint64_t)); } -pippenger_runtime_state::pippenger_runtime_state(pippenger_runtime_state&& other) +template pippenger_runtime_state::pippenger_runtime_state(pippenger_runtime_state&& other) { point_schedule = other.point_schedule; skew_table = other.skew_table; @@ -90,7 +89,8 @@ pippenger_runtime_state::pippenger_runtime_state(pippenger_runtime_state&& other num_points = other.num_points; } -pippenger_runtime_state& pippenger_runtime_state::operator=(pippenger_runtime_state&& other) +template +pippenger_runtime_state& pippenger_runtime_state::operator=(pippenger_runtime_state&& other) { if (point_schedule) { aligned_free(point_schedule); @@ -152,14 +152,15 @@ pippenger_runtime_state& pippenger_runtime_state::operator=(pippenger_runtime_st return *this; } -affine_product_runtime_state pippenger_runtime_state::get_affine_product_runtime_state(const size_t num_threads, - const size_t thread_index) +template +affine_product_runtime_state pippenger_runtime_state::get_affine_product_runtime_state( + const size_t num_threads, const size_t thread_index) { const size_t points_per_thread = static_cast(num_points / num_threads); - const size_t num_buckets = static_cast( - 1U << barretenberg::scalar_multiplication::get_optimal_bucket_width(static_cast(num_points) / 2)); + const size_t num_buckets = + static_cast(1U << 
scalar_multiplication::get_optimal_bucket_width(static_cast(num_points) / 2)); - scalar_multiplication::affine_product_runtime_state product_state; + scalar_multiplication::affine_product_runtime_state product_state; product_state.point_pairs_1 = point_pairs_1 + (thread_index * points_per_thread) + (thread_index * 16); product_state.point_pairs_2 = point_pairs_2 + (thread_index * points_per_thread) + (thread_index * 16); @@ -170,7 +171,7 @@ affine_product_runtime_state pippenger_runtime_state::get_affine_product_runtime return product_state; } -pippenger_runtime_state::~pippenger_runtime_state() +template pippenger_runtime_state::~pippenger_runtime_state() { if (point_schedule) { aligned_free(point_schedule); @@ -208,5 +209,10 @@ pippenger_runtime_state::~pippenger_runtime_state() aligned_free(round_counts); } } + +template struct affine_product_runtime_state; +template struct affine_product_runtime_state; +template struct pippenger_runtime_state; +template struct pippenger_runtime_state; } // namespace scalar_multiplication -} // namespace barretenberg +} // namespace barretenberg \ No newline at end of file diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.hpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/runtime_states.hpp similarity index 65% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.hpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/runtime_states.hpp index 14c62eb089..b0102485f4 100644 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.hpp +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/runtime_states.hpp @@ -1,6 +1,9 @@ #pragma once -#include "../g1.hpp" +// #include "../g1.hpp" +#include "../grumpkin/grumpkin.hpp" +#include "../bn254/bn254.hpp" +#include "barretenberg/ecc/groups/wnaf.hpp" namespace barretenberg { // simple helper functions to retrieve pointers to pre-allocated memory for the scalar multiplication algorithm. 
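Since the runtime state is now parameterized by a curve class, a typical call site looks like the sketch below. This is an assumed usage example rather than code from this patch (the `grumpkin_msm` wrapper and its argument names are hypothetical); it mirrors the BN254 call pattern in pippenger_bench/main.cpp, and the explicit instantiations above make both curve::BN254 and curve::Grumpkin available.

#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp"
#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp"

// Hypothetical helper: multiply a Grumpkin point table by a vector of scalars.
inline curve::Grumpkin::Element grumpkin_msm(curve::Grumpkin::ScalarField* scalars,
                                             curve::Grumpkin::AffineElement* point_table,
                                             size_t num_points)
{
    // One state per MSM; it owns the point schedule, skew table and scratch space.
    barretenberg::scalar_multiplication::pippenger_runtime_state<curve::Grumpkin> state(num_points);
    // `point_table` is assumed to be the endomorphism-expanded table from generate_pippenger_point_table.
    return barretenberg::scalar_multiplication::pippenger_unsafe<curve::Grumpkin>(
        scalars, point_table, num_points, state);
}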
@@ -62,11 +65,11 @@ constexpr size_t get_num_rounds(const size_t num_points) return WNAF_SIZE(bits_per_bucket + 1); } -struct affine_product_runtime_state { - g1::affine_element* points; - g1::affine_element* point_pairs_1; - g1::affine_element* point_pairs_2; - fq* scratch_space; +template struct affine_product_runtime_state { + typename Curve::AffineElement* points; + typename Curve::AffineElement* point_pairs_1; + typename Curve::AffineElement* point_pairs_2; + typename Curve::BaseField* scratch_space; uint32_t* bucket_counts; uint32_t* bit_offsets; uint64_t* point_schedule; @@ -75,12 +78,12 @@ struct affine_product_runtime_state { bool* bucket_empty_status; }; -struct pippenger_runtime_state { +template struct pippenger_runtime_state { uint64_t* point_schedule; bool* skew_table; - g1::affine_element* point_pairs_1; - g1::affine_element* point_pairs_2; - fq* scratch_space; + typename Curve::AffineElement* point_pairs_1; + typename Curve::AffineElement* point_pairs_2; + typename Curve::BaseField* scratch_space; uint32_t* bucket_counts; uint32_t* bit_counts; bool* bucket_empty_status; @@ -92,7 +95,13 @@ struct pippenger_runtime_state { pippenger_runtime_state& operator=(pippenger_runtime_state&& other); ~pippenger_runtime_state(); - affine_product_runtime_state get_affine_product_runtime_state(const size_t num_threads, const size_t thread_index); + affine_product_runtime_state get_affine_product_runtime_state(const size_t num_threads, + const size_t thread_index); }; + +extern template struct affine_product_runtime_state; +extern template struct affine_product_runtime_state; +extern template struct pippenger_runtime_state; +extern template struct pippenger_runtime_state; } // namespace scalar_multiplication } // namespace barretenberg \ No newline at end of file diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.cpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.cpp similarity index 69% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.cpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.cpp index 8ff1782f1e..40d1af0307 100644 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.cpp +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.cpp @@ -10,10 +10,7 @@ #include #include -#include "../../../groups/wnaf.hpp" -#include "../fq.hpp" -#include "../fr.hpp" -#include "../g1.hpp" +#include "../../groups/wnaf.hpp" #include "./process_buckets.hpp" #include "./runtime_states.hpp" @@ -56,44 +53,44 @@ uint64_t schedule_o = state.point_schedule[schedule_it + 14]; \ uint64_t schedule_p = state.point_schedule[schedule_it + 15]; \ \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_a >> 32ULL), state.point_pairs_1 + current_offset, (schedule_a >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_b >> 32ULL), state.point_pairs_1 + current_offset + 1, (schedule_b >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_c >> 32ULL), state.point_pairs_1 + current_offset + 2, (schedule_c >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_d >> 32ULL), state.point_pairs_1 + current_offset + 3, (schedule_d >> 31ULL) & 1ULL); \ - 
g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_e >> 32ULL), state.point_pairs_1 + current_offset + 4, (schedule_e >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_f >> 32ULL), state.point_pairs_1 + current_offset + 5, (schedule_f >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_g >> 32ULL), state.point_pairs_1 + current_offset + 6, (schedule_g >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_h >> 32ULL), state.point_pairs_1 + current_offset + 7, (schedule_h >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_i >> 32ULL), state.point_pairs_1 + current_offset + 8, (schedule_i >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine( \ + Group::conditional_negate_affine( \ state.points + (schedule_j >> 32ULL), state.point_pairs_1 + current_offset + 9, (schedule_j >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine(state.points + (schedule_k >> 32ULL), \ - state.point_pairs_1 + current_offset + 10, \ - (schedule_k >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine(state.points + (schedule_l >> 32ULL), \ - state.point_pairs_1 + current_offset + 11, \ - (schedule_l >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine(state.points + (schedule_m >> 32ULL), \ - state.point_pairs_1 + current_offset + 12, \ - (schedule_m >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine(state.points + (schedule_n >> 32ULL), \ - state.point_pairs_1 + current_offset + 13, \ - (schedule_n >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine(state.points + (schedule_o >> 32ULL), \ - state.point_pairs_1 + current_offset + 14, \ - (schedule_o >> 31ULL) & 1ULL); \ - g1::conditional_negate_affine(state.points + (schedule_p >> 32ULL), \ - state.point_pairs_1 + current_offset + 15, \ - (schedule_p >> 31ULL) & 1ULL); \ + Group::conditional_negate_affine(state.points + (schedule_k >> 32ULL), \ + state.point_pairs_1 + current_offset + 10, \ + (schedule_k >> 31ULL) & 1ULL); \ + Group::conditional_negate_affine(state.points + (schedule_l >> 32ULL), \ + state.point_pairs_1 + current_offset + 11, \ + (schedule_l >> 31ULL) & 1ULL); \ + Group::conditional_negate_affine(state.points + (schedule_m >> 32ULL), \ + state.point_pairs_1 + current_offset + 12, \ + (schedule_m >> 31ULL) & 1ULL); \ + Group::conditional_negate_affine(state.points + (schedule_n >> 32ULL), \ + state.point_pairs_1 + current_offset + 13, \ + (schedule_n >> 31ULL) & 1ULL); \ + Group::conditional_negate_affine(state.points + (schedule_o >> 32ULL), \ + state.point_pairs_1 + current_offset + 14, \ + (schedule_o >> 31ULL) & 1ULL); \ + Group::conditional_negate_affine(state.points + (schedule_p >> 32ULL), \ + state.point_pairs_1 + current_offset + 15, \ + (schedule_p >> 31ULL) & 1ULL); \ \ current_offset += 16; \ schedule_it += 16; @@ -101,10 +98,14 @@ namespace barretenberg { namespace scalar_multiplication { -void generate_pippenger_point_table(g1::affine_element* points, g1::affine_element* table, size_t num_points) +template +void generate_pippenger_point_table(typename Curve::AffineElement* points, + typename Curve::AffineElement* table, + size_t num_points) { // iterate backwards, so that `points` and `table` can point to the same memory location - fq beta = fq::cube_root_of_unity(); + using Fq = typename Curve::BaseField; + Fq beta = 
Fq::cube_root_of_unity(); for (size_t i = num_points - 1; i < num_points; --i) { table[i * 2] = points[i]; table[i * 2 + 1].x = beta * points[i].x; @@ -192,12 +193,14 @@ void generate_pippenger_point_table(g1::affine_element* points, g1::affine_eleme * @param scalars The pointer to the region with initial scalars that need to be converted into WNAF * @param num_initial_points The number of points before the endomorphism split **/ +template void compute_wnaf_states(uint64_t* point_schedule, bool* input_skew_table, uint64_t* round_counts, - const fr* scalars, + const typename Curve::ScalarField* scalars, const size_t num_initial_points) { + using Fr = typename Curve::ScalarField; const size_t num_points = num_initial_points * 2; constexpr size_t MAX_NUM_ROUNDS = 256; constexpr size_t MAX_NUM_THREADS = 128; @@ -221,15 +224,15 @@ void compute_wnaf_states(uint64_t* point_schedule, #pragma omp parallel for #endif for (size_t i = 0; i < num_threads; ++i) { - fr T0; + Fr T0; uint64_t* wnaf_table = &point_schedule[(2 * i) * num_initial_points_per_thread]; - const fr* thread_scalars = &scalars[i * num_initial_points_per_thread]; + const Fr* thread_scalars = &scalars[i * num_initial_points_per_thread]; bool* skew_table = &input_skew_table[(2 * i) * num_initial_points_per_thread]; uint64_t offset = i * num_points_per_thread; for (uint64_t j = 0; j < num_initial_points_per_thread; ++j) { T0 = thread_scalars[j].from_montgomery_form(); - fr::split_into_endomorphism_scalars(T0, T0, *(fr*)&T0.data[2]); + Fr::split_into_endomorphism_scalars(T0, T0, *(Fr*)&T0.data[2]); wnaf::fixed_wnaf_with_counts(&T0.data[0], &wnaf_table[(j << 1UL)], @@ -309,9 +312,13 @@ void organize_buckets(uint64_t* point_schedule, const uint64_t*, const size_t nu * * We can re-arrange the Pippenger algorithm to get this property, but it's...complicated **/ -void add_affine_points(g1::affine_element* points, const size_t num_points, fq* scratch_space) +template +void add_affine_points(typename Curve::AffineElement* points, + const size_t num_points, + typename Curve::BaseField* scratch_space) { - fq batch_inversion_accumulator = fq::one(); + using Fq = typename Curve::BaseField; + Fq batch_inversion_accumulator = Fq::one(); for (size_t i = 0; i < num_points; i += 2) { scratch_space[i >> 1] = points[i].x + points[i + 1].x; // x2 + x1 @@ -346,9 +353,13 @@ void add_affine_points(g1::affine_element* points, const size_t num_points, fq* } } -void add_affine_points_with_edge_cases(g1::affine_element* points, const size_t num_points, fq* scratch_space) +template +void add_affine_points_with_edge_cases(typename Curve::AffineElement* points, + const size_t num_points, + typename Curve::BaseField* scratch_space) { - fq batch_inversion_accumulator = fq::one(); + using Fq = typename Curve::BaseField; + Fq batch_inversion_accumulator = Fq::one(); for (size_t i = 0; i < num_points; i += 2) { if (points[i].is_point_at_infinity() || points[i + 1].is_point_at_infinity()) { @@ -358,7 +369,7 @@ void add_affine_points_with_edge_cases(g1::affine_element* points, const size_t if (points[i].y == points[i + 1].y) { // double scratch_space[i >> 1] = points[i].x + points[i].x; // 2x - fq x_squared = points[i].x.sqr(); + Fq x_squared = points[i].x.sqr(); points[i + 1].x = points[i].y + points[i].y; // 2y points[i + 1].y = x_squared + x_squared + x_squared; // 3x^2 points[i + 1].y *= batch_inversion_accumulator; @@ -414,7 +425,10 @@ void add_affine_points_with_edge_cases(g1::affine_element* points, const size_t * `max_bucket_bits` indicates the largest set of 
nested pairs in the array, * which defines the iteration depth **/ -void evaluate_addition_chains(affine_product_runtime_state& state, const size_t max_bucket_bits, bool handle_edge_cases) +template +void evaluate_addition_chains(affine_product_runtime_state& state, + const size_t max_bucket_bits, + bool handle_edge_cases) { size_t end = state.num_points; size_t start = 0; @@ -422,9 +436,9 @@ void evaluate_addition_chains(affine_product_runtime_state& state, const size_t const size_t points_in_round = (state.num_points - state.bit_offsets[i + 1]) >> (i); start = end - points_in_round; if (handle_edge_cases) { - add_affine_points_with_edge_cases(state.point_pairs_1 + start, points_in_round, state.scratch_space); + add_affine_points_with_edge_cases(state.point_pairs_1 + start, points_in_round, state.scratch_space); } else { - add_affine_points(state.point_pairs_1 + start, points_in_round, state.scratch_space); + add_affine_points(state.point_pairs_1 + start, points_in_round, state.scratch_space); } } } @@ -449,7 +463,10 @@ void evaluate_addition_chains(affine_product_runtime_state& state, const size_t * The next step is to 'play it again Sam', and recurse back into `reduce_buckets`, with our reduced number of points. * We repeat this process until every bucket only has one point assigned to it. **/ -g1::affine_element* reduce_buckets(affine_product_runtime_state& state, bool first_round, bool handle_edge_cases) +template +typename Curve::AffineElement* reduce_buckets(affine_product_runtime_state& state, + bool first_round, + bool handle_edge_cases) { // std::chrono::steady_clock::time_point time_start = std::chrono::steady_clock::now(); @@ -515,7 +532,7 @@ g1::affine_element* reduce_buckets(affine_product_runtime_state& state, bool fir // modify `num_points` to reflect the new number of reduced points. 
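// Aside (illustrative sketch, not part of this patch): the batched-affine-addition idea used by
// add_affine_points above. Every pairwise slope denominator (x2 - x1) is folded into one running
// product so that a single field inversion serves the whole chain; a backwards pass then peels the
// individual inverses back out.
//
//     Fq acc = Fq::one();
//     for (size_t i = 0; i < num_points; i += 2) {
//         scratch_space[i >> 1] = points[i].x + points[i + 1].x; // save x1 + x2 for the x3 computation
//         points[i + 1].x -= points[i].x;                        // denominator: x2 - x1
//         points[i + 1].y -= points[i].y;                        // numerator:   y2 - y1
//         points[i + 1].y *= acc;                                // scale numerator by product of earlier denominators
//         acc *= points[i + 1].x;                                // fold this denominator into the running product
//     }
//     acc = acc.invert(); // one inversion amortized over num_points / 2 additions
//     // backwards pass: lambda = scaled numerator * acc, then x3 = lambda^2 - (x1 + x2),
//     // y3 = lambda * (x1 - x3) - y1, and acc is multiplied by the saved denominator to
//     // step back to the previous pair.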
// also swap around the `point_pairs` pointer; what used to be our temporary array // has now become our input point array - g1::affine_element* temp = state.point_pairs_1; + typename Curve::AffineElement* temp = state.point_pairs_1; state.num_points = new_num_points; state.points = state.point_pairs_1; state.point_pairs_1 = state.point_pairs_2; @@ -527,8 +544,10 @@ g1::affine_element* reduce_buckets(affine_product_runtime_state& state, bool fir return reduce_buckets(state, false, handle_edge_cases); } -uint32_t construct_addition_chains(affine_product_runtime_state& state, bool empty_bucket_counts) +template +uint32_t construct_addition_chains(affine_product_runtime_state& state, bool empty_bucket_counts) { + using Group = typename Curve::Group; // if this is the first call to `construct_addition_chains`, we need to count up our buckets if (empty_bucket_counts) { memset((void*)state.bucket_counts, 0x00, sizeof(uint32_t) * state.num_buckets); @@ -614,30 +633,30 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, bool emp const uint64_t schedule_g = state.point_schedule[schedule_it + 6]; const uint64_t schedule_h = state.point_schedule[schedule_it + 7]; - g1::conditional_negate_affine(state.points + (schedule_a >> 32ULL), - state.point_pairs_1 + current_offset, - (schedule_a >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_b >> 32ULL), - state.point_pairs_1 + current_offset + 1, - (schedule_b >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_c >> 32ULL), - state.point_pairs_1 + current_offset + 2, - (schedule_c >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_d >> 32ULL), - state.point_pairs_1 + current_offset + 3, - (schedule_d >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_e >> 32ULL), - state.point_pairs_1 + current_offset + 4, - (schedule_e >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_f >> 32ULL), - state.point_pairs_1 + current_offset + 5, - (schedule_f >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_g >> 32ULL), - state.point_pairs_1 + current_offset + 6, - (schedule_g >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_h >> 32ULL), - state.point_pairs_1 + current_offset + 7, - (schedule_h >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_a >> 32ULL), + state.point_pairs_1 + current_offset, + (schedule_a >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_b >> 32ULL), + state.point_pairs_1 + current_offset + 1, + (schedule_b >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_c >> 32ULL), + state.point_pairs_1 + current_offset + 2, + (schedule_c >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_d >> 32ULL), + state.point_pairs_1 + current_offset + 3, + (schedule_d >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_e >> 32ULL), + state.point_pairs_1 + current_offset + 4, + (schedule_e >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_f >> 32ULL), + state.point_pairs_1 + current_offset + 5, + (schedule_f >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_g >> 32ULL), + state.point_pairs_1 + current_offset + 6, + (schedule_g >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_h >> 32ULL), + state.point_pairs_1 + current_offset + 7, + (schedule_h >> 31ULL) & 
1ULL); current_offset += 8; schedule_it += 8; @@ -653,18 +672,18 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, bool emp const uint64_t schedule_c = state.point_schedule[schedule_it + 2]; const uint64_t schedule_d = state.point_schedule[schedule_it + 3]; - g1::conditional_negate_affine(state.points + (schedule_a >> 32ULL), - state.point_pairs_1 + current_offset, - (schedule_a >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_b >> 32ULL), - state.point_pairs_1 + current_offset + 1, - (schedule_b >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_c >> 32ULL), - state.point_pairs_1 + current_offset + 2, - (schedule_c >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_d >> 32ULL), - state.point_pairs_1 + current_offset + 3, - (schedule_d >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_a >> 32ULL), + state.point_pairs_1 + current_offset, + (schedule_a >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_b >> 32ULL), + state.point_pairs_1 + current_offset + 1, + (schedule_b >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_c >> 32ULL), + state.point_pairs_1 + current_offset + 2, + (schedule_c >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_d >> 32ULL), + state.point_pairs_1 + current_offset + 3, + (schedule_d >> 31ULL) & 1ULL); current_offset += 4; schedule_it += 4; break; @@ -677,12 +696,12 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, bool emp const uint64_t schedule_a = state.point_schedule[schedule_it]; const uint64_t schedule_b = state.point_schedule[schedule_it + 1]; - g1::conditional_negate_affine(state.points + (schedule_a >> 32ULL), - state.point_pairs_1 + current_offset, - (schedule_a >> 31ULL) & 1ULL); - g1::conditional_negate_affine(state.points + (schedule_b >> 32ULL), - state.point_pairs_1 + current_offset + 1, - (schedule_b >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_a >> 32ULL), + state.point_pairs_1 + current_offset, + (schedule_a >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_b >> 32ULL), + state.point_pairs_1 + current_offset + 1, + (schedule_b >> 31ULL) & 1ULL); current_offset += 2; schedule_it += 2; break; @@ -694,9 +713,9 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, bool emp __builtin_prefetch(state.points + (state.point_schedule[schedule_it + 7] >> 32ULL)); const uint64_t schedule_a = state.point_schedule[schedule_it]; - g1::conditional_negate_affine(state.points + (schedule_a >> 32ULL), - state.point_pairs_1 + current_offset, - (schedule_a >> 31ULL) & 1ULL); + Group::conditional_negate_affine(state.points + (schedule_a >> 32ULL), + state.point_pairs_1 + current_offset, + (schedule_a >> 31ULL) & 1ULL); ++current_offset; ++schedule_it; break; @@ -711,7 +730,7 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, bool emp const uint64_t predicate = (schedule >> 31UL) & 1UL; - g1::conditional_negate_affine( + Group::conditional_negate_affine( state.points + (schedule >> 32ULL), state.point_pairs_1 + current_offset, predicate); ++current_offset; ++schedule_it; @@ -723,11 +742,14 @@ uint32_t construct_addition_chains(affine_product_runtime_state& state, bool emp return max_bucket_bits; } -g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, - g1::affine_element* points, - const size_t num_points, - 
bool handle_edge_cases) +template +typename Curve::Element evaluate_pippenger_rounds(pippenger_runtime_state& state, + typename Curve::AffineElement* points, + const size_t num_points, + bool handle_edge_cases) { + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; const size_t num_rounds = get_num_rounds(num_points); #ifndef NO_MULTITHREADING const size_t num_threads = max_threads::compute_num_threads(); @@ -736,8 +758,8 @@ g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, #endif const size_t bits_per_bucket = get_optimal_bucket_width(num_points / 2); - std::unique_ptr thread_accumulators( - static_cast(aligned_alloc(64, num_threads * sizeof(g1::element))), &aligned_free); + std::unique_ptr thread_accumulators( + static_cast(aligned_alloc(64, num_threads * sizeof(Element))), &aligned_free); #ifndef NO_MULTITHREADING #pragma omp parallel for @@ -749,7 +771,7 @@ g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, const uint64_t num_round_points = state.round_counts[i]; - g1::element accumulator; + Element accumulator; accumulator.self_set_infinity(); if ((num_round_points == 0) || (num_round_points < num_threads && j != num_threads - 1)) { @@ -766,13 +788,14 @@ g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, thread_point_schedule[(num_round_points_per_thread - 1 + leftovers)] & 0x7fffffffU; const size_t num_thread_buckets = (last_bucket - first_bucket) + 1; - affine_product_runtime_state product_state = state.get_affine_product_runtime_state(num_threads, j); + affine_product_runtime_state product_state = + state.get_affine_product_runtime_state(num_threads, j); product_state.num_points = static_cast(num_round_points_per_thread + leftovers); product_state.points = points; product_state.point_schedule = thread_point_schedule; product_state.num_buckets = static_cast(num_thread_buckets); - g1::affine_element* output_buckets = reduce_buckets(product_state, true, handle_edge_cases); - g1::element running_sum; + AffineElement* output_buckets = reduce_buckets(product_state, true, handle_edge_cases); + Element running_sum; running_sum.self_set_infinity(); // one nice side-effect of the affine trick, is that half of the bucket concatenation @@ -795,7 +818,7 @@ g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, if (first_bucket > 0) { uint32_t multiplier = static_cast(first_bucket << 1UL); size_t shift = numeric::get_msb(multiplier); - g1::element rolling_accumulator = g1::point_at_infinity; + Element rolling_accumulator = Curve::Group::point_at_infinity; bool init = false; while (shift != static_cast(-1)) { if (init) { @@ -816,8 +839,8 @@ g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, if (i == (num_rounds - 1)) { const size_t num_points_per_thread = num_points / num_threads; bool* skew_table = &state.skew_table[j * num_points_per_thread]; - g1::affine_element* point_table = &points[j * num_points_per_thread]; - g1::affine_element addition_temporary; + AffineElement* point_table = &points[j * num_points_per_thread]; + AffineElement addition_temporary; for (size_t k = 0; k < num_points_per_thread; ++k) { if (skew_table[k]) { addition_temporary = -point_table[k]; @@ -835,7 +858,7 @@ g1::element evaluate_pippenger_rounds(pippenger_runtime_state& state, } } - g1::element result; + Element result; result.self_set_infinity(); for (size_t i = 0; i < num_threads; ++i) { result += thread_accumulators[i]; @@ -843,25 +866,31 @@ g1::element 
evaluate_pippenger_rounds(pippenger_runtime_state& state, return result; } -g1::element pippenger_internal(g1::affine_element* points, - fr* scalars, - const size_t num_initial_points, - pippenger_runtime_state& state, - bool handle_edge_cases) +template +typename Curve::Element pippenger_internal(typename Curve::AffineElement* points, + typename Curve::ScalarField* scalars, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases) { // multiplication_runtime_state state; - compute_wnaf_states(state.point_schedule, state.skew_table, state.round_counts, scalars, num_initial_points); + compute_wnaf_states(state.point_schedule, state.skew_table, state.round_counts, scalars, num_initial_points); organize_buckets(state.point_schedule, state.round_counts, num_initial_points * 2); - g1::element result = evaluate_pippenger_rounds(state, points, num_initial_points * 2, handle_edge_cases); + typename Curve::Element result = + evaluate_pippenger_rounds(state, points, num_initial_points * 2, handle_edge_cases); return result; } -g1::element pippenger(fr* scalars, - g1::affine_element* points, - const size_t num_initial_points, - pippenger_runtime_state& state, - bool handle_edge_cases) +template +typename Curve::Element pippenger(typename Curve::ScalarField* scalars, + typename Curve::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases) { + using Group = typename Curve::Group; + using Element = typename Curve::Element; + // our windowed non-adjacent form algorthm requires that each thread can work on at least 8 points. // If we fall below this theshold, fall back to the traditional scalar multiplication algorithm. // For 8 threads, this neatly coincides with the threshold where Strauss scalar multiplication outperforms Pippenger @@ -872,20 +901,20 @@ g1::element pippenger(fr* scalars, #endif if (num_initial_points == 0) { - g1::element out = g1::one; + Element out = Group::one; out.self_set_infinity(); return out; } if (num_initial_points <= threshold) { - std::vector exponentiation_results(num_initial_points); + std::vector exponentiation_results(num_initial_points); // might as well multithread this... // Possible optimization: use group::batch_mul_with_endomorphism here. #ifndef NO_MULTITHREADING #pragma omp parallel for #endif for (size_t i = 0; i < num_initial_points; ++i) { - exponentiation_results[i] = g1::element(points[i * 2]) * scalars[i]; + exponentiation_results[i] = Element(points[i * 2]) * scalars[i]; } for (size_t i = num_initial_points - 1; i > 0; --i) { @@ -897,7 +926,7 @@ g1::element pippenger(fr* scalars, const size_t slice_bits = static_cast(numeric::get_msb(static_cast(num_initial_points))); const size_t num_slice_points = static_cast(1ULL << slice_bits); - g1::element result = pippenger_internal(points, scalars, num_slice_points, state, handle_edge_cases); + Element result = pippenger_internal(points, scalars, num_slice_points, state, handle_edge_cases); if (num_slice_points != num_initial_points) { const uint64_t leftover_points = num_initial_points - num_slice_points; @@ -926,21 +955,120 @@ g1::element pippenger(fr* scalars, * Unless you're a malicious adversary, then it would be a great idea! 
* **/ -g1::element pippenger_unsafe(fr* scalars, - g1::affine_element* points, - const size_t num_initial_points, - pippenger_runtime_state& state) +template +typename Curve::Element pippenger_unsafe(typename Curve::ScalarField* scalars, + typename Curve::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state) { return pippenger(scalars, points, num_initial_points, state, false); } -g1::element pippenger_without_endomorphism_basis_points(fr* scalars, - g1::affine_element* points, - const size_t num_initial_points, - pippenger_runtime_state& state) + +template +typename Curve::Element pippenger_without_endomorphism_basis_points(typename Curve::ScalarField* scalars, + typename Curve::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state) { - std::vector G_mod(num_initial_points * 2); - barretenberg::scalar_multiplication::generate_pippenger_point_table(points, &G_mod[0], num_initial_points); + std::vector G_mod(num_initial_points * 2); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, &G_mod[0], num_initial_points); return pippenger(scalars, &G_mod[0], num_initial_points, state, false); } + +// Explicit instantiation +// BN254 +template uint32_t construct_addition_chains(affine_product_runtime_state& state, + bool empty_bucket_counts = true); + +template void add_affine_points(curve::BN254::AffineElement* points, + const size_t num_points, + curve::BN254::BaseField* scratch_space); + +template void add_affine_points_with_edge_cases(curve::BN254::AffineElement* points, + const size_t num_points, + curve::BN254::BaseField* scratch_space); + +template void evaluate_addition_chains(affine_product_runtime_state& state, + const size_t max_bucket_bits, + bool handle_edge_cases); +template curve::BN254::Element pippenger_internal(curve::BN254::AffineElement* points, + curve::BN254::ScalarField* scalars, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases); + +template curve::BN254::Element evaluate_pippenger_rounds(pippenger_runtime_state& state, + curve::BN254::AffineElement* points, + const size_t num_points, + bool handle_edge_cases = false); + +template curve::BN254::AffineElement* reduce_buckets(affine_product_runtime_state& state, + bool first_round = true, + bool handle_edge_cases = false); + +template curve::BN254::Element pippenger(curve::BN254::ScalarField* scalars, + curve::BN254::AffineElement* points, + const size_t num_points, + pippenger_runtime_state& state, + bool handle_edge_cases = true); + +template curve::BN254::Element pippenger_unsafe(curve::BN254::ScalarField* scalars, + curve::BN254::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +template curve::BN254::Element pippenger_without_endomorphism_basis_points( + curve::BN254::ScalarField* scalars, + curve::BN254::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +// Grumpkin +template uint32_t construct_addition_chains(affine_product_runtime_state& state, + bool empty_bucket_counts = true); + +template void add_affine_points(curve::Grumpkin::AffineElement* points, + const size_t num_points, + curve::Grumpkin::BaseField* scratch_space); + +template void add_affine_points_with_edge_cases(curve::Grumpkin::AffineElement* points, + const size_t num_points, + curve::Grumpkin::BaseField* scratch_space); + +template void evaluate_addition_chains(affine_product_runtime_state& state, + const size_t 
max_bucket_bits, + bool handle_edge_cases); +template curve::Grumpkin::Element pippenger_internal(curve::Grumpkin::AffineElement* points, + curve::Grumpkin::ScalarField* scalars, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases); + +template curve::Grumpkin::Element evaluate_pippenger_rounds( + pippenger_runtime_state& state, + curve::Grumpkin::AffineElement* points, + const size_t num_points, + bool handle_edge_cases = false); + +template curve::Grumpkin::AffineElement* reduce_buckets( + affine_product_runtime_state& state, bool first_round = true, bool handle_edge_cases = false); + +template curve::Grumpkin::Element pippenger(curve::Grumpkin::ScalarField* scalars, + curve::Grumpkin::AffineElement* points, + const size_t num_points, + pippenger_runtime_state& state, + bool handle_edge_cases = true); + +template curve::Grumpkin::Element pippenger_unsafe(curve::Grumpkin::ScalarField* scalars, + curve::Grumpkin::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +template curve::Grumpkin::Element pippenger_without_endomorphism_basis_points( + curve::Grumpkin::ScalarField* scalars, + curve::Grumpkin::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + } // namespace scalar_multiplication } // namespace barretenberg diff --git a/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp new file mode 100644 index 0000000000..ec48bd8764 --- /dev/null +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp @@ -0,0 +1,272 @@ +#pragma once + +#include "../bn254/bn254.hpp" +#include "../grumpkin/grumpkin.hpp" +#include "./runtime_states.hpp" +#include +#include + +namespace barretenberg { +namespace scalar_multiplication { + +constexpr size_t get_num_buckets(const size_t num_points) +{ + const size_t bits_per_bucket = get_optimal_bucket_width(num_points / 2); + return 1UL << bits_per_bucket; +} + +/** + * pointers that describe how to add points into buckets, for the pippenger algorithm. + * `wnaf_table` is an unrolled two-dimensional array, with each inner array being of size `n`, + * where `n` is the number of points being multiplied. The second dimension size is defined by + * the number of pippenger rounds (fixed for a given `n`, see `get_num_rounds`) + * + * An entry of `wnaf_table` contains the following three pieces of information: + * 1: the point index that we're working on. This is stored in the high 32 bits + * 2: the bucket index that we're adding the point into. This is stored in the low 31 bits + * 3: the sign of the point we're adding (i.e. do we actually need to subtract). This is stored in the 32nd bit. + * + * We pack this information into a 64 bit unsigned integer, so that we can more efficiently sort our wnaf entries. + * For a given round, we want to sort our wnaf entries in increasing bucket index order. + * + * This is so that we can efficiently use multiple threads to execute the pippenger algorithm. + * For a given round, a given point's bucket index will be uniformly randomly distributed, + * assuming the inputs are from a zero-knowledge proof. This is because the scalar multiplier will be uniformly randomly + *distributed, and the bucket indices are derived from the scalar multiplier. 
+ * + * This means that, if we were to iterate over all of our points in order, and add each point into its associated + *bucket, we would be accessing all of our buckets in a completely random pattern. + * + * Aside from memory latency problems this incurs, this makes the naive algorithm unsuitable for multithreading - we + *cannot assign a thread a tranche of points, because each thread will be adding points into the same set of buckets, + *triggering race conditions. We do not want to manage the overhead of thread locks for each bucket; the process of + *adding a point into a bucket takes, on average, only 400 CPU cycles, so the slowdown of managing mutex locks would add + *considerable overhead. + * + * The solution is to sort the buckets. If the buckets are sorted, we can assign a tranche of buckets to individual + *threads, safe in the knowledge that there will be no race conditions, with one condition. A thread's starting bucket + *may be equal to the previous thread's end bucket, so we need to ensure that each thread works on a local array of + *buckets. This adds little overhead (for 2^20 points, we have 32,768 buckets. With 8 threads, the amount of bucket + *overlap is ~16 buckets, so we could incur 16 extra 'additions' in pippenger's bucket concatenation phase, but this is + *an insignificant contribution). + * + * The alternative approach (the one we used to use) is to slice up all of the points being multiplied amongst all + *available threads, and run the complete pippenger algorithm for each thread. This is suboptimal, because the + *complexity of pippenger is O(n / logn) point additions, and a sequence of smaller pippenger calls will have a smaller + *`n`. + * + * This is the motivation for multi-threading the actual Pippenger algorithm. In addition, the above approach performs + *extremely poorly for GPUs, where the number of threads can be as high as 2^10 (for a multi-scalar-multiplication of + *2^20 points, this doubles the number of pippenger rounds per thread) + * + * To give concrete numbers, the difference between calling pippenger on 2^20 points, and calling pippenger 8 times on + *2^17 points, is 5-10%. Which means that, for 8 threads, we need to ensure that our sorting algorithm adds less than 5% + *to the total runtime of pippenger. Given a single cache miss per point would increase the run-time by 25%, this is not + *much room to work with! + * + * However, a radix sort, combined with the fact that the total number of buckets is quite small (2^16 at most), seems + *to be fast enough. Benchmarks indicate (i7-8650U, 8 threads) that, for 2^20 points, the total runtime is <1200ms and + *of that, the radix sort consumes 58ms (4.8%) + * + * One advantage of sorting by bucket order vs point order, is that a 'bucket' is 96 bytes large (sizeof(g1::element), + *buckets have z-coordinates). Points, on the other hand, are 64 bytes large (affine points, no z-coordinate). This + *makes fetching random point locations in memory more efficient than fetching random bucket locations, as each point + *occupies a single cache line. Using __builtin_prefetch to recover the point just before it's needed, seems to improve + *the runtime of pippenger by 10-20%. 
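Illustrative only (not part of the patch; the two helper functions are hypothetical): the 64-bit point_schedule layout described above, with the point index in bits 63..32, the sign in bit 31 and the bucket index in the low 31 bits, can be packed and unpacked like this. The shifts deliberately mirror the `>> 32ULL` and `>> 31ULL` extractions used in construct_addition_chains earlier in this diff.

    #include <cstdint>

    // Pack a schedule entry: point index -> bits 63..32, negation flag -> bit 31,
    // bucket index -> bits 30..0 (mask keeps it to 31 bits).
    inline uint64_t pack_schedule_entry(uint32_t point_index, bool negate, uint32_t bucket_index)
    {
        return (static_cast<uint64_t>(point_index) << 32) | (static_cast<uint64_t>(negate) << 31) |
               static_cast<uint64_t>(bucket_index & 0x7fffffffU);
    }

    // Unpack with the same shifts the pippenger code applies to state.point_schedule.
    inline void unpack_schedule_entry(uint64_t entry, uint32_t& point_index, bool& negate, uint32_t& bucket_index)
    {
        point_index = static_cast<uint32_t>(entry >> 32ULL);
        negate = ((entry >> 31ULL) & 1ULL) == 1ULL;
        bucket_index = static_cast<uint32_t>(entry & 0x7fffffffULL);
    }

Within a round, the radix sort described above orders entries by these low 31 bits, so all entries destined for a given bucket become contiguous and a thread can be handed a contiguous slice of them.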
+ * + * Finally, `skew_table` tracks whether a scalar multplier is even or odd + * (if it's even, we need to subtract the point from the total result, + * because our windowed non-adjacent form values can only be odd) + * + **/ + +template struct multiplication_thread_state { + typename Curve::Element* buckets; + const uint64_t* point_schedule; +}; + +template +void compute_wnaf_states(uint64_t* point_schedule, + bool* input_skew_table, + uint64_t* round_counts, + const typename Curve::ScalarField* scalars, + const size_t num_initial_points); + +template +void generate_pippenger_point_table(typename Curve::AffineElement* points, + typename Curve::AffineElement* table, + size_t num_points); + +void organize_buckets(uint64_t* point_schedule, const uint64_t* round_counts, const size_t num_points); + +inline void count_bits(uint32_t* bucket_counts, + uint32_t* bit_offsets, + const uint32_t num_buckets, + const size_t num_bits) +{ + for (size_t i = 0; i < num_buckets; ++i) { + const uint32_t count = bucket_counts[i]; + for (uint32_t j = 0; j < num_bits; ++j) { + bit_offsets[j + 1] += (count & (1U << j)); + } + } + bit_offsets[0] = 0; + for (size_t i = 2; i < num_bits + 1; ++i) { + bit_offsets[i] += bit_offsets[i - 1]; + } +} + +template +uint32_t construct_addition_chains(affine_product_runtime_state& state, bool empty_bucket_counts = true); + +template +void add_affine_points(typename Curve::AffineElement* points, + const size_t num_points, + typename Curve::BaseField* scratch_space); + +template +void add_affine_points_with_edge_cases(typename Curve::AffineElement* points, + const size_t num_points, + typename Curve::BaseField* scratch_space); + +template +void evaluate_addition_chains(affine_product_runtime_state& state, + const size_t max_bucket_bits, + bool handle_edge_cases); +template +typename Curve::Element pippenger_internal(typename Curve::AffineElement* points, + typename Curve::ScalarField* scalars, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases); + +template +typename Curve::Element evaluate_pippenger_rounds(pippenger_runtime_state& state, + typename Curve::AffineElement* points, + const size_t num_points, + bool handle_edge_cases = false); + +template +typename Curve::AffineElement* reduce_buckets(affine_product_runtime_state& state, + bool first_round = true, + bool handle_edge_cases = false); + +template +typename Curve::Element pippenger(typename Curve::ScalarField* scalars, + typename Curve::AffineElement* points, + const size_t num_points, + pippenger_runtime_state& state, + bool handle_edge_cases = true); + +template +typename Curve::Element pippenger_unsafe(typename Curve::ScalarField* scalars, + typename Curve::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +template +typename Curve::Element pippenger_without_endomorphism_basis_points(typename Curve::ScalarField* scalars, + typename Curve::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +// Explicit instantiation +// BN254 +extern template uint32_t construct_addition_chains(affine_product_runtime_state& state, + bool empty_bucket_counts = true); + +extern template void add_affine_points(curve::BN254::AffineElement* points, + const size_t num_points, + curve::BN254::BaseField* scratch_space); + +extern template void add_affine_points_with_edge_cases(curve::BN254::AffineElement* points, + const size_t num_points, + curve::BN254::BaseField* scratch_space); + +extern template void 
evaluate_addition_chains(affine_product_runtime_state& state, + const size_t max_bucket_bits, + bool handle_edge_cases); +extern template curve::BN254::Element pippenger_internal(curve::BN254::AffineElement* points, + curve::BN254::ScalarField* scalars, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases); + +extern template curve::BN254::Element evaluate_pippenger_rounds( + pippenger_runtime_state& state, + curve::BN254::AffineElement* points, + const size_t num_points, + bool handle_edge_cases = false); + +extern template curve::BN254::AffineElement* reduce_buckets( + affine_product_runtime_state& state, bool first_round = true, bool handle_edge_cases = false); + +extern template curve::BN254::Element pippenger(curve::BN254::ScalarField* scalars, + curve::BN254::AffineElement* points, + const size_t num_points, + pippenger_runtime_state& state, + bool handle_edge_cases = true); + +extern template curve::BN254::Element pippenger_unsafe(curve::BN254::ScalarField* scalars, + curve::BN254::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +extern template curve::BN254::Element pippenger_without_endomorphism_basis_points( + curve::BN254::ScalarField* scalars, + curve::BN254::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +// Grumpkin + +extern template uint32_t construct_addition_chains( + affine_product_runtime_state& state, bool empty_bucket_counts = true); + +extern template void add_affine_points(curve::Grumpkin::AffineElement* points, + const size_t num_points, + curve::Grumpkin::BaseField* scratch_space); + +extern template void add_affine_points_with_edge_cases(curve::Grumpkin::AffineElement* points, + const size_t num_points, + curve::Grumpkin::BaseField* scratch_space); + +extern template void evaluate_addition_chains(affine_product_runtime_state& state, + const size_t max_bucket_bits, + bool handle_edge_cases); +extern template curve::Grumpkin::Element pippenger_internal( + curve::Grumpkin::AffineElement* points, + curve::Grumpkin::ScalarField* scalars, + const size_t num_initial_points, + pippenger_runtime_state& state, + bool handle_edge_cases); + +extern template curve::Grumpkin::Element evaluate_pippenger_rounds( + pippenger_runtime_state& state, + curve::Grumpkin::AffineElement* points, + const size_t num_points, + bool handle_edge_cases = false); + +extern template curve::Grumpkin::AffineElement* reduce_buckets( + affine_product_runtime_state& state, bool first_round = true, bool handle_edge_cases = false); + +extern template curve::Grumpkin::Element pippenger(curve::Grumpkin::ScalarField* scalars, + curve::Grumpkin::AffineElement* points, + const size_t num_points, + pippenger_runtime_state& state, + bool handle_edge_cases = true); + +extern template curve::Grumpkin::Element pippenger_unsafe( + curve::Grumpkin::ScalarField* scalars, + curve::Grumpkin::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +extern template curve::Grumpkin::Element pippenger_without_endomorphism_basis_points( + curve::Grumpkin::ScalarField* scalars, + curve::Grumpkin::AffineElement* points, + const size_t num_initial_points, + pippenger_runtime_state& state); + +} // namespace scalar_multiplication +} // namespace barretenberg diff --git a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.test.cpp b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.test.cpp 
similarity index 54% rename from cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.test.cpp rename to cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.test.cpp index bd0875ba96..289cb89dad 100644 --- a/cpp/src/barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.test.cpp +++ b/cpp/src/barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.test.cpp @@ -1,32 +1,65 @@ +#include +#include +#include + #include "pippenger.hpp" #include "scalar_multiplication.hpp" -#include #include "barretenberg/common/test.hpp" #include "barretenberg/srs/io.hpp" -#include - #include "barretenberg/numeric/random/engine.hpp" - #include "barretenberg/common/mem.hpp" -#define BARRETENBERG_SRS_PATH "../srs_db/ignition" - -using namespace barretenberg; -using namespace barretenberg::scalar_multiplication; - namespace { auto& engine = numeric::random::get_debug_engine(); } -TEST(scalar_multiplication, reduce_buckets_simple) +template class SRSIO : public ::testing::Test { + public: + const std::string SRS_PATH = []() { + if constexpr (std::same_as) { + return "../srs_db/ignition"; + } else if constexpr (std::same_as) { + return "../srs_db/grumpkin"; + } + }(); + + static void read_transcript_g2(std::string const& srs_path) requires srs::HasG2 + { + typename Curve::G2AffineElement g2_x; + srs::IO::read_transcript_g2(g2_x, srs_path); + }; + + static void read_transcript(typename Curve::AffineElement* monomials, size_t degree, std::string const& srs_path) + { + if constexpr (srs::HasG2) { + typename Curve::G2AffineElement g2_x; + srs::IO::read_transcript(monomials, g2_x, degree, srs_path); + } else { + srs::IO::read_transcript(monomials, degree, srs_path); + } + } +}; + +using Curves = ::testing::Types; + +TYPED_TEST_SUITE(SRSIO, Curves); + +TYPED_TEST(SRSIO, ReduceBucketsSimple) { + using Curve = TypeParam; + using Pippenger = barretenberg::scalar_multiplication::Pippenger; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fq = typename Curve::BaseField; + constexpr size_t num_points = 128; - g2::affine_element g2_x; - io::read_transcript_g2(g2_x, BARRETENBERG_SRS_PATH); - auto pippenger = Pippenger(BARRETENBERG_SRS_PATH, num_points / 2); + if constexpr (srs::HasG2) { + TestFixture::read_transcript_g2(TestFixture::SRS_PATH); + } + auto pippenger = Pippenger(TestFixture::SRS_PATH, num_points / 2); auto monomials = pippenger.get_point_table(); - std::vector point_schedule(scalar_multiplication::point_table_size(num_points / 2)); + std::vector point_schedule(barretenberg::scalar_multiplication::point_table_size(num_points / 2)); std::array bucket_empty_status; // 16 buckets, each bucket has one point std::array transcript; @@ -167,7 +200,7 @@ TEST(scalar_multiplication, reduce_buckets_simple) for (size_t i = 0; i < num_points; ++i) { point_schedule[i] = (static_cast(transcript_points[i]) << 32ULL) + transcript[i]; } - std::array expected; + std::array expected; for (size_t i = 0; i < num_points; ++i) { expected[i].self_set_infinity(); } @@ -179,20 +212,20 @@ TEST(scalar_multiplication, reduce_buckets_simple) } } - std::array point_pairs; - std::array output_buckets; - std::array scratch_space; + std::array point_pairs; + std::array output_buckets; + std::array scratch_space; std::array bucket_counts; std::array bit_offsets = { 0 }; - scalar_multiplication::affine_product_runtime_state product_state{ + barretenberg::scalar_multiplication::affine_product_runtime_state 
product_state{ &monomials[0], &point_pairs[0], &output_buckets[0], &scratch_space[0], &bucket_counts[0], &bit_offsets[0], &point_schedule[0], num_points, 2, &bucket_empty_status[0] }; - g1::affine_element* output = scalar_multiplication::reduce_buckets(product_state, true); + AffineElement* output = barretenberg::scalar_multiplication::reduce_buckets(product_state, true); for (size_t i = 0; i < product_state.num_buckets; ++i) { expected[i] = expected[i].normalize(); @@ -201,54 +234,57 @@ TEST(scalar_multiplication, reduce_buckets_simple) } } -TEST(scalar_multiplication, reduce_buckets) +TYPED_TEST(SRSIO, ReduceBuckets) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + using Fq = typename Curve::BaseField; + constexpr size_t num_initial_points = 1 << 12; constexpr size_t num_points = num_initial_points * 2; - g1::affine_element* monomials = - (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points * 2))); - g1::affine_element* scratch_points = - (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points * 2))); - g1::affine_element* point_pairs = - (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points * 2))); - g1::element* expected_buckets = (g1::element*)(aligned_alloc(64, sizeof(g1::element) * (num_points * 2))); + AffineElement* monomials = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points * 2))); + AffineElement* scratch_points = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points * 2))); + AffineElement* point_pairs = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points * 2))); + Element* expected_buckets = (Element*)(aligned_alloc(64, sizeof(Element) * (num_points * 2))); bool* bucket_empty_status = (bool*)(aligned_alloc(64, sizeof(bool) * (num_points * 2))); - memset((void*)scratch_points, 0x00, (num_points * 2) * sizeof(g1::affine_element)); - memset((void*)point_pairs, 0x00, (num_points * 2) * sizeof(g1::affine_element)); - memset((void*)expected_buckets, 0x00, (num_points * 2) * sizeof(g1::element)); + memset((void*)scratch_points, 0x00, (num_points * 2) * sizeof(AffineElement)); + memset((void*)point_pairs, 0x00, (num_points * 2) * sizeof(AffineElement)); + memset((void*)expected_buckets, 0x00, (num_points * 2) * sizeof(Element)); memset((void*)bucket_empty_status, 0x00, (num_points * 2) * sizeof(bool)); - fq* scratch_field = (fq*)(aligned_alloc(64, sizeof(fq) * (num_points))); + Fq* scratch_field = (Fq*)(aligned_alloc(64, sizeof(Fq) * (num_points))); - memset((void*)scratch_field, 0x00, num_points * sizeof(fq)); + memset((void*)scratch_field, 0x00, num_points * sizeof(Fq)); - g2::affine_element g2_x; - io::read_transcript(monomials, g2_x, num_initial_points, BARRETENBERG_SRS_PATH); + TestFixture::read_transcript(monomials, num_initial_points, TestFixture::SRS_PATH); - scalar_multiplication::generate_pippenger_point_table(monomials, monomials, num_initial_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table( + monomials, monomials, num_initial_points); - fr* scalars = (fr*)(aligned_alloc(64, sizeof(fr) * num_initial_points)); + Fr* scalars = (Fr*)(aligned_alloc(64, sizeof(Fr) * num_initial_points)); for (size_t i = 0; i < num_initial_points; ++i) { - scalars[i] = fr::random_element(); + scalars[i] = Fr::random_element(); } - scalar_multiplication::pippenger_runtime_state state(num_initial_points); + 
barretenberg::scalar_multiplication::pippenger_runtime_state state(num_initial_points); std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); - scalar_multiplication::compute_wnaf_states( + barretenberg::scalar_multiplication::compute_wnaf_states( state.point_schedule, state.skew_table, state.round_counts, scalars, num_initial_points); std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); std::chrono::milliseconds diff = std::chrono::duration_cast(end - start); std::cout << "wnaf time: " << diff.count() << "ms" << std::endl; start = std::chrono::steady_clock::now(); - scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, num_points); + barretenberg::scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, num_points); end = std::chrono::steady_clock::now(); diff = std::chrono::duration_cast(end - start); std::cout << "organize bucket time: " << diff.count() << "ms" << std::endl; - const size_t max_num_buckets = scalar_multiplication::get_num_buckets(num_points * 2); + const size_t max_num_buckets = barretenberg::scalar_multiplication::get_num_buckets(num_points * 2); uint32_t* bucket_counts = static_cast(aligned_alloc(64, max_num_buckets * 100 * sizeof(uint32_t))); memset((void*)bucket_counts, 0x00, max_num_buckets * sizeof(uint32_t)); @@ -264,19 +300,21 @@ TEST(scalar_multiplication, reduce_buckets) const size_t last_bucket = point_schedule_copy[num_points - 1] & 0x7fffffffULL; const size_t num_buckets = last_bucket - first_bucket + 1; - scalar_multiplication::affine_product_runtime_state product_state{ monomials, - point_pairs, - scratch_points, - scratch_field, - bucket_counts, - &bit_offsets[0], - &state.point_schedule[num_points], - num_points, - static_cast(num_buckets), - bucket_empty_status }; + barretenberg::scalar_multiplication::affine_product_runtime_state product_state{ + monomials, + point_pairs, + scratch_points, + scratch_field, + bucket_counts, + &bit_offsets[0], + &state.point_schedule[num_points], + num_points, + static_cast(num_buckets), + bucket_empty_status + }; start = std::chrono::steady_clock::now(); - // scalar_multiplication::scalar_multiplication_internal(state, monomials); + // barretenberg::scalar_multiplication::scalar_multiplication_internal(state, monomials); end = std::chrono::steady_clock::now(); diff = std::chrono::duration_cast(end - start); std::cout << "scalar mul: " << diff.count() << "ms" << std::endl; @@ -290,19 +328,19 @@ TEST(scalar_multiplication, reduce_buckets) uint64_t point_index = schedule >> 32ULL; uint64_t predicate = (schedule >> 31ULL) & 1ULL; // printf("expected bucket index = %lu \n", bucket_index - first_bucket); - g1::element& bucket = expected_buckets[bucket_index - first_bucket]; - g1::affine_element& point = monomials[point_index]; + Element& bucket = expected_buckets[bucket_index - first_bucket]; + AffineElement& point = monomials[point_index]; bucket.self_mixed_add_or_sub(point, predicate); } size_t it = 0; - g1::affine_element* result_buckets = scalar_multiplication::reduce_buckets(product_state, true); + AffineElement* result_buckets = barretenberg::scalar_multiplication::reduce_buckets(product_state, true); printf("num buckets = %zu \n", num_buckets); for (size_t i = 0; i < num_buckets; ++i) { if (!bucket_empty_status[i]) { - g1::element expected = expected_buckets[i].normalize(); + Element expected = expected_buckets[i].normalize(); EXPECT_EQ((expected.x == result_buckets[it].x), true); EXPECT_EQ((expected.y == 
result_buckets[it].y), true); ++it; @@ -322,51 +360,54 @@ TEST(scalar_multiplication, reduce_buckets) } // This test intermittenly fails. -TEST(scalar_multiplication, DISABLED_reduce_buckets_basic) +TYPED_TEST(SRSIO, DISABLED_ReduceBucketsBasic) { + using Curve = TypeParam; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + using Fq = typename Curve::BaseField; + constexpr size_t num_initial_points = 1 << 20; constexpr size_t num_points = num_initial_points * 2; - g1::affine_element* monomials = (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points))); - g1::affine_element* scratch_points = - (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points))); - g1::affine_element* point_pairs = - (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points))); + AffineElement* monomials = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points))); + AffineElement* scratch_points = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points))); + AffineElement* point_pairs = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points))); bool* bucket_empty_status = (bool*)(aligned_alloc(64, sizeof(bool) * (num_points))); - fq* scratch_field = (fq*)(aligned_alloc(64, sizeof(fq) * (num_points))); + Fq* scratch_field = (Fq*)(aligned_alloc(64, sizeof(Fq) * (num_points))); - memset((void*)scratch_points, 0x00, num_points * sizeof(g1::affine_element)); - memset((void*)point_pairs, 0x00, num_points * sizeof(g1::affine_element)); - memset((void*)scratch_field, 0x00, num_points * sizeof(fq)); + memset((void*)scratch_points, 0x00, num_points * sizeof(AffineElement)); + memset((void*)point_pairs, 0x00, num_points * sizeof(AffineElement)); + memset((void*)scratch_field, 0x00, num_points * sizeof(Fq)); memset((void*)bucket_empty_status, 0x00, num_points * sizeof(bool)); - g2::affine_element g2_x; - io::read_transcript(monomials, g2_x, num_initial_points, BARRETENBERG_SRS_PATH); + TestFixture::read_transcript(monomials, num_initial_points, TestFixture::SRS_PATH); - fr* scalars = (fr*)(aligned_alloc(64, sizeof(fr) * num_initial_points)); + Fr* scalars = (Fr*)(aligned_alloc(64, sizeof(Fr) * num_initial_points)); - fr source_scalar = fr::random_element(); + Fr source_scalar = Fr::random_element(); for (size_t i = 0; i < num_initial_points; ++i) { source_scalar.self_sqr(); - fr::__copy(source_scalar, scalars[i]); + Fr::__copy(source_scalar, scalars[i]); } - scalar_multiplication::pippenger_runtime_state state(num_initial_points); - scalar_multiplication::generate_pippenger_point_table(monomials, monomials, num_initial_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_initial_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table( + monomials, monomials, num_initial_points); std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); - scalar_multiplication::compute_wnaf_states( + barretenberg::scalar_multiplication::compute_wnaf_states( state.point_schedule, state.skew_table, state.round_counts, scalars, num_initial_points); std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); std::chrono::milliseconds diff = std::chrono::duration_cast(end - start); std::cout << "wnaf time: " << diff.count() << "ms" << std::endl; start = std::chrono::steady_clock::now(); - scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, num_points); + 
barretenberg::scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, num_points); end = std::chrono::steady_clock::now(); diff = std::chrono::duration_cast(end - start); std::cout << "organize bucket time: " << diff.count() << "ms" << std::endl; - const size_t max_num_buckets = scalar_multiplication::get_num_buckets(num_points * 2); + const size_t max_num_buckets = barretenberg::scalar_multiplication::get_num_buckets(num_points * 2); uint32_t* bucket_counts = static_cast(aligned_alloc(64, max_num_buckets * sizeof(uint32_t))); memset((void*)bucket_counts, 0x00, max_num_buckets * sizeof(uint32_t)); @@ -375,20 +416,22 @@ TEST(scalar_multiplication, DISABLED_reduce_buckets_basic) const size_t last_bucket = state.point_schedule[num_points - 1] & 0x7fffffffULL; const size_t num_buckets = last_bucket - first_bucket + 1; - scalar_multiplication::affine_product_runtime_state product_state{ monomials, - point_pairs, - scratch_points, - scratch_field, - bucket_counts, - &bit_offsets[0], - state.point_schedule, - (uint32_t)state.round_counts[0], - static_cast(num_buckets), - bucket_empty_status }; + barretenberg::scalar_multiplication::affine_product_runtime_state product_state{ + monomials, + point_pairs, + scratch_points, + scratch_field, + bucket_counts, + &bit_offsets[0], + state.point_schedule, + (uint32_t)state.round_counts[0], + static_cast(num_buckets), + bucket_empty_status + }; start = std::chrono::steady_clock::now(); - scalar_multiplication::reduce_buckets(product_state, true); - // scalar_multiplication::scalar_multiplication_internal(state, monomials); + barretenberg::scalar_multiplication::reduce_buckets(product_state, true); + // barretenberg::scalar_multiplication::scalar_multiplication_internal(state, monomials); end = std::chrono::steady_clock::now(); diff = std::chrono::duration_cast(end - start); std::cout << "scalar mul: " << diff.count() << "ms" << std::endl; @@ -402,19 +445,24 @@ TEST(scalar_multiplication, DISABLED_reduce_buckets_basic) aligned_free(bucket_counts); } -TEST(scalar_multiplication, add_affine_points) +TYPED_TEST(SRSIO, AddAffinePoints) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fq = typename Curve::BaseField; + constexpr size_t num_points = 20; - g1::affine_element* points = (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points))); - fq* scratch_space = (fq*)(aligned_alloc(64, sizeof(fq) * (num_points * 2))); - fq* lambda = (fq*)(aligned_alloc(64, sizeof(fq) * (num_points * 2))); + AffineElement* points = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points))); + Fq* scratch_space = (Fq*)(aligned_alloc(64, sizeof(Fq) * (num_points * 2))); + Fq* lambda = (Fq*)(aligned_alloc(64, sizeof(Fq) * (num_points * 2))); - g1::element* points_copy = (g1::element*)(aligned_alloc(64, sizeof(g1::element) * (num_points))); + Element* points_copy = (Element*)(aligned_alloc(64, sizeof(Element) * (num_points))); for (size_t i = 0; i < num_points; ++i) { - points[i] = g1::affine_element(g1::element::random_element()); + points[i] = AffineElement(Element::random_element()); points_copy[i].x = points[i].x; points_copy[i].y = points[i].y; - points_copy[i].z = fq::one(); + points_copy[i].z = Fq::one(); } size_t count = num_points - 1; @@ -423,7 +471,7 @@ TEST(scalar_multiplication, add_affine_points) points_copy[count + 1] = points_copy[count + 1].normalize(); } - scalar_multiplication::add_affine_points(points, num_points, 
scratch_space); + barretenberg::scalar_multiplication::add_affine_points(points, num_points, scratch_space); for (size_t i = num_points - 1; i > num_points - 1 - (num_points / 2); --i) { EXPECT_EQ((points[i].x == points_copy[i].x), true); EXPECT_EQ((points[i].y == points_copy[i].y), true); @@ -434,39 +482,43 @@ TEST(scalar_multiplication, add_affine_points) aligned_free(scratch_space); } -TEST(scalar_multiplication, construct_addition_chains) +TYPED_TEST(SRSIO, ConstructAdditionChains) { + using Curve = TypeParam; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + constexpr size_t num_initial_points = 1 << 20; constexpr size_t num_points = num_initial_points * 2; - g1::affine_element* monomials = (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (num_points))); + AffineElement* monomials = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (num_points))); - g2::affine_element g2_x; - io::read_transcript(monomials, g2_x, num_initial_points, BARRETENBERG_SRS_PATH); + TestFixture::read_transcript(monomials, num_initial_points, TestFixture::SRS_PATH); - fr* scalars = (fr*)(aligned_alloc(64, sizeof(fr) * num_initial_points)); + Fr* scalars = (Fr*)(aligned_alloc(64, sizeof(Fr) * num_initial_points)); - fr source_scalar = fr::random_element(); + Fr source_scalar = Fr::random_element(); for (size_t i = 0; i < num_initial_points; ++i) { source_scalar.self_sqr(); - fr::__copy(source_scalar, scalars[i]); + Fr::__copy(source_scalar, scalars[i]); } - scalar_multiplication::pippenger_runtime_state state(num_initial_points); - scalar_multiplication::generate_pippenger_point_table(monomials, monomials, num_initial_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_initial_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table( + monomials, monomials, num_initial_points); std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); - scalar_multiplication::compute_wnaf_states( + barretenberg::scalar_multiplication::compute_wnaf_states( state.point_schedule, state.skew_table, state.round_counts, scalars, num_initial_points); std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); std::chrono::milliseconds diff = std::chrono::duration_cast(end - start); std::cout << "wnaf time: " << diff.count() << "ms" << std::endl; start = std::chrono::steady_clock::now(); - scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, num_points); + barretenberg::scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, num_points); end = std::chrono::steady_clock::now(); diff = std::chrono::duration_cast(end - start); std::cout << "organize bucket time: " << diff.count() << "ms" << std::endl; - const size_t max_num_buckets = scalar_multiplication::get_num_buckets(num_points * 2); + const size_t max_num_buckets = barretenberg::scalar_multiplication::get_num_buckets(num_points * 2); bool* bucket_empty_status = static_cast(aligned_alloc(64, num_points * sizeof(bool))); uint32_t* bucket_counts = static_cast(aligned_alloc(64, max_num_buckets * sizeof(uint32_t))); memset((void*)bucket_counts, 0x00, max_num_buckets * sizeof(uint32_t)); @@ -475,19 +527,21 @@ TEST(scalar_multiplication, construct_addition_chains) const size_t last_bucket = state.point_schedule[state.round_counts[0] - 1] & 0x7fffffffULL; const size_t num_buckets = last_bucket - first_bucket + 1; - scalar_multiplication::affine_product_runtime_state 
product_state{ monomials, - monomials, - monomials, - nullptr, - bucket_counts, - &bit_offsets[0], - state.point_schedule, - static_cast(state.round_counts[0]), - static_cast(num_buckets), - bucket_empty_status }; + barretenberg::scalar_multiplication::affine_product_runtime_state product_state{ + monomials, + monomials, + monomials, + nullptr, + bucket_counts, + &bit_offsets[0], + state.point_schedule, + static_cast(state.round_counts[0]), + static_cast(num_buckets), + bucket_empty_status + }; start = std::chrono::steady_clock::now(); - scalar_multiplication::construct_addition_chains(product_state, true); + barretenberg::scalar_multiplication::construct_addition_chains(product_state, true); end = std::chrono::steady_clock::now(); diff = std::chrono::duration_cast(end - start); info("construct addition chains: ", diff.count(), "ms"); @@ -499,17 +553,24 @@ TEST(scalar_multiplication, construct_addition_chains) aligned_free(bucket_counts); } -TEST(scalar_multiplication, endomorphism_split) +TYPED_TEST(SRSIO, EndomorphismSplit) { - fr scalar = fr::random_element(); + using Curve = TypeParam; + using Group = typename Curve::Group; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + using Fq = typename Curve::BaseField; + + Fr scalar = Fr::random_element(); - g1::element expected = g1::one * scalar; + Element expected = Group::one * scalar; // we want to test that we can split a scalar into two half-length components, using the same location in memory. - fr* k1_t = &scalar; - fr* k2_t = (fr*)&scalar.data[2]; + Fr* k1_t = &scalar; + Fr* k2_t = (Fr*)&scalar.data[2]; - fr::split_into_endomorphism_scalars(scalar, *k1_t, *k2_t); + Fr::split_into_endomorphism_scalars(scalar, *k1_t, *k2_t); // The compiler really doesn't like what we're doing here, // and disabling the array-bounds error project-wide seems unsafe. // The large macro blocks are here to warn that we should be careful when @@ -518,44 +579,47 @@ TEST(scalar_multiplication, endomorphism_split) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" #endif - fr k1{ (*k1_t).data[0], (*k1_t).data[1], 0, 0 }; - fr k2{ (*k2_t).data[0], (*k2_t).data[1], 0, 0 }; + Fr k1{ (*k1_t).data[0], (*k1_t).data[1], 0, 0 }; + Fr k2{ (*k2_t).data[0], (*k2_t).data[1], 0, 0 }; #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif - g1::element result; - g1::element t1 = g1::affine_one * k1; - g1::affine_element generator = g1::affine_one; - fq beta = fq::cube_root_of_unity(); + Element result; + Element t1 = Group::affine_one * k1; + AffineElement generator = Group::affine_one; + Fq beta = Fq::cube_root_of_unity(); generator.x = generator.x * beta; generator.y = -generator.y; - g1::element t2 = generator * k2; + Element t2 = generator * k2; result = t1 + t2; EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, radix_sort) +TYPED_TEST(SRSIO, RadixSort) { + using Curve = TypeParam; + using Fr = typename Curve::ScalarField; + // check that our radix sort correctly sorts! 
constexpr size_t target_degree = 1 << 8; - constexpr size_t num_rounds = scalar_multiplication::get_num_rounds(target_degree * 2); - fr* scalars = (fr*)(aligned_alloc(64, sizeof(fr) * target_degree)); + constexpr size_t num_rounds = barretenberg::scalar_multiplication::get_num_rounds(target_degree * 2); + Fr* scalars = (Fr*)(aligned_alloc(64, sizeof(Fr) * target_degree)); - fr source_scalar = fr::random_element(); + Fr source_scalar = Fr::random_element(); for (size_t i = 0; i < target_degree; ++i) { source_scalar.self_sqr(); - fr::__copy(source_scalar, scalars[i]); + Fr::__copy(source_scalar, scalars[i]); } - scalar_multiplication::pippenger_runtime_state state(target_degree); - scalar_multiplication::compute_wnaf_states( + barretenberg::scalar_multiplication::pippenger_runtime_state state(target_degree); + barretenberg::scalar_multiplication::compute_wnaf_states( state.point_schedule, state.skew_table, state.round_counts, scalars, target_degree); uint64_t* wnaf_copy = (uint64_t*)(aligned_alloc(64, sizeof(uint64_t) * target_degree * 2 * num_rounds)); memcpy((void*)wnaf_copy, (void*)state.point_schedule, sizeof(uint64_t) * target_degree * 2 * num_rounds); - scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, target_degree * 2); + barretenberg::scalar_multiplication::organize_buckets(state.point_schedule, state.round_counts, target_degree * 2); for (size_t i = 0; i < num_rounds; ++i) { uint64_t* unsorted_wnaf = &wnaf_copy[i * target_degree * 2]; uint64_t* sorted_wnaf = &state.point_schedule[i * target_degree * 2]; @@ -580,45 +644,50 @@ TEST(scalar_multiplication, radix_sort) free(wnaf_copy); } -HEAVY_TEST(scalar_multiplication, oversized_inputs) +TYPED_TEST(SRSIO, OversizedInputs) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + using Fq = typename Curve::BaseField; + // for point ranges with more than 1 << 20 points, we split into chunks of smaller multi-exps. 
// Check that this is done correctly size_t transcript_degree = 1 << 20; size_t target_degree = 1200000; - g1::affine_element* monomials = - (g1::affine_element*)(aligned_alloc(64, sizeof(g1::affine_element) * (2 * target_degree))); - g2::affine_element g2_x; - io::read_transcript(monomials, g2_x, transcript_degree, BARRETENBERG_SRS_PATH); + AffineElement* monomials = (AffineElement*)(aligned_alloc(64, sizeof(AffineElement) * (2 * target_degree))); + + TestFixture::read_transcript(monomials, transcript_degree, TestFixture::SRS_PATH); memcpy((void*)(monomials + (2 * transcript_degree)), (void*)monomials, - ((2 * target_degree - 2 * transcript_degree) * sizeof(g1::affine_element))); - scalar_multiplication::generate_pippenger_point_table(monomials, monomials, target_degree); + ((2 * target_degree - 2 * transcript_degree) * sizeof(AffineElement))); + barretenberg::scalar_multiplication::generate_pippenger_point_table(monomials, monomials, target_degree); - fr* scalars = (fr*)(aligned_alloc(64, sizeof(fr) * target_degree)); + Fr* scalars = (Fr*)(aligned_alloc(64, sizeof(Fr) * target_degree)); - fr source_scalar = fr::random_element(); - fr accumulator = source_scalar; + Fr source_scalar = Fr::random_element(); + Fr accumulator = source_scalar; for (size_t i = 0; i < target_degree; ++i) { accumulator *= source_scalar; - fr::__copy(accumulator, scalars[i]); + Fr::__copy(accumulator, scalars[i]); } - scalar_multiplication::pippenger_runtime_state state(target_degree); + barretenberg::scalar_multiplication::pippenger_runtime_state state(target_degree); - g1::element first = scalar_multiplication::pippenger(scalars, monomials, target_degree, state); + Element first = barretenberg::scalar_multiplication::pippenger(scalars, monomials, target_degree, state); first = first.normalize(); for (size_t i = 0; i < target_degree; ++i) { scalars[i].self_neg(); } - scalar_multiplication::pippenger_runtime_state state_2(target_degree); + barretenberg::scalar_multiplication::pippenger_runtime_state state_2(target_degree); - g1::element second = scalar_multiplication::pippenger(scalars, monomials, target_degree, state_2); + Element second = barretenberg::scalar_multiplication::pippenger(scalars, monomials, target_degree, state_2); second = second.normalize(); EXPECT_EQ((first.z == second.z), true); - EXPECT_EQ((first.z == fq::one()), true); + EXPECT_EQ((first.z == Fq::one()), true); EXPECT_EQ((first.x == second.x), true); EXPECT_EQ((first.y == -second.y), true); @@ -626,34 +695,38 @@ HEAVY_TEST(scalar_multiplication, oversized_inputs) aligned_free(scalars); } -TEST(scalar_multiplication, undersized_inputs) +TYPED_TEST(SRSIO, UndersizedInputs) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + // we fall back to traditional scalar multiplication algorithm for small input sizes. 
// Check this is done correctly size_t num_points = 17; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * num_points); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * num_points); - g1::affine_element* points = - (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * num_points * 2 + 1); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * num_points * 2 + 1); for (size_t i = 0; i < num_points; ++i) { - scalars[i] = fr::random_element(); - points[i] = g1::affine_element(g1::element::random_element()); + scalars[i] = Fr::random_element(); + points[i] = AffineElement(Element::random_element()); } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } expected = expected.normalize(); - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger(scalars, points, num_points, state); + Element result = barretenberg::scalar_multiplication::pippenger(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -662,31 +735,35 @@ TEST(scalar_multiplication, undersized_inputs) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger) +TYPED_TEST(SRSIO, PippengerSmall) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + constexpr size_t num_points = 8192; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * num_points); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * num_points); - g1::affine_element* points = - (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * num_points * 2 + 1); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * num_points * 2 + 1); for (size_t i = 0; i < num_points; ++i) { - scalars[i] = fr::random_element(); - points[i] = g1::affine_element(g1::element::random_element()); + scalars[i] = Fr::random_element(); + points[i] = AffineElement(Element::random_element()); } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } expected = expected.normalize(); - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger(scalars, points, num_points, state); + Element result = barretenberg::scalar_multiplication::pippenger(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -695,33 +772,37 @@ TEST(scalar_multiplication, pippenger) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger_edge_case_dbl) +TYPED_TEST(SRSIO, PippengerEdgeCaseDbl) { + using Curve = TypeParam; + using Element = 
typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + constexpr size_t num_points = 128; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * num_points); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * num_points); - g1::affine_element* points = - (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * num_points * 2 + 1); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * num_points * 2 + 1); - g1::affine_element point = g1::affine_element(g1::element::random_element()); + AffineElement point = AffineElement(Element::random_element()); for (size_t i = 0; i < num_points; ++i) { - scalars[i] = fr::random_element(); + scalars[i] = Fr::random_element(); points[i] = point; } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } if (!expected.is_point_at_infinity()) { expected = expected.normalize(); } - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger(scalars, points, num_points, state); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); + Element result = barretenberg::scalar_multiplication::pippenger(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -730,16 +811,21 @@ TEST(scalar_multiplication, pippenger_edge_case_dbl) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger_short_inputs) +TYPED_TEST(SRSIO, PippengerShortInputs) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + constexpr size_t num_points = 8192; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * num_points); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * num_points); - g1::affine_element* points = scalar_multiplication::point_table_alloc(num_points); + AffineElement* points = barretenberg::scalar_multiplication::point_table_alloc(num_points); for (size_t i = 0; i < num_points; ++i) { - points[i] = g1::affine_element(g1::element::random_element()); + points[i] = AffineElement(Element::random_element()); } for (size_t i = 0; i < (num_points / 4); ++i) { scalars[i * 4].data[0] = engine.get_random_uint32(); @@ -764,17 +850,17 @@ TEST(scalar_multiplication, pippenger_short_inputs) scalars[i * 4 + 3] = scalars[i * 4 + 3].to_montgomery_form(); } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } expected = expected.normalize(); - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger(scalars, points, num_points, state); + Element result = 
barretenberg::scalar_multiplication::pippenger(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -783,30 +869,35 @@ TEST(scalar_multiplication, pippenger_short_inputs) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger_unsafe) +TYPED_TEST(SRSIO, PippengerUnsafe) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + constexpr size_t num_points = 8192; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * num_points); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * num_points); - g1::affine_element* points = scalar_multiplication::point_table_alloc(num_points); + AffineElement* points = barretenberg::scalar_multiplication::point_table_alloc(num_points); for (size_t i = 0; i < num_points; ++i) { - scalars[i] = fr::random_element(); - points[i] = g1::affine_element(g1::element::random_element()); + scalars[i] = Fr::random_element(); + points[i] = AffineElement(Element::random_element()); } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } expected = expected.normalize(); - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger_unsafe(scalars, points, num_points, state); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); + Element result = barretenberg::scalar_multiplication::pippenger_unsafe(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -815,17 +906,21 @@ TEST(scalar_multiplication, pippenger_unsafe) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger_unsafe_short_inputs) +TYPED_TEST(SRSIO, PippengerUnsafeShortInputs) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + constexpr size_t num_points = 8192; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * num_points); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * num_points); - g1::affine_element* points = - (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * num_points * 2 + 1); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * num_points * 2 + 1); for (size_t i = 0; i < num_points; ++i) { - points[i] = g1::affine_element(g1::element::random_element()); + points[i] = AffineElement(Element::random_element()); } for (size_t i = 0; i < (num_points / 4); ++i) { scalars[i * 4].data[0] = engine.get_random_uint32(); @@ -850,17 +945,17 @@ TEST(scalar_multiplication, pippenger_unsafe_short_inputs) scalars[i * 4 + 3] = scalars[i * 4 + 3].to_montgomery_form(); } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } expected = expected.normalize(); - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); + 
barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger_unsafe(scalars, points, num_points, state); + Element result = barretenberg::scalar_multiplication::pippenger_unsafe(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -869,31 +964,35 @@ TEST(scalar_multiplication, pippenger_unsafe_short_inputs) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger_one) +TYPED_TEST(SRSIO, PippengerOne) { + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + size_t num_points = 1; - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr) * 1); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr) * 1); - g1::affine_element* points = - (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * num_points * 2 + 1); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * num_points * 2 + 1); for (size_t i = 0; i < num_points; ++i) { - scalars[i] = fr::random_element(); - points[i] = g1::affine_element(g1::element::random_element()); + scalars[i] = Fr::random_element(); + points[i] = AffineElement(Element::random_element()); } - g1::element expected; + Element expected; expected.self_set_infinity(); for (size_t i = 0; i < num_points; ++i) { - g1::element temp = points[i] * scalars[i]; + Element temp = points[i] * scalars[i]; expected += temp; } expected = expected.normalize(); - scalar_multiplication::generate_pippenger_point_table(points, points, num_points); - scalar_multiplication::pippenger_runtime_state state(num_points); + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, num_points); + barretenberg::scalar_multiplication::pippenger_runtime_state state(num_points); - g1::element result = scalar_multiplication::pippenger(scalars, points, num_points, state); + Element result = barretenberg::scalar_multiplication::pippenger(scalars, points, num_points, state); result = result.normalize(); aligned_free(scalars); @@ -902,14 +1001,19 @@ TEST(scalar_multiplication, pippenger_one) EXPECT_EQ(result == expected, true); } -TEST(scalar_multiplication, pippenger_zero_points) +TYPED_TEST(SRSIO, PippengerZeroPoints) { - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr)); + using Curve = TypeParam; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; - g1::affine_element* points = (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * 2 + 1); + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr)); - scalar_multiplication::pippenger_runtime_state state(0); - g1::element result = scalar_multiplication::pippenger(scalars, points, 0, state); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * 2 + 1); + + barretenberg::scalar_multiplication::pippenger_runtime_state state(0); + Element result = barretenberg::scalar_multiplication::pippenger(scalars, points, 0, state); aligned_free(scalars); aligned_free(points); @@ -917,18 +1021,24 @@ TEST(scalar_multiplication, pippenger_zero_points) EXPECT_EQ(result.is_point_at_infinity(), true); } -TEST(scalar_multiplication, pippenger_mul_by_zero) +TYPED_TEST(SRSIO, PippengerMulByZero) { - fr* scalars = (fr*)aligned_alloc(32, sizeof(fr)); + using 
Curve = TypeParam; + using Group = typename Curve::Group; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + using Fr = typename Curve::ScalarField; + + Fr* scalars = (Fr*)aligned_alloc(32, sizeof(Fr)); - g1::affine_element* points = (g1::affine_element*)aligned_alloc(32, sizeof(g1::affine_element) * 2 + 1); + AffineElement* points = (AffineElement*)aligned_alloc(32, sizeof(AffineElement) * 2 + 1); - scalars[0] = fr::zero(); - points[0] = g1::affine_one; - scalar_multiplication::generate_pippenger_point_table(points, points, 1); + scalars[0] = Fr::zero(); + points[0] = Group::affine_one; + barretenberg::scalar_multiplication::generate_pippenger_point_table(points, points, 1); - scalar_multiplication::pippenger_runtime_state state(1); - g1::element result = scalar_multiplication::pippenger(scalars, points, 1, state); + barretenberg::scalar_multiplication::pippenger_runtime_state state(1); + Element result = barretenberg::scalar_multiplication::pippenger(scalars, points, 1, state); aligned_free(scalars); aligned_free(points); diff --git a/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp b/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp index 220cc9da30..070222911c 100644 --- a/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp +++ b/cpp/src/barretenberg/ecc/curves/secp256k1/secp256k1.hpp @@ -147,3 +147,14 @@ typedef barretenberg:: g1; g1::affine_element get_generator(const size_t generator_index); } // namespace secp256k1 + +namespace curve { +class SECP256K1 { + public: + using ScalarField = secp256k1::fr; + using BaseField = secp256k1::fq; + using Group = secp256k1::g1; + using Element = typename Group::element; + using AffineElement = typename Group::affine_element; +}; +} // namespace curve \ No newline at end of file diff --git a/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp b/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp index 542d351231..2d04e47c90 100644 --- a/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp +++ b/cpp/src/barretenberg/ecc/curves/secp256r1/secp256r1.hpp @@ -134,3 +134,14 @@ typedef barretenberg:: g1; g1::affine_element get_generator(const size_t generator_index); } // namespace secp256r1 + +namespace curve { +class SECP256R1 { + public: + using ScalarField = secp256r1::fr; + using BaseField = secp256r1::fq; + using Group = secp256r1::g1; + using Element = typename Group::element; + using AffineElement = typename Group::affine_element; +}; +} // namespace curve \ No newline at end of file diff --git a/cpp/src/barretenberg/ecc/groups/wnaf.hpp b/cpp/src/barretenberg/ecc/groups/wnaf.hpp index ba16a34c45..2a01f0a4c4 100644 --- a/cpp/src/barretenberg/ecc/groups/wnaf.hpp +++ b/cpp/src/barretenberg/ecc/groups/wnaf.hpp @@ -7,7 +7,7 @@ namespace barretenberg { namespace wnaf { constexpr size_t SCALAR_BITS = 127; -#define WNAF_SIZE(x) ((wnaf::SCALAR_BITS + x - 1) / (x)) +#define WNAF_SIZE(x) ((barretenberg::wnaf::SCALAR_BITS + x - 1) / (x)) constexpr size_t get_optimal_bucket_width(const size_t num_points) { diff --git a/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt b/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt new file mode 100644 index 0000000000..8c001b844c --- /dev/null +++ b/cpp/src/barretenberg/grumpkin_srs_gen/CMakeLists.txt @@ -0,0 +1,11 @@ +add_executable( + grumpkin_srs_gen + grumpkin_srs_gen.cpp +) + +target_link_libraries( + grumpkin_srs_gen + PRIVATE + srs + ecc +) \ No newline at end of file diff --git 
a/cpp/src/barretenberg/grumpkin_srs_gen/grumpkin_srs_gen.cpp b/cpp/src/barretenberg/grumpkin_srs_gen/grumpkin_srs_gen.cpp new file mode 100644 index 0000000000..b0477d4849 --- /dev/null +++ b/cpp/src/barretenberg/grumpkin_srs_gen/grumpkin_srs_gen.cpp @@ -0,0 +1,37 @@ +#include +#include + +#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" +#include "barretenberg/srs/io.hpp" + +/* Generates a monomial basis Grumpkin SRS for testing purposes. + We only provide functionality create a single transcript file. + The SRS has the form [1]_1, [x]_1, [x^2]_1, ... where x = 2. */ +int main(int argc, char** argv) +{ + std::vector args(argv, argv + argc); + if (args.size() <= 1) { + info("usage: ", args[0], " [output_srs_path]"); + return 1; + } + + // Note: the number of points in one Ignition transcript file is 5'040'000; see + // https://github.com/AztecProtocol/ignition-verification/blob/master/Transcript_spec.md + const size_t subgroup_size = (size_t)atoi(args[1].c_str()); + const std::string srs_path = (args.size() > 2) ? args[2] : "../srs_db/grumpkin/"; + + std::vector srs(subgroup_size); + + auto point = grumpkin::g1::one; + uint32_t x_secret = 2; + for (size_t point_idx = 0; point_idx < subgroup_size; ++point_idx) { + srs.at(point_idx) = static_cast(point); + point *= x_secret; + } + + srs::Manifest manifest{ 0, 1, static_cast(subgroup_size), 0, static_cast(subgroup_size), 0, 0 }; + + srs::IO::write_transcript(&srs[0], manifest, srs_path); + + return 0; +} \ No newline at end of file diff --git a/cpp/src/barretenberg/honk/pcs/commitment_key.hpp b/cpp/src/barretenberg/honk/pcs/commitment_key.hpp index 5d4e32c80d..eee5376308 100644 --- a/cpp/src/barretenberg/honk/pcs/commitment_key.hpp +++ b/cpp/src/barretenberg/honk/pcs/commitment_key.hpp @@ -8,7 +8,7 @@ #include "barretenberg/polynomials/polynomial_arithmetic.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include "barretenberg/srs/reference_string/file_reference_string.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/ecc/curves/bn254/pairing.hpp" #include "barretenberg/numeric/bitop/pow.hpp" @@ -61,12 +61,14 @@ class CommitmentKey { { const size_t degree = polynomial.size(); ASSERT(degree <= srs.get_monomial_size()); - return barretenberg::scalar_multiplication::pippenger_unsafe( + // TODO(#473) + return barretenberg::scalar_multiplication::pippenger_unsafe( const_cast(polynomial.data()), srs.get_monomial_points(), degree, pippenger_runtime_state); }; private: - barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; + // TODO(#473) + barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; proof_system::FileReferenceString srs; }; @@ -239,11 +241,13 @@ class CommitmentKey { { const size_t degree = polynomial.size(); ASSERT(degree <= srs.get_monomial_size()); - return barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( + // TODO(#473) + return barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( const_cast(polynomial.data()), srs.get_monomial_points(), degree, pippenger_runtime_state); }; - barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; + // TODO(#473) + barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; proof_system::FileReferenceString srs; }; @@ -268,7 +272,8 @@ class 
VerificationKey { , srs(num_points, std::string(path)) {} - barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; + // TODO(#473) + barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; proof_system::FileReferenceString srs; }; diff --git a/cpp/src/barretenberg/honk/pcs/ipa/ipa.hpp b/cpp/src/barretenberg/honk/pcs/ipa/ipa.hpp index a7eaf54f91..7b5edb682d 100644 --- a/cpp/src/barretenberg/honk/pcs/ipa/ipa.hpp +++ b/cpp/src/barretenberg/honk/pcs/ipa/ipa.hpp @@ -2,7 +2,7 @@ #include #include #include -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/honk/pcs/commitment_key.hpp" #include "barretenberg/stdlib/primitives/curves/bn254.hpp" @@ -88,13 +88,17 @@ template class InnerProductArgument { inner_prod_R += a_vec[round_size + j] * b_vec[j]; } // L_i = < a_vec_lo, G_vec_hi > + inner_prod_L * aux_generator - L_elements[i] = barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( - &a_vec[0], &G_vec_local[round_size], round_size, ck->pippenger_runtime_state); + L_elements[i] = + // TODO(#473) + barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( + &a_vec[0], &G_vec_local[round_size], round_size, ck->pippenger_runtime_state); L_elements[i] += aux_generator * inner_prod_L; // R_i = < a_vec_hi, G_vec_lo > + inner_prod_R * aux_generator - R_elements[i] = barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( - &a_vec[round_size], &G_vec_local[0], round_size, ck->pippenger_runtime_state); + // TODO(#473) + R_elements[i] = + barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( + &a_vec[round_size], &G_vec_local[0], round_size, ck->pippenger_runtime_state); R_elements[i] += aux_generator * inner_prod_R; std::string index = std::to_string(i); @@ -178,8 +182,10 @@ template class InnerProductArgument { msm_scalars[2 * i] = round_challenges[i].sqr(); msm_scalars[2 * i + 1] = round_challenges_inv[i].sqr(); } - Commitment LR_sums = barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( - &msm_scalars[0], &msm_elements[0], pippenger_size, vk->pippenger_runtime_state); + // TODO(#473) + Commitment LR_sums = + barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( + &msm_scalars[0], &msm_elements[0], pippenger_size, vk->pippenger_runtime_state); Commitment C_zero = C_prime + LR_sums; /** @@ -218,7 +224,8 @@ template class InnerProductArgument { for (size_t i = 0; i < poly_degree; i++) { G_vec_local[i] = srs_elements[i]; } - auto G_zero = barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( + // TODO(#473) + auto G_zero = barretenberg::scalar_multiplication::pippenger_without_endomorphism_basis_points( &s_vec[0], &G_vec_local[0], poly_degree, vk->pippenger_runtime_state); auto a_zero = transcript.template receive_from_prover("IPA:a_0"); diff --git a/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp b/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp index 628a969d26..b4a811a6f5 100644 --- a/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp +++ b/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp @@ -2,10 +2,9 @@ #include "barretenberg/honk/transcript/transcript.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/honk/flavor/standard.hpp" -#include 
"barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/honk/utils/power_polynomial.hpp" -#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wunused-variable" // TODO(Cody): this needs to go. using namespace barretenberg; using namespace proof_system::honk::sumcheck; diff --git a/cpp/src/barretenberg/honk/proof_system/verifier.cpp b/cpp/src/barretenberg/honk/proof_system/verifier.cpp index 3506f15aaa..2fc77a69ec 100644 --- a/cpp/src/barretenberg/honk/proof_system/verifier.cpp +++ b/cpp/src/barretenberg/honk/proof_system/verifier.cpp @@ -2,7 +2,6 @@ #include "barretenberg/honk/transcript/transcript.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/honk/flavor/standard.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/honk/utils/power_polynomial.hpp" using namespace barretenberg; diff --git a/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp b/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp index 5ee4b90fad..3c7b219a9e 100644 --- a/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp +++ b/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp @@ -59,7 +59,7 @@ WASM_EXPORT uint32_t join_split__get_new_proving_key_data(uint8_t** output) WASM_EXPORT void join_split__init_verification_key(void* pippenger, uint8_t const* g2x) { auto crs_factory = std::make_unique( - reinterpret_cast(pippenger), g2x); + reinterpret_cast*>(pippenger), g2x); init_verification_key(std::move(crs_factory)); } diff --git a/cpp/src/barretenberg/plonk/composer/composer_base.cpp b/cpp/src/barretenberg/plonk/composer/composer_base.cpp index 46e404a57e..0371f2cffe 100644 --- a/cpp/src/barretenberg/plonk/composer/composer_base.cpp +++ b/cpp/src/barretenberg/plonk/composer/composer_base.cpp @@ -329,11 +329,12 @@ std::shared_ptr ComposerBase::compute_verification_key_base( selector_poly_coefficients = proving_key->polynomial_store.get(selector_poly_label).get_coefficients(); // Commit to the constraint selector polynomial and insert the commitment in the verification key. 
+ // TODO(#473) auto selector_poly_commitment = g1::affine_element( - scalar_multiplication::pippenger(selector_poly_coefficients, - proving_key->reference_string->get_monomial_points(), - proving_key->circuit_size, - proving_key->pippenger_runtime_state)); + scalar_multiplication::pippenger(selector_poly_coefficients, + proving_key->reference_string->get_monomial_points(), + proving_key->circuit_size, + proving_key->pippenger_runtime_state)); circuit_verification_key->commitments.insert({ selector_commitment_label, selector_poly_commitment }); } diff --git a/cpp/src/barretenberg/plonk/composer/splitting_tmp/composer_helper/turbo_plonk_composer_helper.cpp b/cpp/src/barretenberg/plonk/composer/splitting_tmp/composer_helper/turbo_plonk_composer_helper.cpp index c367d9ccd1..d1331baf7d 100644 --- a/cpp/src/barretenberg/plonk/composer/splitting_tmp/composer_helper/turbo_plonk_composer_helper.cpp +++ b/cpp/src/barretenberg/plonk/composer/splitting_tmp/composer_helper/turbo_plonk_composer_helper.cpp @@ -1,6 +1,5 @@ #include "turbo_plonk_composer_helper.hpp" #include "barretenberg/proof_system/circuit_constructors/turbo_circuit_constructor.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget.hpp" #include "barretenberg/plonk/proof_system/widgets/transition_widgets/turbo_arithmetic_widget.hpp" diff --git a/cpp/src/barretenberg/plonk/composer/standard_composer.cpp b/cpp/src/barretenberg/plonk/composer/standard_composer.cpp index 1158590acf..cd7d157a88 100644 --- a/cpp/src/barretenberg/plonk/composer/standard_composer.cpp +++ b/cpp/src/barretenberg/plonk/composer/standard_composer.cpp @@ -1,6 +1,5 @@ #include "standard_composer.hpp" #include "barretenberg/plonk/proof_system/types/prover_settings.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/plonk/proof_system/widgets/transition_widgets/arithmetic_widget.hpp" #include "barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget.hpp" diff --git a/cpp/src/barretenberg/plonk/composer/turbo_composer.cpp b/cpp/src/barretenberg/plonk/composer/turbo_composer.cpp index f3339cf147..14927395e7 100644 --- a/cpp/src/barretenberg/plonk/composer/turbo_composer.cpp +++ b/cpp/src/barretenberg/plonk/composer/turbo_composer.cpp @@ -1,5 +1,4 @@ #include "turbo_composer.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget.hpp" #include "barretenberg/plonk/proof_system/widgets/transition_widgets/turbo_arithmetic_widget.hpp" diff --git a/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp b/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp index ce0f4eac7b..3992143ad8 100644 --- a/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp +++ b/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp @@ -1,6 +1,5 @@ #include "ultra_composer.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include #include diff --git a/cpp/src/barretenberg/plonk/proof_system/prover/prover.cpp b/cpp/src/barretenberg/plonk/proof_system/prover/prover.cpp index b7730585e1..32a14c0ac6 100644 --- 
a/cpp/src/barretenberg/plonk/proof_system/prover/prover.cpp +++ b/cpp/src/barretenberg/plonk/proof_system/prover/prover.cpp @@ -3,7 +3,7 @@ #include "barretenberg/plonk/proof_system/types/prover_settings.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/polynomials/iterate_over_domain.hpp" #include "barretenberg/polynomials/polynomial_arithmetic.hpp" diff --git a/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp b/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp index f3198a5cbb..f30b0bbf9f 100644 --- a/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp +++ b/cpp/src/barretenberg/plonk/proof_system/proving_key/proving_key.hpp @@ -1,14 +1,15 @@ #pragma once -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/runtime_states.hpp" #include +#include + +#include "barretenberg/ecc/curves/scalar_multiplication/runtime_states.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" #include "barretenberg/polynomials/polynomial.hpp" - #include "barretenberg/proof_system/polynomial_store/polynomial_store.hpp" #include "barretenberg/srs/reference_string/reference_string.hpp" #include "barretenberg/plonk/proof_system/constants.hpp" #include "barretenberg/plonk/proof_system/types/polynomial_manifest.hpp" -#include +#include "barretenberg/ecc/curves/bn254/bn254.hpp" namespace proof_system::plonk { @@ -62,7 +63,7 @@ struct proving_key { barretenberg::polynomial quotient_polynomial_parts[plonk::NUM_QUOTIENT_PARTS]; - barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; + barretenberg::scalar_multiplication::pippenger_runtime_state pippenger_runtime_state; PolynomialManifest polynomial_manifest; diff --git a/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp b/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp index bee400fd36..6f37c631ac 100644 --- a/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp +++ b/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.cpp @@ -5,7 +5,7 @@ #include "../utils/kate_verification.hpp" #include "barretenberg/ecc/curves/bn254/fq12.hpp" #include "barretenberg/ecc/curves/bn254/pairing.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/polynomials/polynomial_arithmetic.hpp" using namespace barretenberg; @@ -176,12 +176,13 @@ template bool VerifierBase::verify size_t num_elements = elements.size(); elements.resize(num_elements * 2); - barretenberg::scalar_multiplication::generate_pippenger_point_table(&elements[0], &elements[0], num_elements); - scalar_multiplication::pippenger_runtime_state state(num_elements); + barretenberg::scalar_multiplication::generate_pippenger_point_table( + &elements[0], &elements[0], num_elements); + scalar_multiplication::pippenger_runtime_state state(num_elements); g1::element P[2]; - P[0] = barretenberg::scalar_multiplication::pippenger(&scalars[0], &elements[0], num_elements, state); + P[0] = barretenberg::scalar_multiplication::pippenger(&scalars[0], &elements[0], num_elements, state); P[1] = -(g1::element(PI_Z_OMEGA) * separator_challenge + PI_Z); if (key->contains_recursive_proof) { diff --git 
a/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp b/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp index 701a465189..eccd0ed6fa 100644 --- a/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp +++ b/cpp/src/barretenberg/plonk/proof_system/verifier/verifier.test.cpp @@ -5,7 +5,7 @@ #include "../../../transcript/transcript.hpp" #include "barretenberg/plonk/composer/standard_composer.hpp" #include "verifier.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include #include "barretenberg/srs/reference_string/file_reference_string.hpp" #include "barretenberg/polynomials/polynomial_arithmetic.hpp" @@ -29,15 +29,15 @@ plonk::Verifier generate_verifier(std::shared_ptr circuit_proving_k poly_coefficients[7] = circuit_proving_key->polynomial_store.get("sigma_3").get_coefficients(); std::vector commitments; - scalar_multiplication::pippenger_runtime_state state(circuit_proving_key->circuit_size); + scalar_multiplication::pippenger_runtime_state state(circuit_proving_key->circuit_size); commitments.resize(8); for (size_t i = 0; i < 8; ++i) { commitments[i] = g1::affine_element( - scalar_multiplication::pippenger(poly_coefficients[i], - circuit_proving_key->reference_string->get_monomial_points(), - circuit_proving_key->circuit_size, - state)); + scalar_multiplication::pippenger(poly_coefficients[i], + circuit_proving_key->reference_string->get_monomial_points(), + circuit_proving_key->circuit_size, + state)); } auto crs = std::make_shared("../srs_db/ignition"); diff --git a/cpp/src/barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget_impl.hpp b/cpp/src/barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget_impl.hpp index f59863bc5e..ec2b12d46b 100644 --- a/cpp/src/barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget_impl.hpp +++ b/cpp/src/barretenberg/plonk/proof_system/widgets/random_widgets/permutation_widget_impl.hpp @@ -1,6 +1,6 @@ #pragma once #include "barretenberg/common/mem.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/plonk/proof_system/proving_key/proving_key.hpp" #include "barretenberg/plonk/proof_system/public_inputs/public_inputs.hpp" #include "barretenberg/transcript/transcript.hpp" @@ -229,7 +229,7 @@ void ProverPermutationWidget::read_transcript(&globals.monomials[0], g2_x, MAX_GATES, "../srs_db/ignition"); globals.scalars = (fr*)(aligned_alloc(32, sizeof(fr) * MAX_GATES * MAX_ROUNDS)); globals.data = (fr*)(aligned_alloc(32, sizeof(fr) * (8 * 17 * MAX_GATES))); memset((void*)globals.monomials, 0x00, MAX_GATES * 2 * sizeof(globals.monomials)); @@ -122,10 +122,11 @@ void pippenger_bench(State& state) noexcept for (auto _ : state) { const size_t num_points = static_cast(state.range(0)); state.PauseTiming(); - scalar_multiplication::pippenger_runtime_state run_state(num_points); + scalar_multiplication::pippenger_runtime_state run_state(num_points); state.ResumeTiming(); // uint64_t before = rdtsc(); - scalar_multiplication::pippenger(&globals.scalars[0], &globals.monomials[0], num_points, run_state); + scalar_multiplication::pippenger( + &globals.scalars[0], &globals.monomials[0], num_points, run_state); // uint64_t after = rdtsc(); // count += (after - before); // ++i; @@ -143,7 +144,7 @@ void 
unsafe_pippenger_bench(State& state) noexcept uint64_t i = 0; for (auto _ : state) { state.PauseTiming(); - scalar_multiplication::pippenger_runtime_state run_state(num_points); + scalar_multiplication::pippenger_runtime_state run_state(num_points); state.ResumeTiming(); uint64_t before = rdtsc(); @@ -164,28 +165,28 @@ void new_plonk_scalar_multiplications_bench(State& state) noexcept uint64_t k = 0; for (auto _ : state) { state.PauseTiming(); - scalar_multiplication::pippenger_runtime_state run_state(MAX_GATES); + scalar_multiplication::pippenger_runtime_state run_state(MAX_GATES); state.ResumeTiming(); uint64_t before = rdtsc(); - g1::element a = - scalar_multiplication::pippenger(&globals.scalars[0], &globals.monomials[0], MAX_GATES, run_state); - g1::element b = - scalar_multiplication::pippenger(&globals.scalars[1], &globals.monomials[0], MAX_GATES, run_state); - g1::element c = - scalar_multiplication::pippenger(&globals.scalars[2], &globals.monomials[0], MAX_GATES, run_state); - g1::element d = - scalar_multiplication::pippenger(&globals.scalars[3], &globals.monomials[0], MAX_GATES, run_state); - g1::element e = - scalar_multiplication::pippenger(&globals.scalars[4], &globals.monomials[0], MAX_GATES, run_state); - g1::element f = - scalar_multiplication::pippenger(&globals.scalars[5], &globals.monomials[0], MAX_GATES, run_state); - g1::element g = - scalar_multiplication::pippenger(&globals.scalars[6], &globals.monomials[0], MAX_GATES, run_state); - g1::element h = - scalar_multiplication::pippenger(&globals.scalars[7], &globals.monomials[0], MAX_GATES, run_state); - g1::element i = - scalar_multiplication::pippenger(&globals.scalars[8], &globals.monomials[0], MAX_GATES, run_state); + g1::element a = scalar_multiplication::pippenger( + &globals.scalars[0], &globals.monomials[0], MAX_GATES, run_state); + g1::element b = scalar_multiplication::pippenger( + &globals.scalars[1], &globals.monomials[0], MAX_GATES, run_state); + g1::element c = scalar_multiplication::pippenger( + &globals.scalars[2], &globals.monomials[0], MAX_GATES, run_state); + g1::element d = scalar_multiplication::pippenger( + &globals.scalars[3], &globals.monomials[0], MAX_GATES, run_state); + g1::element e = scalar_multiplication::pippenger( + &globals.scalars[4], &globals.monomials[0], MAX_GATES, run_state); + g1::element f = scalar_multiplication::pippenger( + &globals.scalars[5], &globals.monomials[0], MAX_GATES, run_state); + g1::element g = scalar_multiplication::pippenger( + &globals.scalars[6], &globals.monomials[0], MAX_GATES, run_state); + g1::element h = scalar_multiplication::pippenger( + &globals.scalars[7], &globals.monomials[0], MAX_GATES, run_state); + g1::element i = scalar_multiplication::pippenger( + &globals.scalars[8], &globals.monomials[0], MAX_GATES, run_state); uint64_t after = rdtsc(); count += (after - before); ++k; diff --git a/cpp/src/barretenberg/proof_system/circuit_constructors/turbo_circuit_constructor.cpp b/cpp/src/barretenberg/proof_system/circuit_constructors/turbo_circuit_constructor.cpp index 8bc4b461e2..73c491c0a5 100644 --- a/cpp/src/barretenberg/proof_system/circuit_constructors/turbo_circuit_constructor.cpp +++ b/cpp/src/barretenberg/proof_system/circuit_constructors/turbo_circuit_constructor.cpp @@ -1,5 +1,4 @@ #include "turbo_circuit_constructor.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" using namespace barretenberg; diff --git 
a/cpp/src/barretenberg/proof_system/work_queue/work_queue.cpp b/cpp/src/barretenberg/proof_system/work_queue/work_queue.cpp index 038716f4f8..df4fbc05fe 100644 --- a/cpp/src/barretenberg/proof_system/work_queue/work_queue.cpp +++ b/cpp/src/barretenberg/proof_system/work_queue/work_queue.cpp @@ -1,6 +1,6 @@ #include "work_queue.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/scalar_multiplication.hpp" #include "barretenberg/polynomials/polynomial_arithmetic.hpp" namespace proof_system::plonk { @@ -206,8 +206,9 @@ void work_queue::process_queue() barretenberg::g1::affine_element* srs_points = key->reference_string->get_monomial_points(); // Run pippenger multi-scalar multiplication. - auto runtime_state = barretenberg::scalar_multiplication::pippenger_runtime_state(msm_size); - barretenberg::g1::affine_element result(barretenberg::scalar_multiplication::pippenger_unsafe( + // TODO(#473), but maybe the work queue is going away + auto runtime_state = barretenberg::scalar_multiplication::pippenger_runtime_state(msm_size); + barretenberg::g1::affine_element result(barretenberg::scalar_multiplication::pippenger_unsafe( item.mul_scalars, srs_points, msm_size, runtime_state)); transcript->add_element(item.tag, result.to_buffer()); diff --git a/cpp/src/barretenberg/srs/CMakeLists.txt b/cpp/src/barretenberg/srs/CMakeLists.txt index 315fc916d7..984bb3278a 100644 --- a/cpp/src/barretenberg/srs/CMakeLists.txt +++ b/cpp/src/barretenberg/srs/CMakeLists.txt @@ -1 +1 @@ -barretenberg_module(srs polynomials env) +barretenberg_module(srs polynomials env ecc) \ No newline at end of file diff --git a/cpp/src/barretenberg/srs/io.cpp b/cpp/src/barretenberg/srs/io.cpp deleted file mode 100644 index 8292fce42b..0000000000 --- a/cpp/src/barretenberg/srs/io.cpp +++ /dev/null @@ -1,334 +0,0 @@ -#include "io.hpp" -#include "barretenberg/common/mem.hpp" -#include "barretenberg/common/net.hpp" -#include "barretenberg/common/throw_or_abort.hpp" -#include -#include - -namespace barretenberg { -namespace io { - -constexpr size_t BLAKE2B_CHECKSUM_LENGTH = 64; - -size_t get_transcript_size(const Manifest& manifest) -{ - const size_t manifest_size = sizeof(Manifest); - const size_t g1_buffer_size = sizeof(fq) * 2 * manifest.num_g1_points; - const size_t g2_buffer_size = sizeof(fq2) * 2 * manifest.num_g2_points; - return manifest_size + g1_buffer_size + g2_buffer_size + BLAKE2B_CHECKSUM_LENGTH; -} - -void read_manifest(std::string const& filename, Manifest& manifest) -{ - std::ifstream file; - file.open(filename, std::ifstream::binary); - file.read((char*)&manifest, sizeof(Manifest)); - file.close(); - - manifest.transcript_number = ntohl(manifest.transcript_number); - manifest.total_transcripts = ntohl(manifest.total_transcripts); - manifest.total_g1_points = ntohl(manifest.total_g1_points); - manifest.total_g2_points = ntohl(manifest.total_g2_points); - manifest.num_g1_points = ntohl(manifest.num_g1_points); - manifest.num_g2_points = ntohl(manifest.num_g2_points); - manifest.start_from = ntohl(manifest.start_from); -} - -void read_g1_elements_from_buffer(g1::affine_element* elements, char const* buffer, size_t buffer_size) -{ - memcpy((void*)elements, (void*)buffer, buffer_size); - byteswap(elements, buffer_size); -} - -void byteswap(g1::affine_element* elements, size_t elements_size) -{ - constexpr size_t bytes_per_element = sizeof(g1::affine_element); - size_t num_elements = elements_size / bytes_per_element; - - if 
(is_little_endian()) { - for (size_t i = 0; i < num_elements; ++i) { - elements[i].x.data[0] = __builtin_bswap64(elements[i].x.data[0]); - elements[i].x.data[1] = __builtin_bswap64(elements[i].x.data[1]); - elements[i].x.data[2] = __builtin_bswap64(elements[i].x.data[2]); - elements[i].x.data[3] = __builtin_bswap64(elements[i].x.data[3]); - elements[i].y.data[0] = __builtin_bswap64(elements[i].y.data[0]); - elements[i].y.data[1] = __builtin_bswap64(elements[i].y.data[1]); - elements[i].y.data[2] = __builtin_bswap64(elements[i].y.data[2]); - elements[i].y.data[3] = __builtin_bswap64(elements[i].y.data[3]); - elements[i].x.self_to_montgomery_form(); - elements[i].y.self_to_montgomery_form(); - } - } -} - -void read_g2_elements_from_buffer(g2::affine_element* elements, char const* buffer, size_t buffer_size) -{ - memcpy((void*)elements, (void*)buffer, buffer_size); - byteswap(elements, buffer_size); -} - -void byteswap(g2::affine_element* elements, size_t elements_size) -{ - constexpr size_t bytes_per_element = sizeof(g2::affine_element); - size_t num_elements = elements_size / bytes_per_element; - - if (is_little_endian()) { - for (size_t i = 0; i < num_elements; ++i) { - elements[i].x.c0.data[0] = __builtin_bswap64(elements[i].x.c0.data[0]); - elements[i].x.c0.data[1] = __builtin_bswap64(elements[i].x.c0.data[1]); - elements[i].x.c0.data[2] = __builtin_bswap64(elements[i].x.c0.data[2]); - elements[i].x.c0.data[3] = __builtin_bswap64(elements[i].x.c0.data[3]); - elements[i].y.c0.data[0] = __builtin_bswap64(elements[i].y.c0.data[0]); - elements[i].y.c0.data[1] = __builtin_bswap64(elements[i].y.c0.data[1]); - elements[i].y.c0.data[2] = __builtin_bswap64(elements[i].y.c0.data[2]); - elements[i].y.c0.data[3] = __builtin_bswap64(elements[i].y.c0.data[3]); - elements[i].x.c1.data[0] = __builtin_bswap64(elements[i].x.c1.data[0]); - elements[i].x.c1.data[1] = __builtin_bswap64(elements[i].x.c1.data[1]); - elements[i].x.c1.data[2] = __builtin_bswap64(elements[i].x.c1.data[2]); - elements[i].x.c1.data[3] = __builtin_bswap64(elements[i].x.c1.data[3]); - elements[i].y.c1.data[0] = __builtin_bswap64(elements[i].y.c1.data[0]); - elements[i].y.c1.data[1] = __builtin_bswap64(elements[i].y.c1.data[1]); - elements[i].y.c1.data[2] = __builtin_bswap64(elements[i].y.c1.data[2]); - elements[i].y.c1.data[3] = __builtin_bswap64(elements[i].y.c1.data[3]); - elements[i].x.c0.self_to_montgomery_form(); - elements[i].x.c1.self_to_montgomery_form(); - elements[i].y.c0.self_to_montgomery_form(); - elements[i].y.c1.self_to_montgomery_form(); - } - } -} - -size_t get_file_size(std::string const& filename) -{ - struct stat st; - if (stat(filename.c_str(), &st) != 0) { - return 0; - } - return (size_t)st.st_size; -} - -void read_file_into_buffer( - char* buffer, size_t& size, std::string const& filename, size_t offset = 0, size_t amount = 0) -{ - size = amount ? amount : get_file_size(filename); - - std::ifstream file; - file.open(filename, std::ifstream::binary); - file.seekg((int)offset); - - // Read the desired size, but return the actual size read - file.read(buffer, (int)size); - if (!file) { - ptrdiff_t read = file.gcount(); - throw_or_abort(format("Only read ", read, " bytes from file but expected ", size, ".")); - } - - file.close(); -} - -std::string get_transcript_path(std::string const& dir, size_t num) -{ - return format(dir, "/monomial/transcript", (num < 10) ? 
"0" : "", std::to_string(num), ".dat"); -}; - -bool is_file_exist(std::string const& fileName) -{ - std::ifstream infile(fileName); - return infile.good(); -} - -void read_transcript_g1(g1::affine_element* monomials, size_t degree, std::string const& dir) -{ - size_t num = 0; - size_t num_read = 0; - std::string path = get_transcript_path(dir, num); - - while (is_file_exist(path) && num_read < degree) { - Manifest manifest; - read_manifest(path, manifest); - - auto offset = sizeof(Manifest); - const size_t num_to_read = std::min((size_t)manifest.num_g1_points, degree - num_read); - const size_t g1_buffer_size = sizeof(fq) * 2 * num_to_read; - - char* buffer = (char*)&monomials[num_read]; - size_t size = 0; - - // We must pass the size actually read to the second call, not the desired - // g1_buffer_size as the file may have been smaller than this. - read_file_into_buffer(buffer, size, path, offset, g1_buffer_size); - byteswap(&monomials[num_read], size); - - num_read += num_to_read; - path = get_transcript_path(dir, ++num); - } - - const bool monomial_srs_condition = num_read < degree; - if (monomial_srs_condition) { - throw_or_abort(format("Only read ", - num_read, - " points from ", - path, - ", but require ", - degree, - ". Is your srs large enough? Either run bootstrap.sh to download the transcript.dat " - "files to `srs_db/ignition/`, or you might need to download extra transcript.dat files " - "by editing `srs_db/download_ignition.sh` (but be careful, as this suggests you've " - "just changed a circuit to exceed a new 'power of two' boundary).")); - } -} - -void read_transcript_g2(g2::affine_element& g2_x, std::string const& dir) -{ - - const size_t g2_size = sizeof(fq2) * 2; - std::string path = format(dir, "/g2.dat"); - - if (is_file_exist(path)) { - char* buffer = (char*)&g2_x; - size_t size = 0; - - // Again, size passed to second function should be size actually read - read_file_into_buffer(buffer, size, path, 0, g2_size); - byteswap(&g2_x, size); - - return; - } - - // Get transcript starting at g0.dat - path = get_transcript_path(dir, 0); - - Manifest manifest; - read_manifest(path, manifest); - - const size_t g2_buffer_offset = sizeof(fq) * 2 * manifest.num_g1_points; - auto offset = sizeof(Manifest) + g2_buffer_offset; - - char* buffer = (char*)&g2_x; - size_t size = 0; - - // Again, size passed to second function should be size actually read - read_file_into_buffer(buffer, size, path, offset, g2_size); - byteswap(&g2_x, size); -} - -void read_transcript(g1::affine_element* monomials, g2::affine_element& g2_x, size_t degree, std::string const& path) -{ - read_transcript_g1(monomials, degree, path); - read_transcript_g2(g2_x, path); -} - -void write_buffer_to_file(std::string const& filename, char const* buffer, size_t buffer_size) -{ - std::ofstream file; - file.open(filename); - file.write(&buffer[0], (int)(buffer_size)); - file.close(); -} - -void write_g1_elements_to_buffer(g1::affine_element const* elements, char* buffer, size_t num_elements) -{ - uint64_t temp_x[4]; - uint64_t temp_y[4]; - fq temp_x_g1; - fq temp_y_g1; - - if (is_little_endian()) { - for (size_t i = 0; i < num_elements; ++i) { - size_t byte_position_1 = sizeof(fq) * i * 2; - size_t byte_position_2 = sizeof(fq) * (i * 2 + 1); - - temp_x_g1 = elements[i].x.from_montgomery_form(); - temp_y_g1 = elements[i].y.from_montgomery_form(); - - temp_x[0] = __builtin_bswap64(temp_x_g1.data[0]); - temp_x[1] = __builtin_bswap64(temp_x_g1.data[1]); - temp_x[2] = __builtin_bswap64(temp_x_g1.data[2]); - temp_x[3] = 
__builtin_bswap64(temp_x_g1.data[3]); - temp_y[0] = __builtin_bswap64(temp_y_g1.data[0]); - temp_y[1] = __builtin_bswap64(temp_y_g1.data[1]); - temp_y[2] = __builtin_bswap64(temp_y_g1.data[2]); - temp_y[3] = __builtin_bswap64(temp_y_g1.data[3]); - - memcpy((void*)(buffer + byte_position_1), (void*)temp_x, sizeof(fq)); - memcpy((void*)(buffer + byte_position_2), (void*)temp_y, sizeof(fq)); - } - } -} - -void write_g2_elements_to_buffer(g2::affine_element const* elements, char* buffer, size_t num_elements) -{ - uint64_t temp_x[8]; - uint64_t temp_y[8]; - fq temp_x_g2_1; - fq temp_x_g2_2; - fq temp_y_g2_1; - fq temp_y_g2_2; - - if (is_little_endian()) { - for (size_t i = 0; i < num_elements; ++i) { - size_t byte_position_1 = sizeof(fq) * (4 * i); - size_t byte_position_2 = sizeof(fq) * (4 * i + 2); - - temp_x_g2_1 = elements[i].x.c0.from_montgomery_form(); - temp_x_g2_2 = elements[i].x.c1.from_montgomery_form(); - temp_y_g2_1 = elements[i].y.c0.from_montgomery_form(); - temp_y_g2_2 = elements[i].y.c1.from_montgomery_form(); - - temp_x[0] = __builtin_bswap64(temp_x_g2_1.data[0]); - temp_x[1] = __builtin_bswap64(temp_x_g2_1.data[1]); - temp_x[2] = __builtin_bswap64(temp_x_g2_1.data[2]); - temp_x[3] = __builtin_bswap64(temp_x_g2_1.data[3]); - temp_x[4] = __builtin_bswap64(temp_x_g2_2.data[0]); - temp_x[5] = __builtin_bswap64(temp_x_g2_2.data[1]); - temp_x[6] = __builtin_bswap64(temp_x_g2_2.data[2]); - temp_x[7] = __builtin_bswap64(temp_x_g2_2.data[3]); - - temp_y[0] = __builtin_bswap64(temp_y_g2_1.data[0]); - temp_y[1] = __builtin_bswap64(temp_y_g2_1.data[1]); - temp_y[2] = __builtin_bswap64(temp_y_g2_1.data[2]); - temp_y[3] = __builtin_bswap64(temp_y_g2_1.data[3]); - temp_y[4] = __builtin_bswap64(temp_y_g2_2.data[0]); - temp_y[5] = __builtin_bswap64(temp_y_g2_2.data[1]); - temp_y[6] = __builtin_bswap64(temp_y_g2_2.data[2]); - temp_y[7] = __builtin_bswap64(temp_y_g2_2.data[3]); - - memcpy((void*)(buffer + byte_position_1), (void*)temp_x, 2 * sizeof(fq)); - memcpy((void*)(buffer + byte_position_2), (void*)temp_y, 2 * sizeof(fq)); - } - } -} - -void write_transcript(g1::affine_element const* g1_x, - g2::affine_element const* g2_x, - Manifest const& manifest, - std::string const& dir) -{ - const size_t num_g1_x = manifest.num_g1_points; - const size_t num_g2_x = manifest.num_g2_points; - const size_t transcript_num = manifest.transcript_number; - const size_t manifest_size = sizeof(Manifest); - const size_t g1_buffer_size = sizeof(fq) * 2 * num_g1_x; - const size_t g2_buffer_size = sizeof(fq) * 4 * num_g2_x; - const size_t transcript_size = manifest_size + g1_buffer_size + g2_buffer_size; - std::string path = get_transcript_path(dir, transcript_num); - std::vector buffer(transcript_size); - - Manifest net_manifest; - net_manifest.transcript_number = htonl(manifest.transcript_number); - net_manifest.total_transcripts = htonl(manifest.total_transcripts); - net_manifest.total_g1_points = htonl(manifest.total_g1_points); - net_manifest.total_g2_points = htonl(manifest.total_g2_points); - net_manifest.num_g1_points = htonl(manifest.num_g1_points); - net_manifest.num_g2_points = htonl(manifest.num_g2_points); - net_manifest.start_from = htonl(manifest.start_from); - - std::copy(&net_manifest, &net_manifest + 1, (Manifest*)&buffer[0]); - - write_g1_elements_to_buffer(g1_x, &buffer[manifest_size], num_g1_x); - write_g2_elements_to_buffer(g2_x, &buffer[manifest_size + g1_buffer_size], num_g2_x); - write_buffer_to_file(path, &buffer[0], transcript_size); -} - -} // namespace io -} // namespace barretenberg 
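The deleted io.cpp above is superseded by the header-only, curve-templated srs::IO class introduced in io.hpp below. A minimal usage sketch, assuming IO is instantiated with curve classes such as curve::BN254 and curve::Grumpkin (names inferred from the bn254.hpp and grumpkin.hpp changes); the degree and SRS paths are illustrative.

#include <vector>

#include "barretenberg/ecc/curves/bn254/bn254.hpp"
#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp"
#include "barretenberg/srs/io.hpp"

// Hypothetical sketch, not part of this patch: load monomial SRS points for two
// curves through the templated interface.
void load_srs_sketch()
{
    const size_t degree = 1 << 10;

    // A BN254 transcript carries G1 monomials plus one G2 point, so the
    // HasG2-constrained overload taking a g2_x output is selected.
    std::vector<barretenberg::g1::affine_element> bn254_monomials(degree);
    barretenberg::g2::affine_element g2_x;
    srs::IO<curve::BN254>::read_transcript(bn254_monomials.data(), g2_x, degree, "../srs_db/ignition");

    // Grumpkin has no G2, so only the G1-only overload exists for it.
    std::vector<grumpkin::g1::affine_element> grumpkin_monomials(degree);
    srs::IO<curve::Grumpkin>::read_transcript(grumpkin_monomials.data(), degree, "../srs_db/grumpkin");
}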
diff --git a/cpp/src/barretenberg/srs/io.hpp b/cpp/src/barretenberg/srs/io.hpp index 0d285c3109..918f503049 100644 --- a/cpp/src/barretenberg/srs/io.hpp +++ b/cpp/src/barretenberg/srs/io.hpp @@ -1,12 +1,13 @@ #pragma once -#include "../ecc/curves/bn254/g1.hpp" -#include "../ecc/curves/bn254/g2.hpp" +#include "../ecc/curves/bn254/bn254.hpp" +#include "../ecc/curves/grumpkin/grumpkin.hpp" #include +#include +#include #include +#include -namespace barretenberg { -namespace io { - +namespace srs { struct Manifest { uint32_t transcript_number; uint32_t total_transcripts; @@ -17,28 +18,381 @@ struct Manifest { uint32_t start_from; }; -void read_transcript_g1(g1::affine_element* monomials, size_t degree, std::string const& dir); +// Detect whether a curve has a G2AffineElement defined +template concept HasG2 = requires +{ + typename Curve::G2AffineElement; +}; + +// If Curve has a G2AffineElement type, check whether T is this type. +template +concept GivingG2AffineElementType = std::same_as; + +// If Curve has a G2AffineElement type, check whether T is this type. +template +concept GivingG1AffineElementType = std::same_as; + +template class IO { + using Fq = typename Curve::BaseField; + using Fr = typename Curve::ScalarField; + using Element = typename Curve::Element; + using AffineElement = typename Curve::AffineElement; + + static constexpr size_t BLAKE2B_CHECKSUM_LENGTH = 64; + + static size_t get_transcript_size(const Manifest& manifest) + { + const size_t manifest_size = sizeof(Manifest); + const size_t g1_buffer_size = sizeof(Fq) * 2 * manifest.num_g1_points; + size_t result = manifest_size + g1_buffer_size + BLAKE2B_CHECKSUM_LENGTH; + + if constexpr (HasG2) { + const size_t g2_buffer_size = 2 * sizeof(Fq) * 2 * manifest.num_g2_points; + info(g2_buffer_size); + result += g2_buffer_size; + } + + return result; + } + + static void read_manifest(std::string const& filename, Manifest& manifest) + { + std::ifstream file; + file.open(filename, std::ifstream::binary); + file.read((char*)&manifest, sizeof(Manifest)); + file.close(); + + manifest.transcript_number = ntohl(manifest.transcript_number); + manifest.total_transcripts = ntohl(manifest.total_transcripts); + manifest.total_g1_points = ntohl(manifest.total_g1_points); + manifest.total_g2_points = ntohl(manifest.total_g2_points); + manifest.num_g1_points = ntohl(manifest.num_g1_points); + manifest.num_g2_points = ntohl(manifest.num_g2_points); + manifest.start_from = ntohl(manifest.start_from); + } + + static void write_buffer_to_file(std::string const& filename, char const* buffer, size_t buffer_size) + { + std::ofstream file; + file.open(filename); + file.write(&buffer[0], (int)(buffer_size)); + file.close(); + } + + static size_t get_file_size(std::string const& filename) + { + struct stat st; + if (stat(filename.c_str(), &st) != 0) { + return 0; + } + return (size_t)st.st_size; + } + + static void read_file_into_buffer( + char* buffer, size_t& size, std::string const& filename, size_t offset = 0, size_t amount = 0) + { + size = amount ? 
amount : get_file_size(filename); + + std::ifstream file; + file.open(filename, std::ifstream::binary); + file.seekg((int)offset); + + // Read the desired size, but return the actual size read + file.read(buffer, (int)size); + if (!file) { + ptrdiff_t read = file.gcount(); + throw_or_abort(format("Only read ", read, " bytes from file but expected ", size, ".")); + } + + file.close(); + } + + static std::string get_transcript_path(std::string const& dir, size_t num) + { + return format(dir, "/monomial/transcript", (num < 10) ? "0" : "", std::to_string(num), ".dat"); + }; + + static bool is_file_exist(std::string const& fileName) + { + std::ifstream infile(fileName); + return infile.good(); + } + + template + static void write_elements_to_buffer(AffineElementType const* elements, char* buffer, size_t num_elements) + { + if constexpr (GivingG1AffineElementType) { + uint64_t temp_x[4]; + uint64_t temp_y[4]; + Fq temp_x_g1; + Fq temp_y_g1; + + if (is_little_endian()) { + for (size_t i = 0; i < num_elements; ++i) { + size_t byte_position_1 = sizeof(Fq) * i * 2; + size_t byte_position_2 = sizeof(Fq) * (i * 2 + 1); + + temp_x_g1 = elements[i].x.from_montgomery_form(); + temp_y_g1 = elements[i].y.from_montgomery_form(); + + temp_x[0] = __builtin_bswap64(temp_x_g1.data[0]); + temp_x[1] = __builtin_bswap64(temp_x_g1.data[1]); + temp_x[2] = __builtin_bswap64(temp_x_g1.data[2]); + temp_x[3] = __builtin_bswap64(temp_x_g1.data[3]); + temp_y[0] = __builtin_bswap64(temp_y_g1.data[0]); + temp_y[1] = __builtin_bswap64(temp_y_g1.data[1]); + temp_y[2] = __builtin_bswap64(temp_y_g1.data[2]); + temp_y[3] = __builtin_bswap64(temp_y_g1.data[3]); + + memcpy((void*)(buffer + byte_position_1), (void*)temp_x, sizeof(Fq)); + memcpy((void*)(buffer + byte_position_2), (void*)temp_y, sizeof(Fq)); + } + } + } else if constexpr (GivingG2AffineElementType) { + uint64_t temp_x[8]; + uint64_t temp_y[8]; + Fq temp_x_g2_1; + Fq temp_x_g2_2; + Fq temp_y_g2_1; + Fq temp_y_g2_2; + + if (is_little_endian()) { + for (size_t i = 0; i < num_elements; ++i) { + size_t byte_position_1 = sizeof(Fq) * (4 * i); + size_t byte_position_2 = sizeof(Fq) * (4 * i + 2); + + temp_x_g2_1 = elements[i].x.c0.from_montgomery_form(); + temp_x_g2_2 = elements[i].x.c1.from_montgomery_form(); + temp_y_g2_1 = elements[i].y.c0.from_montgomery_form(); + temp_y_g2_2 = elements[i].y.c1.from_montgomery_form(); -void read_transcript_g2(g2::affine_element& g2_x, std::string const& dir); + temp_x[0] = __builtin_bswap64(temp_x_g2_1.data[0]); + temp_x[1] = __builtin_bswap64(temp_x_g2_1.data[1]); + temp_x[2] = __builtin_bswap64(temp_x_g2_1.data[2]); + temp_x[3] = __builtin_bswap64(temp_x_g2_1.data[3]); + temp_x[4] = __builtin_bswap64(temp_x_g2_2.data[0]); + temp_x[5] = __builtin_bswap64(temp_x_g2_2.data[1]); + temp_x[6] = __builtin_bswap64(temp_x_g2_2.data[2]); + temp_x[7] = __builtin_bswap64(temp_x_g2_2.data[3]); -void read_transcript(g1::affine_element* monomials, g2::affine_element& g2_x, size_t degree, std::string const& path); + temp_y[0] = __builtin_bswap64(temp_y_g2_1.data[0]); + temp_y[1] = __builtin_bswap64(temp_y_g2_1.data[1]); + temp_y[2] = __builtin_bswap64(temp_y_g2_1.data[2]); + temp_y[3] = __builtin_bswap64(temp_y_g2_1.data[3]); + temp_y[4] = __builtin_bswap64(temp_y_g2_2.data[0]); + temp_y[5] = __builtin_bswap64(temp_y_g2_2.data[1]); + temp_y[6] = __builtin_bswap64(temp_y_g2_2.data[2]); + temp_y[7] = __builtin_bswap64(temp_y_g2_2.data[3]); -void read_g1_elements_from_buffer(g1::affine_element* elements, char const* buffer, size_t buffer_size); -void 
byteswap(g1::affine_element* elements, size_t buffer_size); + memcpy((void*)(buffer + byte_position_1), (void*)temp_x, 2 * sizeof(Fq)); + memcpy((void*)(buffer + byte_position_2), (void*)temp_y, 2 * sizeof(Fq)); + } + } + } + } -void read_g2_elements_from_buffer(g2::affine_element* elements, char const* buffer, size_t buffer_size); -void byteswap(g2::affine_element* elements, size_t buffer_size); + public: + template static void byteswap(AffineElementType* elements, size_t elements_size) + { + if constexpr (GivingG1AffineElementType) { + constexpr size_t bytes_per_element = sizeof(AffineElementType); + size_t num_elements = elements_size / bytes_per_element; -void write_buffer_to_file(std::string const& filename, char const* buffer, size_t buffer_size); + if (is_little_endian()) { + for (size_t i = 0; i < num_elements; ++i) { + elements[i].x.data[0] = __builtin_bswap64(elements[i].x.data[0]); + elements[i].x.data[1] = __builtin_bswap64(elements[i].x.data[1]); + elements[i].x.data[2] = __builtin_bswap64(elements[i].x.data[2]); + elements[i].x.data[3] = __builtin_bswap64(elements[i].x.data[3]); + elements[i].y.data[0] = __builtin_bswap64(elements[i].y.data[0]); + elements[i].y.data[1] = __builtin_bswap64(elements[i].y.data[1]); + elements[i].y.data[2] = __builtin_bswap64(elements[i].y.data[2]); + elements[i].y.data[3] = __builtin_bswap64(elements[i].y.data[3]); + elements[i].x.self_to_montgomery_form(); + elements[i].y.self_to_montgomery_form(); + } + } + } else if constexpr (GivingG2AffineElementType) { + constexpr size_t bytes_per_element = sizeof(AffineElementType); + size_t num_elements = elements_size / bytes_per_element; -void write_g1_elements_to_buffer(g1::affine_element const* elements, char* buffer, size_t num_elements); + if (is_little_endian()) { + for (size_t i = 0; i < num_elements; ++i) { + elements[i].x.c0.data[0] = __builtin_bswap64(elements[i].x.c0.data[0]); + elements[i].x.c0.data[1] = __builtin_bswap64(elements[i].x.c0.data[1]); + elements[i].x.c0.data[2] = __builtin_bswap64(elements[i].x.c0.data[2]); + elements[i].x.c0.data[3] = __builtin_bswap64(elements[i].x.c0.data[3]); + elements[i].y.c0.data[0] = __builtin_bswap64(elements[i].y.c0.data[0]); + elements[i].y.c0.data[1] = __builtin_bswap64(elements[i].y.c0.data[1]); + elements[i].y.c0.data[2] = __builtin_bswap64(elements[i].y.c0.data[2]); + elements[i].y.c0.data[3] = __builtin_bswap64(elements[i].y.c0.data[3]); + elements[i].x.c1.data[0] = __builtin_bswap64(elements[i].x.c1.data[0]); + elements[i].x.c1.data[1] = __builtin_bswap64(elements[i].x.c1.data[1]); + elements[i].x.c1.data[2] = __builtin_bswap64(elements[i].x.c1.data[2]); + elements[i].x.c1.data[3] = __builtin_bswap64(elements[i].x.c1.data[3]); + elements[i].y.c1.data[0] = __builtin_bswap64(elements[i].y.c1.data[0]); + elements[i].y.c1.data[1] = __builtin_bswap64(elements[i].y.c1.data[1]); + elements[i].y.c1.data[2] = __builtin_bswap64(elements[i].y.c1.data[2]); + elements[i].y.c1.data[3] = __builtin_bswap64(elements[i].y.c1.data[3]); + elements[i].x.c0.self_to_montgomery_form(); + elements[i].x.c1.self_to_montgomery_form(); + elements[i].y.c0.self_to_montgomery_form(); + elements[i].y.c1.self_to_montgomery_form(); + } + } + } + } -void write_g2_elements_to_buffer(g2::affine_element const* elements, char* buffer, size_t num_elements); + template + static void read_affine_elements_from_buffer(AffineElementType* elements, char const* buffer, size_t buffer_size) + { + memcpy((void*)elements, (void*)buffer, buffer_size); + byteswap<>(elements, buffer_size); + } 
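As a usage note for the buffer helpers just above: the transcript stores each coordinate as big-endian 64-bit limbs in standard (non-Montgomery) form, so on little-endian hosts byteswap both reverses the bytes of every limb and converts the coordinate back to Montgomery form. A hedged sketch of deserializing a raw G1 chunk with read_affine_elements_from_buffer follows; the curve class name and helper name are assumptions, not part of this patch.

#include <cstddef>
#include <vector>

#include "barretenberg/ecc/curves/bn254/bn254.hpp"
#include "barretenberg/srs/io.hpp"

// Hypothetical sketch: parse `num_points` G1 points from a raw transcript chunk.
// Each affine point occupies two Fq coordinates on disk, i.e. sizeof(AffineElement) bytes.
std::vector<barretenberg::g1::affine_element> parse_g1_chunk(char const* buffer, size_t num_points)
{
    std::vector<barretenberg::g1::affine_element> points(num_points);
    const size_t buffer_size = num_points * sizeof(barretenberg::g1::affine_element);
    srs::IO<curve::BN254>::read_affine_elements_from_buffer(points.data(), buffer, buffer_size);
    return points;
}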
-void write_transcript(g1::affine_element const* g1_x, - g2::affine_element const* g2_x, - Manifest const& manifest, - std::string const& dir); + static void read_transcript_g1(AffineElement* monomials, size_t degree, std::string const& dir) + { + size_t num = 0; + size_t num_read = 0; + std::string path = get_transcript_path(dir, num); + + while (is_file_exist(path) && num_read < degree) { + Manifest manifest; + read_manifest(path, manifest); + + auto offset = sizeof(Manifest); + const size_t num_to_read = std::min((size_t)manifest.num_g1_points, degree - num_read); + const size_t g1_buffer_size = sizeof(Fq) * 2 * num_to_read; + + char* buffer = (char*)&monomials[num_read]; + size_t size = 0; + + // We must pass the size actually read to the second call, not the desired + // g1_buffer_size as the file may have been smaller than this. + read_file_into_buffer(buffer, size, path, offset, g1_buffer_size); + srs::IO::byteswap(&monomials[num_read], size); + + num_read += num_to_read; + path = get_transcript_path(dir, ++num); + } + + const bool monomial_srs_condition = num_read < degree; + if (monomial_srs_condition) { + throw_or_abort( + format("Only read ", + num_read, + " points from ", + path, + ", but require ", + degree, + ". Is your srs large enough? Either run bootstrap.sh to download the transcript.dat " + "files to `srs_db/ignition/`, or you might need to download extra transcript.dat files " + "by editing `srs_db/download_ignition.sh` (but be careful, as this suggests you've " + "just changed a circuit to exceed a new 'power of two' boundary).")); + } + } + + static void read_transcript_g2(auto& g2_x, std::string const& dir) requires HasG2 + { + const size_t g2_size = sizeof(typename Curve::G2BaseField) * 2; + std::string path = format(dir, "/g2.dat"); + + if (is_file_exist(path)) { + char* buffer = (char*)&g2_x; + size_t size = 0; + + // Again, size passed to second function should be size actually read + read_file_into_buffer(buffer, size, path, 0, g2_size); + byteswap(&g2_x, size); + + return; + } + + // Get transcript starting at g0.dat + path = get_transcript_path(dir, 0); + + Manifest manifest; + read_manifest(path, manifest); + + const size_t g2_buffer_offset = sizeof(Fq) * 2 * manifest.num_g1_points; + auto offset = sizeof(Manifest) + g2_buffer_offset; + + char* buffer = (char*)&g2_x; + size_t size = 0; + + // Again, size passed to second function should be size actually read + read_file_into_buffer(buffer, size, path, offset, g2_size); + byteswap(&g2_x, size); + } + + static void read_transcript(AffineElement* monomials, + auto& g2_x, + size_t degree, + std::string const& path) requires HasG2 + { + read_transcript_g1(monomials, degree, path); + read_transcript_g2(g2_x, path); + } + + static void read_transcript(AffineElement* monomials, size_t degree, std::string const& path) + { + read_transcript_g1(monomials, degree, path); + } + + // This function is a vestige of the Lagrange form transcript work, and it is not used anywhere. 
+ static void write_transcript(AffineElement const* g1_x, + auto const* g2_x, + Manifest const& manifest, + std::string const& dir) requires HasG2 + { + const size_t num_g1_x = manifest.num_g1_points; + const size_t num_g2_x = manifest.num_g2_points; + const size_t transcript_num = manifest.transcript_number; + const size_t manifest_size = sizeof(Manifest); + const size_t g1_buffer_size = sizeof(Fq) * 2 * num_g1_x; + const size_t g2_buffer_size = sizeof(Fq) * 4 * num_g2_x; + const size_t transcript_size = manifest_size + g1_buffer_size + g2_buffer_size; + std::string path = get_transcript_path(dir, transcript_num); + std::vector<char> buffer(transcript_size); + + Manifest net_manifest; + net_manifest.transcript_number = htonl(manifest.transcript_number); + net_manifest.total_transcripts = htonl(manifest.total_transcripts); + net_manifest.total_g1_points = htonl(manifest.total_g1_points); + net_manifest.total_g2_points = htonl(manifest.total_g2_points); + net_manifest.num_g1_points = htonl(manifest.num_g1_points); + net_manifest.num_g2_points = htonl(manifest.num_g2_points); + net_manifest.start_from = htonl(manifest.start_from); + + std::copy(&net_manifest, &net_manifest + 1, (Manifest*)&buffer[0]); + + write_elements_to_buffer(g1_x, &buffer[manifest_size], num_g1_x); + write_elements_to_buffer(g2_x, &buffer[manifest_size + g1_buffer_size], num_g2_x); + write_buffer_to_file(path, &buffer[0], transcript_size); + } + + static void write_transcript(AffineElement const* g1_x, Manifest const& manifest, std::string const& dir) + { + const size_t num_g1_x = manifest.num_g1_points; + const size_t transcript_num = manifest.transcript_number; + const size_t manifest_size = sizeof(Manifest); + const size_t g1_buffer_size = sizeof(Fq) * 2 * num_g1_x; + const size_t transcript_size = manifest_size + g1_buffer_size; + std::string path = get_transcript_path(dir, transcript_num); + std::vector<char> buffer(transcript_size); + + Manifest net_manifest; + net_manifest.transcript_number = htonl(manifest.transcript_number); + net_manifest.total_transcripts = htonl(manifest.total_transcripts); + net_manifest.total_g1_points = htonl(manifest.total_g1_points); + net_manifest.num_g1_points = htonl(manifest.num_g1_points); + net_manifest.start_from = htonl(manifest.start_from); + + std::copy(&net_manifest, &net_manifest + 1, (Manifest*)&buffer[0]); + + write_elements_to_buffer(g1_x, &buffer[manifest_size], num_g1_x); + write_buffer_to_file(path, &buffer[0], transcript_size); + } +}; -} // namespace io -} // namespace barretenberg +} // namespace srs diff --git a/cpp/src/barretenberg/srs/io.test.cpp b/cpp/src/barretenberg/srs/io.test.cpp index 7ff2806227..81452638a4 100644 --- a/cpp/src/barretenberg/srs/io.test.cpp +++ b/cpp/src/barretenberg/srs/io.test.cpp @@ -11,7 +11,7 @@ TEST(io, read_transcript_loads_well_formed_srs) size_t degree = 100000; g1::affine_element* monomials = (g1::affine_element*)(aligned_alloc(32, sizeof(g1::affine_element) * (degree + 2))); g2::affine_element g2_x; - io::read_transcript(monomials, g2_x, degree, "../srs_db/ignition"); + srs::IO::read_transcript(monomials, g2_x, degree, "../srs_db/ignition"); EXPECT_EQ(g1::affine_one, monomials[0]); diff --git a/cpp/src/barretenberg/srs/reference_string/env_reference_string.hpp b/cpp/src/barretenberg/srs/reference_string/env_reference_string.hpp index cceb6eef82..b6821916f1 100644 --- a/cpp/src/barretenberg/srs/reference_string/env_reference_string.hpp +++ b/cpp/src/barretenberg/srs/reference_string/env_reference_string.hpp @@ -11,7 +11,7 @@ #include
"barretenberg/ecc/curves/bn254/g1.hpp" #include "barretenberg/ecc/curves/bn254/g2.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/pippenger.hpp" #include "barretenberg/env/crs.hpp" @@ -32,7 +32,8 @@ class EnvReferenceString : public ProverReferenceString { private: size_t num_points; - scalar_multiplication::Pippenger pippenger_; + // TODO(#473)? + scalar_multiplication::Pippenger pippenger_; }; class EnvReferenceStringFactory : public ReferenceStringFactory { diff --git a/cpp/src/barretenberg/srs/reference_string/file_reference_string.cpp b/cpp/src/barretenberg/srs/reference_string/file_reference_string.cpp index 99f09c776c..db24aafb39 100644 --- a/cpp/src/barretenberg/srs/reference_string/file_reference_string.cpp +++ b/cpp/src/barretenberg/srs/reference_string/file_reference_string.cpp @@ -10,7 +10,7 @@ VerifierFileReferenceString::VerifierFileReferenceString(std::string const& path (barretenberg::pairing::miller_lines*)(aligned_alloc(64, sizeof(barretenberg::pairing::miller_lines) * 2))) { - barretenberg::io::read_transcript_g2(g2_x, path); + srs::IO::read_transcript_g2(g2_x, path); barretenberg::pairing::precompute_miller_lines(barretenberg::g2::one, precomputed_g2_lines[0]); barretenberg::pairing::precompute_miller_lines(g2_x, precomputed_g2_lines[1]); } diff --git a/cpp/src/barretenberg/srs/reference_string/file_reference_string.hpp b/cpp/src/barretenberg/srs/reference_string/file_reference_string.hpp index ef6bebfc0a..b63dfdc96f 100644 --- a/cpp/src/barretenberg/srs/reference_string/file_reference_string.hpp +++ b/cpp/src/barretenberg/srs/reference_string/file_reference_string.hpp @@ -6,7 +6,7 @@ #include "barretenberg/ecc/curves/bn254/g1.hpp" #include "barretenberg/ecc/curves/bn254/g2.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.hpp" +#include "barretenberg/ecc/curves/scalar_multiplication/pippenger.hpp" #include #include @@ -46,7 +46,8 @@ class FileReferenceString : public ProverReferenceString { private: size_t num_points; - scalar_multiplication::Pippenger pippenger_; + // TODO(#473)? 
+ scalar_multiplication::Pippenger pippenger_; }; class FileReferenceStringFactory : public ReferenceStringFactory { diff --git a/cpp/src/barretenberg/srs/reference_string/mem_reference_string.cpp b/cpp/src/barretenberg/srs/reference_string/mem_reference_string.cpp index 0a9c71b40a..dc83086420 100644 --- a/cpp/src/barretenberg/srs/reference_string/mem_reference_string.cpp +++ b/cpp/src/barretenberg/srs/reference_string/mem_reference_string.cpp @@ -12,7 +12,7 @@ VerifierMemReferenceString::VerifierMemReferenceString(uint8_t const* g2x) : precomputed_g2_lines( (barretenberg::pairing::miller_lines*)(aligned_alloc(64, sizeof(barretenberg::pairing::miller_lines) * 2))) { - barretenberg::io::read_g2_elements_from_buffer(&g2_x, (char*)g2x, 128); + srs::IO::read_affine_elements_from_buffer(&g2_x, (char*)g2x, 128); barretenberg::pairing::precompute_miller_lines(barretenberg::g2::one, precomputed_g2_lines[0]); barretenberg::pairing::precompute_miller_lines(g2_x, precomputed_g2_lines[1]); diff --git a/cpp/src/barretenberg/srs/reference_string/mem_reference_string.hpp b/cpp/src/barretenberg/srs/reference_string/mem_reference_string.hpp index 608446fa77..6c61e21528 100644 --- a/cpp/src/barretenberg/srs/reference_string/mem_reference_string.hpp +++ b/cpp/src/barretenberg/srs/reference_string/mem_reference_string.hpp @@ -5,8 +5,6 @@ #include "reference_string.hpp" -#include "barretenberg/ecc/curves/bn254/scalar_multiplication/pippenger.hpp" - namespace barretenberg::pairing { struct miller_lines; } // namespace barretenberg::pairing diff --git a/cpp/src/barretenberg/srs/reference_string/pippenger_reference_string.hpp b/cpp/src/barretenberg/srs/reference_string/pippenger_reference_string.hpp index 53c6867018..1882cb8e9d 100644 --- a/cpp/src/barretenberg/srs/reference_string/pippenger_reference_string.hpp +++ b/cpp/src/barretenberg/srs/reference_string/pippenger_reference_string.hpp @@ -15,7 +15,8 @@ using namespace barretenberg; class PippengerReferenceString : public ProverReferenceString { public: - PippengerReferenceString(scalar_multiplication::Pippenger* pippenger) + // TODO(#473)? + PippengerReferenceString(scalar_multiplication::Pippenger* pippenger) : pippenger_(pippenger) {} @@ -23,12 +24,14 @@ class PippengerReferenceString : public ProverReferenceString { g1::affine_element* get_monomial_points() override { return pippenger_->get_point_table(); } private: - scalar_multiplication::Pippenger* pippenger_; + // TODO(#473)? + scalar_multiplication::Pippenger* pippenger_; }; class PippengerReferenceStringFactory : public ReferenceStringFactory { public: - PippengerReferenceStringFactory(scalar_multiplication::Pippenger* pippenger, uint8_t const* g2x) + // TODO(#473)? + PippengerReferenceStringFactory(scalar_multiplication::Pippenger* pippenger, uint8_t const* g2x) : pippenger_(pippenger) , g2x_(g2x) {} @@ -47,7 +50,8 @@ class PippengerReferenceStringFactory : public ReferenceStringFactory { } private: - scalar_multiplication::Pippenger* pippenger_; + // TODO(#473)? + scalar_multiplication::Pippenger* pippenger_; uint8_t const* g2x_; }; diff --git a/cpp/srs_db/grumpkin/monomial/README.md b/cpp/srs_db/grumpkin/monomial/README.md new file mode 100644 index 0000000000..9d3e53e889 --- /dev/null +++ b/cpp/srs_db/grumpkin/monomial/README.md @@ -0,0 +1,16 @@ +# Quick-and-dirty Grumpkin transcript + +The Grumpkin transcript currently departs in structure from the BN254 +transcript in that: + - It does not contain a checksum + - It does not contain any g2 points (indeed, there is no grumpkin::g2). 
+ - The transcript generation binary only produces a single transcript file.
+   If more than 5040000 points are desired at some point, it is likely we will
+   need a small refactor so that more files are created.
+
+A full-length transcript file containing 5040000 points would have:
+BN254    transcript00.dat size: 322560412
+Grumpkin transcript00.dat size: 322560028
+322560412 - 322560028 = 384 = 256 + 128
+                              ^^^   ^^^
+                      2 g2 points   checksum
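+
+Below is a minimal sketch of loading points from this transcript with the
+templated `srs::IO` reader. The template parameter name (`curve::Grumpkin`)
+and the relative path are illustrative assumptions rather than part of the
+transcript format; since there is no grumpkin::g2, only the G1-only
+`read_transcript` overload applies.
+
+```cpp
+#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp"
+#include "barretenberg/srs/io.hpp"
+#include <cstddef>
+#include <vector>
+
+// Read the first `num_points` Grumpkin generator points from the monomial transcript.
+std::vector<grumpkin::g1::affine_element> load_grumpkin_srs(size_t num_points)
+{
+    std::vector<grumpkin::g1::affine_element> points(num_points);
+    // Skips the manifest, then byteswaps num_points * 64 bytes of G1 data into Montgomery form.
+    srs::IO<curve::Grumpkin>::read_transcript(points.data(), num_points, "../srs_db/grumpkin");
+    return points;
+}
+```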