From 87cb00f0918da73ec0f302eb6e34eadf0bdb0e89 Mon Sep 17 00:00:00 2001
From: ludamad
Date: Thu, 30 Nov 2023 01:51:26 +0000
Subject: [PATCH] Revert "fix undefined behaviour"

This reverts commit 8dbeb8c4b1e34bb4ccc36bf41bb1b3c6f6bce3f1.
---
 barretenberg/cpp/CMakePresets.json                 | 17 --------
 .../barretenberg/common/compiler_hints.hpp         |  6 ---
 .../ecc/fields/field_declarations.hpp              | 14 ------
 .../barretenberg/ecc/groups/element_impl.hpp       | 14 +++---
 .../cpp/src/barretenberg/ecc/groups/wnaf.hpp       | 43 +++++++------------
 .../src/barretenberg/ecc/groups/wnaf.test.cpp      | 19 ++++----
 .../scalar_multiplication.cpp                      |  9 ++--
 .../primitives/biggroup/biggroup_nafs.hpp          |  7 ++-
 .../primitives/plookup/plookup.test.cpp            |  2 +-
 9 files changed, 43 insertions(+), 88 deletions(-)

diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json
index 7fb7f286d693..b87e10709b63 100644
--- a/barretenberg/cpp/CMakePresets.json
+++ b/barretenberg/cpp/CMakePresets.json
@@ -140,18 +140,6 @@
         "LDFLAGS": "-fsanitize=thread"
       }
     },
-    {
-      "name": "ubsan",
-      "displayName": "Debugging build with undefined behavior sanitizer on Clang-16",
-      "description": "Build with undefined behavior sanitizer on clang16 with debugging information",
-      "inherits": "clang16-dbg",
-      "binaryDir": "build-ubsan",
-      "environment": {
-        "CFLAGS": "-fsanitize=undefined -fsanitize-recover=unsigned-integer-overflow",
-        "CXXFLAGS": "-fsanitize=undefined -fsanitize-recover=unsigned-integer-overflow",
-        "LDFLAGS": "-fsanitize=undefined -fsanitize-recover=unsigned-integer-overflow"
-      }
-    },
     {
       "name": "msan",
       "displayName": "Debugging build with memory sanitizer on Clang-16",
@@ -348,11 +336,6 @@
       "inherits": "default",
       "configurePreset": "tsan"
     },
-    {
-      "name": "ubsan",
-      "inherits": "default",
-      "configurePreset": "ubsan"
-    },
     {
       "name": "coverage",
       "inherits": "default",
diff --git a/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp b/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp
index 74e1b4aeccca..1815816a3c41 100644
--- a/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp
+++ b/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp
@@ -13,10 +13,4 @@
 #else
 #define BBERG_PROFILE
 #define BBERG_NO_PROFILE
-#endif
-
-#if defined(__clang__)
-#define BBERG_ALLOW_SHIFT_OVERFLOW __attribute__((no_sanitize("shift")))
-#else
-#define BBERG_ALLOW_SHIFT_OVERFLOW
 #endif
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp
index ae96dfd05da0..a0446f179160 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp
@@ -368,20 +368,6 @@ template <class Params> struct alignas(32) field {
         k1.data[1] = t2.data[1];
     }
 
-    /**
-     * @brief Uses split_into_endomorphism_scalars(field, field&, field&) but returns the directly decomposed uint64
-     * pairs representing the uint128.
-     **/
-    static std::array<std::array<uint64_t, 2>, 2> split_into_endomorphism_scalars(const field& k)
-    {
-        // if the modulus is too large, we use more space than we have
-        static_assert(Params::modulus_3 < 0x4000000000000000ULL);
-        field f1;
-        field f2;
-        split_into_endomorphism_scalars(k, f1, f2);
-        return std::array{ std::array{ f1.data[0], f1.data[1] }, std::array{ f2.data[0], f2.data[1] } };
-    }
-
     static void split_into_endomorphism_scalars_384(const field& input, field& k1_out, field& k2_out)
     {
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp
index fecfecb2c505..2deae8323915 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp
@@ -637,13 +637,14 @@ element<Fq, Fr, T> element<Fq, Fr, T>::mul_with_endomorphism(const Fr& exponent)
     }
 
     uint64_t wnaf_table[num_rounds * 2];
-    std::array<std::array<uint64_t, 2>, 2> endo_scalars = Fr::split_into_endomorphism_scalars(converted_scalar);
+    Fr endo_scalar;
+    Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT
 
     bool skew = false;
     bool endo_skew = false;
-    wnaf::fixed_wnaf(endo_scalars[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
-    wnaf::fixed_wnaf(endo_scalars[1], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
 
     element work_element{ T::one_x, T::one_y, Fq::one() };
     work_element.self_set_infinity();
@@ -782,13 +783,14 @@ std::vector<element<Fq, Fr, T>> element<Fq, Fr, T>::batch_mul_with_endomo
     }
 
     uint64_t wnaf_table[num_rounds * 2];
-    std::array<std::array<uint64_t, 2>, 2> endo_scalars = Fr::split_into_endomorphism_scalars(converted_scalar);
+    Fr endo_scalar;
+    Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT
 
     bool skew = false;
     bool endo_skew = false;
-    wnaf::fixed_wnaf(endo_scalars[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
-    wnaf::fixed_wnaf(endo_scalars[1], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
 
     std::vector<element> work_elements(num_points);
 
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp
index 444ec74da92d..c6dbee10eada 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp
@@ -1,12 +1,10 @@
 #pragma once
-#include "barretenberg/common/compiler_hints.hpp"
 #include "barretenberg/numeric/bitop/get_msb.hpp"
 #include <cstdint>
 #include <iostream>
 
 // NOLINTBEGIN(readability-implicit-bool-conversion)
 namespace barretenberg::wnaf {
-using Uint64Pair = std::array<uint64_t, 2>;
 constexpr size_t SCALAR_BITS = 127;
 
 #define WNAF_SIZE(x) ((barretenberg::wnaf::SCALAR_BITS + (x)-1) / (x)) // NOLINT(cppcoreguidelines-macro-usage)
@@ -72,7 +70,7 @@ constexpr size_t get_num_rounds(const size_t num_points)
     return WNAF_SIZE(bits_per_bucket + 1);
 }
 
-template <uint64_t bits, uint64_t bit_position> inline uint64_t get_wnaf_bits_const(const Uint64Pair& scalar) noexcept
+template <uint64_t bits, uint64_t bit_position> inline uint64_t get_wnaf_bits_const(const uint64_t* scalar) noexcept
 {
     if constexpr (bits == 0) {
         return 0ULL;
@@ -106,9 +104,7 @@ template <uint64_t bits, uint64_t bit_position> inline uint64_t get_wnaf_bits_const(
     }
 }
 
-inline uint64_t BBERG_ALLOW_SHIFT_OVERFLOW get_wnaf_bits(const Uint64Pair& scalar,
-                                                         const uint64_t bits,
-                                                         const uint64_t bit_position) noexcept
+inline uint64_t get_wnaf_bits(const uint64_t* scalar, const uint64_t bits, const uint64_t bit_position) noexcept
 {
     /**
      * we want to take a 128 bit scalar and shift it down by (bit_position).
@@ -136,11 +132,8 @@ inline uint64_t get_wnaf_bits(const uint64_t* scalar, const uint64_t bits, const
     return (lo & bit_mask) | (hi & hi_mask);
 }
 
-inline void fixed_wnaf_packed(const Uint64Pair& scalar,
-                              uint64_t* wnaf,
-                              bool& skew_map,
-                              const uint64_t point_index,
-                              const size_t wnaf_bits) noexcept
+inline void fixed_wnaf_packed(
+    const uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const uint64_t point_index, const size_t wnaf_bits) noexcept
 {
     skew_map = ((scalar[0] & 1) == 0);
     uint64_t previous = get_wnaf_bits(scalar, wnaf_bits, 0) + static_cast<uint64_t>(skew_map);
@@ -163,7 +156,7 @@ inline void fixed_wnaf_packed(const Uint64Pair& scalar,
     wnaf[0] = ((slice + predicate) >> 1UL) | (point_index);
 }
 
-inline void fixed_wnaf(const Uint64Pair& scalar,
+inline void fixed_wnaf(const uint64_t* scalar,
                        uint64_t* wnaf,
                        bool& skew_map,
                        const uint64_t point_index,
@@ -219,7 +212,7 @@ inline void fixed_wnaf(const Uint64Pair& scalar,
  * Ok, so we should be a bit more limited with when we don't include window entries.
  * The goal here is to identify short scalars, so we want to identify the most significant non-zero window
  **/
-inline uint64_t get_num_scalar_bits(const Uint64Pair& scalar)
+inline uint64_t get_num_scalar_bits(const uint64_t* scalar)
 {
     const uint64_t msb_1 = numeric::get_msb(scalar[1]);
     const uint64_t msb_0 = numeric::get_msb(scalar[0]);
@@ -259,7 +252,7 @@ inline uint64_t get_num_scalar_bits(const Uint64Pair& scalar)
  * @param num_points Total points in the MSM (2*num_initial_points)
  *
  */
-inline void fixed_wnaf_with_counts(const Uint64Pair& scalar,
+inline void fixed_wnaf_with_counts(const uint64_t* scalar,
                                    uint64_t* wnaf,
                                    bool& skew_map,
                                    uint64_t* wnaf_round_counts,
@@ -333,10 +326,7 @@ inline void fixed_wnaf_with_counts(const Uint64Pair& scalar,
     }
 }
 
 template <size_t num_points, size_t wnaf_bits, size_t round_i>
-inline void wnaf_round(const Uint64Pair& scalar,
-                       uint64_t* wnaf,
-                       const uint64_t point_index,
-                       const uint64_t previous) noexcept
+inline void wnaf_round(uint64_t* scalar, uint64_t* wnaf, const uint64_t point_index, const uint64_t previous) noexcept
 {
     constexpr size_t wnaf_entries = (SCALAR_BITS + wnaf_bits - 1) / wnaf_bits;
     constexpr auto log2_num_points = static_cast<uint64_t>(numeric::get_msb(static_cast<uint64_t>(num_points)));
@@ -361,10 +351,7 @@ inline void wnaf_round(const Uint64Pair& scalar,
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits, size_t round_i>
-inline void wnaf_round(const Uint64Pair& scalar,
-                       uint64_t* wnaf,
-                       const uint64_t point_index,
-                       const uint64_t previous) noexcept
+inline void wnaf_round(uint64_t* scalar, uint64_t* wnaf, const uint64_t point_index, const uint64_t previous) noexcept
 {
     constexpr size_t wnaf_entries = (scalar_bits + wnaf_bits - 1) / wnaf_bits;
     constexpr auto log2_num_points = static_cast<uint64_t>(numeric::get_msb(static_cast<uint64_t>(num_points)));
@@ -390,7 +377,7 @@ inline void wnaf_round(const Uint64Pair& scalar,
 }
 
 template <size_t wnaf_bits, size_t round_i>
-inline void wnaf_round_packed(const Uint64Pair& scalar,
+inline void wnaf_round_packed(const uint64_t* scalar,
                               uint64_t* wnaf,
                               const uint64_t point_index,
                               const uint64_t previous) noexcept
@@ -419,7 +406,7 @@ inline void wnaf_round_packed(const Uint64Pair& scalar,
 }
 
 template <size_t num_points, size_t wnaf_bits>
-inline void fixed_wnaf(const Uint64Pair& scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
+inline void fixed_wnaf(uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
 {
     skew_map = ((scalar[0] & 1) == 0);
     uint64_t previous = get_wnaf_bits_const<wnaf_bits, 0>(scalar) + static_cast<uint64_t>(skew_map);
@@ -427,7 +414,7 @@ inline void fixed_wnaf(const Uint64Pair& scalar, uint64_t* wnaf, bool& skew_map
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits>
-inline void fixed_wnaf(const Uint64Pair& scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
+inline void fixed_wnaf(uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
 {
     skew_map = ((scalar[0] & 1) == 0);
     uint64_t previous = get_wnaf_bits_const<wnaf_bits, 0>(scalar) + static_cast<uint64_t>(skew_map);
@@ -435,7 +422,7 @@ inline void fixed_wnaf(const Uint64Pair& scalar, uint64_t* wnaf, bool& skew_map
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits, size_t round_i>
-inline void wnaf_round_with_restricted_first_slice(const Uint64Pair& scalar,
+inline void wnaf_round_with_restricted_first_slice(uint64_t* scalar,
                                                    uint64_t* wnaf,
                                                    const uint64_t point_index,
                                                    const uint64_t previous) noexcept
@@ -477,7 +464,7 @@ inline void wnaf_round_with_restricted_first_slice(const Uint64Pair& scalar,
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits>
-inline void fixed_wnaf_with_restricted_first_slice(const Uint64Pair& scalar,
+inline void fixed_wnaf_with_restricted_first_slice(uint64_t* scalar,
                                                    uint64_t* wnaf,
                                                    bool& skew_map,
                                                    const size_t point_index) noexcept
@@ -491,7 +478,7 @@ inline void fixed_wnaf_with_restricted_first_slice(const Uint64Pair& scalar,
 }
 
 // template <size_t num_points, size_t wnaf_bits>
-// inline void fixed_wnaf_packed(const Uint64Pair& scalar,
+// inline void fixed_wnaf_packed(const uint64_t* scalar,
 //                               uint64_t* wnaf,
 //                               bool& skew_map,
 //                               const uint64_t point_index) noexcept
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp
index dc3c81f26514..916dd3de9705 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp
@@ -36,7 +36,7 @@ void recover_fixed_wnaf(const uint64_t* wnaf, bool skew, uint64_t& hi, uint64_t&
 
 TEST(wnaf, WnafZero)
 {
-    std::array<uint64_t, 2> buffer{ 0, 0 };
+    uint64_t buffer[2]{ 0, 0 };
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
     wnaf::fixed_wnaf<1, 5>(buffer, wnaf, skew, 0);
@@ -60,7 +60,7 @@ TEST(wnaf, WnafTwoBitWindow)
     constexpr uint32_t num_quads = (num_bits >> 1) + 1;
     uint64_t wnaf[num_quads] = { 0 };
     bool skew = false;
-    barretenberg::wnaf::fixed_wnaf<256, 1, window>({ input.data[0], input.data[1] }, wnaf, skew, 0);
+    barretenberg::wnaf::fixed_wnaf<256, 1, window>(&input.data[0], wnaf, skew, 0);
 
     /**
      * For representing even numbers, we define a skew:
@@ -109,7 +109,7 @@ TEST(wnaf, WnafFixed)
     buffer.data[1] &= 0x7fffffffffffffffUL;
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
-    wnaf::fixed_wnaf<1, 5>({ buffer.data[0], buffer.data[1] }, wnaf, skew, 0);
+    wnaf::fixed_wnaf<1, 5>(&buffer.data[0], wnaf, skew, 0);
     uint64_t recovered_hi = 0;
     uint64_t recovered_lo = 0;
     recover_fixed_wnaf(wnaf, skew, recovered_hi, recovered_lo, 5);
@@ -119,7 +119,7 @@ TEST(wnaf, WnafFixed)
 
 TEST(wnaf, WnafFixedSimpleLo)
 {
-    std::array<uint64_t, 2> rand_buffer = { 0, 1 };
+    uint64_t rand_buffer[2]{ 1, 0 };
     uint64_t wnaf[WNAF_SIZE(5)]{ 0 };
     bool skew = false;
     wnaf::fixed_wnaf<1, 5>(rand_buffer, wnaf, skew, 0);
@@ -132,7 +132,7 @@ TEST(wnaf, WnafFixedSimpleLo)
 
 TEST(wnaf, WnafFixedSimpleHi)
 {
-    std::array<uint64_t, 2> rand_buffer = { 0, 1 };
+    uint64_t rand_buffer[2] = { 0, 1 };
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
     wnaf::fixed_wnaf<1, 5>(rand_buffer, wnaf, skew, 0);
@@ -148,13 +148,16 @@ TEST(wnaf, WnafFixedWithEndoSplit)
     fr k = engine.get_random_uint256();
     k.data[3] &= 0x0fffffffffffffffUL;
 
-    auto [k1, k2] = fr::split_into_endomorphism_scalars(k);
+    fr k1{ 0, 0, 0, 0 };
+    fr k2{ 0, 0, 0, 0 };
+
+    fr::split_into_endomorphism_scalars(k, k1, k2);
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     uint64_t endo_wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
     bool endo_skew = false;
-    wnaf::fixed_wnaf<1, 5>(k1, wnaf, skew, 0);
-    wnaf::fixed_wnaf<1, 5>(k2, endo_wnaf, endo_skew, 0);
+    wnaf::fixed_wnaf<1, 5>(&k1.data[0], wnaf, skew, 0);
+    wnaf::fixed_wnaf<1, 5>(&k2.data[0], endo_wnaf, endo_skew, 0);
 
     fr k1_recovered{ 0, 0, 0, 0 };
     fr k2_recovered{ 0, 0, 0, 0 };
diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
index 92fd36517366..84be01335815 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
@@ -219,23 +219,24 @@ void compute_wnaf_states(uint64_t* point_schedule,
     }
 
     parallel_for(num_threads, [&](size_t i) {
+        Fr T0;
         uint64_t* wnaf_table = &point_schedule[(2 * i) * num_initial_points_per_thread];
         const Fr* thread_scalars = &scalars[i * num_initial_points_per_thread];
         bool* skew_table = &input_skew_table[(2 * i) * num_initial_points_per_thread];
         uint64_t offset = i * num_points_per_thread;
 
         for (uint64_t j = 0; j < num_initial_points_per_thread; ++j) {
-            Fr T0 = thread_scalars[j].from_montgomery_form();
-            std::array<std::array<uint64_t, 2>, 2> endo_scalars = Fr::split_into_endomorphism_scalars(T0);
+            T0 = thread_scalars[j].from_montgomery_form();
+            Fr::split_into_endomorphism_scalars(T0, T0, *(Fr*)&T0.data[2]);
 
-            wnaf::fixed_wnaf_with_counts(endo_scalars[0],
+            wnaf::fixed_wnaf_with_counts(&T0.data[0],
                                          &wnaf_table[(j << 1UL)],
                                          skew_table[j << 1ULL],
                                          &thread_round_counts[i][0],
                                          ((j << 1ULL) + offset) << 32ULL,
                                          num_points,
                                          wnaf_bits);
-            wnaf::fixed_wnaf_with_counts(endo_scalars[1],
+            wnaf::fixed_wnaf_with_counts(&T0.data[2],
                                          &wnaf_table[(j << 1UL) + 1],
                                          skew_table[(j << 1UL) + 1],
                                          &thread_round_counts[i][0],
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
index 6b11078c53d1..55bc21b6cbb9 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
@@ -150,10 +150,10 @@ typename element<C, Fq, Fr, G>::secp256k1_wnaf_pair element<C, Fq, Fr, G>::compu
         k_u256 = k_u256 >> stagger;
         if (is_lo) {
             barretenberg::wnaf::fixed_wnaf(
-                { k_u256.data[0], k_u256.data[1] }, &wnaf_values[0], skew_without_stagger, 0);
+                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
         } else {
             barretenberg::wnaf::fixed_wnaf(
-                { k_u256.data[2], k_u256.data[2] }, &wnaf_values[0], skew_without_stagger, 0);
+                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
         }
 
         // Number of rounds that are needed to reconstruct the scalar without staggered bits
@@ -372,8 +371,7 @@ std::vector<field_t<C>> element<C, Fq, Fr, G>::compute_wnaf(const Fr& scalar)
 
     uint64_t wnaf_values[num_rounds] = { 0 };
     bool skew = false;
-    barretenberg::wnaf::fixed_wnaf(
-        { scalar_multiplier.data[0], scalar_multiplier.data[1] }, &wnaf_values[0], skew, 0);
+    barretenberg::wnaf::fixed_wnaf(&scalar_multiplier.data[0], &wnaf_values[0], skew, 0);
 
     std::vector<field_t<C>> wnaf_entries;
     for (size_t i = 0; i < num_rounds; ++i) {
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp
index 05b9e159f07c..e199c2b817b0 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp
@@ -527,7 +527,7 @@ TEST(stdlib_plookup, secp256k1_generator)
 
     uint64_t wnaf_entries[18] = { 0 };
     bool skew = false;
-    barretenberg::wnaf::fixed_wnaf<129, 1, 8>({ input_value.data[0], input_value.data[1] }, &wnaf_entries[0], skew, 0);
+    barretenberg::wnaf::fixed_wnaf<129, 1, 8>(&input_value.data[0], &wnaf_entries[0], skew, 0);
 
     std::vector<uint64_t> naf_values;
     for (size_t i = 0; i < 17; ++i) {
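
Note on the wNAF form exercised throughout this patch: fixed_wnaf encodes an odd scalar as a fixed number of odd signed windows plus a skew flag that corrects even scalars, and recover_fixed_wnaf in wnaf.test.cpp inverts that encoding. The following self-contained, single-limb C++ round trip is a sketch of the same idea; the helper names are illustrative and are not barretenberg's API (the real code works on two 64-bit limbs and packs point indices and sign bits into each entry).

#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <vector>

// Encode a 64-bit scalar as ceil(64/w) signed odd w-bit digits, most
// significant digit first. Even scalars are bumped to odd and a skew is
// recorded, mirroring the skew_map convention in fixed_wnaf.
std::vector<int64_t> fixed_wnaf_encode(uint64_t scalar, unsigned w, bool& skew)
{
    skew = (scalar & 1) == 0;
    scalar += static_cast<uint64_t>(skew); // now odd; decoder subtracts skew
    const unsigned rounds = (64 + w - 1) / w;
    std::vector<int64_t> digits(rounds);
    int64_t previous = static_cast<int64_t>(scalar & ((1ULL << w) - 1)); // odd
    for (unsigned i = 1; i < rounds; ++i) {
        int64_t slice = static_cast<int64_t>((scalar >> (w * i)) & ((1ULL << w) - 1));
        if ((slice & 1) == 0) {
            slice += 1;             // make this window odd...
            previous -= (1LL << w); // ...and repay the borrow one window down
        }
        digits[rounds - i] = previous;
        previous = slice;
    }
    digits[0] = previous;
    return digits;
}

// Inverse, in the spirit of recover_fixed_wnaf in wnaf.test.cpp.
uint64_t fixed_wnaf_decode(const std::vector<int64_t>& digits, unsigned w, bool skew)
{
    uint64_t acc = 0;
    for (int64_t d : digits) {
        // Negative digits wrap modulo 2^64, which is exactly what we want.
        acc = (acc << w) + static_cast<uint64_t>(d);
    }
    return acc - static_cast<uint64_t>(skew);
}

int main()
{
    bool skew = false;
    for (uint64_t s : { 1ULL, 2ULL, 0x1234abcdULL, 0xffffffffffffffffULL }) {
        std::vector<int64_t> digits = fixed_wnaf_encode(s, 5, skew);
        assert(fixed_wnaf_decode(digits, 5, skew) == s);
        for (int64_t d : digits) { assert((d & 1) == 1); } // every window is odd
    }
    return 0;
}

The borrow step is the same trick as the predicate logic in fixed_wnaf above: when a window comes out even, add 1 to it and subtract 2^w from the window below; both windows stay odd and the encoded value is unchanged.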
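
On the pointer cast this revert reinstates: Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]) hands the function an Fr reference that overlaps limbs 2..3 of endo_scalar, which is the undefined-behaviour concern the reverted commit had addressed by returning the two 128-bit halves as std::array<uint64_t, 2> values. A minimal model of the two calling styles, under stated assumptions: Fr4 is a hypothetical four-limb stand-in for the real field type, and the "split" below is a placeholder copy, not the real endomorphism decomposition.

#include <array>
#include <cstdint>

struct Fr4 {
    uint64_t data[4];
};

// Style restored by this revert: both 128-bit halves live in one Fr4 and the
// high half is written through a reinterpreted pointer. The Fr4 lvalue 'hi'
// overlaps out.data[2..3] and formally extends past the end of 'out'; only
// its first two limbs are written, but the aliased access is the UB concern.
void split_aliasing(const Fr4& k, Fr4& out)
{
    out.data[0] = k.data[0]; // placeholder "low half" (not the real lattice split)
    out.data[1] = k.data[1];
    Fr4& hi = *(Fr4*)&out.data[2]; // the pattern used in the patch above
    hi.data[0] = k.data[2];        // lands in out.data[2]
    hi.data[1] = k.data[3];        // lands in out.data[3]
}

// Style the reverted commit had introduced: return the two uint64 pairs by
// value, with no pointer reinterpretation anywhere.
std::array<std::array<uint64_t, 2>, 2> split_by_value(const Fr4& k)
{
    return { std::array<uint64_t, 2>{ k.data[0], k.data[1] },
             std::array<uint64_t, 2>{ k.data[2], k.data[3] } };
}

The value-returning style costs a copy of four words but gives compilers and sanitizers a well-defined object to reason about; the revert trades that back for the in-place two-halves-in-one-Fr layout that the MSM code above consumes directly.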