diff --git a/src/nunavut/lang/cpp/support/serialization.j2 b/src/nunavut/lang/cpp/support/serialization.j2
index d6656d73..8dd4df54 100644
--- a/src/nunavut/lang/cpp/support/serialization.j2
+++ b/src/nunavut/lang/cpp/support/serialization.j2
@@ -85,10 +85,41 @@ using std::bit_cast;
 #endif // def __cpp_lib_bit_cast
-
+#ifdef NUNAVUT_USE_SPAN_LITE
 using nonstd::span;
+#else
+
+template<typename T>
+class span{
+    T* ptr_;
+    {{ typename_unsigned_length }} size_;
+public:
+    template<{{ typename_unsigned_length }} N>
+    span(std::array<T, N>& data): ptr_(data.data()), size_(data.size()){}
+    template<{{ typename_unsigned_length }} N>
+    span(const std::array<T, N>& data): ptr_(data.data()), size_(data.size()){}
+    template<{{ typename_unsigned_length }} N>
+    span(T (&data)[N]): ptr_(data), size_(N){}
+    span(T* ptr, {{ typename_unsigned_length }} size): ptr_(ptr), size_(size){}
+
+    T* data(){ return ptr_;}
+    T* data() const { return ptr_;}
+    {{ typename_unsigned_length }} size() const{ return size_; }
+    T& operator[]({{ typename_unsigned_length }} index){
+        {{ assert('index < size_') }}
+        return ptr_[index];
+    }
+    T& operator[]({{ typename_unsigned_length }} index) const {
+        {{ assert('index < size_') }}
+        return ptr_[index];
+    }
+};
+
+#endif
 using bytespan = span<{{ typename_byte }}> ;
 using const_bytespan = span<const {{ typename_byte }}> ;
+
+
 #if span_FEATURE( MAKE_SPAN )
 using nonstd::make_span;
 #endif // span_FEATURE( MAKE_SPAN )
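
A minimal usage sketch of the fallback span above (not part of the patch). It assumes the generated header is named nunavut/support/serialization.hpp, that these aliases live in nunavut::support, that {{ typename_byte }} resolves to std::uint8_t, and that NUNAVUT_USE_SPAN_LITE is not defined so the fallback is in effect:

    // Hypothetical usage of the fallback span; assumptions listed above.
    #include "nunavut/support/serialization.hpp"
    #include <array>
    #include <cstdint>

    int main(){
        std::array<std::uint8_t, 4> storage{{1U, 2U, 3U, 4U}};
        nunavut::support::bytespan bytes(storage);      // std::array constructor overload
        bytes[0] = 0xFFU;                               // operator[] asserts index < size()
        nunavut::support::const_bytespan view(storage.data(), storage.size());
        return static_cast<int>(view.size());           // 4
    }
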
@@ -106,15 +137,91 @@ enum class Error{
     REPRESENTATION_BAD_DELIMITER_HEADER=12
 };
-inline tl::unexpected<Error> operator-(const Error& e){
-    return tl::unexpected<Error>{e};
-}
-
+#ifdef NUNAVUT_USE_TL_EXPECTED
+template<typename Err>
+using unexpected = tl::unexpected<Err>;
 template<typename Ret>
 using Result = tl::expected<Ret, Error>;
+#else
+template<typename Err>
+struct unexpected{
+    Err value;
+
+    explicit unexpected(Err e):value(e){}
+};
+
+/// This is a dumbed-down version of C++23 std::expected, better suited for
+/// embedded applications. It never throws, but uses NUNAVUT_ASSERT to signal
+/// exceptional cases.
+/// All operations on Ret are expected to be non-throwing.
+template<typename Ret>
+class expected{
+    // The storage must be large enough and suitably aligned for the larger of the two types.
+    using storage_t = typename std::aligned_storage<
+        (std::max(sizeof(Ret), sizeof(Error))),
+        (std::max(alignof(Ret), alignof(Error)))>::type;
+    storage_t storage;
+    bool is_expected_;
+private:
+    Ret* ret_ptr() { return reinterpret_cast<Ret*>(&storage); }
+    const Ret* ret_ptr() const { return reinterpret_cast<const Ret*>(&storage); }
+    Error* error_ptr() { return reinterpret_cast<Error*>(&storage); }
+    const Error* error_ptr() const { return reinterpret_cast<const Error*>(&storage); }
+public:
+    expected():is_expected_(true){ new(ret_ptr()) Ret(); }
+    expected(Ret r):is_expected_(true){ new(ret_ptr()) Ret(std::move(r)); }
+    expected& operator=(Ret other){ this->~expected(); return *new(this) expected(std::move(other)); }
+    expected(unexpected<Error> err):is_expected_(false){ new(error_ptr()) Error(std::move(err.value)); }
+    expected(const expected& other): is_expected_(other.is_expected_){
+        if(is_expected_){ new(ret_ptr()) Ret(*other.ret_ptr()); }
+        else { new(error_ptr()) Error(*other.error_ptr()); }
+    }
+    expected& operator=(const expected& other){
+        this->~expected(); return *new(this) expected(other);
+    }
+    ~expected(){
+        if(is_expected_){ ret_ptr()->~Ret(); }
+        else{ error_ptr()->~Error(); }
+    }
+
+    Ret& value(){ {{ assert('is_expected_') }} return *ret_ptr(); }
+    const Ret& value() const { {{ assert('is_expected_') }} return *ret_ptr(); }
+    Ret& operator*(){ return value(); }
+    const Ret& operator*()const { return value(); }
+    Ret* operator->(){ {{ assert('is_expected_') }} return ret_ptr(); }
+    const Ret* operator->() const { {{ assert('is_expected_') }} return ret_ptr(); }
+    Error& error(){ {{ assert('not is_expected_') }} return *error_ptr(); }
+    const Error& error() const { {{ assert('not is_expected_') }} return *error_ptr(); }
+
+    bool has_value() const { return is_expected_; }
+    operator bool() const { return has_value(); }
+};
+
+template<>
+class expected<void>{
+    using underlying_type = typename std::underlying_type<Error>::type;
+    underlying_type e;
+public:
+    expected():e(0){}
+    expected(unexpected<Error> err):e(static_cast<underlying_type>(err.value)){ }
+    Error error() const { {{ assert('not has_value()') }} return static_cast<Error>(e); }
+
+    bool has_value() const { return e == 0; }
+    operator bool() const { return has_value(); }
+};
+
+template<typename Ret>
+using Result = expected<Ret>;
+#endif
+
+
 using VoidResult = Result<void>;
 using SerializeResult = Result<{{ typename_unsigned_length }}>;
+inline unexpected<Error> operator-(const Error& e){
+    return unexpected<Error>{e};
+}
+
 namespace options {
 {% for key, value in options.items() -%}
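
A minimal sketch of how the fallback Result/expected machinery above is meant to be consumed (illustrative only; it assumes the generated header name and the nunavut::support namespace):

    #include "nunavut/support/serialization.hpp"
    #include <cstdint>

    using nunavut::support::Error;

    // Returning a value uses the implicit expected(Ret) constructor; returning
    // -Error::X goes through operator-(Error) -> unexpected<Error> -> expected.
    nunavut::support::Result<std::uint32_t> half(std::uint32_t x){
        if ((x % 2U) != 0U){
            return -Error::SERIALIZATION_INVALID_ARGUMENT;
        }
        return x / 2U;
    }

    int main(){
        auto r = half(7U);
        if (!r){                                  // operator bool -> has_value()
            return static_cast<int>(r.error());   // no exceptions anywhere
        }
        return static_cast<int>(r.value());
    }
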
diff --git a/src/nunavut/lang/cpp/templates/deserialization.j2 b/src/nunavut/lang/cpp/templates/deserialization.j2
index e6aecb82..1aab0783 100644
--- a/src/nunavut/lang/cpp/templates/deserialization.j2
+++ b/src/nunavut/lang/cpp/templates/deserialization.j2
@@ -137,13 +137,77 @@
 {# ----------------------------------------------------------------------------------------------------------------- #}
 {% macro _deserialize_fixed_length_array(t, reference, offset) %}
-    (void)({{reference}});
+{# SPECIAL CASE: PACKED BIT ARRAY #}
+{#{% if t.element_type is BooleanType %}
+    nunavutGetBits(&{{ reference }}_bitpacked_[0], &buffer[0], capacity_bytes, offset_bits, {{ t.capacity }}UL);
+    offset_bits += {{ t.capacity }}UL;
+#}
+{# SPECIAL CASE: BYTES-LIKE ARRAY #}
+{#{% elif t.element_type is PrimitiveType and t.element_type.bit_length == 8 and t.element_type is zero_cost_primitive %}
+    nunavutGetBits(&{{ reference }}[0], &buffer[0], capacity_bytes, offset_bits, {{ t.capacity }}UL * 8U);
+    offset_bits += {{ t.capacity }}UL * 8U;
+#}
+{# SPECIAL CASE: ZERO-COST PRIMITIVES #}
+{#{% elif t.element_type is PrimitiveType and t.element_type is zero_cost_primitive %}
+    {% if t.element_type is FloatType %}
+    static_assert(NUNAVUT_PLATFORM_IEEE754_FLOAT, "Native IEEE754 binary32 required. TODO: relax constraint");
+    {% if t.element_type.bit_length > 32 %}
+    static_assert(NUNAVUT_PLATFORM_IEEE754_DOUBLE, "Native IEEE754 binary64 required. TODO: relax constraint");
+    {% endif %}
+    {% endif %}
+    nunavutGetBits(&{{ reference }}[0], &buffer[0], capacity_bytes, offset_bits, {# -#}
+{#      {{ t.capacity }}UL * {{ t.element_type.bit_length }}U);
+    offset_bits += {{ t.capacity }}UL * {{ t.element_type.bit_length }}U;
+#}
+{# GENERAL CASE #}
+{#{% else %}#}
+    {# Element offset is the superposition of each individual element offset plus the array's own offset.
+     # For example, an array like uint8[3] offset by 16 bits would have its element_offset = {16, 24, 32}.
+     # We can also unroll element deserialization for small arrays (e.g., below ~10 elements) to take advantage of
+     # spurious alignment of elements, but the benefit of such optimization is believed to be negligible. #}
+    {% set element_offset = offset + t.element_type.bit_length_set.repeat_range(t.capacity - 1) %}
+    {% set ref_index = 'index'|to_template_unique_name %}
+    for (size_t {{ ref_index }} = 0U; {{ ref_index }} < {{ t.capacity }}UL; ++{{ ref_index }})
+    {
+        {{ _deserialize_any(t.element_type, reference + ('[%s]'|format(ref_index)), element_offset)|trim|indent }}
+    }
+    {# Size cannot be checked here because if the implicit zero extension rule is applied it won't match. #}
+{#{% endif %}#}
 {% endmacro %}
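
The element_offset superposition described in the comment above can be made concrete with plain numbers: for uint8[3] starting at bit 16, the elements begin at bits {16, 24, 32}. A standalone arithmetic sketch (plain C++, not generated code):

    #include <cstddef>

    int main(){
        const std::size_t array_offset_bits = 16U;  // the array's own offset
        const std::size_t element_bits      = 8U;   // uint8 element width
        std::size_t sum = 0U;
        for (std::size_t index = 0U; index < 3U; ++index)
        {
            // 16, 24, 32: base offset plus the bits consumed by preceding elements
            sum += array_offset_bits + index * element_bits;
        }
        return static_cast<int>(sum);  // 72
    }
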
 
 {# ----------------------------------------------------------------------------------------------------------------- #}
 {% macro _deserialize_variable_length_array(t, reference, offset) %}
-    (void)({{reference}});
+    {
+    {# DESERIALIZE THE IMPLICIT ARRAY LENGTH FIELD #}
+    {% set ref_size = 'size'|to_template_unique_name %}
+    // Array length prefix: {{ t.length_field_type }}
+    {{ _deserialize_integer(t.length_field_type, ('const %s %s'|format((t.length_field_type | declaration), ref_size)), offset) }}
+    if ({{ ref_size }} > {{ t.capacity }}U)
+    {
+        return -nunavut::support::Error::REPRESENTATION_BAD_ARRAY_LENGTH;
+    }
+    {{ reference }}.resize({{ ref_size }});
+
+{# COMPUTE THE ARRAY ELEMENT OFFSETS #}
+{# NOTICE: The offset is no longer valid at this point because we just emitted the array length prefix. #}
+{% set element_offset = offset + t.bit_length_set %}
+{% set first_element_offset = offset + t.length_field_type.bit_length %}
+{% assert (element_offset.min) == (first_element_offset.min) %}
+{% if first_element_offset.is_aligned_at_byte() %}
+    {{ assert('in_buffer.offset_alings_to_byte()') }}
+{% endif %}
+    {# GENERAL CASE #}
+    {% set ref_index = 'index'|to_template_unique_name %}
+    for (size_t {{ ref_index }} = 0U; {{ ref_index }} < {{ reference }}.size(); ++{{ ref_index }})
+    {
+        {{
+            _deserialize_any(t.element_type, reference + ('[%s]'|format(ref_index)), element_offset)
+            |trim|indent
+        }}
+    }
+
+    }
 {% endmacro %}
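
For the variable-length case above, the generated code's shape is roughly the following (a hand-written sketch, not template output; the demo_* name and the use of std::vector in place of the generated variable-length array type are assumptions):

    #include <cstdint>
    #include <vector>
    #include "nunavut/support/serialization.hpp"

    // Mirrors the emitted guard: reject a length prefix exceeding t.capacity
    // (hard-coded to 4 here), then size the array before element deserialization.
    nunavut::support::VoidResult demo_apply_prefix(std::vector<std::uint8_t>& out,
                                                   std::uint32_t length_prefix)
    {
        if (length_prefix > 4U)
        {
            return -nunavut::support::Error::REPRESENTATION_BAD_ARRAY_LENGTH;
        }
        out.resize(length_prefix);
        // the per-element _deserialize_any expansions would run here
        return nunavut::support::VoidResult{};
    }
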
diff --git a/src/nunavut/lang/cpp/templates/serialization.j2 b/src/nunavut/lang/cpp/templates/serialization.j2
index c5addcdb..0ada219f 100644
--- a/src/nunavut/lang/cpp/templates/serialization.j2
+++ b/src/nunavut/lang/cpp/templates/serialization.j2
@@ -226,13 +226,93 @@
 {# ----------------------------------------------------------------------------------------------------------------- #}
 {% macro _serialize_fixed_length_array(t, reference, offset) %}
-    (void)({{reference}});
+{# SPECIAL CASE: PACKED BIT ARRAY #}
+{#{% if t.element_type is BooleanType %}
+    {% if offset.is_aligned_at_byte() %}
+    // Optimization prospect: this item is aligned at the byte boundary, so it is possible to use memmove().
+    {% endif %}
+    nunavutCopyBits(&buffer[0], offset_bits, {{ t.capacity }}UL, &{{ reference }}_bitpacked_[0], 0U);
+    out_buffer.add_offset({{ t.capacity }}UL);
+#}
+{# SPECIAL CASE: BYTES-LIKE ARRAY #}
+{#
+{% elif t.element_type is PrimitiveType and t.element_type.bit_length == 8 and t.element_type is zero_cost_primitive %}
+    {% if offset.is_aligned_at_byte() %}
+    // Optimization prospect: this item is aligned at the byte boundary, so it is possible to use memmove().
+    {% endif %}
+    nunavutCopyBits(&buffer[0], offset_bits, {{ t.capacity }}UL * 8U, &{{ reference }}[0], 0U);
+    out_buffer.add_offset({{ t.capacity }}UL * 8U);
+#}
+{# SPECIAL CASE: ZERO-COST PRIMITIVES #}
+{#
+{% elif t.element_type is PrimitiveType and t.element_type is zero_cost_primitive %}
+    // Saturation code not emitted -- assume the native representation is conformant.
+    {% if t.element_type is FloatType %}
+    static_assert(NUNAVUT_PLATFORM_IEEE754_FLOAT, "Native IEEE754 binary32 required. TODO: relax constraint");
+    {% if t.element_type.bit_length > 32 %}
+    static_assert(NUNAVUT_PLATFORM_IEEE754_DOUBLE, "Native IEEE754 binary64 required. TODO: relax constraint");
+    {% endif %}
+    {% endif %}
+    {% if offset.is_aligned_at_byte() %}
+    // Optimization prospect: this item is aligned at the byte boundary, so it is possible to use memmove().
+    {% endif %}
+    nunavutCopyBits(&buffer[0], offset_bits, {{ t.capacity }}UL * {{ t.element_type.bit_length }}UL, {# -#}
+{#      &{{ reference }}[0], 0U);
+    out_buffer.add_offset({{ t.capacity }}UL * {{ t.element_type.bit_length }}UL);
+#}
+{# GENERAL CASE #}
+{# {% else %} #}
+    {% set ref_origin_offset = 'origin'|to_template_unique_name %}
+    const {{ typename_unsigned_bit_length }} {{ ref_origin_offset }} = out_buffer.offset();
+    {# Element offset is the superposition of each individual element offset plus the array's own offset.
+     # For example, an array like uint8[3] offset by 16 bits would have its element_offset = {16, 24, 32}.
+     # We can also unroll element serialization for small arrays (e.g., below ~10 elements) to take advantage of
+     # spurious alignment of elements, but the benefit of such optimization is believed to be negligible. #}
+    {% set element_offset = offset + t.element_type.bit_length_set.repeat_range(t.capacity - 1) %}
+    {% set ref_index = 'index'|to_template_unique_name %}
+    for (size_t {{ ref_index }} = 0U; {{ ref_index }} < {{ t.capacity }}UL; ++{{ ref_index }})
+    {
+        {{ _serialize_any(t.element_type, reference + ('[%s]'|format(ref_index)), element_offset)|trim|indent }}
+    }
+    // It is assumed that we know the exact type of the serialized entity, hence we expect the size to match.
+    {% if not t.bit_length_set.fixed_length %}
+    {{ assert('(out_buffer.offset() - %s) >= %sULL'|format(ref_origin_offset, t.bit_length_set.min)) }}
+    {{ assert('(out_buffer.offset() - %s) <= %sULL'|format(ref_origin_offset, t.bit_length_set.max)) }}
+    {% else %}
+    {{ assert('(out_buffer.offset() - %s) == %sULL'|format(ref_origin_offset, t.bit_length_set.max)) }}
+    {% endif %}
+    (void) {{ ref_origin_offset }};
+{# {% endif %} #}
 {% endmacro %}
 
 {# ----------------------------------------------------------------------------------------------------------------- #}
 {% macro _serialize_variable_length_array(t, reference, offset) %}
-    (void)({{reference}});
+    if ({{ reference }}.size() > {{ t.capacity }})
+    {
+        return -nunavut::support::Error::REPRESENTATION_BAD_ARRAY_LENGTH;
+    }
+    // Array length prefix: {{ t.length_field_type }}
+    {{ _serialize_integer(t.length_field_type, reference + '.size()', offset) }}
+
+{# COMPUTE THE ARRAY ELEMENT OFFSETS #}
+{# NOTICE: The offset is no longer valid at this point because we just emitted the array length prefix. #}
+{% set element_offset = offset + t.bit_length_set %}
+{% set first_element_offset = offset + t.length_field_type.bit_length %}
+{% assert (element_offset.min) == (first_element_offset.min) %}
+{% if first_element_offset.is_aligned_at_byte() %}
+    {{ assert('out_buffer.offset_alings_to_byte()') }}
+{% endif %}
+
+{# GENERAL CASE #}
+    {% set ref_index = 'index'|to_template_unique_name %}
+    for (size_t {{ ref_index }} = 0U; {{ ref_index }} < {{ reference }}.size(); ++{{ ref_index }})
+    {
+        {{
+            _serialize_any(t.element_type, reference + ('[%s]'|format(ref_index)), element_offset)
+            |trim|indent
+        }}
+    }
 {% endmacro %}
@@ -281,5 +361,5 @@
     {% endif %}
     out_buffer.add_offset({{ ref_size_bytes }} * 8U);
-    {{ assert('out_buffer.size() >= 0') }}
+    // {{ assert('out_buffer.size() >= 0') }}
 {% endmacro %}
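
The serialization side mirrors the deserialization guard before emitting the length prefix; a condensed sketch under the same assumptions (hypothetical demo_* wrapper, std::vector standing in for the generated array type). The final hunk above also comments out the out_buffer.size() >= 0 assert, presumably because size() returns an unsigned type, which makes the check vacuously true:

    #include <cstdint>
    #include <vector>
    #include "nunavut/support/serialization.hpp"

    nunavut::support::SerializeResult demo_check_capacity(const std::vector<std::uint8_t>& in)
    {
        if (in.size() > 4U)  // t.capacity for a uint8[<=4] field
        {
            return -nunavut::support::Error::REPRESENTATION_BAD_ARRAY_LENGTH;
        }
        // the length prefix and the elements would be serialized into out_buffer here
        return static_cast<std::uint32_t>(in.size());
    }
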
diff --git a/verification/cpp/suite/test_bitarray.cpp b/verification/cpp/suite/test_bitarray.cpp
index e262e813..5380c413 100644
--- a/verification/cpp/suite/test_bitarray.cpp
+++ b/verification/cpp/suite/test_bitarray.cpp
@@ -22,7 +22,7 @@ TEST(BitSpan, Constructor) {
         ASSERT_EQ(sp.size(), 5U*8U);
     }
     const uint8_t csrcVar = 0x8F;
-    const std::array<uint8_t, 5> csrcArray{ 1, 2, 3, 4, 5 };
+    const std::array<const uint8_t, 5> csrcArray{ 1, 2, 3, 4, 5 };
     {
         nunavut::support::const_bitspan sp{{&csrcVar, 1}};
         ASSERT_EQ(sp.size(), 1U*8U);
     }
@@ -72,23 +72,23 @@ TEST(BitSpan, Subspan)
 TEST(BitSpan, AlignedPtr) {
     std::array<uint8_t, 5> srcArray{ 1, 2, 3, 4, 5 };
     {
-        auto actualPtr = nunavut::support::bitspan{srcArray}.aligned_ptr();
+        auto actualPtr = nunavut::support::bitspan(srcArray).aligned_ptr();
         ASSERT_EQ(actualPtr, srcArray.data());
     }
     {
-        auto actualPtr = nunavut::support::bitspan{srcArray, 1}.aligned_ptr();
+        auto actualPtr = nunavut::support::bitspan(srcArray, 1).aligned_ptr();
         ASSERT_EQ(actualPtr, srcArray.data());
     }
     {
-        auto actualPtr = nunavut::support::bitspan{srcArray, 5}.aligned_ptr();
+        auto actualPtr = nunavut::support::bitspan(srcArray, 5).aligned_ptr();
         ASSERT_EQ(actualPtr, srcArray.data());
     }
     {
-        auto actualPtr = nunavut::support::bitspan{srcArray, 7}.aligned_ptr();
+        auto actualPtr = nunavut::support::bitspan(srcArray, 7).aligned_ptr();
         ASSERT_EQ(actualPtr, srcArray.data());
     }
     {
-        auto actualPtr = nunavut::support::bitspan{srcArray}.aligned_ptr(8);
+        auto actualPtr = nunavut::support::bitspan(srcArray).aligned_ptr(8);
         ASSERT_EQ(actualPtr, &srcArray[1]);
     }
 }
@@ -103,10 +103,19 @@ TEST(BitSpan, TestSize) {
         nunavut::support::bitspan sp{src, 1};
         ASSERT_EQ(sp.size(), 5U*8U - 1U);
     }
+    std::array<const uint8_t, 5> csrc{ 1, 2, 3, 4, 5 };
+    {
+        nunavut::support::const_bitspan sp{csrc};
+        ASSERT_EQ(sp.size(), 5U*8U);
+    }
+    {
+        nunavut::support::const_bitspan sp{csrc, 1};
+        ASSERT_EQ(sp.size(), 5U*8U - 1U);
+    }
 }
 
 TEST(BitSpan, CopyBits) {
-    std::array<uint8_t, 5> src{ 1, 2, 3, 4, 5 };
+    std::array<const uint8_t, 5> src{ 1, 2, 3, 4, 5 };
     std::array<uint8_t, 5> dst{};
     memset(dst.data(), 0, dst.size());
 
@@ -118,7 +127,7 @@
 }
 
 TEST(BitSpan, CopyBitsWithAlignedOffset) {
-    std::array<uint8_t, 5> src{ 0x11, 0x22, 0x33, 0x44, 0x55 };
+    std::array<const uint8_t, 5> src{ 0x11, 0x22, 0x33, 0x44, 0x55 };
     std::array<uint8_t, 5> dst{};
     memset(dst.data(), 0, dst.size());
 
@@ -153,19 +162,19 @@
 }
 
 TEST(BitSpan, CopyBitsWithAlignedOffsetNonByteLen) {
-    std::array<uint8_t, 7> src{ 0x0, 0x0, 0x11, 0x22, 0x33, 0x44, 0x55 };
+    std::array<const uint8_t, 7> src{ 0x0, 0x0, 0x11, 0x22, 0x33, 0x44, 0x55 };
     std::array<uint8_t, 1> dst{};
     memset(dst.data(), 0, dst.size());
 
-    nunavut::support::const_bitspan(src, 2U * 8U).copyTo(nunavut::support::bitspan{dst}, 4);
+    nunavut::support::const_bitspan({src}, 2U * 8U).copyTo(nunavut::support::bitspan{dst}, 4);
     ASSERT_EQ(0x1U, dst[0]);
 
-    nunavut::support::const_bitspan(src, 3U * 8U).copyTo(nunavut::support::bitspan{dst}, 4);
+    nunavut::support::const_bitspan({src}, 3U * 8U).copyTo(nunavut::support::bitspan{dst}, 4);
     ASSERT_EQ(0x2U, dst[0]);
 }
 
 TEST(BitSpan, CopyBitsWithUnalignedOffset){
-    std::array<uint8_t, 6> src{ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA };
+    std::array<const uint8_t, 6> src{ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA };
     std::array<uint8_t, 6> dst{};
     memset(dst.data(), 0, dst.size());
 
@@ -193,7 +202,7 @@ TEST(BitSpan, SaturateBufferFragmentBitLength)
 {
     using namespace nunavut::support;
-    std::array<uint8_t, 4> data{};
+    std::array<const uint8_t, 4> data{};
     ASSERT_EQ(32U, const_bitspan(data, 0U).saturateBufferFragmentBitLength(32));
     ASSERT_EQ(31U, const_bitspan(data, 1U).saturateBufferFragmentBitLength(32));
     ASSERT_EQ(16U, const_bitspan(data, 0U).saturateBufferFragmentBitLength(16));
@@ -204,7 +213,7 @@
 
 TEST(BitSpan, GetBits)
 {
-    std::array<uint8_t, 15> src{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
+    std::array<const uint8_t, 15> src{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
     std::array<uint8_t, 16> dst{};
     memset(dst.data(), 0xAA, dst.size());
     nunavut::support::const_bitspan{{src.data(), 6U}, 0}.getBits(dst, 0);
@@ -305,7 +314,7 @@ TEST(BitSpan, SetIxx_bufferOverflow)
     ASSERT_EQ(0xAA, buffer[2]);
     rc = nunavut::support::bitspan{{buffer, 2U}, 2U*8U}.setIxx(0xAA, 8);
     ASSERT_FALSE(rc);
-    ASSERT_EQ(-nunavut::support::Error::SERIALIZATION_BUFFER_TOO_SMALL, rc);
+    ASSERT_EQ(nunavut::support::Error::SERIALIZATION_BUFFER_TOO_SMALL, rc.error());
     ASSERT_EQ(0xAA, buffer[2]);
 }
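
With the minimal expected in use, the tests above check failures via rc.error() instead of comparing rc against a negated Error (the fallback type defines no such comparison operator). A condensed sketch of the pattern, mirroring the SetIxx_bufferOverflow change (hypothetical test name):

    #include <cstdint>
    #include "gmock/gmock.h"
    #include "nunavut/support/serialization.hpp"

    TEST(BitSpanSketch, ErrorChecking)  // illustrative, not part of the patch
    {
        std::uint8_t buffer[2] = {0U, 0U};
        // Writing 8 bits at a bit offset equal to the buffer size must fail.
        auto rc = nunavut::support::bitspan{{buffer, 2U}, 2U * 8U}.setIxx(0xAA, 8);
        ASSERT_FALSE(rc);  // operator bool reports has_value()
        ASSERT_EQ(nunavut::support::Error::SERIALIZATION_BUFFER_TOO_SMALL, rc.error());
    }
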
diff --git a/verification/cpp/suite/test_helpers.hpp b/verification/cpp/suite/test_helpers.hpp
index d17974f3..3149a791 100644
--- a/verification/cpp/suite/test_helpers.hpp
+++ b/verification/cpp/suite/test_helpers.hpp
@@ -9,18 +9,18 @@
 #include "gmock/gmock.h"
 #include "nunavut/support/serialization.hpp"
 
-#if __cplusplus > 201703L
-#include "magic_enum.hpp"
 testing::Message& operator<<(testing::Message& s, const nunavut::support::Error& e){
-    s << magic_enum::enum_name(e);
-    return s;
-}
-#else
-testing::Message& operator<<(testing::Message& s, const nunavut::support::Error& e){
-    s << static_cast<std::underlying_type_t<nunavut::support::Error>>(e);
+    using namespace nunavut::support;
+    switch(e){
+    case Error::SERIALIZATION_INVALID_ARGUMENT: s << "SERIALIZATION_INVALID_ARGUMENT"; break;
+    case Error::SERIALIZATION_BUFFER_TOO_SMALL: s << "SERIALIZATION_BUFFER_TOO_SMALL"; break;
+    case Error::REPRESENTATION_BAD_ARRAY_LENGTH: s << "REPRESENTATION_BAD_ARRAY_LENGTH"; break;
+    case Error::REPRESENTATION_BAD_UNION_TAG: s << "REPRESENTATION_BAD_UNION_TAG"; break;
+    case Error::REPRESENTATION_BAD_DELIMITER_HEADER: s << "REPRESENTATION_BAD_DELIMITER_HEADER"; break;
+    }
     return s;
 }
-#endif
+
 
 namespace nunavut{
 namespace testing{
diff --git a/verification/cpp/suite/test_serialization.cpp b/verification/cpp/suite/test_serialization.cpp
index 56f1f556..ea99e14e 100644
--- a/verification/cpp/suite/test_serialization.cpp
+++ b/verification/cpp/suite/test_serialization.cpp
@@ -82,7 +82,6 @@ TEST(Serialization, BasicSerialize) {
 /// print('\n'.join(f'0x{x:02X}U,' for x in sr))
 TEST(Serialization, StructReference)
 {
-    return;
     regulated::basics::Struct__0_1 obj{};
 
     // Initialize a reference object, serialize, and compare against the reference serialized representation.
@@ -120,80 +119,79 @@
     obj.unaligned_bitpacked_lt3.emplace_back(0);
     ASSERT_EQ(2U, obj.unaligned_bitpacked_lt3.size());   // 0b01, rest truncated
     obj.delimited_var_2[0].set_f16(+1e9F);               // truncated to infinity
-    obj.delimited_var_2[1].set_f16(-1e40);               // retained
+    obj.delimited_var_2[1].set_f64(-1e40);               // retained
     obj.aligned_bitpacked_le3.emplace_back(1);
     ASSERT_EQ(1U, obj.aligned_bitpacked_le3.size());     // only lsb is set, other truncated
 
     const uint8_t reference[] = {
-        0xFEU,  // void1, true, 6 lsb of int10 = 511
-        0x07U,  // 4 msb of int10 = 511, 4 lsb of -512 = 0b_10_0000_0000
-        0x60U,  // 6 msb of -512 (0x60 = 0b_0110_0000), 2 lsb of 0x0055 = 0b0001010101
-        0x15U,  // 8 msb of 0b_00_0101_0101, 0x15 = 0b00010101
-        0x56U,  // ALIGNED; -0x00AA in two's complement is 0x356 = 0b_11_01010110
-        0x0BU,  // 2 msb of the above (0b11) followed by 8 bit of length prefix (2) of float16[<=2] f16_le2
-        0xFCU,  // 2 msb of the length prefix followed by 6 lsb of (float16.min = 0xfbff = 0b_11111011_11111111)
-        0xEFU,  // 0b_xx_111011_11xxxxxx (continuation of the float16)
-        0x03U,  // 2 msb of the above (0b11) and the next float16 = +inf, represented 0x7C00 = 0b_01111100_00000000
-        0xF0U,  // 0b_xx111100_00xxxxxx (continuation of the infinity)
-        0x15U,  // 2 msb of the above (0b01) followed by bool[3] unaligned_bitpacked_3 = [1, 0, 1], then PADDING
-        0x02U,  // ALIGNED; empty struct not manifested, here we have length = 2 of uint8[<3] bytes_lt3
-        0x6FU,  // bytes_lt3[0] = 111
-        0xDEU,  // bytes_lt3[1] = 222
-        0x89U,  // bytes_3[0] = -0x77 (two's complement)
-        0xEFU,  // bytes_3[1] = -0x11 (two's complement)
-        0x77U,  // bytes_3[2] = +0x77
-        0x03U,  // length = 3 of truncated uint2[<=4] u2_le4
-        0x36U,  // 0b_00_11_01_10: u2_le4[0] = 0b10, u2_le4[1] = 0b01, u2_le4[2] = 0b11, then dynamic padding
-        0x01U,  // ALIGNED; length = 1 of DelimitedFixedSize.0.1[<=2] delimited_fix_le2
-        0x00U,  // Constant DH of DelimitedFixedSize.0.1
-        0x00U,  // ditto
-        0x00U,  // ditto
-        0x00U,  // ditto
-        0x34U,  // uint16[2] u16_2; first element = 0x1234
-        0x12U,  // continuation
-        0x78U,  // second element = 0x5678
-        0x56U,  // continuation
-        0x11U,  // bool[3] aligned_bitpacked_3 = [1, 0, 0]; then 5 lsb of length = 2 of bool[<3] unaligned_bitpacked_lt3
-        0x08U,  // 3 msb of length = 2 (i.e., zeros), then values [1, 0], then 1 bit of padding before composite
-        0x03U,  // DH = 3 of the first element of DelimitedVariableSize.0.1[2] delimited_var_2
-        0x00U,  // ditto
-        0x00U,  // ditto
-        0x00U,  // ditto
-        0x00U,  // union tag = 0, f16 selected
-        0x00U,  // f16 truncated to positive infinity; see representation above
-        0x7CU,  // ditto
-        0x09U,  // DH = (8 + 1) of the second element of DelimitedVariableSize.0.1[2] delimited_var_2
-        0x00U,  // ditto
-        0x00U,  // ditto
-        0x00U,  // ditto
-        0x02U,  // union tag = 2, f64 selected (notice that union tags are always aligned by design)
-        0xA5U,  // float64 = -1e40 is 0xc83d6329f1c35ca5, this is the LSB
-        0x5CU,  // ditto
-        0xC3U,  // ditto
-        0xF1U,  // ditto
-        0x29U,  // ditto
-        0x63U,  // ditto
-        0x3DU,  // ditto
-        0xC8U,  // ditto
-        0x01U,  // length = 1 of bool[<=3] aligned_bitpacked_le3
-        0x01U,  // the one single bit of the above, then 7 bits of dynamic padding to byte
-        // END OF SERIALIZED REPRESENTATION
-        0x55U,  // canary 1
-        0x55U,  // canary 2
-        0x55U,  // canary 3
-        0x55U,  // canary 4
-        0x55U,  // canary 5
-        0x55U,  // canary 6
-        0x55U,  // canary 7
-        0x55U,  // canary 8
-        0x55U,  // canary 9
-        0x55U,  // canary 10
-        0x55U,  // canary 11
-        0x55U,  // canary 12
-        0x55U,  // canary 13
-        0x55U,  // canary 14
-        0x55U,  // canary 15
-        0x55U,  // canary 16
+        0xFEU,  // byte 0: void1, true, 6 lsb of int10 = 511
+        0x07U,  // byte 1: 4 msb of int10 = 511, 4 lsb of -512 = 0b_10_0000_0000
+        0x60U,  // byte 2: 6 msb of -512 (0x60 = 0b_0110_0000), 2 lsb of 0x0055 = 0b0001010101
+        0x15U,  // byte 3: 8 msb of 0b_00_0101_0101, 0x15 = 0b00010101
+        0x56U,  // byte 4: ALIGNED; -0x00AA in two's complement is 0x356 = 0b_11_01010110
+        0x0BU,  // byte 5: 2 msb of the above (0b11) followed by 8 bit of length prefix (2) of float16[<=2] f16_le2
+        0xFCU,  // byte 6: 2 msb of the length prefix followed by 6 lsb of (float16.min = 0xfbff = 0b_11111011_11111111)
+        0xEFU,  // byte 7: 0b_xx_111011_11xxxxxx (continuation of the float16)
+        0x03U,  // byte 8: 2 msb of the above (0b11) and the next float16 = +inf, represented 0x7C00 = 0b_01111100_00000000
+        0xF0U,  // byte 9: 0b_xx111100_00xxxxxx (continuation of the infinity)
+        0x15U,  // byte 10: 2 msb of the above (0b01) followed by bool[3] unaligned_bitpacked_3 = [1, 0, 1], then PADDING
+        0x02U,  // byte 11: ALIGNED; empty struct not manifested, here we have length = 2 of uint8[<3] bytes_lt3
+        0x6FU,  // byte 12: bytes_lt3[0] = 111
+        0xDEU,  // byte 13: bytes_lt3[1] = 222
+        0x89U,  // byte 14: bytes_3[0] = -0x77 (two's complement)
+        0xEFU,  // byte 15: bytes_3[1] = -0x11 (two's complement)
+        0x77U,  // byte 16: bytes_3[2] = +0x77
+        0x03U,  // byte 17: length = 3 of truncated uint2[<=4] u2_le4
+        0x36U,  // byte 18: 0b_00_11_01_10: u2_le4[0] = 0b10, u2_le4[1] = 0b01, u2_le4[2] = 0b11, then dynamic padding
+        0x01U,  // byte 19: ALIGNED; length = 1 of DelimitedFixedSize.0.1[<=2] delimited_fix_le2
+        0x00U,  // byte 20: Constant DH of DelimitedFixedSize.0.1
+        0x00U,  // byte 21: ditto
+        0x00U,  // byte 22: ditto
+        0x00U,  // byte 23: ditto
+        0x34U,  // byte 24: uint16[2] u16_2; first element = 0x1234
+        0x12U,  // byte 25: continuation
+        0x78U,  // byte 26: second element = 0x5678
+        0x56U,  // byte 27: continuation
+        0x11U,  // byte 28: bool[3] aligned_bitpacked_3 = [1, 0, 0]; then 5 lsb of length = 2 of bool[<3] unaligned_bitpacked_lt3
+        0x08U,  // byte 29: 3 msb of length = 2 (i.e., zeros), then values [1, 0], then 1 bit of padding before composite
+        0x03U,  // byte 30: DH = 3 of the first element of DelimitedVariableSize.0.1[2] delimited_var_2
+        0x00U,  // byte 31: ditto
+        0x00U,  // byte 32: ditto
+        0x00U,  // byte 33: ditto
+        0x00U,  // byte 34: union tag = 0, f16 selected
+        0x00U,  // byte 35: f16 truncated to positive infinity; see representation above
+        0x7CU,  // byte 36: ditto
+        0x09U,  // byte 37: DH = (8 + 1) of the second element of DelimitedVariableSize.0.1[2] delimited_var_2
+        0x00U,  // byte 38: ditto
+        0x00U,  // byte 39: ditto
+        0x00U,  // byte 40: ditto
+        0x02U,  // byte 41: union tag = 2, f64 selected (notice that union tags are always aligned by design)
+        0xA5U,  // byte 42: float64 = -1e40 is 0xc83d6329f1c35ca5, this is the LSB
+        0x5CU,  // byte 43: ditto
+        0xC3U,  // byte 44: ditto
+        0xF1U,  // byte 45: ditto
+        0x29U,  // byte 46: ditto
+        0x63U,  // byte 47: ditto
+        0x3DU,  // byte 48: ditto
+        0xC8U,  // byte 49: ditto
+        0x01U,  // byte 50: length = 1 of bool[<=3] aligned_bitpacked_le3
+        0x01U,  // byte 51: the one single bit of the above, then 7 bits of dynamic padding to byte
+        // END OF SERIALIZED REPRESENTATION
+        0x55U,  // byte 52: canary 1
+        0x55U,  // byte 53: canary 2
+        0x55U,  // byte 54: canary 3
+        0x55U,  // byte 55: canary 4
+        0x55U,  // byte 56: canary 5
+        0x55U,  // byte 57: canary 6
+        0x55U,  // byte 58: canary 7
+        0x55U,  // byte 59: canary 8
+        0x55U,  // byte 60: canary 9
+        0x55U,  // byte 61: canary 10
+        0x55U,  // byte 62: canary 11
+        0x55U,  // byte 63: canary 12
+        0x55U,  // byte 64: canary 13
+        0x55U,  // byte 65: canary 14
+        0x55U,  // byte 66: canary 15
+        0x55U,  // byte 67: canary 16
     };
 
     uint8_t buf[sizeof(reference)];
@@ -202,33 +200,33 @@
     auto result = obj.serialize({{buf, sizeof(buf)}});
     ASSERT_TRUE(result) << "Error is " << static_cast<int>(result.error());
-    ASSERT_EQ(sizeof(reference) - 16U, result.value());
+    EXPECT_EQ(sizeof(reference) - 16U, result.value());
 
     for(size_t i=0; i< sizeof(reference); i++){
         ASSERT_EQ(reference[i], buf[i]) << "Failed at " << i;
     }
 
-    // Check union manipulation functions.
-    // TEST_ASSERT_TRUE(regulated_basics_DelimitedVariableSize_0_1_is_f16_(&obj.delimited_var_2[0]));
-    // TEST_ASSERT_FALSE(regulated_basics_DelimitedVariableSize_0_1_is_f32_(&obj.delimited_var_2[0]));
-    // TEST_ASSERT_FALSE(regulated_basics_DelimitedVariableSize_0_1_is_f64_(&obj.delimited_var_2[0]));
-    // TEST_ASSERT_FALSE(regulated_basics_DelimitedVariableSize_0_1_is_f64_(NULL));
-    // regulated_basics_DelimitedVariableSize_0_1_select_f32_(NULL);  // No action; same state retained.
-    // TEST_ASSERT_TRUE(regulated_basics_DelimitedVariableSize_0_1_is_f16_(&obj.delimited_var_2[0]));
-    // TEST_ASSERT_FALSE(regulated_basics_DelimitedVariableSize_0_1_is_f32_(&obj.delimited_var_2[0]));
-    // TEST_ASSERT_FALSE(regulated_basics_DelimitedVariableSize_0_1_is_f64_(&obj.delimited_var_2[0]));
-    // TEST_ASSERT_FALSE(regulated_basics_DelimitedVariableSize_0_1_is_f64_(NULL));
+    ASSERT_TRUE(obj.delimited_var_2[0].is_f16());
+    ASSERT_FALSE(obj.delimited_var_2[0].is_f32());
+    ASSERT_FALSE(obj.delimited_var_2[0].is_f64());
+    obj.delimited_var_2[0].set_f32();
+    ASSERT_FALSE(obj.delimited_var_2[0].is_f16());
+    ASSERT_TRUE(obj.delimited_var_2[0].is_f32());
+    ASSERT_FALSE(obj.delimited_var_2[0].is_f64());
+    obj.delimited_var_2[0].set_f64();
+    ASSERT_FALSE(obj.delimited_var_2[0].is_f16());
+    ASSERT_FALSE(obj.delimited_var_2[0].is_f32());
+    ASSERT_TRUE(obj.delimited_var_2[0].is_f64());
 
     // Test default initialization.
-    // (void) memset(&obj, 0x55, sizeof(obj));  // Fill using a non-zero pattern.
     obj.regulated::basics::Struct__0_1::~Struct__0_1();
     new (&obj)regulated::basics::Struct__0_1();
 
     ASSERT_EQ(false, obj.boolean);
-    // ASSERT_EQ(0, obj.i10_4[0]);
-    // ASSERT_EQ(0, obj.i10_4[1]);
-    // ASSERT_EQ(0, obj.i10_4[2]);
-    // ASSERT_EQ(0, obj.i10_4[3]);
+    ASSERT_EQ(0, obj.i10_4[0]);
+    ASSERT_EQ(0, obj.i10_4[1]);
+    ASSERT_EQ(0, obj.i10_4[2]);
+    ASSERT_EQ(0, obj.i10_4[3]);
     ASSERT_EQ(0U, obj.f16_le2.size());
     ASSERT_EQ(0, obj.unaligned_bitpacked_3[0]);
     ASSERT_EQ(0, obj.unaligned_bitpacked_3[1]);
@@ -261,8 +259,10 @@
     ASSERT_EQ(0U, obj.aligned_bitpacked_le3.size());
 
     // // Deserialize the above reference representation and compare the result against the original object.
+    result = obj.deserialize({{reference}, 0U});
+    ASSERT_TRUE(result) << "Error was " << result.error();
     // ASSERT_EQ(0, regulated_basics_Struct__0_1_deserialize_(&obj, &reference[0], &size));
-    // ASSERT_EQ(sizeof(reference) - 16U, size);  // 16 trailing bytes implicitly truncated away
+    ASSERT_EQ(sizeof(reference) - 16U, result.value());  // 16 trailing bytes implicitly truncated away
     // ASSERT_EQ(true, obj.boolean);
     // ASSERT_EQ(+511, obj.i10_4[0]);  // saturated