align compressed tensor data as per schema
ddavis-2015 committed Oct 14, 2024
1 parent ad2b1c3 · commit 77bb05d
Showing 5 changed files with 46 additions and 24 deletions.
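
The change is the same in all five files: each statically defined compressed-tensor test array gains alignas(16), so the test data carries the same 16-byte alignment that flatbuffers gives Buffer data in the TFLite schema. A minimal sketch of the pattern follows; the array name, contents, and the alignment check are illustrative only and are not taken from the commit.

#include <cstdint>
#include <cstdio>

// Align the tensor data the same as a Buffer in the schema (Buffer data in the
// TFLite flatbuffer schema is 16-byte aligned).
alignas(16) constexpr uint8_t kExampleCompressedData[] = {0x05, 0x39, 0x40};

int main() {
  // An alignas(16) object sits at an address that is a multiple of 16, so the
  // statically defined array can stand in for schema Buffer data in the tests.
  const bool aligned =
      reinterpret_cast<std::uintptr_t>(kExampleCompressedData) % 16 == 0;
  std::printf("16-byte aligned: %s\n", aligned ? "yes" : "no");
  return 0;
}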
9 changes: 6 additions & 3 deletions tensorflow/lite/micro/kernels/concatenation_test.cc
@@ -341,7 +341,8 @@ TF_LITE_MICRO_TEST(TwoInputsFloatCompressed) {
int input_shape[] = {2, 2, 3};
const float input1_value[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
const float input2_value[] = {7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f};
- const uint8_t inputs_compressed[] = {0x05, 0x39, 0x40};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) const uint8_t inputs_compressed[] = {0x05, 0x39, 0x40};
constexpr int kBitWidth = 3;

// expected output when concatenating on axis 0
@@ -409,7 +410,8 @@ TF_LITE_MICRO_TEST(TwoInputsQuantizedInt8Compressed) {
const int8_t input1_values[] = {1, 2, 3, 4};
const int8_t input2_values[] = {5, 6, 7, 8};
const int8_t output_value[] = {1, 2, 5, 6, 3, 4, 7, 8};
- const uint8_t input_compressed[] = {0x1B};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) const uint8_t input_compressed[] = {0x1B};
constexpr int kBitWidth = 2;

int8_t output_data[std::extent<decltype(output_value)>::value];
@@ -470,7 +472,8 @@ TF_LITE_MICRO_TEST(TwoInputsQuantizedInt16Compressed) {
const int16_t input1_values[] = {1, 2, 3, 4};
const int16_t input2_values[] = {5, 6, 7, 8};
const int16_t output_value[] = {1, 2, 5, 6, 3, 4, 7, 8};
- const uint8_t input_compressed[] = {0x1B};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) const uint8_t input_compressed[] = {0x1B};
constexpr int kBitWidth = 2;

int16_t output_data[std::extent<decltype(output_value)>::value];
12 changes: 8 additions & 4 deletions tensorflow/lite/micro/kernels/conv_test.cc
@@ -51,7 +51,8 @@ static const float kGoldenData[kOutputElements] = {18, 2, 5, 18, 2, 5,
#ifdef USE_TFLM_COMPRESSION

// compressed filter data for kBinQuant scheme, matches kFilterData
- constexpr uint8_t kBinQuantFilterData[] = {
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterData[] = {
0x05, 0x38, 0x20, 0x90, 0x00,
};
constexpr float kBinQuantFilterValueTable[] = {
@@ -61,7 +62,8 @@ constexpr size_t kBinQuantFilterValueTableElements =
std::extent<decltype(kBinQuantFilterValueTable)>::value;
constexpr int kBinQuantFilterBitWidth = 3;
// compressed bias data for kBinQuant scheme, matches kBiasData
- constexpr uint8_t kBinQuantBiasData[] = {0x18};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasData[] = {0x18};
constexpr int kBinQuantBiasBitWidth = 2;

// Common inputs and outputs for quantized compressed tensor tests.
@@ -103,7 +105,8 @@ constexpr int kOutputElementsQ1 = std::extent<decltype(kGoldenDataQ1)>::value;
static const float kGoldenDataQ1_16[] = {31, 63.99804688, -57, -46};

// compressed filter data for kBinQuant scheme, matches kFilterDataQ1
- constexpr uint8_t kBinQuantFilterDataQ1[] = {
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterDataQ1[] = {
0x05, 0x34, 0xE5, 0xDE, 0x54, 0xC1,
};
constexpr float kBinQuantFilterValueTableQ1[] = {
@@ -113,7 +116,8 @@ constexpr size_t kBinQuantFilterValueTableElementsQ1 =
std::extent<decltype(kBinQuantFilterValueTableQ1)>::value;
constexpr int kBinQuantFilterBitWidthQ1 = 3;
// compressed bias data for kBinQuant scheme, matches kBiasDataQ1
- constexpr uint8_t kBinQuantBiasDataQ1[] = {0x00};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasDataQ1[] = {0x00};
constexpr int kBinQuantBiasBitWidthQ1 = 1;

static TfLiteConvParams common_conv_params_q1 = {
15 changes: 10 additions & 5 deletions tensorflow/lite/micro/kernels/depthwise_conv_test.cc
@@ -78,10 +78,13 @@ static constexpr float kGoldenDataQ1[] = {43, 48, 21, 22, 3, -4, -30, -36};
constexpr int kOutputElementsQ1 = std::extent<decltype(kGoldenDataQ1)>::value;

// compressed filter data for kBinQuant scheme, matches kFilterDataQ1
- constexpr uint8_t kBinQuantFilterDataQ1[] = {0x15, 0x6A, 0x8A, 0x60};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterDataQ1[] = {0x15, 0x6A, 0x8A,
+ 0x60};
constexpr int kBinQuantFilterBitWidthQ1 = 2;
// compressed bias data for kBinQuant scheme, matches kBiasDataQ1
- constexpr uint8_t kBinQuantBiasDataQ1[] = {0x00};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasDataQ1[] = {0x00};
constexpr int kBinQuantBiasBitWidthQ1 = 1;

#endif // USE_TFLM_COMPRESSION
@@ -424,13 +427,15 @@ TF_LITE_MICRO_TEST(SimpleTestCompressed) {
int filter_shape[] = {4, 1, 2, 2, 4};
// Filter values:
// {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}
- const uint8_t kBinQuantFilterData[] = {0x01, 0x23, 0xF8, 0xE9,
- 0x45, 0x67, 0xAD, 0xBC};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) const uint8_t kBinQuantFilterData[] = {0x01, 0x23, 0xF8, 0xE9,
+ 0x45, 0x67, 0xAD, 0xBC};
const float kBinQuantFilterValueTable[] = {1, 2, 3, 4, 5, 6, 7, 8,
10, 12, 13, 15, -16, -14, -11, -9};
int bias_shape[] = {4, 1, 1, 1, 4};
const float bias_values[] = {1, 2, 3, 4};
- const uint8_t kBinQuantBiasData[] = {0x1B};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) const uint8_t kBinQuantBiasData[] = {0x1B};
const float golden[] = {
71, -34, 99, -20, 91, -26, 127, -4,
};
11 changes: 7 additions & 4 deletions tensorflow/lite/micro/kernels/fully_connected_test.cc
@@ -48,18 +48,21 @@ const float simple_bias_data[] = {1, 2, 3};
#ifdef USE_TFLM_COMPRESSION

// compressed filter data for kBinQuant scheme
- constexpr uint8_t kBinQuantFilterData[] = {0x01, 0x23, 0x45, 0x67, 0x89,
- 0x01, 0x23, 0x45, 0x67, 0x89,
- 0x01, 0x23, 0x45, 0x67, 0x89};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterData[] = {
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0x01, 0x23, 0x45,
+ 0x67, 0x89, 0x01, 0x23, 0x45, 0x67, 0x89};
constexpr float kBinQuantFilterValueTable[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
constexpr size_t kBinQuantFilterValueTableElements =
std::extent<decltype(tflite::testing::kBinQuantFilterValueTable)>::value;
constexpr int kBinQuantFilterBitWidth = 4;
// compressed bias data for kBinQuant scheme
- constexpr uint8_t kBinQuantBiasData[] = {0x18};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasData[] = {0x18};
constexpr int kBinQuantBiasBitWidth = 2;
constexpr size_t simple_bias_size =
std::extent<decltype(simple_bias_data)>::value;

#endif // USE_TFLM_COMPRESSION

// TODO(b/258710417): INT4 isn't currently supported on Hexagon.
23 changes: 15 additions & 8 deletions tensorflow/lite/micro/kernels/transpose_conv_test.cc
@@ -56,11 +56,13 @@ constexpr size_t kTransposeConvMaxTensors = 5;
constexpr size_t kTransposeConvMaxInputTensors = 4;

// compressed filter data for kBinQuant scheme, matches kFilterData
- constexpr uint8_t kBinQuantFilterData[] = {0x00, 0x44, 0x32, 0x14, 0xC7, 0x42,
- 0x54, 0xB6, 0x35, 0xCF, 0x84, 0x40};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterData[] = {
+ 0x00, 0x44, 0x32, 0x14, 0xC7, 0x42, 0x54, 0xB6, 0x35, 0xCF, 0x84, 0x40};
constexpr int kBinQuantFilterBitWidth = 5;
// compressed bias data for kBinQuant scheme, matches kBiasData
- constexpr uint8_t kBinQuantBiasData[] = {0x00};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasData[] = {0x00};
constexpr int kBinQuantBiasBitWidth = 1;

// Common inputs and outputs (quantized single channel).
@@ -86,10 +88,13 @@ static constexpr float kGoldenDataQ1[] = {
constexpr int kOutputElementsQ1 = std::extent<decltype(kGoldenDataQ1)>::value;

// compressed filter data for kBinQuant scheme, matches kFilterDataQ1
- constexpr uint8_t kBinQuantFilterDataQ1[] = {0x01, 0x23, 0x45, 0x67, 0x80};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterDataQ1[] = {0x01, 0x23, 0x45, 0x67,
+ 0x80};
constexpr int kBinQuantFilterBitWidthQ1 = 4;
// compressed bias data for kBinQuant scheme, matches kBiasDataQ1
- constexpr uint8_t kBinQuantBiasDataQ1[] = {0x00};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasDataQ1[] = {0x00};
constexpr int kBinQuantBiasBitWidthQ1 = 1;

// Common inputs and outputs (quantized multi channel).
@@ -131,15 +136,17 @@ static constexpr float kGoldenDataQ2[] = {10, 35, 19, 24, -6, -41,
constexpr int kOutputElementsQ2 = std::extent<decltype(kGoldenDataQ2)>::value;

// compressed filter data for kBinQuant scheme, matches kFilterDataQ2
- constexpr uint8_t kBinQuantFilterDataQ2[] = {0x05, 0x34, 0xE5,
- 0xDE, 0x54, 0xC1};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantFilterDataQ2[] = {0x05, 0x34, 0xE5,
+ 0xDE, 0x54, 0xC1};
constexpr float kBinQuantFilterValueTableQ2[] = {1, 2, 3, 4, 5, 6, 0, 0,
1, 2, 3, 4, 5, 6, 7, 8};
constexpr size_t kBinQuantFilterValueTableElementsQ2 =
std::extent<decltype(kBinQuantFilterValueTableQ2)>::value;
constexpr int kBinQuantFilterBitWidthQ2 = 3;
// compressed bias data for kBinQuant scheme, matches kBiasDataQ2
- constexpr uint8_t kBinQuantBiasDataQ2[] = {0x00};
+ // Align the tensor data the same as a Buffer in the schema
+ alignas(16) constexpr uint8_t kBinQuantBiasDataQ2[] = {0x00};
constexpr int kBinQuantBiasBitWidthQ2 = 1;

#endif // USE_TFLM_COMPRESSION

