diff --git a/tensorflow/lite/micro/recording_micro_allocator.cc b/tensorflow/lite/micro/recording_micro_allocator.cc
index ee76196d255..18addaee5f7 100644
--- a/tensorflow/lite/micro/recording_micro_allocator.cc
+++ b/tensorflow/lite/micro/recording_micro_allocator.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -78,14 +78,15 @@ RecordedAllocation RecordingMicroAllocator::GetRecordedAllocation(
       return recorded_node_and_registration_array_data_;
     case RecordedAllocationType::kOpData:
       return recorded_op_data_;
-      // the function MicroPrintf was never reached outside the switch, because
-      // each case has a return. As the intention of the MicroPrintf is to be
-      // called when no matching case is found, a default case was added to
-      // contemplate an invalid allocation type
+#ifdef USE_TFLM_COMPRESSION
+    case RecordedAllocationType::kCompressionData:
+      return recorded_compression_data_;
+#endif  // USE_TFLM_COMPRESSION
     default:
-      MicroPrintf("Invalid allocation type supplied: %d", allocation_type);
-      return RecordedAllocation();
+      break;
   }
+  MicroPrintf("Invalid allocation type supplied: %d", allocation_type);
+  return RecordedAllocation();
 }
 
 const RecordingSingleArenaBufferAllocator*
@@ -117,6 +118,13 @@ void RecordingMicroAllocator::PrintAllocations() const {
                           "NodeAndRegistration structs");
   PrintRecordedAllocation(RecordedAllocationType::kOpData,
                           "Operator runtime data", "OpData structs");
+
+#ifdef USE_TFLM_COMPRESSION
+
+  PrintRecordedAllocation(RecordedAllocationType::kCompressionData,
+                          "Persistent compression data", "allocations");
+
+#endif  // USE_TFLM_COMPRESSION
 }
 
 void* RecordingMicroAllocator::AllocatePersistentBuffer(size_t bytes) {
@@ -233,6 +241,21 @@ TfLiteStatus RecordingMicroAllocator::PopulateTfLiteTensorFromFlatbuffer(
   return status;
 }
 
+#ifdef USE_TFLM_COMPRESSION
+
+TfLiteStatus RecordingMicroAllocator::AllocateCompressedTensorsList(
+    const Model* model, SubgraphAllocations* subgraph_allocations) {
+  RecordedAllocation allocations = SnapshotAllocationUsage();
+
+  TfLiteStatus status = MicroAllocator::AllocateCompressedTensorsList(
+      model, subgraph_allocations);
+
+  RecordAllocationUsage(allocations, recorded_compression_data_);
+  return status;
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
 RecordedAllocation RecordingMicroAllocator::SnapshotAllocationUsage() const {
   return {/*requested_bytes=*/recording_memory_allocator_->GetRequestedBytes(),
           /*used_bytes=*/recording_memory_allocator_->GetUsedBytes(),
diff --git a/tensorflow/lite/micro/recording_micro_allocator.h b/tensorflow/lite/micro/recording_micro_allocator.h
index b6f69264dc0..80f163240d3 100644
--- a/tensorflow/lite/micro/recording_micro_allocator.h
+++ b/tensorflow/lite/micro/recording_micro_allocator.h
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -33,6 +33,11 @@ enum class RecordedAllocationType {
   kTfLiteTensorVariableBufferData,
   kNodeAndRegistrationArray,
   kOpData,
+#ifdef USE_TFLM_COMPRESSION
+  kCompressionData,
+#endif  // USE_TFLM_COMPRESSION
+
+  kNumAllocationTypes,  // must be last
 };
 
 // Container for holding information about allocation recordings by a given
@@ -93,6 +98,13 @@ class RecordingMicroAllocator : public MicroAllocator {
                                                  int subgraph_index,
                                                  bool allocate_temp) override;
 
+#ifdef USE_TFLM_COMPRESSION
+
+  TfLiteStatus AllocateCompressedTensorsList(
+      const Model* model, SubgraphAllocations* subgraph_allocations) override;
+
+#endif  // USE_TFLM_COMPRESSION
+
  private:
   RecordingMicroAllocator(RecordingSingleArenaBufferAllocator* memory_allocator,
                           MicroMemoryPlanner* memory_planner);
@@ -113,6 +125,9 @@ class RecordingMicroAllocator : public MicroAllocator {
   RecordedAllocation recorded_persistent_buffer_data_ = {};
   RecordedAllocation recorded_tflite_tensor_variable_buffer_data_ = {};
   RecordedAllocation recorded_node_and_registration_array_data_ = {};
+#ifdef USE_TFLM_COMPRESSION
+  RecordedAllocation recorded_compression_data_ = {};
+#endif  // USE_TFLM_COMPRESSION
 
   // TODO(b/187993291): Re-enable OpData allocating tracking.
   RecordedAllocation recorded_op_data_ = {};
diff --git a/tensorflow/lite/micro/recording_micro_allocator_test.cc b/tensorflow/lite/micro/recording_micro_allocator_test.cc
index 9d3a5965de4..121a74c3324 100644
--- a/tensorflow/lite/micro/recording_micro_allocator_test.cc
+++ b/tensorflow/lite/micro/recording_micro_allocator_test.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -317,6 +317,72 @@ TF_LITE_MICRO_TEST(TestMultiSubgraphModel) {
                           num_tensors * TF_LITE_EVAL_TENSOR_STRUCT_SIZE);
 }
 
+#ifdef USE_TFLM_COMPRESSION
+
+TF_LITE_MICRO_TEST(TestCompressedModel) {
+  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
+  tflite::testing::TestingOpResolver ops_resolver;
+  const tflite::Model* model = tflite::testing::GetSimpleMockModelCompressed();
+  const int arena_size = 2048;
+
+  uint8_t arena[arena_size];
+
+  tflite::RecordingMicroAllocator* micro_allocator =
+      tflite::RecordingMicroAllocator::Create(arena, arena_size);
+  TF_LITE_MICRO_EXPECT(micro_allocator != nullptr);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  tflite::SubgraphAllocations* subgraph_allocations =
+      micro_allocator->StartModelAllocation(model);
+  TF_LITE_MICRO_EXPECT(nullptr != subgraph_allocations);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  TfLiteStatus status = micro_allocator->FinishModelAllocation(
+      model, subgraph_allocations, &scratch_buffer_handles);
+  TF_LITE_MICRO_EXPECT_EQ(status, kTfLiteOk);
+  TF_LITE_MICRO_CHECK_FAIL();
+
+  micro_allocator->PrintAllocations();
+
+  size_t count_compression_allocations = 0;
+  size_t size_compression_allocations = 0;
+  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
+       subgraph_idx++) {
+    const tflite::CompressionTensorData** ctl =
+        subgraph_allocations[subgraph_idx].compressed.tensors;
+    if (ctl == nullptr) {
+      continue;
+    }
+    const tflite::SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
+    const size_t num_tensors = subgraph->tensors()->size();
+    for (size_t i = 0; i < num_tensors; i++) {
+      if (ctl[i] != nullptr) {
+        count_compression_allocations++;
+        size_compression_allocations += sizeof(tflite::CompressionTensorData);
+        count_compression_allocations++;
+        size_compression_allocations += sizeof(tflite::LookupTableData);
+      }
+    }
+    // Add the CompressionTensorData array
+    count_compression_allocations++;
+    size_compression_allocations +=
+        num_tensors * sizeof(tflite::CompressionTensorData*);
+  }
+
+  tflite::RecordedAllocation recorded_allocation =
+      micro_allocator->GetRecordedAllocation(
+          tflite::RecordedAllocationType::kCompressionData);
+
+  TF_LITE_MICRO_EXPECT_EQ(recorded_allocation.count,
+                          count_compression_allocations);
+  TF_LITE_MICRO_EXPECT_EQ(recorded_allocation.requested_bytes,
+                          size_compression_allocations);
+  TF_LITE_MICRO_EXPECT_GE(recorded_allocation.used_bytes,
+                          size_compression_allocations);
+}
+
+#endif  // USE_TFLM_COMPRESSION
+
 // TODO(b/158124094): Find a way to audit OpData allocations on
 // cross-architectures.
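
A minimal usage sketch (not part of the diff above) showing how the new kCompressionData record can be queried once model allocation has completed. The helper name ReportCompressionAllocations is hypothetical; only RecordingMicroAllocator::GetRecordedAllocation(), RecordedAllocationType::kCompressionData, and the RecordedAllocation fields come from this change, and the block only applies when the build defines USE_TFLM_COMPRESSION:

    #ifdef USE_TFLM_COMPRESSION

    #include "tensorflow/lite/micro/micro_log.h"
    #include "tensorflow/lite/micro/recording_micro_allocator.h"

    // Hypothetical helper: report how much arena memory was consumed by
    // persistent compression data (CompressionTensorData, LookupTableData,
    // and the per-subgraph pointer arrays) once FinishModelAllocation()
    // has returned on the given recording allocator.
    void ReportCompressionAllocations(
        const tflite::RecordingMicroAllocator& allocator) {
      const tflite::RecordedAllocation stats = allocator.GetRecordedAllocation(
          tflite::RecordedAllocationType::kCompressionData);
      MicroPrintf("compression data: %d allocations, %d bytes requested, %d used",
                  static_cast<int>(stats.count),
                  static_cast<int>(stats.requested_bytes),
                  static_cast<int>(stats.used_bytes));
    }

    #endif  // USE_TFLM_COMPRESSION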