From 2ff4f59fdcad9e6b0e4c922e74464cf3f2a087ca Mon Sep 17 00:00:00 2001
From: aarongreen
Date: Sun, 24 Sep 2023 23:35:08 +0000
Subject: [PATCH] pw_allocator: Add SplitFreeListAllocator

This CL adds SplitFreeListAllocator, a memory allocator which uses a
free list. It splits allocations by allocating large and small requests
from opposite ends of its overall memory region. Splitting allocations
by size reduces fragmentation.

Change-Id: I24e5791695f7d2892eb89b4b4371e85288b6cdb5
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/172231
Reviewed-by: Taylor Cramer
Commit-Queue: Aaron Green
---
 pw_allocator/BUILD.bazel                      |  30 ++
 pw_allocator/BUILD.gn                         |  24 ++
 pw_allocator/CMakeLists.txt                   |  28 ++
 pw_allocator/docs.rst                         |   3 +
 .../pw_allocator/split_free_list_allocator.h  |  92 ++++++
 pw_allocator/split_free_list_allocator.cc     | 285 ++++++++++++++++
 .../split_free_list_allocator_test.cc         | 304 ++++++++++++++++++
 7 files changed, 766 insertions(+)
 create mode 100644 pw_allocator/public/pw_allocator/split_free_list_allocator.h
 create mode 100644 pw_allocator/split_free_list_allocator.cc
 create mode 100644 pw_allocator/split_free_list_allocator_test.cc

diff --git a/pw_allocator/BUILD.bazel b/pw_allocator/BUILD.bazel
index 30ed3d2add..6de649822d 100644
--- a/pw_allocator/BUILD.bazel
+++ b/pw_allocator/BUILD.bazel
@@ -102,6 +102,23 @@ pw_cc_library(
     ],
 )
 
+pw_cc_library(
+    name = "split_free_list_allocator",
+    srcs = [
+        "split_free_list_allocator.cc",
+    ],
+    hdrs = [
+        "public/pw_allocator/split_free_list_allocator.h",
+    ],
+    includes = ["public"],
+    deps = [
+        ":allocator",
+        "//pw_assert",
+        "//pw_bytes",
+        "//pw_status",
+    ],
+)
+
 pw_cc_library(
     name = "allocator_testing",
     srcs = [
@@ -178,3 +195,16 @@ pw_cc_test(
         ":freelist_heap",
     ],
 )
+
+pw_cc_test(
+    name = "split_free_list_allocator_test",
+    srcs = [
+        "split_free_list_allocator_test.cc",
+    ],
+    deps = [
+        ":split_free_list_allocator",
+        "//pw_bytes",
+        "//pw_containers:vector",
+        "//pw_unit_test",
+    ],
+)

diff --git a/pw_allocator/BUILD.gn b/pw_allocator/BUILD.gn
index dd8b59226f..23106562d8 100644
--- a/pw_allocator/BUILD.gn
+++ b/pw_allocator/BUILD.gn
@@ -103,6 +103,20 @@ pw_source_set("freelist_heap") {
   sources = [ "freelist_heap.cc" ]
 }
 
+pw_source_set("split_free_list_allocator") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_allocator/split_free_list_allocator.h" ]
+  public_deps = [
+    ":allocator",
+    dir_pw_status,
+  ]
+  deps = [
+    dir_pw_assert,
+    dir_pw_bytes,
+  ]
+  sources = [ "split_free_list_allocator.cc" ]
+}
+
 pw_test_group("tests") {
   tests = [
     ":allocator_test",
@@ -110,6 +124,7 @@ pw_test_group("tests") {
     ":fallback_allocator_test",
     ":freelist_test",
     ":freelist_heap_test",
+    ":split_free_list_allocator_test",
   ]
 }
 
@@ -167,6 +182,15 @@ pw_test("freelist_heap_test") {
   sources = [ "freelist_heap_test.cc" ]
 }
 
+pw_test("split_free_list_allocator_test") {
+  deps = [
+    ":split_free_list_allocator",
+    "$dir_pw_containers:vector",
+    dir_pw_bytes,
+  ]
+  sources = [ "split_free_list_allocator_test.cc" ]
+}
+
 pw_doc_group("docs") {
   inputs = [ "doc_resources/pw_allocator_heap_visualizer_demo.png" ]
   sources = [ "docs.rst" ]
}

diff --git a/pw_allocator/CMakeLists.txt b/pw_allocator/CMakeLists.txt
index 00491f249b..7a45c50309 100644
--- a/pw_allocator/CMakeLists.txt
+++ b/pw_allocator/CMakeLists.txt
@@ -82,6 +82,21 @@ pw_add_library(pw_allocator.freelist_heap STATIC
     freelist_heap.cc
 )
 
+pw_add_library(pw_allocator.split_free_list_allocator STATIC
+  SOURCES
+    split_free_list_allocator.cc
+  HEADERS
+    public/pw_allocator/split_free_list_allocator.h
+  PUBLIC_INCLUDES
+    public
+  PUBLIC_DEPS
+    pw_allocator.allocator
+    pw_status
+  PRIVATE_DEPS
+    pw_assert
+    pw_bytes
+)
+
 pw_add_library(pw_allocator.allocator_testing STATIC
   HEADERS
     pw_allocator_private/allocator_testing.h
@@ -150,3 +165,16 @@ pw_add_test(pw_allocator.freelist_heap_test
     modules
     pw_allocator
 )
+
+pw_add_test(pw_allocator.split_free_list_allocator_test
+  SOURCES
+    split_free_list_allocator_test.cc
+  PRIVATE_DEPS
+    pw_allocator.split_free_list_allocator
+    pw_containers.vector
+    pw_bytes
+    pw_unit_test
+  GROUPS
+    modules
+    pw_allocator
+)

diff --git a/pw_allocator/docs.rst b/pw_allocator/docs.rst
index 81413d1461..27c29383c7 100644
--- a/pw_allocator/docs.rst
+++ b/pw_allocator/docs.rst
@@ -35,6 +35,9 @@ Provided implementations of the ``Allocator`` interface include:
 - ``FallbackAllocator``: Dispatches first to a primary allocator, and, if that
   fails, to a secondary allocator.
+- ``SplitFreeListAllocator``: Tracks free blocks using a free list, and splits
+  large and small allocations between the front and back, respectively, of its
+  memory region in order to reduce fragmentation.
 
 Heap Poisoning
 ==============
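The behavior described in the ``SplitFreeListAllocator`` bullet is easiest to
see in a small usage sketch (illustrative only; the 1 KiB region and 64-byte
threshold below are example values, not taken from this patch):

  #include <array>
  #include <cstddef>

  #include "pw_allocator/split_free_list_allocator.h"

  // Requests of at least `threshold` bytes (64 here) are placed toward the
  // start of the region; smaller requests are placed toward the end, so the
  // two size classes stay grouped together.
  void Example() {
    alignas(16) static std::array<std::byte, 1024> buffer;
    pw::allocator::SplitFreeListAllocator allocator;
    allocator.Initialize(buffer.data(), buffer.size(), 64);

    void* large = allocator.AllocateUnchecked(128, 16);  // Low addresses.
    void* small = allocator.AllocateUnchecked(8, 16);    // High addresses.

    allocator.DeallocateUnchecked(small, 8, 16);
    allocator.DeallocateUnchecked(large, 128, 16);
  }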
diff --git a/pw_allocator/public/pw_allocator/split_free_list_allocator.h b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
new file mode 100644
index 0000000000..0facb1a1ba
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
@@ -0,0 +1,92 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+#include "pw_allocator/allocator.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// This memory allocator uses a free list to track unallocated blocks, with a
+/// twist: allocations at or above a given threshold are taken from lower
+/// addresses within the allocator's memory region, while smaller allocations
+/// are taken from higher addresses. This has the effect of decreasing
+/// fragmentation, as similarly-sized allocations are grouped together.
+///
+/// NOTE!! Do NOT use memory returned from this allocator as the backing for
+/// another allocator. If this is done, the `Query` method will incorrectly
+/// think pointers returned by that allocator were created by this one, and
+/// report that this allocator can de/reallocate them.
+class SplitFreeListAllocator : public Allocator {
+ public:
+  /// Free memory blocks are tracked using a singly linked list. The free
+  /// memory itself is used for these structs, so the minimum size and
+  /// alignment supported by this allocator is `sizeof(FreeBlock)`.
+  ///
+  /// Allocator callers should not need to access this type directly.
+  struct FreeBlock {
+    FreeBlock* next;
+    size_t size;
+  };
+
+  constexpr SplitFreeListAllocator() = default;
+  ~SplitFreeListAllocator() override;
+
+  // Not copyable.
+  SplitFreeListAllocator(const SplitFreeListAllocator&) = delete;
+  SplitFreeListAllocator& operator=(const SplitFreeListAllocator&) = delete;
+
+  /// Sets the memory region to be used by this allocator, and the threshold at
+  /// which allocations are considered "large" or "small". Large and small
+  /// allocations return lower and higher addresses, respectively.
+  ///
+  /// @param[in]  base       Start of the memory region for this allocator.
+  /// @param[in]  size       Length of the memory region for this allocator.
+  /// @param[in]  threshold  Allocations of this size or larger are considered
+  ///                        "large" and come from lower addresses.
+  void Initialize(void* base, size_t size, size_t threshold);
+
+ private:
+  /// Adds the given block to the free list. The block must not be null.
+  void AddBlock(FreeBlock* block);
+
+  /// Removes the given block from the free list. The block must not be null.
+  FreeBlock* RemoveBlock(FreeBlock* prev, FreeBlock* block);
+
+  /// @copydoc Allocator::Query
+  Status DoQuery(const void* ptr, size_t size, size_t alignment) const override;
+
+  /// @copydoc Allocator::Allocate
+  void* DoAllocate(size_t size, size_t alignment) override;
+
+  /// @copydoc Allocator::Deallocate
+  void DoDeallocate(void* ptr, size_t size, size_t alignment) override;
+
+  /// @copydoc Allocator::Resize
+  bool DoResize(void* ptr,
+                size_t old_size,
+                size_t old_alignment,
+                size_t new_size) override;
+
+  uintptr_t addr_ = 0;
+  size_t size_ = 0;
+  FreeBlock* head_ = nullptr;
+  size_t threshold_ = 0;
+};
+
+}  // namespace pw::allocator
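The `Layout`-based calls used throughout the tests pair naturally with this
interface. A minimal sketch of the intended call pattern (`MyObject` is a
hypothetical caller-side type, not part of this patch):

  #include <cstdint>

  #include "pw_allocator/split_free_list_allocator.h"

  struct MyObject {  // Hypothetical payload type.
    uint32_t id;
    uint32_t value;
  };

  void* AllocateObject(pw::allocator::SplitFreeListAllocator& allocator) {
    // Layout::Of<T>() captures sizeof(T) and alignof(T). Requests smaller
    // than sizeof(FreeBlock) are rounded up internally, since freed memory
    // must be able to hold a FreeBlock header.
    constexpr pw::allocator::Layout kLayout =
        pw::allocator::Layout::Of<MyObject>();
    return allocator.Allocate(kLayout);
  }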
diff --git a/pw_allocator/split_free_list_allocator.cc b/pw_allocator/split_free_list_allocator.cc
new file mode 100644
index 0000000000..16bda8e4d3
--- /dev/null
+++ b/pw_allocator/split_free_list_allocator.cc
@@ -0,0 +1,285 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/split_free_list_allocator.h"
+
+#include <algorithm>
+
+#include "pw_assert/check.h"
+#include "pw_bytes/alignment.h"
+
+namespace pw::allocator {
+
+static_assert(sizeof(size_t) == sizeof(uintptr_t), "platform not supported");
+
+using FreeBlock = SplitFreeListAllocator::FreeBlock;
+
+// Public methods.
+
+SplitFreeListAllocator::~SplitFreeListAllocator() {
+  // All memory must be returned before the allocator goes out of scope.
+  if (addr_ != 0) {
+    PW_CHECK(addr_ == reinterpret_cast<uintptr_t>(head_));
+    PW_CHECK(head_->next == nullptr);
+    PW_CHECK(head_->size == size_);
+  }
+}
+
+void SplitFreeListAllocator::Initialize(void* base,
+                                        size_t size,
+                                        size_t threshold) {
+  PW_CHECK(base != nullptr);
+  auto addr = reinterpret_cast<uintptr_t>(base);
+  addr_ = AlignUp(addr, alignof(FreeBlock));
+  PW_CHECK(alignof(FreeBlock) <= size, "size underflow on alignment");
+  size_ = AlignDown(size - (addr_ - addr), alignof(FreeBlock));
+  PW_CHECK(sizeof(FreeBlock) <= size_, "region is smaller than a single block");
+  head_ = reinterpret_cast<FreeBlock*>(addr_);
+  head_->next = nullptr;
+  head_->size = size_;
+  threshold_ = threshold;
+}
+
+// Private methods.
+
+namespace {
+
+/// Adjusts the layout if necessary to match `SplitFreeListAllocator`'s
+/// minimums.
+///
+/// This function will modify `size` and `alignment` to represent a memory
+/// region that is a multiple of `sizeof(FreeBlock)`, aligned on
+/// `sizeof(FreeBlock)` boundaries. This potentially wastes a few bytes for
+/// allocations that could have been aligned on `alignof(FreeBlock)`
+/// boundaries, but it greatly simplifies ensuring that any fragments can hold
+/// a `FreeBlock`, as well as reconstructing the `FreeBlock` from a pointer
+/// and `Layout` in `Deallocate`.
+void Normalize(size_t& size, size_t& alignment) {
+  alignment = std::max(alignment, sizeof(FreeBlock));
+  size = AlignUp(std::max(size, sizeof(FreeBlock)), alignment);
+}
+
+/// Stores a `FreeBlock` representing a block of the given `size` at
+/// `ptr` + `offset`, and returns it.
+FreeBlock* CreateBlock(void* ptr, size_t size, size_t offset = 0) {
+  auto addr = reinterpret_cast<uintptr_t>(ptr) + offset;
+  auto* block = reinterpret_cast<FreeBlock*>(addr);
+  block->next = nullptr;
+  block->size = size;
+  return block;
+}
+
+/// Returns true if `prev` + `offset` equals `next`.
+bool IsAdjacent(void* prev, size_t offset, void* next) {
+  return reinterpret_cast<uintptr_t>(prev) + offset ==
+         reinterpret_cast<uintptr_t>(next);
+}
+
+/// Reduces the size of a block, and creates and returns a new block
+/// representing the difference.
+///
+/// The original block must have room for both resulting `FreeBlock`s.
+///
+/// This function assumes `prev` IS on a free list.
+FreeBlock* SplitBlock(FreeBlock* prev, size_t offset) {
+  PW_DCHECK(sizeof(FreeBlock) <= offset);
+  PW_DCHECK(offset + sizeof(FreeBlock) <= prev->size);
+  FreeBlock* next = CreateBlock(prev, prev->size - offset, offset);
+  next->next = prev->next;
+  prev->size = offset;
+  prev->next = next;
+  return next;
+}
+
+/// Combines two blocks into one and returns it.
+///
+/// `prev` and `next` MUST NOT be null.
+///
+/// This function assumes `prev` and `next` ARE NOT on a free list.
+FreeBlock* MergeBlocks(FreeBlock* prev, FreeBlock* next) {
+  PW_DCHECK(prev != nullptr);
+  PW_DCHECK(next != nullptr);
+  prev->size += next->size;
+  return prev;
+}
+
+}  // namespace
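Taken together, these helpers implement an intrusive singly linked free list
whose nodes live in the free memory itself. A concrete illustration (not part
of the patch; assumes a 64-bit target, where `sizeof(FreeBlock)` is 16):

  // Splitting a 64-byte block at offset 48 leaves `prev` covering bytes
  // [0, 48) and returns a new 16-byte block covering bytes [48, 64).
  alignas(FreeBlock) std::byte storage[64];
  FreeBlock* prev = CreateBlock(storage, sizeof(storage));
  FreeBlock* next = SplitBlock(prev, 48);
  // Now prev->size == 48, next->size == 16, and
  // IsAdjacent(prev, prev->size, next) holds.
  // Merging (once both blocks are off the free list) restores the original:
  MergeBlocks(prev, next);  // prev->size == 64 again.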
+
+void SplitFreeListAllocator::AddBlock(FreeBlock* block) {
+  PW_DCHECK(addr_ != 0);
+  PW_DCHECK(block != nullptr);
+  block->next = head_;
+  head_ = block;
+}
+
+SplitFreeListAllocator::FreeBlock* SplitFreeListAllocator::RemoveBlock(
+    FreeBlock* prev, FreeBlock* block) {
+  PW_DCHECK(addr_ != 0);
+  PW_DCHECK(block != nullptr);
+  if (block == head_) {
+    head_ = block->next;
+  } else {
+    prev->next = block->next;
+  }
+  return block;
+}
+
+Status SplitFreeListAllocator::DoQuery(const void* ptr,
+                                       size_t size,
+                                       size_t alignment) const {
+  PW_DCHECK(addr_ != 0);
+  if (ptr == nullptr || size == 0) {
+    return Status::OutOfRange();
+  }
+  Normalize(size, alignment);
+  auto addr = reinterpret_cast<uintptr_t>(ptr);
+  if (addr + size <= addr || addr < addr_ || addr_ + size_ < addr + size) {
+    return Status::OutOfRange();
+  }
+  return OkStatus();
+}
+
+void* SplitFreeListAllocator::DoAllocate(size_t size, size_t alignment) {
+  PW_DCHECK(addr_ != 0);
+  if (head_ == nullptr || size == 0 || size_ < size) {
+    return nullptr;
+  }
+  Normalize(size, alignment);
+
+  // Requests at or above the threshold are allocated from lower addresses;
+  // smaller requests are allocated from higher addresses.
+  bool from_lower = threshold_ <= size;
+  FreeBlock* prev = nullptr;
+  FreeBlock* block = nullptr;
+  size_t offset = 0;
+  for (FreeBlock *previous = nullptr, *current = head_; current != nullptr;
+       previous = current, current = current->next) {
+    if (current->size < size) {
+      continue;
+    }
+    // Fragment large requests from the start of the block, and small requests
+    // from the back. Verify the aligned offsets are still within the block.
+    uintptr_t current_start = reinterpret_cast<uintptr_t>(current);
+    uintptr_t current_end = current_start + current->size;
+    uintptr_t addr = from_lower ? AlignUp(current_start, alignment)
+                                : AlignDown(current_end - size, alignment);
+    if (addr < current_start || current_end < addr + size) {
+      continue;
+    }
+    // Update `prev` and `block` if the current block is earlier or later and
+    // we want blocks with lower or higher addresses, respectively.
+    if (block == nullptr || (current < block) == from_lower) {
+      prev = previous;
+      block = current;
+      offset = addr - current_start;
+    }
+  }
+  if (block == nullptr) {
+    return nullptr;
+  }
+  if (offset != 0) {
+    prev = block;
+    block = SplitBlock(block, offset);
+  }
+  if (size < block->size) {
+    SplitBlock(block, size);
+  }
+  return RemoveBlock(prev, block);
+}
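A worked trace of `DoAllocate` may help (hypothetical numbers; 64-bit target,
threshold of 64, and one free block covering [0x1000, 0x1100)):

  // DoAllocate(64, 1): Normalize() yields size 64, alignment 16. Since
  // 64 >= threshold, from_lower is true: addr = AlignUp(0x1000, 16) = 0x1000,
  // offset = 0. The 256-byte block is split at 64; [0x1000, 0x1040) is
  // returned and [0x1040, 0x1100) stays on the free list.
  //
  // DoAllocate(16, 1): from_lower is false: addr = AlignDown(0x1100 - 16, 16)
  // = 0x10f0, offset = 0xb0. The block is split at 0xb0; [0x10f0, 0x1100) is
  // returned and [0x1040, 0x10f0) stays on the free list.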
+
+void SplitFreeListAllocator::DoDeallocate(void* ptr,
+                                          size_t size,
+                                          size_t alignment) {
+  PW_DCHECK(addr_ != 0);
+
+  // Do nothing if no memory block pointer.
+  if (ptr == nullptr) {
+    return;
+  }
+
+  // Ensure that this allocation came from this object.
+  PW_DCHECK(DoQuery(ptr, size, alignment).ok());
+
+  Normalize(size, alignment);
+  FreeBlock* block = CreateBlock(ptr, size);
+  for (FreeBlock *previous = nullptr, *current = head_; current != nullptr;
+       current = current->next) {
+    if (IsAdjacent(current, current->size, block)) {
+      // Block precedes the block being freed. Remove from list and merge.
+      block = MergeBlocks(RemoveBlock(previous, current), block);
+    } else if (IsAdjacent(block, block->size, current)) {
+      // Block follows the block being freed. Remove from list and merge.
+      block = MergeBlocks(block, RemoveBlock(previous, current));
+    } else {
+      previous = current;
+    }
+  }
+
+  // Add the released block to the free list.
+  AddBlock(block);
+}
+
+bool SplitFreeListAllocator::DoResize(void* ptr,
+                                      size_t old_size,
+                                      size_t old_alignment,
+                                      size_t new_size) {
+  PW_DCHECK(addr_ != 0);
+
+  if (ptr == nullptr || old_size == 0 || new_size == 0) {
+    return false;
+  }
+
+  // Ensure that this allocation came from this object.
+  PW_DCHECK(DoQuery(ptr, old_size, old_alignment).ok());
+
+  // Do nothing if the new size equals the current size.
+  Normalize(old_size, old_alignment);
+  Normalize(new_size, old_alignment);
+  if (old_size == new_size) {
+    return true;
+  }
+  bool growing = old_size < new_size;
+  size_t diff = growing ? new_size - old_size : old_size - new_size;
+  // Try to find a free block that follows this one.
+  FreeBlock* prev = nullptr;
+  FreeBlock* next = head_;
+  while (next != nullptr && !IsAdjacent(ptr, old_size, next)) {
+    prev = next;
+    next = next->next;
+  }
+  if (growing) {
+    if (next == nullptr || next->size < diff) {
+      // No free neighbor that is large enough. Must reallocate.
+      return false;
+    }
+    // Split the next block and remove the portion to be returned.
+    if (diff != next->size) {
+      SplitBlock(next, diff);
+    }
+    RemoveBlock(prev, next);
+  } else /* !growing */ {
+    if (next == nullptr) {
+      // Create a new block for the extra space and add it.
+      next = CreateBlock(ptr, diff, new_size);
+    } else {
+      // Merge the extra space with the next block.
+      RemoveBlock(prev, next);
+      prev = CreateBlock(ptr, diff, new_size);
+      next = MergeBlocks(prev, next);
+    }
+    AddBlock(next);
+  }
+  return true;
+}
+
+}  // namespace pw::allocator
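The grow-in-place path above only succeeds when the bytes immediately after
the allocation are on the free list. A caller-level sketch (illustrative;
assumes a freshly initialized `SplitFreeListAllocator` named `allocator` with
threshold 64 and a 16-byte-aligned region):

  using pw::allocator::Layout;

  constexpr Layout kLayout = Layout::Of<std::byte[64]>();
  void* a = allocator.Allocate(kLayout);  // Front of the region.
  void* b = allocator.Allocate(kLayout);  // Immediately after `a`.

  // Fails: `b` occupies the bytes that `a` would grow into.
  bool grew = allocator.Resize(a, kLayout, 128);

  allocator.Deallocate(b, kLayout);
  // Succeeds: the free neighbor is now large enough for the difference.
  grew = allocator.Resize(a, kLayout, 128);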
diff --git a/pw_allocator/split_free_list_allocator_test.cc b/pw_allocator/split_free_list_allocator_test.cc
new file mode 100644
index 0000000000..16f62fade3
--- /dev/null
+++ b/pw_allocator/split_free_list_allocator_test.cc
@@ -0,0 +1,304 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/split_free_list_allocator.h"
+
+#include "gtest/gtest.h"
+#include "pw_bytes/alignment.h"
+#include "pw_containers/vector.h"
+
+namespace pw::allocator {
+namespace {
+
+// Test fixture.
+
+struct SplitFreeListAllocatorTest : ::testing::Test {
+  alignas(16) std::array<std::byte, 256> buffer;
+  SplitFreeListAllocator allocator;
+
+  void SetUp() override {
+    allocator.Initialize(buffer.data(), buffer.size(), 64);
+  }
+};
+
+// Unit tests.
+
+TEST_F(SplitFreeListAllocatorTest, InitializeUnaligned) {
+  // The test fixture uses aligned memory to make it easier to reason about
+  // allocations, but that isn't strictly required. Simply verify that a call
+  // to `Initialize` with unaligned memory does not crash.
+  alignas(16) std::array<std::byte, 256> buf;
+  SplitFreeListAllocator unaligned;
+  unaligned.Initialize(buf.data() + 1, buf.size() - 1, 64);
+}
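For the unaligned case, `Initialize` trims the region to `FreeBlock`
alignment. Concretely (illustrative numbers; 64-bit target, where
`alignof(FreeBlock)` is 8), passing `base = 0x1001` and `size = 255` yields:

  // addr_ = AlignUp(0x1001, 8)    == 0x1008  (7 bytes skipped at the front)
  // size_ = AlignDown(255 - 7, 8) == 248     (usable bytes, a multiple of 8)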
+
+TEST_F(SplitFreeListAllocatorTest, AllocateLargeDeallocate) {
+  constexpr Layout layout = Layout::Of<std::byte[64]>();
+  void* ptr = allocator.Allocate(layout);
+  // Returned pointer should be from the beginning.
+  EXPECT_EQ(ptr, buffer.data());
+  allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateSmallDeallocate) {
+  // Returned pointer should not be from the beginning, but should still be in
+  // range. The exact pointer depends on the allocator's minimum allocation
+  // size.
+  constexpr Layout layout = Layout::Of<uint8_t>();
+  void* ptr = allocator.Allocate(layout);
+  EXPECT_GT(ptr, buffer.data());
+  EXPECT_LT(ptr, buffer.data() + buffer.size());
+  allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateTooLarge) {
+  void* ptr = allocator.Allocate(Layout::Of<std::byte[512]>());
+  EXPECT_EQ(ptr, nullptr);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateAllDeallocateShuffled) {
+  constexpr Layout layout = Layout::Of<std::byte[32]>();
+  Vector<void*, 256> ptrs;
+  // Allocate until the pool is exhausted.
+  while (true) {
+    void* ptr = allocator.Allocate(layout);
+    if (ptr == nullptr) {
+      break;
+    }
+    ptrs.push_back(ptr);
+  }
+  // Mix up the order of allocations.
+  for (size_t i = 0; i < ptrs.size(); ++i) {
+    if (i % 2 == 0 && i + 1 < ptrs.size()) {
+      std::swap(ptrs[i], ptrs[i + 1]);
+    }
+    if (i % 3 == 0 && i + 2 < ptrs.size()) {
+      std::swap(ptrs[i], ptrs[i + 2]);
+    }
+  }
+  // Deallocate everything.
+  for (void* ptr : ptrs) {
+    allocator.Deallocate(ptr, layout);
+  }
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateDeallocateLargeAlignment) {
+  void* ptr1 = allocator.AllocateUnchecked(sizeof(uint32_t), 64);
+  void* ptr2 = allocator.AllocateUnchecked(sizeof(uint32_t), 64);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr1) % 64, 0U);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % 64, 0U);
+  allocator.DeallocateUnchecked(ptr1, sizeof(uint32_t), 64);
+  allocator.DeallocateUnchecked(ptr2, sizeof(uint32_t), 64);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateAlignmentFailure) {
+  // Find a valid address aligned to 128 bytes.
+  auto base = reinterpret_cast<uintptr_t>(buffer.data());
+  auto aligned = AlignUp(base + 16, 128);
+
+  // Now allocate up to 3 regions:
+  // * from the beginning to 16 bytes before the alignment boundary
+  // * the next 128 bytes
+  // * whatever is left
+  size_t size1 = aligned - base - 16;
+  void* ptr1 = allocator.AllocateUnchecked(size1, 1);
+
+  size_t size2 = 128;
+  void* ptr2 = allocator.AllocateUnchecked(size2, 1);
+
+  size_t size3 = 128 - size1;
+  void* ptr3 = allocator.AllocateUnchecked(size3, 1);
+
+  // Now free the second region. This leaves a 128-byte region available, but
+  // it is not aligned to a 128-byte boundary.
+  allocator.DeallocateUnchecked(ptr2, size2, 1);
+
+  // The allocator should be unable to create an aligned region of the given
+  // size.
+  void* ptr = allocator.AllocateUnchecked(128, 128);
+  EXPECT_EQ(ptr, nullptr);
+
+  if (ptr1 != nullptr) {
+    allocator.DeallocateUnchecked(ptr1, size1, 1);
+  }
+  allocator.DeallocateUnchecked(ptr3, size3, 1);
+}
+
+TEST_F(SplitFreeListAllocatorTest, DeallocateNull) {
+  constexpr Layout layout = Layout::Of<uint8_t>();
+  allocator.Deallocate(nullptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, QueryLargeValid) {
+  constexpr Layout layout = Layout::Of<std::byte[64]>();
+  void* ptr = allocator.Allocate(layout);
+  EXPECT_EQ(allocator.Query(ptr, layout), OkStatus());
+  allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, QuerySmallValid) {
+  constexpr Layout layout = Layout::Of<uint8_t>();
+  void* ptr = allocator.Allocate(layout);
+  EXPECT_EQ(allocator.Query(ptr, layout), OkStatus());
+  allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, QueryInvalidPtr) {
+  constexpr Layout layout = Layout::Of<uint8_t>();
+  EXPECT_EQ(allocator.Query(this, layout), Status::OutOfRange());
+}
+
+TEST_F(SplitFreeListAllocatorTest, QueryInvalidSize) {
+  constexpr Layout layout = Layout::Of<uint32_t>();
+  void* ptr = allocator.Allocate(layout);
+  EXPECT_EQ(allocator.QueryUnchecked(ptr, 0, layout.alignment()),
+            Status::OutOfRange());
+  allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeNull) {
+  constexpr Layout old_layout = Layout::Of<uint8_t>();
+  size_t new_size = 1;
+  EXPECT_FALSE(allocator.Resize(nullptr, old_layout, new_size));
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSame) {
+  constexpr Layout old_layout = Layout::Of<uint32_t>();
+  void* ptr = allocator.Allocate(old_layout);
+  EXPECT_NE(ptr, nullptr);
+  constexpr Layout new_layout = Layout::Of<uint32_t>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeSmaller) {
+  constexpr Layout old_layout = Layout::Of<std::byte[128]>();
+  void* ptr = allocator.Allocate(old_layout);
+
+  // Shrinking always succeeds.
+  constexpr Layout new_layout = Layout::Of<std::byte[96]>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeLarger) {
+  constexpr Layout old_layout = Layout::Of<std::byte[128]>();
+  void* ptr = allocator.Allocate(old_layout);
+
+  // Nothing is allocated after `ptr`, so `Resize` should succeed.
+  constexpr Layout new_layout = Layout::Of<std::byte[240]>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeLargerFailure) {
+  constexpr Layout old_layout = Layout::Of<std::byte[128]>();
+  void* ptr1 = allocator.Allocate(old_layout);
+  void* ptr2 = allocator.Allocate(old_layout);
+
+  // Memory after `ptr1` is already allocated, so `Resize` should fail.
+  size_t new_size = 240;
+  EXPECT_FALSE(allocator.Resize(ptr1, old_layout, new_size));
+  allocator.Deallocate(ptr1, old_layout);
+  allocator.Deallocate(ptr2, old_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeSmallerAcrossThreshold) {
+  constexpr Layout old_layout = Layout::Of<std::byte[128]>();
+  void* ptr = allocator.Allocate(old_layout);
+
+  // Shrinking succeeds, and the pointer is unchanged even though it is now
+  // below the threshold.
+  constexpr Layout new_layout = Layout::Of<std::byte[32]>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallSmaller) {
+  constexpr Layout old_layout = Layout::Of<std::byte[48]>();
+  void* ptr = allocator.Allocate(old_layout);
+
+  // Shrinking always succeeds.
+  constexpr Layout new_layout = Layout::Of<std::byte[16]>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallLarger) {
+  // First, allocate a trailing block.
+  constexpr Layout layout1 = Layout::Of<std::byte[16]>();
+  void* ptr1 = allocator.Allocate(layout1);
+  EXPECT_NE(ptr1, nullptr);
+
+  // Next allocate the memory to be resized.
+  constexpr Layout old_layout = Layout::Of<std::byte[16]>();
+  void* ptr = allocator.Allocate(old_layout);
+  EXPECT_NE(ptr, nullptr);
+
+  // Now free the trailing block.
+  allocator.Deallocate(ptr1, layout1);
+
+  // And finally, resize. Since the memory after the block is available and
+  // big enough, `Resize` should succeed.
+  constexpr Layout new_layout = Layout::Of<std::byte[32]>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallLargerFailure) {
+  // First, allocate a trailing block.
+  constexpr Layout layout1 = Layout::Of<std::byte[16]>();
+  void* ptr1 = allocator.Allocate(layout1);
+  EXPECT_NE(ptr1, nullptr);
+
+  // Next allocate the memory to be resized.
+  constexpr Layout old_layout = Layout::Of<std::byte[16]>();
+  void* ptr = allocator.Allocate(old_layout);
+  EXPECT_NE(ptr, nullptr);
+
+  // Now free the trailing block.
+  allocator.Deallocate(ptr1, layout1);
+
+  // And finally, resize. Since the memory after the block is available but
+  // not big enough, `Resize` should fail.
+  size_t new_size = 48;
+  EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_size));
+  allocator.Deallocate(ptr, old_layout);
+}
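When `Resize` reports failure, as in the test above, a caller can fall back
to a manual move. A sketch using only operations exercised by these tests
(the helper name and signature are hypothetical, not Pigweed API):

  #include <cstring>

  using pw::allocator::Layout;
  using pw::allocator::SplitFreeListAllocator;

  void* GrowOrMove(SplitFreeListAllocator& allocator,
                   void* ptr, Layout old_layout, Layout new_layout) {
    // Prefer growing in place; otherwise allocate, copy, and free.
    if (allocator.Resize(ptr, old_layout, new_layout.size())) {
      return ptr;
    }
    void* moved = allocator.Allocate(new_layout);
    if (moved != nullptr) {
      std::memcpy(moved, ptr, old_layout.size());
      allocator.Deallocate(ptr, old_layout);
    }
    return moved;
  }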
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallLargerAcrossThreshold) {
+  // First, allocate several trailing blocks.
+  constexpr Layout layout1 = Layout::Of<std::byte[32]>();
+  void* ptr1 = allocator.Allocate(layout1);
+  EXPECT_NE(ptr1, nullptr);
+  void* ptr2 = allocator.Allocate(layout1);
+  EXPECT_NE(ptr2, nullptr);
+
+  // Next allocate the memory to be resized.
+  constexpr Layout old_layout = Layout::Of<std::byte[16]>();
+  void* ptr = allocator.Allocate(old_layout);
+  EXPECT_NE(ptr, nullptr);
+
+  // Now free the trailing blocks.
+  allocator.Deallocate(ptr1, layout1);
+  allocator.Deallocate(ptr2, layout1);
+
+  // Growing succeeds, and the pointer is unchanged even though it is now
+  // above the threshold.
+  constexpr Layout new_layout = Layout::Of<std::byte[80]>();
+  EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+  allocator.Deallocate(ptr, new_layout);
+}
+
+}  // namespace
+}  // namespace pw::allocator
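One boundary case worth noting: the comparison in `DoAllocate` is
`threshold_ <= size`, so a request exactly at the threshold is treated as
"large" and served from the front, while anything smaller goes to the back.
An illustrative extra test in the style of the fixture above (not part of
this patch):

  TEST_F(SplitFreeListAllocatorTest, AllocateJustBelowThresholdIsSmall) {
    constexpr Layout layout = Layout::Of<std::byte[48]>();  // 48 < 64.
    void* ptr = allocator.Allocate(layout);
    EXPECT_NE(ptr, buffer.data());  // Served from the back, not the front.
    allocator.Deallocate(ptr, layout);
  }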