[scudo] Add ScopedTSD to avoid releasing TSD manually (llvm#80061)
This makes the use of TSDs RAII-style and avoids exposing the concrete
TSD type.

Also move some thread-safety checks from static analysis to runtime
because of the limitations of the static analysis. Even though some code
paths are marked NO_THREAD_SAFETY_ANALYSIS, `assertLocked()` still
verifies correctness at runtime.
ChiaHungDuan authored Feb 5, 2024
1 parent dfdea4d commit 8ce036d
Showing 5 changed files with 130 additions and 98 deletions.
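
In short, the change replaces manual lock management at every TSD use with an RAII guard: the guard locks (when needed) on construction, unlocks on destruction, and runtime-checks the lock on access. A minimal, self-contained sketch of the pattern, using simplified stand-in types rather than scudo's actual ones:

#include <cassert>
#include <mutex>

// Stand-in for scudo's TSD: a lockable thread-specific structure.
struct TSD {
  void lock() {
    Mutex.lock();
    Locked = true;
  }
  void unlock() {
    Locked = false;
    Mutex.unlock();
  }
  // The runtime check that backs up the suppressed static analysis.
  void assertLocked() const { assert(Locked); }
  int Cache = 0;

private:
  std::mutex Mutex;
  bool Locked = false;
};

// Stand-in for ScopedTSD: locks on construction, unlocks on destruction,
// and verifies the lock on every access through operator->.
class ScopedTSD {
public:
  explicit ScopedTSD(TSD &T) : CurrentTSD(&T) { CurrentTSD->lock(); }
  ~ScopedTSD() { CurrentTSD->unlock(); }
  TSD *operator->() {
    CurrentTSD->assertLocked();
    return CurrentTSD;
  }

private:
  TSD *CurrentTSD;
};

int main() {
  TSD T;
  {
    ScopedTSD Guard(T);
    Guard->Cache += 1; // every access is runtime-checked
  } // unlocked here, even on early return
}

The diffs below apply this pattern to the allocator, its tests, and the exclusive TSD registry.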
24 changes: 7 additions & 17 deletions compiler-rt/lib/scudo/standalone/combined.h
@@ -364,9 +364,7 @@ class Allocator {
     if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
       DCHECK_NE(ClassId, 0U);
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
       Block = TSD->getCache().allocate(ClassId);
       // If the allocation failed, retry in each successively larger class until
       // it fits. If it fails to fit in the largest class, fallback to the
@@ -377,8 +375,6 @@
         if (!Block)
           ClassId = 0;
       }
-      if (UnlockRequired)
-        TSD->unlock();
     }
     if (UNLIKELY(ClassId == 0)) {
       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
@@ -1145,13 +1141,11 @@ class Allocator {
     void *BlockBegin = getBlockBegin(Ptr, Header);
     const uptr ClassId = Header->ClassId;
     if (LIKELY(ClassId)) {
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-      const bool CacheDrained =
-          TSD->getCache().deallocate(ClassId, BlockBegin);
-      if (UnlockRequired)
-        TSD->unlock();
+      bool CacheDrained;
+      {
+        typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
+        CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
+      }
       // When we have drained some blocks back to the Primary from TSD, that
       // implies that we may have the chance to release some pages as well.
       // Note that in order not to block other thread's accessing the TSD,
@@ -1165,13 +1159,9 @@
         Secondary.deallocate(Options, BlockBegin);
       }
     } else {
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
       Quarantine.put(&TSD->getQuarantineCache(),
                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
-      if (UnlockRequired)
-        TSD->unlock();
     }
   }

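The extra brace scope in the deallocation path above is deliberate: CacheDrained is computed inside the critical section, but the guard is destroyed before the page-release logic runs, matching the comment about not blocking other threads' access to the TSD. A sketch of the idiom with standard types (hypothetical names):

#include <mutex>

std::mutex CacheMutex;
int CachedBlocks = 1;

void deallocateAndMaybeRelease() {
  bool Drained;
  {
    std::lock_guard<std::mutex> Guard(CacheMutex); // lock held only here
    Drained = (--CachedBlocks == 0);
  } // guard destroyed: lock released before the slow path below
  if (Drained) {
    // Potentially slow page-release work runs outside the critical
    // section, so concurrent threads can keep using the cache.
  }
}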
38 changes: 24 additions & 14 deletions compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -229,11 +229,25 @@ struct TestConditionVariableConfig {
 #define SCUDO_TYPED_TEST(FIXTURE, NAME) \
   template <class TypeParam> \
   struct FIXTURE##NAME : public FIXTURE<TypeParam> { \
+    using BaseT = FIXTURE<TypeParam>; \
     void Run(); \
   }; \
   SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
   template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
+
+// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
+// using thread-safety analysis. Alternatively, we verify the thread safety
+// through a runtime check in ScopedTSD and mark the test body with
+// NO_THREAD_SAFETY_ANALYSIS.
+#define SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(FIXTURE, NAME) \
+  template <class TypeParam> \
+  struct FIXTURE##NAME : public FIXTURE<TypeParam> { \
+    using BaseT = FIXTURE<TypeParam>; \
+    void Run() NO_THREAD_SAFETY_ANALYSIS; \
+  }; \
+  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
+  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

 SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
   auto *Allocator = this->Allocator.get();
   static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
@@ -547,7 +561,8 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
   EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
 }

-SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
+SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, CacheDrain) {
+  using AllocatorT = typename BaseT::AllocatorT;
   auto *Allocator = this->Allocator.get();

   std::vector<void *> V;
@@ -559,17 +574,15 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
   for (auto P : V)
     Allocator->deallocate(P, Origin);

-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
+      *Allocator->getTSDRegistry());
   EXPECT_TRUE(!TSD->getCache().isEmpty());
   TSD->getCache().drain();
   EXPECT_TRUE(TSD->getCache().isEmpty());
-  if (UnlockRequired)
-    TSD->unlock();
 }

-SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
+SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, ForceCacheDrain) {
+  using AllocatorT = typename BaseT::AllocatorT;
   auto *Allocator = this->Allocator.get();

   std::vector<void *> V;
@@ -584,14 +597,11 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
   // `ForceAll` will also drain the caches.
   Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
+      *Allocator->getTSDRegistry());
   EXPECT_TRUE(TSD->getCache().isEmpty());
   EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
   EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
-  if (UnlockRequired)
-    TSD->unlock();
 }

 SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
@@ -892,8 +902,8 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
   }

-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
+      *Allocator->getTSDRegistry());
   TSD->getCache().drain();

   Allocator->releaseToOS(scudo::ReleaseToOS::Force);
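The new SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY macro exists because Clang's -Wthread-safety is purely static: it cannot see that ScopedTSD::operator->() validates the lock at runtime. A minimal, hypothetical illustration of the trade-off using Clang's thread-safety attributes directly (not scudo's macros):

// Compile with: clang++ -fsyntax-only -Wthread-safety example.cpp
class __attribute__((capability("mutex"))) Mutex {
public:
  void lock() __attribute__((acquire_capability())) {}
  void unlock() __attribute__((release_capability())) {}
};

struct Cache {
  Mutex M;
  int Blocks __attribute__((guarded_by(M))) = 0;
};

// Without the attribute below, Clang warns along the lines of "writing
// variable 'Blocks' requires holding mutex 'M'". The attribute trades
// that static check for a runtime assertion (scudo's assertLocked()).
void drain(Cache &C) __attribute__((no_thread_safety_analysis)) {
  C.Blocks = 0;
}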
55 changes: 25 additions & 30 deletions compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
@@ -17,6 +17,7 @@
 #include <mutex>
 #include <set>
 #include <thread>
+#include <type_traits>

 // We mock out an allocator with a TSD registry, mostly using empty stubs. The
 // cache contains a single volatile uptr, to be able to test that several
@@ -86,7 +87,8 @@ TEST(ScudoTSDTest, TSDRegistryInit) {
   EXPECT_TRUE(Allocator->isInitialized());
 }

-template <class AllocatorT> static void testRegistry() {
+template <class AllocatorT>
+static void testRegistry() NO_THREAD_SAFETY_ANALYSIS {
   auto Deleter = [](AllocatorT *A) {
     A->unmapTestOnly();
     delete A;
@@ -99,22 +101,17 @@ template <class AllocatorT> static void testRegistry() {
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
   EXPECT_TRUE(Allocator->isInitialized());

-  bool UnlockRequired;
-  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->getCache().Canary, 0U);
-  if (UnlockRequired)
-    TSD->unlock();
+  {
+    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
+  }

   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
-  TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->getCache().Canary, 0U);
-  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
-  if (UnlockRequired)
-    TSD->unlock();
+  {
+    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
+    memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
+  }
 }

 TEST(ScudoTSDTest, TSDRegistryBasic) {
@@ -129,31 +126,33 @@ static std::mutex Mutex;
 static std::condition_variable Cv;
 static bool Ready;

-template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
+// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
+// using thread-safety analysis. Alternatively, we verify the thread safety
+// through a runtime check in ScopedTSD and mark the test body with
+// NO_THREAD_SAFETY_ANALYSIS.
+template <typename AllocatorT>
+static void stressCache(AllocatorT *Allocator) NO_THREAD_SAFETY_ANALYSIS {
   auto Registry = Allocator->getTSDRegistry();
   {
     std::unique_lock<std::mutex> Lock(Mutex);
     while (!Ready)
       Cv.wait(Lock);
   }
   Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
-  bool UnlockRequired;
-  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
-  if (!UnlockRequired)
+  if (std::is_same<typename AllocatorT::TSDRegistryT,
+                   scudo::TSDRegistryExT<AllocatorT>>()) {
     EXPECT_EQ(TSD->getCache().Canary, 0U);
+  }
   // Transform the thread id to a uptr to use it as canary.
   const scudo::uptr Canary = static_cast<scudo::uptr>(
       std::hash<std::thread::id>{}(std::this_thread::get_id()));
   TSD->getCache().Canary = Canary;
   // Loop a few times to make sure that a concurrent thread isn't modifying it.
   for (scudo::uptr I = 0; I < 4096U; I++)
     EXPECT_EQ(TSD->getCache().Canary, Canary);
-  if (UnlockRequired)
-    TSD->unlock();
 }

 template <class AllocatorT> static void testRegistryThreaded() {
@@ -195,14 +194,10 @@ static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
     Cv.wait(Lock);
   }
   Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
-  bool UnlockRequired;
   for (scudo::uptr I = 0; I < 4096U; I++) {
-    auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-    EXPECT_NE(TSD, nullptr);
-    Set.insert(reinterpret_cast<void *>(TSD));
-    if (UnlockRequired)
-      TSD->unlock();
+    typename MockAllocator<SharedCaches>::TSDRegistryT::ScopedTSD TSD(
+        *Registry);
+    Set.insert(reinterpret_cast<void *>(&*TSD));
   }
   {
     std::unique_lock<std::mutex> Lock(Mutex);
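Two idioms in the updated tests are easy to miss: with UnlockRequired gone, stressCache now distinguishes exclusive from shared registries by type via std::is_same, and stressSharedRegistry uses &*TSD to recover the raw pointer from a guard that only exposes operator* and operator->. A self-contained sketch of both, with stand-in types:

#include <cassert>
#include <type_traits>

struct TSD {
  int Canary = 0;
};

// Guard stand-in: no implicit conversion to TSD*, so identity comparisons
// need the &*Guard dance.
class ScopedTSD {
public:
  explicit ScopedTSD(TSD &T) : CurrentTSD(&T) {}
  TSD &operator*() { return *CurrentTSD; }
  TSD *operator->() { return CurrentTSD; }

private:
  TSD *CurrentTSD;
};

template <class Allocator> struct TSDRegistryExT {};
struct MockAllocator {
  using TSDRegistryT = TSDRegistryExT<MockAllocator>;
};

int main() {
  TSD T;
  ScopedTSD Guard(T);
  // `&*Guard` yields the underlying pointer, as in stressSharedRegistry.
  assert(&*Guard == &T);
  // Branching on the registry type replaces the old UnlockRequired flag;
  // std::is_same<...>() converts to bool via integral_constant.
  if (std::is_same<MockAllocator::TSDRegistryT,
                   TSDRegistryExT<MockAllocator>>())
    assert(Guard->Canary == 0);
  return 0;
}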
54 changes: 37 additions & 17 deletions compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -27,6 +27,31 @@ struct ThreadState {
 template <class Allocator> void teardownThread(void *Ptr);

 template <class Allocator> struct TSDRegistryExT {
+  using ThisT = TSDRegistryExT<Allocator>;
+
+  struct ScopedTSD {
+    ScopedTSD(ThisT &TSDRegistry) {
+      CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      DCHECK_NE(CurrentTSD, nullptr);
+    }
+
+    ~ScopedTSD() {
+      if (UNLIKELY(UnlockRequired))
+        CurrentTSD->unlock();
+    }
+
+    TSD<Allocator> &operator*() { return *CurrentTSD; }
+
+    TSD<Allocator> *operator->() {
+      CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+      return CurrentTSD;
+    }
+
+  private:
+    TSD<Allocator> *CurrentTSD;
+    bool UnlockRequired;
+  };
+
   void init(Allocator *Instance) REQUIRES(Mutex) {
     DCHECK(!Initialized);
     Instance->init();
@@ -74,23 +99,6 @@ template <class Allocator> struct TSDRegistryExT {
     initThread(Instance, MinimalInit);
   }

-  // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
-  // embedding the logic into TSD or always locking the TSD. It will enable us
-  // to properly mark thread annotation here and adding proper runtime
-  // assertions in the member functions of TSD. For example, assert the lock is
-  // acquired before calling TSD::commitBack().
-  ALWAYS_INLINE TSD<Allocator> *
-  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
-    if (LIKELY(State.InitState == ThreadState::Initialized &&
-               !atomic_load(&Disabled, memory_order_acquire))) {
-      *UnlockRequired = false;
-      return &ThreadTSD;
-    }
-    FallbackTSD.lock();
-    *UnlockRequired = true;
-    return &FallbackTSD;
-  }
-
   // To disable the exclusive TSD registry, we effectively lock the fallback TSD
   // and force all threads to attempt to use it instead of their local one.
   void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -123,6 +131,18 @@ template <class Allocator> struct TSDRegistryExT {
   }

 private:
+  ALWAYS_INLINE TSD<Allocator> *
+  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+    if (LIKELY(State.InitState == ThreadState::Initialized &&
+               !atomic_load(&Disabled, memory_order_acquire))) {
+      *UnlockRequired = false;
+      return &ThreadTSD;
+    }
+    FallbackTSD.lock();
+    *UnlockRequired = true;
+    return &FallbackTSD;
+  }
+
   // Using minimal initialization allows for global initialization while keeping
   // the thread specific structure untouched. The fallback structure will be
   // used instead.
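The exclusive registry keeps UnlockRequired inside ScopedTSD because of its fast path: an initialized thread gets its own ThreadTSD back with no locking at all, and only the fallback path takes a mutex, as getTSDAndLock above shows. A self-contained model of that behavior (simplified stand-ins, not the upstream code):

#include <mutex>

struct ModelTSD {
  std::mutex Mutex;
  int Cache = 0;
};

thread_local ModelTSD ThreadTSD;      // exclusive to each thread
ModelTSD FallbackTSD;                 // shared, must be locked
thread_local bool Initialized = true; // stand-in for ThreadState

ModelTSD *getTSDAndLock(bool *UnlockRequired) {
  if (Initialized) {
    *UnlockRequired = false; // no lock: nobody else can touch ThreadTSD
    return &ThreadTSD;
  }
  FallbackTSD.Mutex.lock(); // contended path: lock the shared fallback
  *UnlockRequired = true;
  return &FallbackTSD;
}

class ScopedTSD {
public:
  ScopedTSD() : CurrentTSD(getTSDAndLock(&UnlockRequired)) {}
  ~ScopedTSD() {
    if (UnlockRequired)
      CurrentTSD->Mutex.unlock();
  }
  ModelTSD *operator->() { return CurrentTSD; }

private:
  ModelTSD *CurrentTSD;
  bool UnlockRequired;
};

int main() {
  ScopedTSD TSD;
  TSD->Cache += 1; // fast path: thread-local, no contention
}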
[The diff for the fifth changed file (presumably tsd_shared.h, the shared-TSD counterpart) did not load.]
