Skip to content

Commit

Permalink
WorkerThreadPool: Avoid most runtime allocations
Browse files Browse the repository at this point in the history
Just a little optimization.

**NOTE:**
With `RID_Owner` we could replace each pair of `PagedAllocator` and
`HashMap`-of-ids-to-pointers with a single `RID_Owner` instance. However,
that would force us to expose `RID` as the task/group id, instead of
`int`, which would break the API. Too bad. Let's wait until Godot 5.0.
  • Loading branch information
RandomShaper committed Jan 8, 2024
1 parent ae418f9 commit a731774
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 15 deletions.
20 changes: 14 additions & 6 deletions core/object/worker_thread_pool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -613,13 +613,14 @@ void WorkerThreadPool::finish() {
return;
}

task_mutex.lock();
SelfList<Task> *E = low_priority_task_queue.first();
while (E) {
print_error("Task waiting was never re-claimed: " + E->self()->description);
E = E->next();
{
MutexLock lock(task_mutex);
SelfList<Task> *E = low_priority_task_queue.first();
while (E) {
print_error("Task waiting was never re-claimed: " + E->self()->description);
E = E->next();
}
}
task_mutex.unlock();

{
MutexLock lock(task_mutex);
Expand All @@ -632,6 +633,13 @@ void WorkerThreadPool::finish() {
data.thread.wait_to_finish();
}

{
MutexLock lock(task_mutex);
for (KeyValue<TaskID, Task *> &E : tasks) {
task_allocator.free(E.value);
}
}

threads.clear();
}

Expand Down
23 changes: 19 additions & 4 deletions core/object/worker_thread_pool.h
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,11 @@ class WorkerThreadPool : public Object {
task_elem(this) {}
};

PagedAllocator<Task> task_allocator;
PagedAllocator<Group> group_allocator;
static const uint32_t TASKS_PAGE_SIZE = 1024;
static const uint32_t GROUPS_PAGE_SIZE = 256;

PagedAllocator<Task, false, TASKS_PAGE_SIZE> task_allocator;
PagedAllocator<Group, false, GROUPS_PAGE_SIZE> group_allocator;

SelfList<Task>::List low_priority_task_queue;
SelfList<Task>::List task_queue;
Expand All @@ -117,8 +120,20 @@ class WorkerThreadPool : public Object {
bool exit_threads = false;

HashMap<Thread::ID, int> thread_ids;
HashMap<TaskID, Task *> tasks;
HashMap<GroupID, Group *> groups;
HashMap<
TaskID,
Task *,
HashMapHasherDefault,
HashMapComparatorDefault<TaskID>,
PagedAllocator<HashMapElement<TaskID, Task *>, false, TASKS_PAGE_SIZE>>
tasks;
HashMap<
GroupID,
Group *,
HashMapHasherDefault,
HashMapComparatorDefault<GroupID>,
PagedAllocator<HashMapElement<GroupID, Group *>, false, GROUPS_PAGE_SIZE>>
groups;

uint32_t max_low_priority_threads = 0;
uint32_t low_priority_threads_used = 0;
Expand Down
6 changes: 1 addition & 5 deletions core/templates/paged_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
#include <type_traits>
#include <typeinfo>

template <class T, bool thread_safe = false>
template <class T, bool thread_safe = false, uint32_t DEFAULT_PAGE_SIZE = 4096>
class PagedAllocator {
T **page_pool = nullptr;
T ***available_pool = nullptr;
Expand All @@ -53,10 +53,6 @@ class PagedAllocator {
SpinLock spin_lock;

public:
enum {
DEFAULT_PAGE_SIZE = 4096
};

template <class... Args>
T *alloc(Args &&...p_args) {
if (thread_safe) {
Expand Down

0 comments on commit a731774

Please sign in to comment.