author     gingerBill <gingerBill@users.noreply.github.com>  2021-08-23 09:32:41 +0100
committer  GitHub <noreply@github.com>  2021-08-23 09:32:41 +0100
commit     7a00ef1879719a6304a80eff7febae93273d862c (patch)
tree       bf2879903e045c2ef4e6c08c8a86be96b1da1708 /src
parent     daced956e3066ac536987217e6ee5bc1e909e306 (diff)
parent     35204e3cc5f860229a30fbe408206d9d3adaddce (diff)
Merge pull request #1096 from nakst/master
thread_pool.cpp: fix with 1 thread; gb.h: remove buggy /proc/cpuinfo code
Diffstat (limited to 'src')
-rw-r--r--  src/gb/gb.h         | 66
-rw-r--r--  src/thread_pool.cpp | 38
2 files changed, 19 insertions(+), 85 deletions(-)
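
The gb.h half of this merge deletes the /proc/cpuinfo parser that tried to count threads per core and leaves only the sysconf query. A standalone sketch of what the patched Linux gb_affinity_init reduces to is below; the struct and its field types are assumed here for illustration (gb.h uses its own b32/isize typedefs):

    #include <unistd.h>                          // sysconf, _SC_NPROCESSORS_ONLN

    struct Affinity {                            // stand-in for gbAffinity
        bool is_accurate;
        long core_count;
        long thread_count;
        long threads_per_core;
    };

    void affinity_init(Affinity *a) {
        long n = sysconf(_SC_NPROCESSORS_ONLN);  // online CPUs; -1 on failure
        a->is_accurate      = n > 0;
        a->core_count       = a->is_accurate ? n : 1;
        a->threads_per_core = 1;                 // hyper-threads are no longer guessed
        a->thread_count     = a->core_count;     // so thread_count == core_count
    }
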
diff --git a/src/gb/gb.h b/src/gb/gb.h
index 1ffaa81e1..40215b80a 100644
--- a/src/gb/gb.h
+++ b/src/gb/gb.h
@@ -3785,74 +3785,14 @@ isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core) {
}
#elif defined(GB_SYSTEM_LINUX)
-// IMPORTANT TODO(bill): This gbAffinity stuff for linux needs be improved a lot!
-// NOTE(zangent): I have to read /proc/cpuinfo to get the number of threads per core.
#include <stdio.h>
void gb_affinity_init(gbAffinity *a) {
- b32 accurate = true;
- isize threads = 0;
-
- a->thread_count = 1;
a->core_count = sysconf(_SC_NPROCESSORS_ONLN);
a->threads_per_core = 1;
-
-
- if(a->core_count <= 0) {
- a->core_count = 1;
- accurate = false;
- }
-
- // Parsing /proc/cpuinfo to get the number of threads per core.
- // NOTE(zangent): This calls the CPU's threads "cores", although the wording
- // is kind of weird. This should be right, though.
-
- FILE* cpu_info = fopen("/proc/cpuinfo", "r");
-
- if (cpu_info != NULL) {
- for (;;) {
- // The 'temporary char'. Everything goes into this char,
- // so that we can check against EOF at the end of this loop.
- int c;
-
-#define AF__CHECK(letter) ((c = getc(cpu_info)) == letter)
- if (AF__CHECK('c') && AF__CHECK('p') && AF__CHECK('u') && AF__CHECK(' ') &&
- AF__CHECK('c') && AF__CHECK('o') && AF__CHECK('r') && AF__CHECK('e') && AF__CHECK('s')) {
- // We're on a CPU info line.
- while (!AF__CHECK(EOF)) {
- if (c == '\n') {
- break;
- } else if (c < '0' || '9' > c) {
- continue;
- }
- threads = threads * 10 + (c - '0');
- }
- break;
- } else {
- while (!AF__CHECK('\n')) {
- if (c==EOF) {
- break;
- }
- }
- }
- if (c == EOF) {
- break;
- }
-#undef AF__CHECK
- }
-
- fclose(cpu_info);
- }
-
- if (threads == 0) {
- threads = 1;
- accurate = false;
- }
-
- a->threads_per_core = threads;
- a->thread_count = a->threads_per_core * a->core_count;
- a->is_accurate = accurate;
-
+ a->is_accurate = a->core_count > 0;
+ a->core_count = a->is_accurate ? a->core_count : 1;
+ a->thread_count = a->core_count;
}
void gb_affinity_destroy(gbAffinity *a) {
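
In thread_pool.cpp below, the worker loop is restructured so that an idle worker peeks at next_task without taking the mutex and only locks (and re-checks) when a task appears to be queued; next_task becomes a volatile pointer so the unlocked peek is re-read on every iteration. A minimal, self-contained sketch of that dequeue pattern, using illustrative names rather than the project's types:

    #include <atomic>
    #include <mutex>
    #include <thread>                        // std::this_thread::yield

    struct Task {
        Task *next;
        void (*do_work)(void *data);
        void *data;
    };

    struct Pool {
        std::atomic<long> outstanding_task_count;
        Task *volatile    next_task;         // volatile, as in the patch, so the
                                             // unlocked peek below is reloaded each pass
        std::mutex        task_list_mutex;
    };

    void worker_loop(Pool *pool) {
        while (pool->outstanding_task_count.load() != 0) {
            if (!pool->next_task) {
                std::this_thread::yield();   // queue looks empty: skip the mutex
                continue;
            }
            pool->task_list_mutex.lock();
            Task *task = pool->next_task;    // re-check under the lock; another
            if (task) {                      // worker may have taken the task already
                pool->next_task = task->next;
                pool->task_list_mutex.unlock();
                task->do_work(task->data);
                pool->outstanding_task_count.fetch_sub(1);
                // the real code frees the task here with gb_free(heap_allocator(), task)
            } else {
                pool->task_list_mutex.unlock();
            }
        }
    }
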
diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp
index 5f21e0c3d..54d6cd72c 100644
--- a/src/thread_pool.cpp
+++ b/src/thread_pool.cpp
@@ -11,30 +11,27 @@ struct WorkerTask {
struct ThreadPool {
std::atomic<isize> outstanding_task_count;
- WorkerTask *next_task;
+ WorkerTask *volatile next_task;
BlockingMutex task_list_mutex;
};
-void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_prefix = nullptr);
-void thread_pool_destroy(ThreadPool *pool);
-void thread_pool_wait(ThreadPool *pool);
-void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
-void worker_thread_internal();
-
void thread_pool_thread_entry(ThreadPool *pool) {
while (pool->outstanding_task_count) {
- mutex_lock(&pool->task_list_mutex);
-
- if (pool->next_task) {
- WorkerTask *task = pool->next_task;
- pool->next_task = task->next_task;
- mutex_unlock(&pool->task_list_mutex);
- task->do_work(task->data);
- pool->outstanding_task_count.fetch_sub(1);
- gb_free(heap_allocator(), task);
+ if (!pool->next_task) {
+ yield(); // No need to grab the mutex.
} else {
- mutex_unlock(&pool->task_list_mutex);
- yield();
+ mutex_lock(&pool->task_list_mutex);
+
+ if (pool->next_task) {
+ WorkerTask *task = pool->next_task;
+ pool->next_task = task->next_task;
+ mutex_unlock(&pool->task_list_mutex);
+ task->do_work(task->data);
+ pool->outstanding_task_count.fetch_sub(1);
+ gb_free(heap_allocator(), task);
+ } else {
+ mutex_unlock(&pool->task_list_mutex);
+ }
}
}
}
@@ -77,10 +74,7 @@ void thread_pool_destroy(ThreadPool *pool) {
void thread_pool_wait(ThreadPool *pool) {
pool->outstanding_task_count.fetch_sub(1);
-
- while (pool->outstanding_task_count.load() != 0) {
- yield();
- }
+ thread_pool_thread_entry(pool);
}
void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data) {
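
Note on the thread_pool_wait hunk above: instead of spinning on yield() until outstanding_task_count reaches zero, the waiting thread now calls thread_pool_thread_entry and drains the task list itself. That appears to be the "fix with 1 thread" from the commit title: when the pool has no separate worker threads to pick tasks up, the thread that waits now executes the queued tasks, so the wait can actually complete.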