| field | value | date |
|---|---|---|
| author | gingerBill <bill@gingerbill.org> | 2021-08-23 11:31:09 +0100 |
| committer | gingerBill <bill@gingerbill.org> | 2021-08-23 11:31:09 +0100 |
| commit | 276d4b8f0d5c5bcff4b83bab12746dd80e10fc90 | |
| tree | be3eb312025e4e5ee233b65c3ecc9d41a768023f /src | |
| parent | 7bdbaca9385ae4533642a8d58dcf3b4461687821 | |
| parent | 7f34080b69b1bf0cf00606ec19e4e8bbc1701e69 | |
Merge branch 'master' of https://github.com/odin-lang/Odin
Diffstat (limited to 'src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/gb/gb.h | 66 |
| -rw-r--r-- | src/thread_pool.cpp | 48 |

2 files changed, 25 insertions, 89 deletions
```diff
diff --git a/src/gb/gb.h b/src/gb/gb.h
index 1ffaa81e1..40215b80a 100644
--- a/src/gb/gb.h
+++ b/src/gb/gb.h
@@ -3785,74 +3785,14 @@ isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core) {
 }
 
 #elif defined(GB_SYSTEM_LINUX)
-// IMPORTANT TODO(bill): This gbAffinity stuff for linux needs be improved a lot!
-// NOTE(zangent): I have to read /proc/cpuinfo to get the number of threads per core.
 #include <stdio.h>
 
 void gb_affinity_init(gbAffinity *a) {
-    b32 accurate = true;
-    isize threads = 0;
-
-    a->thread_count = 1;
     a->core_count = sysconf(_SC_NPROCESSORS_ONLN);
     a->threads_per_core = 1;
-
-
-    if(a->core_count <= 0) {
-        a->core_count = 1;
-        accurate = false;
-    }
-
-    // Parsing /proc/cpuinfo to get the number of threads per core.
-    // NOTE(zangent): This calls the CPU's threads "cores", although the wording
-    // is kind of weird. This should be right, though.
-
-    FILE* cpu_info = fopen("/proc/cpuinfo", "r");
-
-    if (cpu_info != NULL) {
-        for (;;) {
-            // The 'temporary char'. Everything goes into this char,
-            // so that we can check against EOF at the end of this loop.
-            int c;
-
-#define AF__CHECK(letter) ((c = getc(cpu_info)) == letter)
-            if (AF__CHECK('c') && AF__CHECK('p') && AF__CHECK('u') && AF__CHECK(' ') &&
-                AF__CHECK('c') && AF__CHECK('o') && AF__CHECK('r') && AF__CHECK('e') && AF__CHECK('s')) {
-                // We're on a CPU info line.
-                while (!AF__CHECK(EOF)) {
-                    if (c == '\n') {
-                        break;
-                    } else if (c < '0' || '9' > c) {
-                        continue;
-                    }
-                    threads = threads * 10 + (c - '0');
-                }
-                break;
-            } else {
-                while (!AF__CHECK('\n')) {
-                    if (c==EOF) {
-                        break;
-                    }
-                }
-            }
-            if (c == EOF) {
-                break;
-            }
-#undef AF__CHECK
-        }
-
-        fclose(cpu_info);
-    }
-
-    if (threads == 0) {
-        threads = 1;
-        accurate = false;
-    }
-
-    a->threads_per_core = threads;
-    a->thread_count = a->threads_per_core * a->core_count;
-    a->is_accurate = accurate;
-
+    a->is_accurate = a->core_count > 0;
+    a->core_count = a->is_accurate ? a->core_count : 1;
+    a->thread_count = a->core_count;
 }
 
 void gb_affinity_destroy(gbAffinity *a) {
```
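The gb.h change removes the /proc/cpuinfo parser entirely: on Linux, gb_affinity_init now reports whatever sysconf(_SC_NPROCESSORS_ONLN) returns, hard-codes threads_per_core to 1, and marks the result inaccurate only when sysconf fails. Note that _SC_NPROCESSORS_ONLN counts online logical processors, so SMT siblings are now reported as cores. Below is a minimal standalone sketch of the same logic; the CpuInfo and query_cpu_info names are illustrative, not part of gb.h:

```cpp
// Illustrative sketch only: mirrors the new Linux path of gb_affinity_init.
// sysconf() may return -1 on failure, hence the fallback to a single core.
#include <unistd.h>
#include <cstdio>

struct CpuInfo {
    long core_count;
    long threads_per_core;
    long thread_count;
    bool is_accurate;
};

CpuInfo query_cpu_info() {
    CpuInfo info = {};
    long n = sysconf(_SC_NPROCESSORS_ONLN);
    info.is_accurate      = n > 0;
    info.core_count       = info.is_accurate ? n : 1;
    info.threads_per_core = 1;              // SMT is no longer detected
    info.thread_count     = info.core_count;
    return info;
}

int main() {
    CpuInfo info = query_cpu_info();
    std::printf("cores: %ld (accurate: %d)\n", info.core_count, (int)info.is_accurate);
    return 0;
}
```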
```diff
diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp
index 5f21e0c3d..e904a2e29 100644
--- a/src/thread_pool.cpp
+++ b/src/thread_pool.cpp
@@ -11,30 +11,28 @@ struct WorkerTask {
 
 struct ThreadPool {
     std::atomic<isize> outstanding_task_count;
-    WorkerTask *next_task;
+    WorkerTask *volatile next_task;
     BlockingMutex task_list_mutex;
+    isize thread_count;
 };
 
-void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_prefix = nullptr);
-void thread_pool_destroy(ThreadPool *pool);
-void thread_pool_wait(ThreadPool *pool);
-void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
-void worker_thread_internal();
-
 void thread_pool_thread_entry(ThreadPool *pool) {
     while (pool->outstanding_task_count) {
-        mutex_lock(&pool->task_list_mutex);
-
-        if (pool->next_task) {
-            WorkerTask *task = pool->next_task;
-            pool->next_task = task->next_task;
-            mutex_unlock(&pool->task_list_mutex);
-            task->do_work(task->data);
-            pool->outstanding_task_count.fetch_sub(1);
-            gb_free(heap_allocator(), task);
+        if (!pool->next_task) {
+            yield(); // No need to grab the mutex.
         } else {
-            mutex_unlock(&pool->task_list_mutex);
-            yield();
+            mutex_lock(&pool->task_list_mutex);
+
+            if (pool->next_task) {
+                WorkerTask *task = pool->next_task;
+                pool->next_task = task->next_task;
+                mutex_unlock(&pool->task_list_mutex);
+                task->do_work(task->data);
+                pool->outstanding_task_count.fetch_sub(1);
+                gb_free(heap_allocator(), task);
+            } else {
+                mutex_unlock(&pool->task_list_mutex);
+            }
         }
     }
 }
@@ -65,10 +63,7 @@ void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count
     memset(pool, 0, sizeof(ThreadPool));
     mutex_init(&pool->task_list_mutex);
    pool->outstanding_task_count.store(1);
-
-    for (int i = 0; i < thread_count; i++) {
-        thread_pool_start_thread(pool);
-    }
+    pool->thread_count = thread_count;
 }
 
 void thread_pool_destroy(ThreadPool *pool) {
@@ -76,11 +71,12 @@ void thread_pool_destroy(ThreadPool *pool) {
 }
 
 void thread_pool_wait(ThreadPool *pool) {
-    pool->outstanding_task_count.fetch_sub(1);
-
-    while (pool->outstanding_task_count.load() != 0) {
-        yield();
+    for (int i = 0; i < pool->thread_count; i++) {
+        thread_pool_start_thread(pool);
     }
+
+    pool->outstanding_task_count.fetch_sub(1);
+    thread_pool_thread_entry(pool);
 }
 
 void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data) {
```
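The thread_pool.cpp change reworks two things: workers now peek at next_task without holding the mutex, locking and re-checking only when the list looks non-empty, and worker threads are no longer spawned in thread_pool_init; instead, thread_pool_wait starts pool->thread_count workers and then runs thread_pool_thread_entry on the calling thread rather than spinning on the counter. Below is a self-contained sketch of that check-then-lock drain loop, using std::mutex and std::thread as stand-ins for the compiler's BlockingMutex and gb threads. All names here are hypothetical, and an std::atomic pointer replaces the volatile next_task so the unlocked peek is well-defined C++:

```cpp
// Sketch only: same control flow as the new thread_pool.cpp, not the Odin API.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct Task {
    void (*do_work)(void *data);
    void *data;
    Task *next;
};

struct Pool {
    std::atomic<long>   outstanding{0};  // unfinished tasks + 1 sentinel
    std::atomic<Task *> head{nullptr};   // singly linked task list
    std::mutex          lock;
    int                 thread_count = 0;
};

// Mirrors thread_pool_thread_entry: peek without the lock; only when a task
// seems to be available, take the mutex and re-check before popping.
static void worker(Pool *pool) {
    while (pool->outstanding.load() != 0) {
        if (pool->head.load() == nullptr) {
            std::this_thread::yield();   // queue looks empty: skip the mutex
        } else {
            Task *task = nullptr;
            {
                std::lock_guard<std::mutex> g(pool->lock);
                task = pool->head.load();
                if (task) {
                    pool->head.store(task->next);
                }
            }                            // unlock before running the task
            if (task) {
                task->do_work(task->data);
                pool->outstanding.fetch_sub(1);
                delete task;
            }
        }
    }
}

// Mirrors thread_pool_add_task: bump the counter, push under the lock.
static void push(Pool *pool, void (*proc)(void *), void *data) {
    Task *task = new Task{proc, data, nullptr};
    pool->outstanding.fetch_add(1);
    std::lock_guard<std::mutex> g(pool->lock);
    task->next = pool->head.load();
    pool->head.store(task);
}

int main() {
    Pool pool;
    pool.outstanding.store(1);           // sentinel, like thread_pool_init
    pool.thread_count = 3;

    for (int i = 0; i < 8; i++) {
        push(&pool, [](void *d) { std::printf("task %d\n", (int)(std::intptr_t)d); },
             (void *)(std::intptr_t)i);
    }

    // Like the new thread_pool_wait: workers are only started here, and the
    // waiting thread then drains tasks itself instead of spinning.
    std::vector<std::thread> threads;
    for (int i = 0; i < pool.thread_count; i++) {
        threads.emplace_back(worker, &pool);
    }
    pool.outstanding.fetch_sub(1);       // drop the sentinel
    worker(&pool);

    for (std::thread &t : threads) {
        t.join();
    }
    return 0;
}
```

The unlocked peek keeps idle workers from serializing on the mutex when the list is empty, and deferring thread start to the wait call lets the waiting thread drain tasks itself rather than busy-wait until the counter reaches zero.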