From 99121d6ff2b02f3d16b791eb103bb9f9e8b96475 Mon Sep 17 00:00:00 2001
From: Tetralux
Date: Sat, 26 Oct 2019 22:35:36 +0000
Subject: Implement core:thread and core:sync on Unix using pthreads

Also do some cleanup and refactoring of the thread, sync, and time APIs.

- remove 'semaphore_release', because 'post' and 'wait' are easier to
  understand
- change 'semaphore_wait' to '*_wait_for' to match Condition
- pthreads can be given a stack, but doing so requires the user to set up
  the guard pages manually. BE WARNED.
  The alignment requirements of the stack are also platform-dependent; it
  may need to be page-size aligned on some systems, though it is unclear
  which ones. See 'os.get_page_size' and 'mem.make_aligned'.
  HOWEVER: I was unable to get custom stacks with guard pages working
  reliably, so while you can do it, the API does not support it.
- add 'os.get_page_size', 'mem.make_aligned', and 'mem.new_aligned'
- removed thread return values, because Windows and Linux are not
  consistent: Windows returns 'i32' and pthreads returns 'void*'. Besides
  which, if you really wanted to communicate how the thread exited, you
  probably wouldn't do it with the thread's exit code.
- fixed 'thread.is_done' on Windows; it didn't report true immediately
  after calling 'thread.join'
- moved time-related code out of 'core:os' into 'core:time'
- add 'mem.align_backward'
- fixed default allocator alignment
  Neither the heap on Windows nor calloc on Linux has any facility to
  request alignment. It's a bit of a hack, but the heap_allocator now
  overallocates, requesting `size + alignment` bytes, and aligns everything
  to at least 2. It does both of these things to ensure that there are at
  least two bytes before the payload, which it uses to store how much
  padding it had to insert in order to fulfil the requested alignment.
- make conditions more sane by matching the Windows behaviour.
  The fact that a condition was signalled now lingers until a thread tries
  to wait on it, letting that thread pass straight through without sleeping
  or locking the underlying mutex, as it would otherwise have to do. This
  means a thread no longer has to already be waiting in order to be
  signalled, which avoids timing bugs that cause hard-to-debug deadlocks.
  See the comment on the `sync.Condition.flag` field.
- add thread priority: `thread.create(worker_proc, .High)`
---
 core/sync/sync_linux.odin | 100 +++++++---------------------------------------
 1 file changed, 15 insertions(+), 85 deletions(-)

(limited to 'core/sync/sync_linux.odin')

diff --git a/core/sync/sync_linux.odin b/core/sync/sync_linux.odin
index dcb2ee8e9..dc761f6aa 100644
--- a/core/sync/sync_linux.odin
+++ b/core/sync/sync_linux.odin
@@ -1,98 +1,28 @@
 package sync
 
-/*
+import "core:sys/unix"
 
-import "core:atomics"
-import "core:os"
-
-Semaphore :: struct {
-	// _handle: win32.Handle,
-}
-
-Mutex :: struct {
-	_semaphore: Semaphore,
-	_counter: i32,
-	_owner: i32,
-	_recursion: i32,
-}
-
-current_thread_id :: proc() -> i32 {
-	return i32(os.current_thread_id());
+// The Darwin docs say it best:
+// A semaphore is much like a lock, except that a finite number of threads can hold it simultaneously.
+// Semaphores can be thought of as being much like piles of tokens; multiple threads can take these tokens,
+// but when there are none left, a thread must wait until another thread returns one.
+Semaphore :: struct #align 16 {
+	handle: unix.sem_t,
 }
 
-semaphore_init :: proc(s: ^Semaphore) {
-	// s._handle = win32.CreateSemaphoreA(nil, 0, 1<<31-1, nil);
+semaphore_init :: proc(s: ^Semaphore, initial_count := 0) {
+	assert(unix.sem_init(&s.handle, 0, u32(initial_count)) == 0);
 }
 
 semaphore_destroy :: proc(s: ^Semaphore) {
-	// win32.CloseHandle(s._handle);
+	assert(unix.sem_destroy(&s.handle) == 0);
+	s.handle = {};
 }
 
-semaphore_post :: proc(s: ^Semaphore, count: int) {
-	// win32.ReleaseSemaphore(s._handle, cast(i32)count, nil);
+semaphore_post :: proc(s: ^Semaphore, count := 1) {
+	assert(unix.sem_post(&s.handle) == 0);
 }
 
-semaphore_release :: inline proc(s: ^Semaphore) {
-	semaphore_post(s, 1);
+semaphore_wait_for :: proc(s: ^Semaphore) {
+	assert(unix.sem_wait(&s.handle) == 0);
 }
-
-semaphore_wait :: proc(s: ^Semaphore) {
-	// win32.WaitForSingleObject(s._handle, win32.INFINITE);
-}
-
-
-mutex_init :: proc(m: ^Mutex) {
-	atomics.store(&m._counter, 0);
-	atomics.store(&m._owner, current_thread_id());
-	semaphore_init(&m._semaphore);
-	m._recursion = 0;
-}
-mutex_destroy :: proc(m: ^Mutex) {
-	semaphore_destroy(&m._semaphore);
-}
-mutex_lock :: proc(m: ^Mutex) {
-	thread_id := current_thread_id();
-	if atomics.fetch_add(&m._counter, 1) > 0 {
-		if thread_id != atomics.load(&m._owner) {
-			semaphore_wait(&m._semaphore);
-		}
-	}
-	atomics.store(&m._owner, thread_id);
-	m._recursion += 1;
-}
-mutex_try_lock :: proc(m: ^Mutex) -> bool {
-	thread_id := current_thread_id();
-	if atomics.load(&m._owner) == thread_id {
-		atomics.fetch_add(&m._counter, 1);
-	} else {
-		expected: i32 = 0;
-		if atomics.load(&m._counter) != 0 {
-			return false;
-		}
-		if atomics.compare_exchange(&m._counter, expected, 1) == 0 {
-			return false;
-		}
-		atomics.store(&m._owner, thread_id);
-	}
-	m._recursion += 1;
-	return true;
-}
-mutex_unlock :: proc(m: ^Mutex) {
-	recursion: i32;
-	thread_id := current_thread_id();
-	assert(thread_id == atomics.load(&m._owner));
-
-	m._recursion -= 1;
-	recursion = m._recursion;
-	if recursion == 0 {
-		atomics.store(&m._owner, thread_id);
-	}
-
-	if atomics.fetch_add(&m._counter, -1) > 1 {
-		if recursion == 0 {
-			semaphore_release(&m._semaphore);
-		}
-	}
-}
-
-*/
-- 
cgit v1.2.3
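
As a usage sketch of the new API: the snippet below shows the intended
post/wait flow, assuming the core:thread interface described in the commit
message (thread.create taking a worker proc and a priority, plus thread.start
and thread.join). The thread calls and the worker-proc signature are
assumptions for illustration; only the sync calls come from this diff.

    package example

    import "core:sync"
    import "core:thread"

    sem: sync.Semaphore;

    worker_proc :: proc(t: ^thread.Thread) {
        // Block here until another thread posts a token to the semaphore.
        sync.semaphore_wait_for(&sem);
        // ... do the actual work ...
    }

    main :: proc() {
        sync.semaphore_init(&sem, 0);           // start with zero tokens available
        defer sync.semaphore_destroy(&sem);

        t := thread.create(worker_proc, .High); // priority argument as in the commit message
        thread.start(t);                        // assumed: threads are created suspended

        sync.semaphore_post(&sem);              // hand the worker a token, waking it
        thread.join(t);
    }

With initial_count = 0 the worker always blocks until main posts; a nonzero
initial_count would pre-load that many tokens, letting the same number of
waiters pass through without blocking.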