From 5c519f0e8dada6b15166a257d22a07f2316a394f Mon Sep 17 00:00:00 2001 From: gingerBill Date: Sun, 1 Jan 2023 16:19:21 +0000 Subject: Remove the synchronization primitive init/destroy calls --- src/types.cpp | 4 ---- 1 file changed, 4 deletions(-) (limited to 'src/types.cpp') diff --git a/src/types.cpp b/src/types.cpp index 5bddfc79e..afe0b7d5d 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -808,10 +808,6 @@ gb_internal void type_path_pop(TypePath *tp) { #define FAILURE_SIZE 0 #define FAILURE_ALIGNMENT 0 -gb_internal void init_type_mutex(void) { - mutex_init(&g_type_mutex); -} - gb_internal bool type_ptr_set_update(PtrSet *s, Type *t) { if (ptr_set_exists(s, t)) { return true; -- cgit v1.2.3 From 5b335bb88c9045961a2a20d021ec5f4b5acf96ce Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 20:48:24 +0000 Subject: Narrow `g_type_mutex` usage --- src/types.cpp | 76 ++++++++++++++++++++++++++++++----------------------------- 1 file changed, 39 insertions(+), 37 deletions(-) (limited to 'src/types.cpp') diff --git a/src/types.cpp b/src/types.cpp index afe0b7d5d..c49f43f7c 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -3383,8 +3383,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { if (t->failure) { return FAILURE_ALIGNMENT; } - mutex_lock(&g_type_mutex); - defer (mutex_unlock(&g_type_mutex)); t = base_type(t); @@ -3408,6 +3406,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } break; case Type_Array: { + MUTEX_GUARD(&g_type_mutex); + Type *elem = t->Array.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3419,6 +3419,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } case Type_EnumeratedArray: { + MUTEX_GUARD(&g_type_mutex); + Type *elem = t->EnumeratedArray.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3438,6 +3440,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { case Type_Tuple: { + MUTEX_GUARD(&g_type_mutex); + i64 max = 1; for_array(i, t->Tuple.variables) { i64 align = type_align_of_internal(t->Tuple.variables[i]->type, path); @@ -3461,6 +3465,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return gb_max(t->Union.custom_align, 1); } + MUTEX_GUARD(&g_type_mutex); + i64 max = 1; for_array(i, t->Union.variants) { Type *variant = t->Union.variants[i]; @@ -3481,39 +3487,27 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { if (t->Struct.custom_align > 0) { return gb_max(t->Struct.custom_align, 1); } - if (t->Struct.is_raw_union) { - i64 max = 1; - for_array(i, t->Struct.fields) { - Type *field_type = t->Struct.fields[i]->type; - bool pop = type_path_push(path, field_type); - if (path->failure) { - return FAILURE_ALIGNMENT; - } - i64 align = type_align_of_internal(field_type, path); - if (pop) type_path_pop(path); - if (max < align) { - max = align; - } - } - return max; - } else if (t->Struct.fields.count > 0) { - i64 max = 1; - // NOTE(bill): Check the fields to check for cyclic definitions - for_array(i, t->Struct.fields) { - Type *field_type = t->Struct.fields[i]->type; - bool pop = type_path_push(path, field_type); - if (path->failure) return FAILURE_ALIGNMENT; - i64 align = type_align_of_internal(field_type, path); - if (pop) type_path_pop(path); - if (max < align) { - max = align; - } + + if (t->Struct.is_packed) { + return 1; + } + + MUTEX_GUARD(&g_type_mutex); + + i64 max = 1; + for_array(i, t->Struct.fields) { + Type *field_type = t->Struct.fields[i]->type; + bool pop = type_path_push(path, 
field_type); + if (path->failure) { + return FAILURE_ALIGNMENT; } - if (t->Struct.is_packed) { - return 1; + i64 align = type_align_of_internal(field_type, path); + if (pop) type_path_pop(path); + if (max < align) { + max = align; } - return max; } + return max; } break; case Type_BitSet: { @@ -3579,8 +3573,7 @@ gb_internal i64 *type_set_offsets_of(Slice const &fields, bool is_pack } gb_internal bool type_set_offsets(Type *t) { - mutex_lock(&g_type_mutex); - defer (mutex_unlock(&g_type_mutex)); + MUTEX_GUARD(&g_type_mutex); // TODO(bill): only per struct t = base_type(t); if (t->kind == Type_Struct) { @@ -3609,12 +3602,11 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->failure) { return FAILURE_SIZE; } - mutex_lock(&g_type_mutex); - defer (mutex_unlock(&g_type_mutex)); - switch (t->kind) { case Type_Named: { + MUTEX_GUARD(&g_type_mutex); + bool pop = type_path_push(path, t); if (path->failure) { return FAILURE_ALIGNMENT; @@ -3652,6 +3644,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return build_context.word_size*2; case Type_Array: { + MUTEX_GUARD(&g_type_mutex); + i64 count, align, size, alignment; count = t->Array.count; if (count == 0) { @@ -3667,6 +3661,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { } break; case Type_EnumeratedArray: { + MUTEX_GUARD(&g_type_mutex); + i64 count, align, size, alignment; count = t->EnumeratedArray.count; if (count == 0) { @@ -3699,6 +3695,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return (1 + 1 + 2)*build_context.word_size; case Type_Tuple: { + MUTEX_GUARD(&g_type_mutex); + i64 count, align, size; count = t->Tuple.variables.count; if (count == 0) { @@ -3717,6 +3715,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->Union.variants.count == 0) { return 0; } + MUTEX_GUARD(&g_type_mutex); + i64 align = type_align_of_internal(t, path); if (path->failure) { return FAILURE_SIZE; @@ -3754,6 +3754,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { case Type_Struct: { + MUTEX_GUARD(&g_type_mutex); + if (t->Struct.is_raw_union) { i64 count = t->Struct.fields.count; i64 align = type_align_of_internal(t, path); -- cgit v1.2.3 From 7ffffeecccc6a1fa1b26238f8ed4608d93ec9bb0 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 21:35:40 +0000 Subject: Comment out many mutex guards in `type_(size|align)_of_internal` --- src/thread_pool.cpp | 21 ++++++++++----------- src/types.cpp | 22 +++++++++++----------- 2 files changed, 21 insertions(+), 22 deletions(-) (limited to 'src/types.cpp') diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index a429e47ff..b89e00454 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -30,7 +30,7 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize slice_init(&pool->threads, a, thread_count + 1); // NOTE: this needs to be initialized before any thread starts - pool->running.store(true); + pool->running.store(true, std::memory_order_seq_cst); // setup the main thread thread_init(pool, &pool->threads[0], 0); @@ -43,7 +43,7 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize } gb_internal void thread_pool_destroy(ThreadPool *pool) { - pool->running.store(false); + pool->running.store(false, std::memory_order_seq_cst); for_array_off(i, 1, pool->threads) { Thread *t = &pool->threads[i]; @@ -114,7 +114,7 @@ gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, vo gb_internal void 
thread_pool_wait(ThreadPool *pool) { WorkerTask task; - while (pool->tasks_left.load()) { + while (pool->tasks_left.load(std::memory_order_acquire)) { // if we've got tasks on our queue, run them while (thread_pool_queue_pop(current_thread, &task)) { task.do_work(task.data); @@ -126,7 +126,7 @@ gb_internal void thread_pool_wait(ThreadPool *pool) { // This *must* be executed in this order, so the futex wakes immediately // if rem_tasks has changed since we checked last, otherwise the program // will permanently sleep - Footex rem_tasks = pool->tasks_left.load(); + Footex rem_tasks = pool->tasks_left.load(std::memory_order_acquire); if (rem_tasks == 0) { return; } @@ -141,7 +141,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { ThreadPool *pool = current_thread->pool; // debugf("worker id: %td\n", current_thread->idx); - while (pool->running.load()) { + while (pool->running.load(std::memory_order_seq_cst)) { // If we've got tasks to process, work through them usize finished_tasks = 0; i32 state; @@ -152,30 +152,29 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { finished_tasks += 1; } - if (finished_tasks > 0 && pool->tasks_left.load() == 0) { + if (finished_tasks > 0 && pool->tasks_left.load(std::memory_order_acquire) == 0) { futex_signal(&pool->tasks_left); } // If there's still work somewhere and we don't have it, steal it - if (pool->tasks_left.load()) { + if (pool->tasks_left.load(std::memory_order_acquire)) { usize idx = cast(usize)current_thread->idx; for_array(i, pool->threads) { - if (pool->tasks_left.load() == 0) { + if (pool->tasks_left.load(std::memory_order_acquire) == 0) { break; } idx = (idx + 1) % cast(usize)pool->threads.count; Thread *thread = &pool->threads.data[idx]; - WorkerTask task; + WorkerTask task, another_task; if (!thread_pool_queue_pop(thread, &task)) { continue; } - task.do_work(task.data); pool->tasks_left.fetch_sub(1, std::memory_order_release); - if (pool->tasks_left.load() == 0) { + if (pool->tasks_left.load(std::memory_order_acquire) == 0) { futex_signal(&pool->tasks_left); } diff --git a/src/types.cpp b/src/types.cpp index c49f43f7c..1e2d85ac6 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -3406,7 +3406,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } break; case Type_Array: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); Type *elem = t->Array.elem; bool pop = type_path_push(path, elem); @@ -3419,7 +3419,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } case Type_EnumeratedArray: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); Type *elem = t->EnumeratedArray.elem; bool pop = type_path_push(path, elem); @@ -3440,7 +3440,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { case Type_Tuple: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 max = 1; for_array(i, t->Tuple.variables) { @@ -3465,7 +3465,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return gb_max(t->Union.custom_align, 1); } - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 max = 1; for_array(i, t->Union.variants) { @@ -3492,7 +3492,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return 1; } - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 max = 1; for_array(i, t->Struct.fields) { @@ -3605,7 +3605,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { switch (t->kind) { case Type_Named: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); bool pop = 
type_path_push(path, t); if (path->failure) { @@ -3644,7 +3644,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return build_context.word_size*2; case Type_Array: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 count, align, size, alignment; count = t->Array.count; @@ -3661,7 +3661,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { } break; case Type_EnumeratedArray: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 count, align, size, alignment; count = t->EnumeratedArray.count; @@ -3695,7 +3695,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return (1 + 1 + 2)*build_context.word_size; case Type_Tuple: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 count, align, size; count = t->Tuple.variables.count; @@ -3715,7 +3715,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->Union.variants.count == 0) { return 0; } - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 align = type_align_of_internal(t, path); if (path->failure) { @@ -3754,7 +3754,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { case Type_Struct: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); if (t->Struct.is_raw_union) { i64 count = t->Struct.fields.count; -- cgit v1.2.3 From 747a11a954824da7960a247299986f56d1316773 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 12:18:35 +0000 Subject: Allow all set entry types to be implicitly cast to their key/value type to allow for easier iteration --- src/build_settings.cpp | 6 ++---- src/check_decl.cpp | 6 ++---- src/check_expr.cpp | 7 +++---- src/checker.cpp | 38 +++++++++++++------------------------- src/ptr_set.cpp | 12 ++++++++---- src/string_map.cpp | 13 ++++++++++--- src/string_set.cpp | 15 +++++++++++---- src/types.cpp | 3 +-- 8 files changed, 50 insertions(+), 50 deletions(-) (limited to 'src/types.cpp') diff --git a/src/build_settings.cpp b/src/build_settings.cpp index 75615a901..1dff5f43e 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -1332,11 +1332,10 @@ gb_internal void enable_target_feature(TokenPos pos, String const &target_featur gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bool with_quotes) { isize len = 0; isize i = 0; - for (auto const &entry : build_context.target_features_set) { + for (String const &feature : build_context.target_features_set) { if (i != 0) { len += 1; } - String feature = entry.value; len += feature.len; if (with_quotes) len += 2; i += 1; @@ -1344,13 +1343,12 @@ gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bo char *features = gb_alloc_array(allocator, char, len+1); len = 0; i = 0; - for (auto const &entry : build_context.target_features_set) { + for (String const &feature : build_context.target_features_set) { if (i != 0) { features[len++] = ','; } if (with_quotes) features[len++] = '"'; - String feature = entry.value; gb_memmove(features + len, feature.text, feature.len); len += feature.len; if (with_quotes) features[len++] = '"'; diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 0c1a7c325..7b229db08 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1587,16 +1587,14 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de MUTEX_GUARD_BLOCK(decl->deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->deps_mutex) { - for (auto const &entry : decl->deps) { - Entity *e = entry.ptr; + for (Entity *e : decl->deps) { 
ptr_set_add(&decl->parent->deps, e); } } MUTEX_GUARD_BLOCK(decl->type_info_deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->type_info_deps_mutex) { - for (auto const &entry : decl->type_info_deps) { - Type *t = entry.ptr; + for (Type *t : decl->type_info_deps) { ptr_set_add(&decl->parent->type_info_deps, t); } } diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 030bfb8e6..2924f9d13 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -202,8 +202,8 @@ gb_internal void check_did_you_mean_objc_entity(String const &name, Entity *e, b DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), set.entries.count, name); defer (did_you_mean_destroy(&d)); - for (auto const &entry : set) { - did_you_mean_append(&d, entry.value); + for (String const &target : set) { + did_you_mean_append(&d, target); } check_did_you_mean_print(&d, prefix); } @@ -4942,8 +4942,7 @@ gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lh if (e != nullptr) { DeclInfo *decl = decl_info_of_entity(e); if (decl != nullptr) { - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { ptr_set_add(&c->decl->deps, dep); } } diff --git a/src/checker.cpp b/src/checker.cpp index 8779d9d45..0075fa543 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -2222,12 +2222,11 @@ gb_internal void add_dependency_to_set(Checker *c, Entity *entity) { return; } - for (auto const &entry : decl->type_info_deps) { - add_min_dep_type_info(c, entry.ptr); + for (Type *t : decl->type_info_deps) { + add_min_dep_type_info(c, t); } - for (auto const &entry : decl->deps) { - Entity *e = entry.ptr; + for (Entity *e : decl->deps) { add_dependency_to_set(c, e); if (e->kind == Entity_Procedure && e->Procedure.is_foreign) { Entity *fl = e->Procedure.foreign_library; @@ -2510,8 +2509,7 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf DeclInfo *decl = decl_info_of_entity(e); GB_ASSERT(decl != nullptr); - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { if (dep->flags & EntityFlag_Field) { continue; } @@ -2537,15 +2535,12 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf if (e->kind == Entity_Procedure) { // Connect each pred 'p' of 'n' with each succ 's' and from // the procedure node - for (auto const &p_entry : n->pred) { - EntityGraphNode *p = p_entry.ptr; - + for (EntityGraphNode *p : n->pred) { // Ignore self-cycles if (p != n) { // Each succ 's' of 'n' becomes a succ of 'p', and // each pred 'p' of 'n' becomes a pred of 's' - for (auto const &s_entry : n->succ) { - EntityGraphNode *s = s_entry.ptr; + for (EntityGraphNode *s : n->succ) { // Ignore self-cycles if (s != n) { if (p->entity->kind == Entity_Procedure && @@ -4784,8 +4779,7 @@ gb_internal void check_import_entities(Checker *c) { } } - for (auto const &entry : n->pred) { - ImportGraphNode *p = entry.ptr; + for (ImportGraphNode *p : n->pred) { p->dep_count = gb_max(p->dep_count-1, 0); priority_queue_fix(&pq, p->index); } @@ -4893,8 +4887,7 @@ gb_internal bool find_entity_path_tuple(Type *tuple, Entity *end, PtrSetdeps) { - Entity *dep = entry.ptr; + for (Entity *dep : var_decl->deps) { if (dep == end) { auto path = array_make(heap_allocator()); array_add(&path, dep); @@ -4944,8 +4937,7 @@ gb_internal Array find_entity_path(Entity *start, Entity *end, PtrSet< return path; } } else { - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { if (dep == end) { auto path = 
array_make(heap_allocator()); array_add(&path, dep); @@ -5002,8 +4994,7 @@ gb_internal void calculate_global_init_order(Checker *c) { } } - for (auto const &entry : n->pred) { - EntityGraphNode *p = entry.ptr; + for (EntityGraphNode *p : n->pred) { p->dep_count -= 1; p->dep_count = gb_max(p->dep_count, 0); priority_queue_fix(&pq, p->index); @@ -5163,8 +5154,7 @@ gb_internal void check_unchecked_bodies(Checker *c) { // use the `procs_to_check` array global_procedure_body_in_worker_queue = false; - for (auto const &entry : c->info.minimum_dependency_set) { - Entity *e = entry.ptr; + for (Entity *e : c->info.minimum_dependency_set) { if (e == nullptr || e->kind != Entity_Procedure) { continue; } @@ -5239,8 +5229,7 @@ gb_internal void check_test_procedures(Checker *c) { AstPackage *pkg = c->info.init_package; Scope *s = pkg->scope; - for (auto const &entry : build_context.test_names) { - String name = entry.value; + for (String const &name : build_context.test_names) { Entity *e = scope_lookup(s, name); if (e == nullptr) { Token tok = {}; @@ -5744,8 +5733,7 @@ gb_internal void check_parsed_files(Checker *c) { DeclInfo *decl = e->decl_info; ast_node(pl, ProcLit, decl->proc_lit); if (pl->inlining == ProcInlining_inline) { - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { if (dep == e) { error(e->token, "Cannot inline recursive procedure '%.*s'", LIT(e->token.string)); break; diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index affde5c2f..303bde07e 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -2,6 +2,10 @@ template struct PtrSetEntry { T ptr; MapIndex next; + + operator T() const noexcept { + return this->ptr; + } }; template @@ -245,21 +249,21 @@ gb_internal gb_inline void ptr_set_clear(PtrSet *s) { template -gb_internal PtrSetEntry *begin(PtrSet &m) { +gb_internal PtrSetEntry *begin(PtrSet &m) noexcept { return m.entries.data; } template -gb_internal PtrSetEntry const *begin(PtrSet const &m) { +gb_internal PtrSetEntry const *begin(PtrSet const &m) noexcept { return m.entries.data; } template -gb_internal PtrSetEntry *end(PtrSet &m) { +gb_internal PtrSetEntry *end(PtrSet &m) noexcept { return m.entries.data + m.entries.count; } template -gb_internal PtrSetEntry const *end(PtrSet const &m) { +gb_internal PtrSetEntry const *end(PtrSet const &m) noexcept { return m.entries.data + m.entries.count; } \ No newline at end of file diff --git a/src/string_map.cpp b/src/string_map.cpp index b5db63e90..74a16de73 100644 --- a/src/string_map.cpp +++ b/src/string_map.cpp @@ -1,6 +1,13 @@ struct StringHashKey { u32 hash; String string; + + operator String() const noexcept { + return this->string; + } + operator String const &() const noexcept { + return this->string; + } }; gb_internal gb_inline StringHashKey string_hash_string(String const &s) { @@ -283,11 +290,11 @@ gb_internal gb_inline void string_map_clear(StringMap *h) { template -gb_internal StringMapEntry *begin(StringMap &m) { +gb_internal StringMapEntry *begin(StringMap &m) noexcept { return m.entries.data; } template -gb_internal StringMapEntry const *begin(StringMap const &m) { +gb_internal StringMapEntry const *begin(StringMap const &m) noexcept { return m.entries.data; } @@ -298,6 +305,6 @@ gb_internal StringMapEntry *end(StringMap &m) { } template -gb_internal StringMapEntry const *end(StringMap const &m) { +gb_internal StringMapEntry const *end(StringMap const &m) noexcept { return m.entries.data + m.entries.count; } \ No newline at end of file diff --git a/src/string_set.cpp 
b/src/string_set.cpp index 753afa9bf..fb4640c20 100644
--- a/src/string_set.cpp
+++ b/src/string_set.cpp
@@ -2,6 +2,13 @@ struct StringSetEntry {
 	u32 hash;
 	MapIndex next;
 	String value;
+
+	operator String const() const noexcept {
+		return this->value;
+	}
+	operator String const &() const noexcept {
+		return this->value;
+	}
 };
 
 struct StringSet {
@@ -226,18 +233,18 @@ gb_internal gb_inline void string_set_clear(StringSet *s) {
 }
 
 
-gb_internal StringSetEntry *begin(StringSet &m) {
+gb_internal StringSetEntry *begin(StringSet &m) noexcept {
 	return m.entries.data;
 }
-gb_internal StringSetEntry const *begin(StringSet const &m) {
+gb_internal StringSetEntry const *begin(StringSet const &m) noexcept {
 	return m.entries.data;
 }
 
-gb_internal StringSetEntry *end(StringSet &m) {
+gb_internal StringSetEntry *end(StringSet &m) noexcept {
 	return m.entries.data + m.entries.count;
 }
 
-gb_internal StringSetEntry const *end(StringSet const &m) {
+gb_internal StringSetEntry const *end(StringSet const &m) noexcept {
 	return m.entries.data + m.entries.count;
 }
\ No newline at end of file
diff --git a/src/types.cpp b/src/types.cpp
index 1e2d85ac6..d33c36e94 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -823,8 +823,7 @@ gb_internal bool type_ptr_set_exists(PtrSet<Type *> *s, Type *t) {
 
 	// TODO(bill, 2019-10-05): This is very slow and it's probably a lot
 	// faster to cache types correctly
-	for (auto const &entry : *s) {
-		Type *f = entry.ptr;
+	for (Type *f : *s) {
 		if (are_types_identical(t, f)) {
 			ptr_set_add(s, t);
 			return true;
-- cgit v1.2.3

From 0fb3032b731b640a2d0d1d62b9f8dd548e224b0e Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 3 Jan 2023 14:45:09 +0000
Subject: General improvements to `alloc_ast_node` and removal of other unnecessary checks
---
 src/common.cpp      | 2 +-
 src/main.cpp        | 4 ++--
 src/parser.cpp      | 4 +---
 src/parser.hpp      | 5 ++---
 src/ptr_map.cpp     | 6 ++++--
 src/thread_pool.cpp | 6 +++---
 src/threading.cpp   | 1 +
 src/types.cpp       | 4 ++--
 8 files changed, 16 insertions(+), 16 deletions(-)

(limited to 'src/types.cpp')

diff --git a/src/common.cpp b/src/common.cpp
index 199a263a1..988a992d0 100644
--- a/src/common.cpp
+++ b/src/common.cpp
@@ -43,9 +43,9 @@ gb_internal void debugf(char const *fmt, ...);
 #error Odin on Windows requires a 64-bit build-system. The 'Developer Command Prompt' for VS still defaults to 32-bit shell. The 64-bit shell can be found under the name 'x64 Native Tools Command Prompt' for VS. 
For more information, please see https://odin-lang.org/docs/install/#for-windows #endif -#include "threading.cpp" #include "unicode.cpp" #include "array.cpp" +#include "threading.cpp" #include "queue.cpp" #include "common_memory.cpp" #include "string.cpp" diff --git a/src/main.cpp b/src/main.cpp index 7ac78241e..c07d2c400 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -13,11 +13,11 @@ #endif #include "exact_value.cpp" #include "build_settings.cpp" - gb_global ThreadPool global_thread_pool; gb_internal void init_global_thread_pool(void) { isize thread_count = gb_max(build_context.thread_count, 1); - thread_pool_init(&global_thread_pool, permanent_allocator(), thread_count, "ThreadPoolWorker"); + isize worker_count = thread_count-1; + thread_pool_init(&global_thread_pool, permanent_allocator(), worker_count, "ThreadPoolWorker"); } gb_internal bool thread_pool_add_task(WorkerTaskProc *proc, void *data) { return thread_pool_add_task(&global_thread_pool, proc, data); diff --git a/src/parser.cpp b/src/parser.cpp index 046469c16..c6f35d326 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -64,11 +64,9 @@ gb_global std::atomic global_total_node_memory_allocated; // NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++ gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) { - gbAllocator a = ast_allocator(f); - isize size = ast_node_size(kind); - Ast *node = cast(Ast *)gb_alloc(a, size); + Ast *node = cast(Ast *)arena_alloc(&global_thread_local_ast_arena, size, 16); node->kind = kind; node->file_id = f ? f->id : 0; diff --git a/src/parser.hpp b/src/parser.hpp index b492cfa85..d81194831 100644 --- a/src/parser.hpp +++ b/src/parser.hpp @@ -821,9 +821,8 @@ gb_internal gb_inline bool is_ast_when_stmt(Ast *node) { gb_global gb_thread_local Arena global_thread_local_ast_arena = {}; -gb_internal gbAllocator ast_allocator(AstFile *f) { - Arena *arena = &global_thread_local_ast_arena; - return arena_allocator(arena); +gb_internal gb_inline gbAllocator ast_allocator(AstFile *f) { + return arena_allocator(&global_thread_local_ast_arena); } gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind); diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 083cd6697..264136881 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -27,6 +27,7 @@ struct PtrMap { gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) { + u32 res; #if defined(GB_ARCH_64_BIT) key = (~key) + (key << 21); key = key ^ (key >> 24); @@ -34,12 +35,13 @@ gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) { key = key ^ (key >> 14); key = (key + (key << 2)) + (key << 4); key = key ^ (key << 28); - return cast(u32)key; + res = cast(u32)key; #elif defined(GB_ARCH_32_BIT) u32 state = ((u32)key) * 747796405u + 2891336453u; u32 word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u; - return (word >> 22u) ^ word; + res = (word >> 22u) ^ word; #endif + return res ^ (res == MAP_SENTINEL); } gb_internal gb_inline u32 ptr_map_hash_key(void const *key) { return ptr_map_hash_key((uintptr)key); diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 07ab3d323..276e93dff 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -5,7 +5,7 @@ struct ThreadPool; gb_thread_local Thread *current_thread; -gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name); +gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name); gb_internal void 
thread_pool_destroy(ThreadPool *pool);
 gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
 gb_internal void thread_pool_wait(ThreadPool *pool);
@@ -25,9 +25,9 @@ gb_internal isize current_thread_index(void) {
 	return current_thread ? current_thread->idx : 0;
 }
 
-gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name) {
+gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name) {
 	pool->allocator = a;
-	slice_init(&pool->threads, a, thread_count + 1);
+	slice_init(&pool->threads, a, worker_count + 1);
 
 	// NOTE: this needs to be initialized before any thread starts
 	pool->running.store(true, std::memory_order_seq_cst);
diff --git a/src/threading.cpp b/src/threading.cpp
index aca77cd8f..78943150e 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -398,6 +398,7 @@ gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) {
 	t->idx = idx;
 }
 
+
 gb_internal void thread_init_and_start(ThreadPool *pool, Thread *t, isize idx) {
 	thread_init(pool, t, idx);
 	isize stack_size = 0;
diff --git a/src/types.cpp b/src/types.cpp
index d33c36e94..fa7c1d7f7 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -2535,13 +2535,13 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
 
 	if (x->kind == Type_Named) {
 		Entity *e = x->Named.type_name;
-		if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
+		if (e->TypeName.is_type_alias) {
 			x = x->Named.base;
 		}
 	}
 	if (y->kind == Type_Named) {
 		Entity *e = y->Named.type_name;
-		if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
+		if (e->TypeName.is_type_alias) {
 			y = y->Named.base;
 		}
 	}
-- cgit v1.2.3

From 17fa8cb6ef4424e4c7cff2439e2d52220f440660 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 3 Jan 2023 18:21:42 +0000
Subject: Add extra mutex to TypePath just in case
---
 src/ptr_set.cpp |  3 +++
 src/types.cpp   | 16 ++++++++++++++--
 2 files changed, 17 insertions(+), 2 deletions(-)

(limited to 'src/types.cpp')

diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp
index 303bde07e..9b8b678f8 100644
--- a/src/ptr_set.cpp
+++ b/src/ptr_set.cpp
@@ -1,5 +1,7 @@
 template <typename T>
 struct PtrSetEntry {
+	static_assert(sizeof(T) == sizeof(void *), "Key size must be pointer size");
+
 	T ptr;
 	MapIndex next;
 
@@ -10,6 +12,7 @@ struct PtrSetEntry {
 
 template <typename T>
 struct PtrSet {
+	Slice<MapIndex> hashes;
 	Array<PtrSetEntry<T>> entries;
 };
 
diff --git a/src/types.cpp b/src/types.cpp
index fa7c1d7f7..ec7adab5a 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -748,6 +748,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path);
 
 // IMPORTANT TODO(bill): Should this TypePath code be removed since type cycle checking is handled much earlier on?
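// NOTE (editorial sketch): MUTEX_GUARD, used throughout the commits above, is
// a scope-guard macro: it declares a local object that locks the mutex in its
// constructor and unlocks it in its destructor when the enclosing scope ends.
// A self-contained C++ equivalent is sketched below; the GUARD_* and
// MUTEX_GUARD_SKETCH names are illustrative assumptions, not the compiler's
// actual definitions.
//
//     #include <mutex>
//
//     // Two-level paste so __LINE__ expands before token-pasting.
//     #define GUARD_CONCAT_(a, b) a##b
//     #define GUARD_CONCAT(a, b)  GUARD_CONCAT_(a, b)
//     // Declares a named lock_guard that is held until scope exit.
//     #define MUTEX_GUARD_SKETCH(m) std::lock_guard<std::recursive_mutex> GUARD_CONCAT(guard_, __LINE__)(m)
//
//     std::recursive_mutex g_example_mutex;
//
//     int locked_sum(int a, int b) {
//         MUTEX_GUARD_SKETCH(g_example_mutex); // released when the function returns
//         return a + b;
//     }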
 struct TypePath {
+	RecursiveMutex mutex;
 	Array<Entity *> path; // Entity_TypeName;
 	bool failure;
 };
@@ -758,7 +759,9 @@ gb_internal void type_path_init(TypePath *tp) {
 }
 
 gb_internal void type_path_free(TypePath *tp) {
+	mutex_lock(&tp->mutex);
 	array_free(&tp->path);
+	mutex_unlock(&tp->mutex);
 }
 
 gb_internal void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
@@ -787,6 +790,8 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) {
 	}
 	Entity *e = t->Named.type_name;
 
+	mutex_lock(&tp->mutex);
+
 	for (isize i = 0; i < tp->path.count; i++) {
 		Entity *p = tp->path[i];
 		if (p == e) {
@@ -795,12 +800,19 @@
 	}
 
 	array_add(&tp->path, e);
+
+	mutex_unlock(&tp->mutex);
+
 	return true;
 }
 
 gb_internal void type_path_pop(TypePath *tp) {
-	if (tp != nullptr && tp->path.count > 0) {
-		array_pop(&tp->path);
+	if (tp != nullptr) {
+		mutex_lock(&tp->mutex);
+		if (tp->path.count > 0) {
+			array_pop(&tp->path);
+		}
+		mutex_unlock(&tp->mutex);
 	}
 }
-- cgit v1.2.3

From 291ea33939cc62420e0a3e4ae767ff7996b25ddc Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 4 Jan 2023 22:34:59 +0000
Subject: Initialize `TypePath` constructor-style to keep the `Futex` constructor happy
---
 src/types.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/types.cpp')

diff --git a/src/types.cpp b/src/types.cpp
index ec7adab5a..5ff6d7261 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -3363,7 +3363,7 @@ gb_internal i64 type_size_of(Type *t) {
 	} else if (t->kind != Type_Basic && t->cached_size >= 0) {
 		return t->cached_size;
 	}
-	TypePath path = {0};
+	TypePath path{};
 	type_path_init(&path);
 	t->cached_size = type_size_of_internal(t, &path);
 	type_path_free(&path);
@@ -3381,7 +3381,7 @@ gb_internal i64 type_align_of(Type *t) {
 		return t->cached_align;
 	}
 
-	TypePath path = {0};
+	TypePath path{};
 	type_path_init(&path);
 	t->cached_align = type_align_of_internal(t, &path);
 	type_path_free(&path);
-- cgit v1.2.3

From 3c7e45a46fc68426641047a540d4cb50b0fbd9c8 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 11 Jan 2023 17:45:18 +0000
Subject: Remove possible race condition in `type_size_of`/`type_align_of`
---
 src/types.cpp | 74 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 36 insertions(+), 38 deletions(-)

(limited to 'src/types.cpp')

diff --git a/src/types.cpp b/src/types.cpp
index 5ff6d7261..99f393cc5 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -3357,35 +3357,55 @@ gb_internal i64 type_size_of(Type *t) {
 	if (t == nullptr) {
 		return 0;
 	}
-	// NOTE(bill): Always calculate the size when it is a Type_Basic
-	if (t->kind == Type_Named && t->cached_size >= 0) {
+	i64 size = -1;
+	if (t->kind == Type_Basic) {
+		GB_ASSERT_MSG(is_type_typed(t), "%s", type_to_string(t));
+		switch (t->Basic.kind) {
+		case Basic_string:  size = 2*build_context.word_size; break;
+		case Basic_cstring: size = build_context.word_size;   break;
+		case Basic_any:     size = 2*build_context.word_size; break;
+		case Basic_typeid:  size = build_context.word_size;   break;
 
-	} else if (t->kind != Type_Basic && t->cached_size >= 0) {
-		return t->cached_size;
+		case Basic_int: case Basic_uint: case Basic_uintptr: case Basic_rawptr:
+			size = build_context.word_size;
+			break;
+		default:
+			size = t->Basic.size;
+			break;
+		}
+		t->cached_size.store(size);
+		return size;
+	} else if (t->kind != Type_Named && t->cached_size >= 0) {
+		return t->cached_size.load();
+	} else {
+		TypePath path{};
+		type_path_init(&path);
+		{
+			MUTEX_GUARD(&g_type_mutex);
+			size = 
type_size_of_internal(t, &path); + t->cached_size.store(size); + } + type_path_free(&path); + return size; } - TypePath path{}; - type_path_init(&path); - t->cached_size = type_size_of_internal(t, &path); - type_path_free(&path); - return t->cached_size; } gb_internal i64 type_align_of(Type *t) { if (t == nullptr) { return 1; } - // NOTE(bill): Always calculate the size when it is a Type_Basic - if (t->kind == Type_Named && t->cached_align >= 0) { - - } if (t->kind != Type_Basic && t->cached_align > 0) { - return t->cached_align; + if (t->kind != Type_Named && t->cached_align > 0) { + return t->cached_align.load(); } TypePath path{}; type_path_init(&path); - t->cached_align = type_align_of_internal(t, &path); + { + MUTEX_GUARD(&g_type_mutex); + t->cached_align.store(type_align_of_internal(t, &path)); + } type_path_free(&path); - return t->cached_align; + return t->cached_align.load(); } @@ -3417,8 +3437,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } break; case Type_Array: { - // MUTEX_GUARD(&g_type_mutex); - Type *elem = t->Array.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3430,8 +3448,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } case Type_EnumeratedArray: { - // MUTEX_GUARD(&g_type_mutex); - Type *elem = t->EnumeratedArray.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3451,8 +3467,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { case Type_Tuple: { - // MUTEX_GUARD(&g_type_mutex); - i64 max = 1; for_array(i, t->Tuple.variables) { i64 align = type_align_of_internal(t->Tuple.variables[i]->type, path); @@ -3476,8 +3490,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return gb_max(t->Union.custom_align, 1); } - // MUTEX_GUARD(&g_type_mutex); - i64 max = 1; for_array(i, t->Union.variants) { Type *variant = t->Union.variants[i]; @@ -3503,8 +3515,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return 1; } - // MUTEX_GUARD(&g_type_mutex); - i64 max = 1; for_array(i, t->Struct.fields) { Type *field_type = t->Struct.fields[i]->type; @@ -3616,8 +3626,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { switch (t->kind) { case Type_Named: { - // MUTEX_GUARD(&g_type_mutex); - bool pop = type_path_push(path, t); if (path->failure) { return FAILURE_ALIGNMENT; @@ -3655,8 +3663,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return build_context.word_size*2; case Type_Array: { - // MUTEX_GUARD(&g_type_mutex); - i64 count, align, size, alignment; count = t->Array.count; if (count == 0) { @@ -3672,8 +3678,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { } break; case Type_EnumeratedArray: { - // MUTEX_GUARD(&g_type_mutex); - i64 count, align, size, alignment; count = t->EnumeratedArray.count; if (count == 0) { @@ -3706,8 +3710,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return (1 + 1 + 2)*build_context.word_size; case Type_Tuple: { - // MUTEX_GUARD(&g_type_mutex); - i64 count, align, size; count = t->Tuple.variables.count; if (count == 0) { @@ -3726,8 +3728,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->Union.variants.count == 0) { return 0; } - // MUTEX_GUARD(&g_type_mutex); - i64 align = type_align_of_internal(t, path); if (path->failure) { return FAILURE_SIZE; @@ -3765,8 +3765,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { case Type_Struct: { - // MUTEX_GUARD(&g_type_mutex); - if 
(t->Struct.is_raw_union) { i64 count = t->Struct.fields.count; i64 align = type_align_of_internal(t, path); -- cgit v1.2.3 From 7124d541a132fc94b2c66c54bd73eb0d103ce3d3 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 18:10:27 +0000 Subject: General optimizations --- src/check_type.cpp | 3 ++- src/checker.cpp | 15 ++++++++------- src/checker.hpp | 2 +- src/ptr_map.cpp | 36 ++++++++++++++++++++++++++++++++++++ src/types.cpp | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 99 insertions(+), 11 deletions(-) (limited to 'src/types.cpp') diff --git a/src/check_type.cpp b/src/check_type.cpp index fd4e965d4..0863af967 100644 --- a/src/check_type.cpp +++ b/src/check_type.cpp @@ -2405,7 +2405,8 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e } soa_struct->Struct.soa_count = cast(i32)count; - scope = create_scope(ctx->info, ctx->scope, 8); + scope = create_scope(ctx->info, ctx->scope); + string_map_init(&scope->elements, 8); soa_struct->Struct.scope = scope; String params_xyzw[4] = { diff --git a/src/checker.cpp b/src/checker.cpp index 4e8d19016..473af7128 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -220,11 +220,9 @@ gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { -gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) { +gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent) { Scope *s = gb_alloc_item(permanent_allocator(), Scope); s->parent = parent; - string_map_init(&s->elements, init_elements_capacity); - ptr_set_init(&s->imported, 0); if (parent != nullptr && parent != builtin_pkg->scope) { Scope *prev_head_child = parent->head_child.exchange(s, std::memory_order_acq_rel); @@ -246,7 +244,8 @@ gb_internal Scope *create_scope_from_file(CheckerInfo *info, AstFile *f) { GB_ASSERT(f->pkg->scope != nullptr); isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*f->total_file_decl_count); - Scope *s = create_scope(info, f->pkg->scope, init_elements_capacity); + Scope *s = create_scope(info, f->pkg->scope); + string_map_init(&s->elements, init_elements_capacity); s->flags |= ScopeFlag_File; @@ -265,7 +264,8 @@ gb_internal Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg) } isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*total_pkg_decl_count); - Scope *s = create_scope(c->info, builtin_pkg->scope, init_elements_capacity); + Scope *s = create_scope(c->info, builtin_pkg->scope); + string_map_init(&s->elements, init_elements_capacity); s->flags |= ScopeFlag_Pkg; s->pkg = pkg; @@ -1753,7 +1753,8 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { add_type_info_dependency(c->info, c->decl, t); MUTEX_GUARD_BLOCK(&c->info->type_info_mutex) { - auto found = map_get(&c->info->type_info_map, t); + MapFindResult fr; + auto found = map_try_get(&c->info->type_info_map, t, &fr); if (found != nullptr) { // Types have already been added return; @@ -1777,7 +1778,7 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { ti_index = c->info->type_info_types.count; array_add(&c->info->type_info_types, t); } - map_set(&c->checker->info.type_info_map, t, ti_index); + map_set_internal_from_try_get(&c->checker->info.type_info_map, t, ti_index, fr); if (prev) { // NOTE(bill): If a previous one exists already, no need to continue diff --git a/src/checker.hpp b/src/checker.hpp index 8b8819d97..806eb2e51 100644 --- a/src/checker.hpp +++ b/src/checker.hpp 
@@ -223,7 +223,7 @@ enum ScopeFlag : i32 { ScopeFlag_ContextDefined = 1<<16, }; -enum { DEFAULT_SCOPE_CAPACITY = 29 }; +enum { DEFAULT_SCOPE_CAPACITY = 32 }; struct Scope { Ast * node; diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 89d2cbf9d..598904906 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -192,6 +192,26 @@ gb_internal void map_rehash(PtrMap *h, isize new_count) { template gb_internal V *map_get(PtrMap *h, K key) { + MapIndex hash_index = MAP_SENTINEL; + MapIndex entry_prev = MAP_SENTINEL; + MapIndex entry_index = MAP_SENTINEL; + if (h->hashes.count != 0) { + u32 hash = ptr_map_hash_key(key); + hash_index = cast(MapIndex)(hash & (h->hashes.count-1)); + entry_index = h->hashes.data[hash_index]; + while (entry_index != MAP_SENTINEL) { + auto *entry = &h->entries.data[entry_index]; + if (entry->key == key) { + return &entry->value; + } + entry_prev = entry_index; + entry_index = entry->next; + } + } + return nullptr; +} +template +gb_internal V *map_try_get(PtrMap *h, K key, MapFindResult *fr_) { MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; if (h->hashes.count != 0) { u32 hash = ptr_map_hash_key(key); @@ -206,9 +226,25 @@ gb_internal V *map_get(PtrMap *h, K key) { fr.entry_index = entry->next; } } + if (h->hashes.count == 0 || map__full(h)) { + map_grow(h); + } + if (fr_) *fr_ = fr; return nullptr; } + +template +gb_internal void map_set_internal_from_try_get(PtrMap *h, K key, V const &value, MapFindResult const &fr) { + MapIndex index = map__add_entry(h, key); + if (fr.entry_prev != MAP_SENTINEL) { + h->entries.data[fr.entry_prev].next = index; + } else { + h->hashes.data[fr.hash_index] = index; + } + h->entries.data[index].value = value; +} + template gb_internal V &map_must_get(PtrMap *h, K key) { V *ptr = map_get(h, key); diff --git a/src/types.cpp b/src/types.cpp index 99f393cc5..69c1ebe68 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -2528,9 +2528,58 @@ gb_internal bool lookup_subtype_polymorphic_selection(Type *dst, Type *src, Sele gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names); gb_internal bool are_types_identical(Type *x, Type *y) { + if (x == y) { + return true; + } + + if ((x == nullptr && y != nullptr) || + (x != nullptr && y == nullptr)) { + return false; + } + + if (x->kind == Type_Named) { + Entity *e = x->Named.type_name; + if (e->TypeName.is_type_alias) { + x = x->Named.base; + } + } + if (y->kind == Type_Named) { + Entity *e = y->Named.type_name; + if (e->TypeName.is_type_alias) { + y = y->Named.base; + } + } + if (x->kind != y->kind) { + return false; + } + return are_types_identical_internal(x, y, false); } gb_internal bool are_types_identical_unique_tuples(Type *x, Type *y) { + if (x == y) { + return true; + } + + if (!x | !y) { + return false; + } + + if (x->kind == Type_Named) { + Entity *e = x->Named.type_name; + if (e->TypeName.is_type_alias) { + x = x->Named.base; + } + } + if (y->kind == Type_Named) { + Entity *e = y->Named.type_name; + if (e->TypeName.is_type_alias) { + y = y->Named.base; + } + } + if (x->kind != y->kind) { + return false; + } + return are_types_identical_internal(x, y, true); } @@ -2540,11 +2589,11 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple return true; } - if ((x == nullptr && y != nullptr) || - (x != nullptr && y == nullptr)) { + if (!x | !y) { return false; } + #if 0 if (x->kind == Type_Named) { Entity *e = x->Named.type_name; if (e->TypeName.is_type_alias) { @@ -2560,6 +2609,7 @@ gb_internal bool 
are_types_identical_internal(Type *x, Type *y, bool check_tuple if (x->kind != y->kind) { return false; } + #endif switch (x->kind) { case Type_Generic: -- cgit v1.2.3
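An editorial sketch of the wait loop patched into `thread_pool_wait` above: the "This *must* be executed in this order" comment describes the classic futex protocol. Load the counter, return if it is already zero, and only then sleep on the loaded value, so that a decrement occurring between the check and the sleep wakes the waiter immediately instead of being lost. The same shape in self-contained C++20 follows; `std::atomic::wait` is an analogue of the compiler's `futex_wait`, not its actual API.

#include <atomic>

std::atomic<int> tasks_left{0};

void wait_until_done() {
    for (;;) {
        // Load first; sleep only while the value still equals what we loaded.
        int rem = tasks_left.load(std::memory_order_acquire);
        if (rem == 0) {
            return;
        }
        // Wakes immediately if tasks_left changed after the load above.
        tasks_left.wait(rem, std::memory_order_acquire);
    }
}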
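An editorial sketch of why the `operator T()` conversions added to the set entry types allow `for (Entity *e : set)`: range-for dereferences the iterator to an entry, and the implicit conversion then binds the loop variable directly to the stored key. The toy types below are illustrative, not the compiler's.

struct Entity;

struct ToyEntry {
    Entity *ptr;
    operator Entity *() const noexcept { return ptr; } // entry -> key
};

struct ToySet {
    ToyEntry *data;
    int       count;
};

ToyEntry *begin(ToySet &s) noexcept { return s.data; }
ToyEntry *end(ToySet &s)   noexcept { return s.data + s.count; }

int count_non_null(ToySet &s) {
    int n = 0;
    for (Entity *e : s) { // each entry converts straight to Entity *
        if (e != nullptr) n += 1;
    }
    return n;
}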
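An editorial sketch of the shape of the `type_size_of` race fix: the cache field becomes an atomic whose negative value means "not computed yet", readers take a lock-free fast path once a result is published, and the expensive computation runs under a global mutex. The names below are stand-ins; the real code uses `g_type_mutex`, `MUTEX_GUARD`, and `type_size_of_internal`.

#include <atomic>
#include <mutex>

struct ToyType {
    std::atomic<long long> cached_size{-1}; // negative: not yet computed
};

std::mutex g_toy_type_mutex;

long long compute_size_slow(ToyType *t) {
    (void)t;
    return 8; // placeholder for the real recursive computation
}

long long toy_type_size_of(ToyType *t) {
    long long size = t->cached_size.load(std::memory_order_acquire);
    if (size >= 0) {
        return size; // fast path: result already published, no lock taken
    }
    std::lock_guard<std::mutex> guard(g_toy_type_mutex);
    size = compute_size_slow(t);
    t->cached_size.store(size, std::memory_order_release);
    return size;
}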
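An editorial sketch of what `map_try_get` and `map_set_internal_from_try_get` buy in the final commit: `add_type_info_type_internal` previously hashed and probed the map once in `map_get` and again in `map_set`; the new pair records where the lookup missed and inserts there directly. `std::unordered_map::try_emplace` expresses the same probe-once idea, as an analogy rather than the compiler's map API.

#include <unordered_map>

std::unordered_map<void *, long long> type_info_map;

// Returns the index already recorded for `type`, or records `next_index`
// for it if absent, with one hash and one probe either way.
long long type_info_index_of(void *type, long long next_index) {
    auto result = type_info_map.try_emplace(type, next_index);
    return result.first->second;
}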