From 600f2b7284b8974a18827242c18e790dab0cf06a Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 3 Jan 2023 11:53:59 +0000
Subject: Use heap_allocator for all hash set types

---
 src/check_stmt.cpp | 1 -
 1 file changed, 1 deletion(-)

(limited to 'src/check_stmt.cpp')

diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index cf111e84c..945ba8f02 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -1185,7 +1185,6 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
 	}
 
 	PtrSet seen = {};
-	ptr_set_init(&seen, heap_allocator());
 	defer (ptr_set_destroy(&seen));
 
 	for_array(i, bs->stmts) {
--
cgit v1.2.3


From 252be0fb417f9cdde5e9c4b348cd995a20433aea Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 3 Jan 2023 11:59:52 +0000
Subject: Make all maps use heap allocator implicitly

---
 src/check_builtin.cpp        |  2 +-
 src/check_expr.cpp           |  3 +--
 src/check_stmt.cpp           |  1 -
 src/checker.cpp              | 33 +++++++++++++++------------------
 src/common.cpp               |  2 +-
 src/docs_writer.cpp          | 11 +++++------
 src/llvm_backend_general.cpp | 44 ++++++++++++++++++++++----------------------
 src/llvm_backend_proc.cpp    | 10 +++++-----
 src/main.cpp                 |  2 +-
 src/ptr_map.cpp              | 18 ++++++++++++++----
 src/string_map.cpp           | 18 ++++++++++++++----
 11 files changed, 79 insertions(+), 65 deletions(-)

(limited to 'src/check_stmt.cpp')

diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 36dc9b7a1..af196234e 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -1110,7 +1110,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
 		new_cache->path = path;
 		new_cache->data = data;
 		new_cache->file_error = file_error;
-		string_map_init(&new_cache->hashes, heap_allocator(), 32);
+		string_map_init(&new_cache->hashes, 32);
 		string_map_set(&c->info->load_file_cache, path, new_cache);
 		if (cache_) *cache_ = new_cache;
 	} else {
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index 7a00b5353..030bfb8e6 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -5753,7 +5753,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
 	// in order to improve the type inference system
 	StringMap type_hint_map = {}; // Key: String
-	string_map_init(&type_hint_map, heap_allocator(), 2*args.count);
+	string_map_init(&type_hint_map, 2*args.count);
 	defer (string_map_destroy(&type_hint_map));
 
 	Type *ptype = nullptr;
@@ -8283,7 +8283,6 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
 	bool is_partial = cl->tag && (cl->tag->BasicDirective.name.string == "partial");
 
 	SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue
-	map_init(&seen, heap_allocator());
 	defer (map_destroy(&seen));
 
 	if (cl->elems.count > 0 && cl->elems[0]->kind == Ast_FieldValue) {
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index 945ba8f02..7192b16b5 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -929,7 +929,6 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
 	}
 
 	SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue
-	map_init(&seen, heap_allocator());
 	defer (map_destroy(&seen));
 
 	for_array(stmt_index, bs->stmts) {
diff --git a/src/checker.cpp b/src/checker.cpp
index 8da659461..8779d9d45 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -221,7 +221,7 @@ gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) {
 gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) {
 	Scope *s = gb_alloc_item(permanent_allocator(), Scope);
 	s->parent = parent;
-	string_map_init(&s->elements, heap_allocator(), init_elements_capacity);
+	string_map_init(&s->elements, init_elements_capacity);
 	ptr_set_init(&s->imported, 0);
 
 	if (parent != nullptr && parent != builtin_pkg->scope) {
@@ -1135,14 +1135,14 @@ gb_internal void init_checker_info(CheckerInfo *i) {
 	array_init(&i->definitions, a);
 	array_init(&i->entities, a);
-	map_init(&i->global_untyped, a);
-	string_map_init(&i->foreigns, a);
-	map_init(&i->gen_procs, a);
-	map_init(&i->gen_types, a);
+	map_init(&i->global_untyped);
+	string_map_init(&i->foreigns);
+	map_init(&i->gen_procs);
+	map_init(&i->gen_types);
 	array_init(&i->type_info_types, a);
-	map_init(&i->type_info_map, a);
-	string_map_init(&i->files, a);
-	string_map_init(&i->packages, a);
+	map_init(&i->type_info_map);
+	string_map_init(&i->files);
+	string_map_init(&i->packages);
 	array_init(&i->variable_init_order, a);
 	array_init(&i->testing_procedures, a, 0, 0);
 	array_init(&i->init_procedures, a, 0, 0);
@@ -1160,8 +1160,8 @@ gb_internal void init_checker_info(CheckerInfo *i) {
 	mpmc_init(&i->intrinsics_entry_point_usage, a, 1<<10); // just waste some memory here, even if it probably never used
 
-	map_init(&i->objc_msgSend_types, a);
-	string_map_init(&i->load_file_cache, a);
+	map_init(&i->objc_msgSend_types);
+	string_map_init(&i->load_file_cache);
 
 	array_init(&i->all_procedures, heap_allocator());
 
@@ -2490,7 +2490,7 @@ gb_internal bool is_entity_a_dependency(Entity *e) {
 
 gb_internal Array generate_entity_dependency_graph(CheckerInfo *info, gbAllocator allocator) {
 	PtrMap M = {};
-	map_init(&M, allocator, info->entities.count);
+	map_init(&M, info->entities.count);
 	defer (map_destroy(&M));
 	for_array(i, info->entities) {
 		Entity *e = info->entities[i];
@@ -4200,7 +4200,7 @@ gb_internal void add_import_dependency_node(Checker *c, Ast *decl, PtrMap
 
 generate_import_dependency_graph(Checker *c) {
 	PtrMap M = {};
-	map_init(&M, heap_allocator(), 2*c->parser->packages.count);
+	map_init(&M, 2*c->parser->packages.count);
 	defer (map_destroy(&M));
 
 	for_array(i, c->parser->packages) {
@@ -4688,7 +4688,7 @@ gb_internal void check_collect_entities_all(Checker *c) {
 		auto *wd = &collect_entity_worker_data[i];
 		wd->c = c;
 		wd->ctx = make_checker_context(c);
-		map_init(&wd->untyped, heap_allocator());
+		map_init(&wd->untyped);
 	}
 
 	for (auto const &entry : c->info.files.entries) {
@@ -4804,7 +4804,7 @@ gb_internal void check_import_entities(Checker *c) {
 	CheckerContext ctx = make_checker_context(c);
 
 	UntypedExprInfoMap untyped = {};
-	map_init(&untyped, heap_allocator());
 	defer (map_destroy(&untyped));
 
 	isize min_pkg_index = 0;
@@ -5159,7 +5158,6 @@ gb_internal void check_unchecked_bodies(Checker *c) {
 	GB_ASSERT(c->procs_to_check.count == 0);
 
 	UntypedExprInfoMap untyped = {};
-	map_init(&untyped, heap_allocator());
 	defer (map_destroy(&untyped));
 
 	// use the `procs_to_check` array
@@ -5212,7 +5210,6 @@ gb_internal void check_unchecked_bodies(Checker *c) {
 gb_internal void check_safety_all_procedures_for_unchecked(Checker *c) {
 	GB_ASSERT(DEBUG_CHECK_ALL_PROCEDURES);
 	UntypedExprInfoMap untyped = {};
-	map_init(&untyped, heap_allocator());
 	defer (map_destroy(&untyped));
 
 
@@ -5345,7 +5342,7 @@ gb_internal void check_procedure_bodies(Checker *c) {
 
 	for (isize i = 0; i < thread_count; i++) {
 		check_procedure_bodies_worker_data[i].c = c;
-		map_init(&check_procedure_bodies_worker_data[i].untyped, heap_allocator());
+		map_init(&check_procedure_bodies_worker_data[i].untyped);
 	}
 
 	defer (for (isize i = 0; i < thread_count; i++) {
@@ -5545,7 +5542,7 @@ gb_internal void check_deferred_procedures(Checker *c) {
 
 gb_internal void check_unique_package_names(Checker *c) {
 	StringMap pkgs = {}; // Key: package name
-	string_map_init(&pkgs, heap_allocator(), 2*c->info.packages.entries.count);
+	string_map_init(&pkgs, 2*c->info.packages.entries.count);
 	defer (string_map_destroy(&pkgs));
 
 	for (auto const &entry : c->info.packages) {
diff --git a/src/common.cpp b/src/common.cpp
index 3b6ea59e8..199a263a1 100644
--- a/src/common.cpp
+++ b/src/common.cpp
@@ -373,7 +373,7 @@ gb_internal char const *string_intern(String const &string) {
 }
 
 gb_internal void init_string_interner(void) {
-	map_init(&string_intern_map, heap_allocator());
+	map_init(&string_intern_map);
 }
 
 
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index bab97158d..2aefe29eb 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -53,13 +53,12 @@ gb_internal void odin_doc_writer_item_tracker_init(OdinDocWriterItemTracker *
 gb_internal void odin_doc_writer_prepare(OdinDocWriter *w) {
 	w->state = OdinDocWriterState_Preparing;
 
-	gbAllocator a = heap_allocator();
-	string_map_init(&w->string_cache, a);
+	string_map_init(&w->string_cache);
 
-	map_init(&w->file_cache, a);
-	map_init(&w->pkg_cache, a);
-	map_init(&w->entity_cache, a);
-	map_init(&w->type_cache, a);
+	map_init(&w->file_cache);
+	map_init(&w->pkg_cache);
+	map_init(&w->entity_cache);
+	map_init(&w->type_cache);
 
 	odin_doc_writer_item_tracker_init(&w->files, 1);
 	odin_doc_writer_item_tracker_init(&w->pkgs, 1);
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index 75675474a..a849929f0 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -55,30 +55,30 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) {
 	}
 
 	gbAllocator a = heap_allocator();
-	map_init(&m->types, a);
-	map_init(&m->func_raw_types, a);
-	map_init(&m->struct_field_remapping, a);
-	map_init(&m->values, a);
-	map_init(&m->soa_values, a);
-	string_map_init(&m->members, a);
-	map_init(&m->procedure_values, a);
-	string_map_init(&m->procedures, a);
-	string_map_init(&m->const_strings, a);
-	map_init(&m->function_type_map, a);
-	map_init(&m->equal_procs, a);
-	map_init(&m->hasher_procs, a);
-	map_init(&m->map_get_procs, a);
-	map_init(&m->map_set_procs, a);
+	map_init(&m->types);
+	map_init(&m->func_raw_types);
+	map_init(&m->struct_field_remapping);
+	map_init(&m->values);
+	map_init(&m->soa_values);
+	string_map_init(&m->members);
+	map_init(&m->procedure_values);
+	string_map_init(&m->procedures);
+	string_map_init(&m->const_strings);
+	map_init(&m->function_type_map);
+	map_init(&m->equal_procs);
+	map_init(&m->hasher_procs);
+	map_init(&m->map_get_procs);
+	map_init(&m->map_set_procs);
 	array_init(&m->procedures_to_generate, a, 0, 1024);
 	array_init(&m->missing_procedures_to_check, a, 0, 16);
-	map_init(&m->debug_values, a);
+	map_init(&m->debug_values);
 	array_init(&m->debug_incomplete_types, a, 0, 1024);
 
-	string_map_init(&m->objc_classes, a);
-	string_map_init(&m->objc_selectors, a);
+	string_map_init(&m->objc_classes);
+	string_map_init(&m->objc_selectors);
 
-	map_init(&m->map_info_map, a, 0);
-	map_init(&m->map_cell_info_map, a, 0);
+	map_init(&m->map_info_map, 0);
+	map_init(&m->map_cell_info_map, 0);
 }
 
 
@@ -127,9 +127,9 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) {
 
 	gen->info = &c->info;
 
-	map_init(&gen->modules, permanent_allocator(), gen->info->packages.entries.count*2);
-	map_init(&gen->modules_through_ctx, permanent_allocator(), gen->info->packages.entries.count*2);
-	map_init(&gen->anonymous_proc_lits, heap_allocator(), 1024);
+	map_init(&gen->modules, gen->info->packages.entries.count*2);
+	map_init(&gen->modules_through_ctx, gen->info->packages.entries.count*2);
+	map_init(&gen->anonymous_proc_lits, 1024);
 
 	array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024);
 
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 7245bdd80..c66462bc1 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -119,9 +119,9 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
 	p->branch_blocks.allocator = a;
 	p->context_stack.allocator = a;
 	p->scope_stack.allocator = a;
-	map_init(&p->selector_values, a, 0);
-	map_init(&p->selector_addr, a, 0);
-	map_init(&p->tuple_fix_map, a, 0);
+	map_init(&p->selector_values, 0);
+	map_init(&p->selector_addr, 0);
+	map_init(&p->tuple_fix_map, 0);
 
 	if (p->is_foreign) {
 		lb_add_foreign_library_path(p->module, entity->Procedure.foreign_library);
@@ -345,7 +345,7 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name
 	p->blocks.allocator = a;
 	p->branch_blocks.allocator = a;
 	p->context_stack.allocator = a;
-	map_init(&p->tuple_fix_map, a, 0);
+	map_init(&p->tuple_fix_map, 0);
 
 	char *c_link_name = alloc_cstring(permanent_allocator(), p->name);
 
@@ -486,7 +486,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) {
 	p->entry_block = lb_create_block(p, "entry", true);
 	lb_start_block(p, p->entry_block);
 
-	map_init(&p->direct_parameters, heap_allocator());
+	map_init(&p->direct_parameters);
 
 	GB_ASSERT(p->type != nullptr);
 
diff --git a/src/main.cpp b/src/main.cpp
index 91dcbdb01..7ac78241e 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -2516,7 +2516,7 @@ int main(int arg_count, char const **arg_ptr) {
 	add_library_collection(str_lit("core"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("core")));
 	add_library_collection(str_lit("vendor"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("vendor")));
 
-	map_init(&build_context.defined_values, heap_allocator());
+	map_init(&build_context.defined_values);
 	build_context.extra_packages.allocator = heap_allocator();
 	string_set_init(&build_context.test_names);
 
diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp
index 434680e91..083cd6697 100644
--- a/src/ptr_map.cpp
+++ b/src/ptr_map.cpp
@@ -46,7 +46,7 @@ gb_internal gb_inline u32 ptr_map_hash_key(void const *key) {
 }
 
 
-template gb_internal void map_init (PtrMap *h, gbAllocator a, isize capacity = 16);
+template gb_internal void map_init (PtrMap *h, isize capacity = 16);
 template gb_internal void map_destroy (PtrMap *h);
 template gb_internal V * map_get (PtrMap *h, K key);
 template gb_internal void map_set (PtrMap *h, K key, V const &value);
@@ -68,11 +68,15 @@ template gb_internal void multi_map_remove (PtrMap<
 template gb_internal void multi_map_remove_all(PtrMap *h, K key);
 #endif
 
+gb_internal gbAllocator map_allocator(void) {
+	return heap_allocator();
+}
+
 template
-gb_internal gb_inline void map_init(PtrMap *h, gbAllocator a, isize capacity) {
+gb_internal gb_inline void map_init(PtrMap *h, isize capacity) {
 	capacity = next_pow2_isize(capacity);
-	slice_init(&h->hashes, a, capacity);
-	array_init(&h->entries, a, 0, capacity);
+	slice_init(&h->hashes, map_allocator(), capacity);
+	array_init(&h->entries, map_allocator(), 0, capacity);
 	for (isize i = 0; i < capacity; i++) {
 		h->hashes.data[i] = MAP_SENTINEL;
 	}
@@ -80,6 +84,9 @@ gb_internal gb_inline void map_init(PtrMap *h, gbAllocator a, isize capaci
 
 template
 gb_internal gb_inline void map_destroy(PtrMap *h) {
+	if (h->entries.allocator.proc == nullptr) {
+		h->entries.allocator = map_allocator();
+	}
 	slice_free(&h->hashes, h->entries.allocator);
 	array_free(&h->entries);
 }
@@ -162,6 +169,9 @@ gb_internal void map_reset_entries(PtrMap *h) {
 
 template
 gb_internal void map_reserve(PtrMap *h, isize cap) {
+	if (h->entries.allocator.proc == nullptr) {
+		h->entries.allocator = map_allocator();
+	}
 	array_reserve(&h->entries, cap);
 	if (h->entries.count*2 < h->hashes.count) {
 		return;
diff --git a/src/string_map.cpp b/src/string_map.cpp
index 9f9374ece..b5db63e90 100644
--- a/src/string_map.cpp
+++ b/src/string_map.cpp
@@ -35,7 +35,7 @@ struct StringMap {
 };
 
 
-template gb_internal void string_map_init (StringMap *h, gbAllocator a, isize capacity = 16);
+template gb_internal void string_map_init (StringMap *h, isize capacity = 16);
 template gb_internal void string_map_destroy (StringMap *h);
 
 template gb_internal T * string_map_get (StringMap *h, char const *key);
@@ -56,11 +56,15 @@ template gb_internal void string_map_grow (StringMap
 template gb_internal void string_map_rehash (StringMap *h, isize new_count);
 template gb_internal void string_map_reserve (StringMap *h, isize cap);
 
+gb_internal gbAllocator string_map_allocator(void) {
+	return heap_allocator();
+}
+
 template
-gb_internal gb_inline void string_map_init(StringMap *h, gbAllocator a, isize capacity) {
+gb_internal gb_inline void string_map_init(StringMap *h, isize capacity) {
 	capacity = next_pow2_isize(capacity);
-	slice_init(&h->hashes, a, capacity);
-	array_init(&h->entries, a, 0, capacity);
+	slice_init(&h->hashes, string_map_allocator(), capacity);
+	array_init(&h->entries, string_map_allocator(), 0, capacity);
 	for (isize i = 0; i < capacity; i++) {
 		h->hashes.data[i] = MAP_SENTINEL;
 	}
@@ -68,6 +72,9 @@ gb_internal gb_inline void string_map_init(StringMap *h, gbAllocator a, isize
 template
 gb_internal gb_inline void string_map_destroy(StringMap *h) {
+	if (h->entries.allocator.proc == nullptr) {
+		h->entries.allocator = string_map_allocator();
+	}
 	slice_free(&h->hashes, h->entries.allocator);
 	array_free(&h->entries);
 }
@@ -147,6 +154,9 @@ gb_internal void string_map_reset_entries(StringMap *h) {
 
 template
 gb_internal void string_map_reserve(StringMap *h, isize cap) {
+	if (h->entries.allocator.proc == nullptr) {
+		h->entries.allocator = string_map_allocator();
+	}
 	array_reserve(&h->entries, cap);
 	if (h->entries.count*2 < h->hashes.count) {
 		return;
--
cgit v1.2.3


From 69934c3b0b1b8ad0a499574c39c1ab177a1fe30a Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 3 Jan 2023 13:04:09 +0000
Subject: More `for_array(i, y)` to `for (x : y)` translations

---
 src/check_builtin.cpp     | 15 ++++------
 src/check_decl.cpp        | 27 +++++++----------
 src/check_stmt.cpp        | 73 ++++++++++++++++------------------------------
 src/llvm_backend.cpp      |  9 ++----
 src/llvm_backend_expr.cpp | 18 ++++--------
 src/llvm_backend_stmt.cpp | 74 +++++++++++++++++++----------------------------
 6 files changed, 80 insertions(+), 136 deletions(-)

(limited to 'src/check_stmt.cpp')

diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index af196234e..7c5521dde 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -96,8 +96,7 @@ gb_internal void check_or_else_expr_no_value_error(CheckerContext *c, String con
 	gbString th = nullptr;
 	if (type_hint != nullptr) {
 		GB_ASSERT(bsrc->kind == Type_Union);
-		for_array(i, bsrc->Union.variants) {
-			Type *vt = bsrc->Union.variants[i];
+		for (Type *vt : bsrc->Union.variants) {
 			if
(are_types_identical(vt, type_hint)) { th = type_to_string(type_hint); break; @@ -198,8 +197,7 @@ gb_internal void add_objc_proc_type(CheckerContext *c, Ast *call, Type *return_t { auto variables = array_make(permanent_allocator(), 0, param_types.count); - for_array(i, param_types) { - Type *type = param_types[i]; + for (Type *type : param_types) { Entity *param = alloc_entity_param(scope, blank_token, type, false, true); array_add(&variables, param); } @@ -3071,8 +3069,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue); bool fail = false; - for_array(i, ce->args) { - Ast *arg = ce->args[i]; + for (Ast *arg : ce->args) { bool mix = false; if (first_is_field_value) { mix = arg->kind != Ast_FieldValue; @@ -3088,9 +3085,8 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As StringSet name_set = {}; string_set_init(&name_set, 2*ce->args.count); - for_array(i, ce->args) { + for (Ast *arg : ce->args) { String name = {}; - Ast *arg = ce->args[i]; if (arg->kind == Ast_FieldValue) { Ast *ename = arg->FieldValue.field; if (!fail && ename->kind != Ast_Ident) { @@ -4987,8 +4983,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As bool is_variant = false; - for_array(i, u->Union.variants) { - Type *vt = u->Union.variants[i]; + for (Type *vt : u->Union.variants) { if (are_types_identical(v, vt)) { is_variant = true; break; diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 7b229db08..66f16546c 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -354,8 +354,7 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, Type *t = base_type(e->type); if (t->kind == Type_Enum) { - for_array(i, t->Enum.fields) { - Entity *f = t->Enum.fields[i]; + for (Entity *f : t->Enum.fields) { if (f->kind != Entity_Constant) { continue; } @@ -1237,8 +1236,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, PtrSet entity_set = {}; ptr_set_init(&entity_set, 2*pg->args.count); - for_array(i, pg->args) { - Ast *arg = pg->args[i]; + for (Ast *arg : pg->args) { Entity *e = nullptr; Operand o = {}; if (arg->kind == Ast_Ident) { @@ -1271,7 +1269,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, ptr_set_destroy(&entity_set); - for_array(j, pge->entities) { + for (isize j = 0; j < pge->entities.count; j++) { Entity *p = pge->entities[j]; if (p->type == t_invalid) { // NOTE(bill): This invalid overload has already been handled @@ -1462,8 +1460,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de { if (type->Proc.param_count > 0) { TypeTuple *params = &type->Proc.params->Tuple; - for_array(i, params->variables) { - Entity *e = params->variables[i]; + for (Entity *e : params->variables) { if (e->kind != Entity_Variable) { continue; } @@ -1499,9 +1496,9 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de } } - MUTEX_GUARD_BLOCK(ctx->scope->mutex) for_array(i, using_entities) { - Entity *e = using_entities[i].e; - Entity *uvar = using_entities[i].uvar; + MUTEX_GUARD_BLOCK(ctx->scope->mutex) for (auto const &entry : using_entities) { + Entity *e = entry.e; + Entity *uvar = entry.uvar; Entity *prev = scope_insert_no_mutex(ctx->scope, uvar); if (prev != nullptr) { error(e->token, "Namespace collision while 'using' procedure argument '%.*s' of: %.*s", LIT(e->token.string), LIT(prev->token.string)); @@ -1519,8 
+1516,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de check_open_scope(ctx, body); { - for_array(i, using_entities) { - Entity *uvar = using_entities[i].uvar; + for (auto const &entry : using_entities) { + Entity *uvar = entry.uvar; Entity *prev = scope_insert(ctx->scope, uvar); gb_unused(prev); // NOTE(bill): Don't err here @@ -1537,12 +1534,10 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de decl->defer_use_checked = true; - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { if (stmt->kind == Ast_ValueDecl) { ast_node(vd, ValueDecl, stmt); - for_array(j, vd->names) { - Ast *name = vd->names[j]; + for (Ast *name : vd->names) { if (!is_blank_ident(name)) { if (name->kind == Ast_Ident) { GB_ASSERT(name->Ident.entity != nullptr); diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 7192b16b5..e075297a4 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -931,16 +931,15 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue defer (map_destroy(&seen)); - for_array(stmt_index, bs->stmts) { - Ast *stmt = bs->stmts[stmt_index]; + for (Ast *stmt : bs->stmts) { if (stmt->kind != Ast_CaseClause) { // NOTE(bill): error handled by above multiple default checker continue; } ast_node(cc, CaseClause, stmt); - for_array(j, cc->list) { - Ast *expr = unparen_expr(cc->list[j]); + for (Ast *expr : cc->list) { + expr = unparen_expr(expr); if (is_ast_range(expr)) { ast_node(be, BinaryExpr, expr); @@ -1052,8 +1051,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags auto unhandled = array_make(temporary_allocator(), 0, fields.count); - for_array(i, fields) { - Entity *f = fields[i]; + for (Entity *f : fields) { if (f->kind != Entity_Constant) { continue; } @@ -1072,8 +1070,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags error_no_newline(node, "Unhandled switch case: %.*s", LIT(unhandled[0]->token.string)); } else { error(node, "Unhandled switch cases:"); - for_array(i, unhandled) { - Entity *f = unhandled[i]; + for (Entity *f : unhandled) { error_line("\t%.*s\n", LIT(f->token.string)); } } @@ -1154,8 +1151,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ // NOTE(bill): Check for multiple defaults Ast *first_default = nullptr; ast_node(bs, BlockStmt, ss->body); - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { Ast *default_stmt = nullptr; if (stmt->kind == Ast_CaseClause) { ast_node(cc, CaseClause, stmt); @@ -1186,8 +1182,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ PtrSet seen = {}; defer (ptr_set_destroy(&seen)); - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { if (stmt->kind != Ast_CaseClause) { // NOTE(bill): error handled by above multiple default checker continue; @@ -1198,8 +1193,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ Type *bt = base_type(type_deref(x.type)); Type *case_type = nullptr; - for_array(type_index, cc->list) { - Ast *type_expr = cc->list[type_index]; + for (Ast *type_expr : cc->list) { if (type_expr != nullptr) { // Otherwise it's a default expression Operand y = {}; check_expr_or_type(ctx, &y, type_expr); @@ -1213,8 +1207,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ if 
(switch_kind == TypeSwitch_Union) { GB_ASSERT(is_type_union(bt)); bool tag_type_found = false; - for_array(j, bt->Union.variants) { - Type *vt = bt->Union.variants[j]; + for (Type *vt : bt->Union.variants) { if (are_types_identical(vt, y.type)) { tag_type_found = true; break; @@ -1288,8 +1281,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ auto unhandled = array_make(temporary_allocator(), 0, variants.count); - for_array(i, variants) { - Type *t = variants[i]; + for (Type *t : variants) { if (!type_ptr_set_exists(&seen, t)) { array_add(&unhandled, t); } @@ -1302,8 +1294,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ gb_string_free(s); } else { error_no_newline(node, "Unhandled switch cases:\n"); - for_array(i, unhandled) { - Type *t = unhandled[i]; + for (Type *t : unhandled) { gbString s = type_to_string(t); error_line("\t%s\n", s); gb_string_free(s); @@ -1340,8 +1331,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) { isize stmt_count = 0; Ast *the_stmt = nullptr; - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { GB_ASSERT(stmt != nullptr); switch (stmt->kind) { case_ast_node(es, EmptyStmt, stmt); @@ -1359,8 +1349,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) { if (stmt_count == 1) { if (the_stmt->kind == Ast_ValueDecl) { - for_array(i, the_stmt->ValueDecl.names) { - Ast *name = the_stmt->ValueDecl.names[i]; + for (Ast *name : the_stmt->ValueDecl.names) { if (name->kind != Ast_Ident) { continue; } @@ -1376,8 +1365,8 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) { gb_internal bool all_operands_valid(Array const &operands) { if (any_errors()) { - for_array(i, operands) { - if (operands[i].type == t_invalid) { + for (Operand const &o : operands) { + if (o.type == t_invalid) { return false; } } @@ -1548,16 +1537,9 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) check_assignment_arguments(ctx, lhs_operands, &rhs_operands, as->rhs); - isize rhs_count = rhs_operands.count; - for_array(i, rhs_operands) { - if (rhs_operands[i].mode == Addressing_Invalid) { - // TODO(bill): Should I ignore invalid parameters? 
- // rhs_count--; - } - } - auto lhs_to_ignore = array_make(temporary_allocator(), lhs_count); + isize rhs_count = rhs_operands.count; isize max = gb_min(lhs_count, rhs_count); for (isize i = 0; i < max; i++) { if (lhs_to_ignore[i]) { @@ -1856,8 +1838,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) break; } - for_array(ti, t->Tuple.variables) { - array_add(&vals, t->Tuple.variables[ti]->type); + for (Entity *e : t->Tuple.variables) { + array_add(&vals, e->type); } if (rs->vals.count > 1 && rs->vals[1] != nullptr && count < 3) { @@ -1976,8 +1958,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) } } - for_array(i, entities) { - Entity *e = entities[i]; + for (Entity *e : entities) { DeclInfo *d = decl_info_of_entity(e); GB_ASSERT(d == nullptr); add_entity(ctx, ctx->scope, e->identifier, e); @@ -2091,8 +2072,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) error(us->token, "Empty 'using' list"); return; } - for_array(i, us->list) { - Ast *expr = unparen_expr(us->list[i]); + for (Ast *expr : us->list) { + expr = unparen_expr(expr); Entity *e = nullptr; bool is_selector = false; @@ -2132,8 +2113,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) check_decl_attributes(&c, fb->attributes, foreign_block_decl_attribute, nullptr); ast_node(block, BlockStmt, fb->body); - for_array(i, block->stmts) { - Ast *decl = block->stmts[i]; + for (Ast *decl : block->stmts) { if (decl->kind == Ast_ValueDecl && decl->ValueDecl.is_mutable) { check_stmt(&c, decl, flags); } @@ -2146,8 +2126,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) isize entity_count = 0; isize new_name_count = 0; - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { Entity *entity = nullptr; if (name->kind != Ast_Ident) { error(name, "A variable declaration must be an identifier"); @@ -2193,8 +2172,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) begin_error_block(); error(node, "No new declarations on the left hand side"); bool all_underscore = true; - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { if (name->kind == Ast_Ident) { if (!is_blank_ident(name)) { all_underscore = false; @@ -2388,8 +2366,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) } else { // constant value declaration // NOTE(bill): Check `_` declarations - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { if (is_blank_ident(name)) { Entity *e = name->Ident.entity; DeclInfo *d = decl_info_of_entity(e); diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 3e62f678a..304e5ef36 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -240,11 +240,10 @@ gb_internal lbValue lb_equal_proc_for_type(lbModule *m, Type *type) { LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, left_tag.value, block_false->block, cast(unsigned)type->Union.variants.count); - for_array(i, type->Union.variants) { + for (Type *v : type->Union.variants) { lbBlock *case_block = lb_create_block(p, "bcase"); lb_start_block(p, case_block); - Type *v = type->Union.variants[i]; lbValue case_tag = lb_const_union_tag(p->module, type, v); Type *vp = alloc_type_pointer(v); @@ -374,11 +373,10 @@ gb_internal lbValue lb_hasher_proc_for_type(lbModule *m, Type *type) { LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, tag.value, end_block->block, 
cast(unsigned)type->Union.variants.count); - for_array(i, type->Union.variants) { + for (Type *v : type->Union.variants) { lbBlock *case_block = lb_create_block(p, "bcase"); lb_start_block(p, case_block); - Type *v = type->Union.variants[i]; lbValue case_tag = lb_const_union_tag(p->module, type, v); lbValue variant_hasher = lb_hasher_proc_for_type(m, v); @@ -2235,8 +2233,7 @@ gb_internal void lb_generate_code(lbGenerator *gen) { for (auto const &entry : gen->modules) { lbModule *m = entry.value; - for_array(i, m->info->required_foreign_imports_through_force) { - Entity *e = m->info->required_foreign_imports_through_force[i]; + for (Entity *e : m->info->required_foreign_imports_through_force) { lb_add_foreign_library_path(m, e); } diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp index d574caf4c..c28e9fb2b 100644 --- a/src/llvm_backend_expr.cpp +++ b/src/llvm_backend_expr.cpp @@ -61,8 +61,7 @@ gb_internal lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, As GB_ASSERT(incoming_values.count > 0); LLVMTypeRef phi_type = nullptr; - for_array(i, incoming_values) { - LLVMValueRef incoming_value = incoming_values[i]; + for (LLVMValueRef incoming_value : incoming_values) { if (!LLVMIsConstant(incoming_value)) { phi_type = LLVMTypeOf(incoming_value); break; @@ -1921,8 +1920,7 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) { } if (is_type_union(dst)) { - for_array(i, dst->Union.variants) { - Type *vt = dst->Union.variants[i]; + for (Type *vt : dst->Union.variants) { if (are_types_identical(vt, src_type)) { lbAddr parent = lb_add_local_generated(p, t, true); lb_emit_store_union_variant(p, parent.addr, value, vt); @@ -3596,8 +3594,7 @@ gb_internal void lb_build_addr_compound_lit_populate(lbProcedure *p, Slice const &temp_data) { - for_array(i, temp_data) { - auto td = temp_data[i]; + for (auto const &td : temp_data) { if (td.value.value != nullptr) { if (td.elem_length > 0) { auto loop_data = lb_loop_start(p, cast(isize)td.elem_length, t_i32); @@ -4129,8 +4126,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) { lbValue err = lb_dynamic_map_reserve(p, v.addr, 2*cl->elems.count, pos); gb_unused(err); - for_array(field_index, cl->elems) { - Ast *elem = cl->elems[field_index]; + for (Ast *elem : cl->elems) { ast_node(fv, FieldValue, elem); lbValue key = lb_build_expr(p, fv->field); @@ -4304,8 +4300,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) { lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr))); lbValue lower = lb_const_value(p->module, t_int, exact_value_i64(bt->BitSet.lower)); - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { GB_ASSERT(elem->kind != Ast_FieldValue); if (lb_is_elem_const(elem, et)) { @@ -4359,8 +4354,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) { // TODO(bill): reduce the need for individual `insertelement` if a `shufflevector` // might be a better option - for_array(i, temp_data) { - auto td = temp_data[i]; + for (auto const &td : temp_data) { if (td.value.value != nullptr) { if (td.elem_length > 0) { for (i64 k = 0; k < td.elem_length; k++) { diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 2703c511a..c48115079 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -7,8 +7,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) static i32 global_guid = 0; - for_array(i, vd->names) { - Ast *ident 
= vd->names[i]; + for (Ast *ident : vd->names) { GB_ASSERT(ident->kind == Ast_Ident); Entity *e = entity_of_node(ident); GB_ASSERT(e != nullptr); @@ -106,8 +105,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) gb_internal void lb_build_stmt_list(lbProcedure *p, Slice const &stmts) { - for_array(i, stmts) { - Ast *stmt = stmts[i]; + for (Ast *stmt : stmts) { switch (stmt->kind) { case_ast_node(vd, ValueDecl, stmt); lb_build_constant_value_decl(p, vd); @@ -118,8 +116,8 @@ gb_internal void lb_build_stmt_list(lbProcedure *p, Slice const &stmts) { case_end; } } - for_array(i, stmts) { - lb_build_stmt(p, stmts[i]); + for (Ast *stmt : stmts) { + lb_build_stmt(p, stmt); } } @@ -129,10 +127,9 @@ gb_internal lbBranchBlocks lb_lookup_branch_blocks(lbProcedure *p, Ast *ident) { GB_ASSERT(ident->kind == Ast_Ident); Entity *e = entity_of_node(ident); GB_ASSERT(e->kind == Entity_Label); - for_array(i, p->branch_blocks) { - lbBranchBlocks *b = &p->branch_blocks[i]; - if (b->label == e->Label.node) { - return *b; + for (lbBranchBlocks const &b : p->branch_blocks) { + if (b.label == e->Label.node) { + return b; } } @@ -153,13 +150,12 @@ gb_internal lbTargetList *lb_push_target_list(lbProcedure *p, Ast *label, lbBloc if (label != nullptr) { // Set label blocks GB_ASSERT(label->kind == Ast_Label); - for_array(i, p->branch_blocks) { - lbBranchBlocks *b = &p->branch_blocks[i]; - GB_ASSERT(b->label != nullptr && label != nullptr); - GB_ASSERT(b->label->kind == Ast_Label); - if (b->label == label) { - b->break_ = break_; - b->continue_ = continue_; + for (lbBranchBlocks &b : p->branch_blocks) { + GB_ASSERT(b.label != nullptr && label != nullptr); + GB_ASSERT(b.label->kind == Ast_Label); + if (b.label == label) { + b.break_ = break_; + b.continue_ = continue_; return tl; } } @@ -1095,8 +1091,7 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo } ast_node(body, BlockStmt, ss->body); - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); if (cc->list.count == 0) { @@ -1104,8 +1099,8 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo continue; } - for_array(j, cc->list) { - Ast *expr = unparen_expr(cc->list[j]); + for (Ast *expr : cc->list) { + expr = unparen_expr(expr); if (is_ast_range(expr)) { return false; } @@ -1166,8 +1161,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope * LLVMValueRef switch_instr = nullptr; if (is_trivial) { isize num_cases = 0; - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); num_cases += cc->list.count; } @@ -1204,8 +1198,8 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope * } lbBlock *next_cond = nullptr; - for_array(j, cc->list) { - Ast *expr = unparen_expr(cc->list[j]); + for (Ast *expr : cc->list) { + expr = unparen_expr(expr); if (switch_instr != nullptr) { lbValue on_val = {}; @@ -1384,8 +1378,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss lbBlock *default_block = nullptr; isize num_cases = 0; - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); num_cases += cc->list.count; if (cc->list.count == 0) { @@ -1405,8 +1398,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss switch_instr = LLVMBuildSwitch(p->builder, 
tag.value, else_block->block, cast(unsigned)num_cases); } - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); lb_open_scope(p, cc->scope); if (cc->list.count == 0) { @@ -1420,9 +1412,8 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss if (p->debug_info != nullptr) { LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, clause)); } - Type *case_type = nullptr; - for_array(type_index, cc->list) { - case_type = type_of_expr(cc->list[type_index]); + for (Ast *type_expr : cc->list) { + Type *case_type = type_of_expr(type_expr); lbValue on_val = {}; if (switch_kind == TypeSwitch_Union) { Type *ut = base_type(type_deref(parent.type)); @@ -1538,8 +1529,8 @@ gb_internal void lb_append_tuple_values(lbProcedure *p, Array *dst_valu if (t->kind == Type_Tuple) { lbTupleFix *tf = map_get(&p->tuple_fix_map, src_value.value); if (tf) { - for_array(j, tf->values) { - array_add(dst_values, tf->values[j]); + for (lbValue const &value : tf->values) { + array_add(dst_values, value); } } else { for_array(i, t->Tuple.variables) { @@ -1560,8 +1551,7 @@ gb_internal void lb_build_assignment(lbProcedure *p, Array &lvals, Slice auto inits = array_make(permanent_allocator(), 0, lvals.count); - for_array(i, values) { - Ast *rhs = values[i]; + for (Ast *rhs : values) { lbValue init = lb_build_expr(p, rhs); lb_append_tuple_values(p, &inits, init); } @@ -1971,8 +1961,7 @@ gb_internal void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr auto indices_handled = slice_make(temporary_allocator(), bt->Array.count); auto indices = slice_make(temporary_allocator(), bt->Array.count); i32 index_count = 0; - for_array(i, lhs.swizzle_large.indices) { - i32 index = lhs.swizzle_large.indices[i]; + for (i32 index : lhs.swizzle_large.indices) { if (indices_handled[index]) { continue; } @@ -2049,8 +2038,7 @@ gb_internal void lb_build_assign_stmt(lbProcedure *p, AstAssignStmt *as) { if (as->op.kind == Token_Eq) { auto lvals = array_make(permanent_allocator(), 0, as->lhs.count); - for_array(i, as->lhs) { - Ast *lhs = as->lhs[i]; + for (Ast *lhs : as->lhs) { lbAddr lval = {}; if (!is_blank_ident(lhs)) { lval = lb_build_addr(p, lhs); @@ -2185,8 +2173,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) { bool is_static = false; if (vd->names.count > 0) { - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { if (!is_blank_ident(name)) { GB_ASSERT(name->kind == Ast_Ident); Entity *e = entity_of_node(name); @@ -2208,8 +2195,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) { auto lvals = array_make(permanent_allocator(), 0, vd->names.count); - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { lbAddr lval = {}; if (!is_blank_ident(name)) { Entity *e = entity_of_node(name); -- cgit v1.2.3 From c7a704d345e9bda38da18807a1d7cd5bc5accc17 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:26:47 +0000 Subject: Use `RwMutex` for the `Scope` --- src/check_decl.cpp | 12 ++++--- src/check_expr.cpp | 4 ++- src/check_stmt.cpp | 5 ++- src/checker.cpp | 21 +++++++----- src/checker.hpp | 2 +- src/thread_pool.cpp | 27 ++++++++------- src/threading.cpp | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 138 insertions(+), 29 deletions(-) (limited to 'src/check_stmt.cpp') diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 66f16546c..4afde6e51 100644 --- a/src/check_decl.cpp +++ 
b/src/check_decl.cpp @@ -381,8 +381,8 @@ gb_internal void override_entity_in_scope(Entity *original_entity, Entity *new_e if (found_scope == nullptr) { return; } - mutex_lock(&found_scope->mutex); - defer (mutex_unlock(&found_scope->mutex)); + rw_mutex_lock(&found_scope->mutex); + defer (rw_mutex_unlock(&found_scope->mutex)); // IMPORTANT NOTE(bill, 2021-04-10): Overriding behaviour was flawed in that the // original entity was still used check checked, but the checking was only @@ -1478,7 +1478,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de if (t->kind == Type_Struct) { Scope *scope = t->Struct.scope; GB_ASSERT(scope != nullptr); - MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_lock(&scope->mutex); + for (auto const &entry : scope->elements) { Entity *f = entry.value; if (f->kind == Entity_Variable) { Entity *uvar = alloc_entity_using_variable(e, f->token, f->type, nullptr); @@ -1488,6 +1489,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de array_add(&using_entities, puv); } } + rw_mutex_unlock(&scope->mutex); } else { error(e->token, "'using' can only be applied to variables of type struct"); break; @@ -1496,7 +1498,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de } } - MUTEX_GUARD_BLOCK(ctx->scope->mutex) for (auto const &entry : using_entities) { + rw_mutex_lock(&ctx->scope->mutex); + for (auto const &entry : using_entities) { Entity *e = entry.e; Entity *uvar = entry.uvar; Entity *prev = scope_insert_no_mutex(ctx->scope, uvar); @@ -1506,6 +1509,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de break; } } + rw_mutex_unlock(&ctx->scope->mutex); bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated); diff --git a/src/check_expr.cpp b/src/check_expr.cpp index c1787e7b6..d9ab328cb 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -236,10 +236,12 @@ gb_internal void check_did_you_mean_scope(String const &name, Scope *scope, char DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name); defer (did_you_mean_destroy(&d)); - MUTEX_GUARD_BLOCK(&scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_shared_lock(&scope->mutex); + for (auto const &entry : scope->elements) { Entity *e = entry.value; did_you_mean_append(&d, e->token.string); } + rw_mutex_shared_unlock(&scope->mutex); check_did_you_mean_print(&d, prefix); } diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index e075297a4..6e84d0789 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -622,7 +622,10 @@ gb_internal bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us, case Entity_ImportName: { Scope *scope = e->ImportName.scope; - MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_lock(&scope->mutex); + defer (rw_mutex_unlock(&scope->mutex)); + + for (auto const &entry : scope->elements) { String name = entry.key.string; Entity *decl = entry.value; if (!is_entity_exported(decl)) continue; diff --git a/src/checker.cpp b/src/checker.cpp index 0075fa543..1d536074d 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -51,10 +51,11 @@ gb_internal bool check_rtti_type_disallowed(Ast *expr, Type *type, char const *f gb_internal void scope_reset(Scope *scope) { if (scope == nullptr) return; - MUTEX_GUARD(&scope->mutex); + 
rw_mutex_lock(&scope->mutex); scope->head_child.store(nullptr, std::memory_order_relaxed); string_map_clear(&scope->elements); ptr_set_clear(&scope->imported); + rw_mutex_unlock(&scope->mutex); } gb_internal void scope_reserve(Scope *scope, isize capacity) { @@ -180,9 +181,9 @@ gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { gb_zero_item(d); d->parent = parent; d->scope = scope; - ptr_set_init(&d->deps); - ptr_set_init(&d->type_info_deps); - array_init (&d->labels, heap_allocator()); + ptr_set_init(&d->deps, 0); + ptr_set_init(&d->type_info_deps, 0); + d->labels.allocator = heap_allocator(); } gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { @@ -394,9 +395,9 @@ gb_internal void scope_lookup_parent(Scope *scope, String const &name, Scope **s StringHashKey key = string_hash_string(name); for (Scope *s = scope; s != nullptr; s = s->parent) { Entity **found = nullptr; - mutex_lock(&s->mutex); + rw_mutex_shared_lock(&s->mutex); found = string_map_get(&s->elements, key); - mutex_unlock(&s->mutex); + rw_mutex_shared_unlock(&s->mutex); if (found) { Entity *e = *found; if (gone_thru_proc) { @@ -482,7 +483,7 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity Entity **found = nullptr; Entity *result = nullptr; - MUTEX_GUARD(&s->mutex); + rw_mutex_lock(&s->mutex); found = string_map_get(&s->elements, key); @@ -509,6 +510,8 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity entity->scope = s; } end:; + rw_mutex_unlock(&s->mutex); + return result; } @@ -669,7 +672,8 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) { Array vetted_entities = {}; array_init(&vetted_entities, heap_allocator()); - MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_shared_lock(&scope->mutex); + for (auto const &entry : scope->elements) { Entity *e = entry.value; if (e == nullptr) continue; VettedEntity ve_unused = {}; @@ -686,6 +690,7 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) { array_add(&vetted_entities, ve_shadowed); } } + rw_mutex_shared_unlock(&scope->mutex); gb_sort(vetted_entities.data, vetted_entities.count, gb_size_of(VettedEntity), vetted_entity_variable_pos_cmp); diff --git a/src/checker.hpp b/src/checker.hpp index cc92fce28..53052d5cd 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -224,7 +224,7 @@ struct Scope { std::atomic next; std::atomic head_child; - BlockingMutex mutex; + RwMutex mutex; StringMap elements; PtrSet imported; diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 276e93dff..2c369eaad 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -47,7 +47,7 @@ gb_internal void thread_pool_destroy(ThreadPool *pool) { for_array_off(i, 1, pool->threads) { Thread *t = &pool->threads[i]; - pool->tasks_available.fetch_add(1, std::memory_order_release); + pool->tasks_available.fetch_add(1, std::memory_order_relaxed); futex_broadcast(&pool->tasks_available); thread_join_and_destroy(t); } @@ -74,7 +74,7 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) { } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture)); thread->pool->tasks_left.fetch_add(1, std::memory_order_release); - thread->pool->tasks_available.fetch_add(1, std::memory_order_release); + thread->pool->tasks_available.fetch_add(1, std::memory_order_relaxed); futex_broadcast(&thread->pool->tasks_available); } @@ -82,7 +82,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) { u64 capture; u64 
new_capture; do { - capture = thread->head_and_tail.load(); + capture = thread->head_and_tail.load(std::memory_order_acquire); u64 mask = thread->capacity - 1; u64 head = (capture >> 32) & mask; @@ -97,7 +97,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) { *task = thread->queue[tail]; new_capture = (head << 32) | new_tail; - } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture)); + } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture, std::memory_order_release)); return true; } @@ -168,22 +168,21 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { Thread *thread = &pool->threads.data[idx]; WorkerTask task; - if (!thread_pool_queue_pop(thread, &task)) { - continue; - } - task.do_work(task.data); - pool->tasks_left.fetch_sub(1, std::memory_order_release); + if (thread_pool_queue_pop(thread, &task)) { + task.do_work(task.data); + pool->tasks_left.fetch_sub(1, std::memory_order_release); - if (pool->tasks_left.load(std::memory_order_acquire) == 0) { - futex_signal(&pool->tasks_left); - } + if (pool->tasks_left.load(std::memory_order_acquire) == 0) { + futex_signal(&pool->tasks_left); + } - goto main_loop_continue; + goto main_loop_continue; + } } } // if we've done all our work, and there's nothing to steal, go to sleep - state = pool->tasks_available.load(); + state = pool->tasks_available.load(std::memory_order_acquire); futex_wait(&pool->tasks_available, state); main_loop_continue:; diff --git a/src/threading.cpp b/src/threading.cpp index 78943150e..27a17112e 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -8,10 +8,12 @@ struct BlockingMutex; struct RecursiveMutex; +struct RwMutex; struct Semaphore; struct Condition; struct Thread; struct ThreadPool; +struct Parker; #define THREAD_PROC(name) isize name(struct Thread *thread) gb_internal THREAD_PROC(thread_pool_thread_proc); @@ -56,6 +58,13 @@ gb_internal void mutex_lock (RecursiveMutex *m); gb_internal bool mutex_try_lock(RecursiveMutex *m); gb_internal void mutex_unlock (RecursiveMutex *m); +gb_internal void rw_mutex_lock (RwMutex *m); +gb_internal bool rw_mutex_try_lock (RwMutex *m); +gb_internal void rw_mutex_unlock (RwMutex *m); +gb_internal void rw_mutex_shared_lock (RwMutex *m); +gb_internal bool rw_mutex_try_shared_lock(RwMutex *m); +gb_internal void rw_mutex_shared_unlock (RwMutex *m); + gb_internal void semaphore_post (Semaphore *s, i32 count); gb_internal void semaphore_wait (Semaphore *s); gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); } @@ -65,6 +74,10 @@ gb_internal void condition_broadcast(Condition *c); gb_internal void condition_signal(Condition *c); gb_internal void condition_wait(Condition *c, BlockingMutex *m); +gb_internal void park(Parker *p); +gb_internal void unpark_one(Parker *p); +gb_internal void unpark_all(Parker *p); + gb_internal u32 thread_current_id(void); gb_internal void thread_init (ThreadPool *pool, Thread *t, isize idx); @@ -205,6 +218,30 @@ gb_internal void semaphore_wait(Semaphore *s) { gb_internal void condition_wait(Condition *c, BlockingMutex *m) { SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0); } + + struct RwMutex { + SRWLOCK srwlock; + }; + + gb_internal void rw_mutex_lock(RwMutex *m) { + AcquireSRWLockExclusive(&m->srwlock); + } + gb_internal bool rw_mutex_try_lock(RwMutex *m) { + return !!TryAcquireSRWLockExclusive(&m->srwlock); + } + gb_internal void rw_mutex_unlock(RwMutex *m) { + ReleaseSRWLockExclusive(&m->srwlock); + } + + gb_internal void rw_mutex_shared_lock(RwMutex *m) { + 
AcquireSRWLockShared(&m->srwlock); + } + gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) { + return !!TryAcquireSRWLockShared(&m->srwlock); + } + gb_internal void rw_mutex_shared_unlock(RwMutex *m) { + ReleaseSRWLockShared(&m->srwlock); + } #else enum Internal_Mutex_State : i32 { Internal_Mutex_State_Unlocked = 0, @@ -306,8 +343,67 @@ gb_internal void semaphore_wait(Semaphore *s) { futex_wait(&c->state(), state); mutex_lock(m); } + + struct RwMutex { + // TODO(bill): make this a proper RW mutex + BlockingMutex mutex; + }; + + gb_internal void rw_mutex_lock(RwMutex *m) { + mutex_lock(&m->mutex); + } + gb_internal bool rw_mutex_try_lock(RwMutex *m) { + return mutex_try_lock(&m->mutex); + } + gb_internal void rw_mutex_unlock(RwMutex *m) { + mutex_unlock(&m->mutex); + } + + gb_internal void rw_mutex_shared_lock(RwMutex *m) { + mutex_lock(&m->mutex); + } + gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) { + return mutex_try_lock(&m->mutex); + } + gb_internal void rw_mutex_shared_unlock(RwMutex *m) { + mutex_unlock(&m->mutex); + } #endif +struct Parker { + Futex state; +}; +enum ParkerState : u32 { + ParkerState_Empty = 0, + ParkerState_Notified = 1, + ParkerState_Parked = UINT32_MAX, +}; + +gb_internal void park(Parker *p) { + if (p->state.fetch_sub(1, std::memory_order_acquire) == ParkerState_Notified) { + return; + } + for (;;) { + futex_wait(&p->state, ParkerState_Parked); + i32 notified = ParkerState_Empty; + if (p->state.compare_exchange_strong(notified, ParkerState_Empty, std::memory_order_acquire, std::memory_order_acquire)) { + return; + } + } +} + +gb_internal void unpark_one(Parker *p) { + if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) { + futex_signal(&p->state); + } +} + +gb_internal void unpark_all(Parker *p) { + if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) { + futex_broadcast(&p->state); + } +} + gb_internal u32 thread_current_id(void) { u32 thread_id; -- cgit v1.2.3 From 855ebceadcc4612a6451f268ab6d6693838ed5f4 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 17:26:05 +0000 Subject: Minimize `add_type_info_type` usage --- src/check_builtin.cpp | 2 +- src/check_expr.cpp | 8 ++++---- src/check_stmt.cpp | 8 +++++--- 3 files changed, 10 insertions(+), 8 deletions(-) (limited to 'src/check_stmt.cpp') diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index 7c5521dde..606283c32 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -3573,7 +3573,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As Entity *base_type_entity = alloc_entity_type_name(scope, token, elem, EntityState_Resolved); add_entity(c, scope, nullptr, base_type_entity); - add_type_info_type(c, soa_struct); + // add_type_info_type(c, soa_struct); operand->type = soa_struct; break; diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 746a29ce0..5f28504a2 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -8779,8 +8779,8 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no return kind; } - add_type_info_type(c, o->type); - add_type_info_type(c, bsrc->Union.variants[0]); + // add_type_info_type(c, o->type); + // add_type_info_type(c, bsrc->Union.variants[0]); o->type = bsrc->Union.variants[0]; o->mode = Addressing_OptionalOk; @@ -8812,8 +8812,8 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no return kind; } - add_type_info_type(c, o->type); - add_type_info_type(c, t); + 
// add_type_info_type(c, o->type); + // add_type_info_type(c, t); o->type = t; o->mode = Addressing_OptionalOk; diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 6e84d0789..9547035d0 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -1132,7 +1132,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ check_expr(ctx, &x, rhs); check_assignment(ctx, &x, nullptr, str_lit("type switch expression")); - add_type_info_type(ctx, x.type); + // add_type_info_type(ctx, x.type); TypeSwitchKind switch_kind = check_valid_type_switch_type(x.type); if (switch_kind == TypeSwitch_Invalid) { @@ -1223,7 +1223,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ continue; } case_type = y.type; - add_type_info_type(ctx, y.type); + // add_type_info_type(ctx, y.type); } else if (switch_kind == TypeSwitch_Any) { case_type = y.type; add_type_info_type(ctx, y.type); @@ -1259,7 +1259,9 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ if (case_type == nullptr) { case_type = x.type; } - add_type_info_type(ctx, case_type); + if (switch_kind == TypeSwitch_Any) { + add_type_info_type(ctx, case_type); + } check_open_scope(ctx, stmt); { -- cgit v1.2.3 From d06a0e7093c3f06a474a040385f1b9dfdfce29ad Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 13:30:27 +0000 Subject: Improve the `PtrSet` to be as simple and small as possible --- src/check_stmt.cpp | 1 + src/checker.cpp | 33 +++--- src/ptr_map.cpp | 2 +- src/ptr_set.cpp | 303 +++++++++++++++++++++++------------------------------ src/threading.cpp | 20 ++-- 5 files changed, 157 insertions(+), 202 deletions(-) (limited to 'src/check_stmt.cpp') diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 9547035d0..b4dd4cd7d 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -1289,6 +1289,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ for (Type *t : variants) { if (!type_ptr_set_exists(&seen, t)) { array_add(&unhandled, t); + gb_printf_err("HERE: %p %s\n", t, type_to_string(t)); } } diff --git a/src/checker.cpp b/src/checker.cpp index 78f96e47f..b8709f15e 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -66,15 +66,10 @@ gb_internal void scope_reserve(Scope *scope, isize capacity) { } gb_internal void entity_graph_node_set_destroy(EntityGraphNodeSet *s) { - if (s->hashes.data != nullptr) { - ptr_set_destroy(s); - } + ptr_set_destroy(s); } gb_internal void entity_graph_node_set_add(EntityGraphNodeSet *s, EntityGraphNode *n) { - if (s->hashes.data == nullptr) { - ptr_set_init(s); - } ptr_set_add(s, n); } @@ -2556,7 +2551,6 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf } // IMPORTANT NOTE/TODO(bill, 2020-11-15): These three calls take the majority of the // the time to process - entity_graph_node_set_add(&p->succ, s); entity_graph_node_set_add(&s->pred, p); // Remove edge to 'n' @@ -2577,7 +2571,7 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf for_array(i, G) { EntityGraphNode *n = G[i]; n->index = i; - n->dep_count = n->succ.entries.count; + n->dep_count = n->succ.count; GB_ASSERT(n->dep_count >= 0); } @@ -4228,7 +4222,7 @@ gb_internal Array generate_import_dependency_graph(Checker *c for (auto const &entry : M) { auto n = entry.value; n->index = i++; - n->dep_count = n->succ.entries.count; + n->dep_count = n->succ.count; GB_ASSERT(n->dep_count >= 0); array_add(&G, n); } @@ -5706,17 +5700,6 @@ gb_internal void check_parsed_files(Checker *c) { check_scope_usage(c, 
f->scope); } - TIME_SECTION("add untyped expression values"); - // Add untyped expression values - for (UntypedExprInfo u = {}; mpmc_dequeue(&c->global_untyped_queue, &u); /**/) { - GB_ASSERT(u.expr != nullptr && u.info != nullptr); - if (is_type_typed(u.info->type)) { - compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type)); - } - add_type_and_value(&c->builtin_ctx, u.expr, u.info->mode, u.info->type, u.info->value); - } - - TIME_SECTION("add basic type information"); // Add "Basic" type information for (isize i = 0; i < Basic_COUNT; i++) { @@ -5810,6 +5793,16 @@ gb_internal void check_parsed_files(Checker *c) { GB_ASSERT(c->info.entity_queue.count.load(std::memory_order_relaxed) == 0); GB_ASSERT(c->info.definition_queue.count.load(std::memory_order_relaxed) == 0); + TIME_SECTION("add untyped expression values"); + // Add untyped expression values + for (UntypedExprInfo u = {}; mpmc_dequeue(&c->global_untyped_queue, &u); /**/) { + GB_ASSERT(u.expr != nullptr && u.info != nullptr); + if (is_type_typed(u.info->type)) { + compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type)); + } + add_type_and_value(&c->builtin_ctx, u.expr, u.info->mode, u.info->type, u.info->value); + } + TIME_SECTION("sort init procedures"); check_sort_init_procedures(c); diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index ae3cd4b40..8869bf3fe 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -41,7 +41,7 @@ gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) { u32 word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u; res = (word >> 22u) ^ word; #endif - return res ^ (res == MAP_SENTINEL); + return res; } gb_internal gb_inline u32 ptr_map_hash_key(void const *key) { return ptr_map_hash_key((uintptr)key); diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index e2b3f2372..8be2b0524 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -1,19 +1,22 @@ template -struct PtrSetEntry { - static_assert(sizeof(T) == sizeof(void *), "Key size must be pointer size"); - - T ptr; - MapIndex next; +struct TypeIsPointer { + enum {value = false}; +}; - operator T() const noexcept { - return this->ptr; - } +template +struct TypeIsPointer { + enum {value = true}; }; + template struct PtrSet { - Slice hashes; - Array> entries; + static_assert(TypeIsPointer::value, "PtrSet::T must be a pointer"); + static constexpr T TOMBSTONE = (T)(~uintptr(0)); + + T * keys; + usize count; + usize capacity; }; template gb_internal void ptr_set_init (PtrSet *s, isize capacity = 16); @@ -30,225 +33,183 @@ gb_internal gbAllocator ptr_set_allocator(void) { template gb_internal void ptr_set_init(PtrSet *s, isize capacity) { + GB_ASSERT(s->keys == nullptr); if (capacity != 0) { capacity = next_pow2_isize(gb_max(16, capacity)); + s->keys = gb_alloc_array(ptr_set_allocator(), T, capacity); + // This memory will be zeroed, no need to explicitly zero it } - - slice_init(&s->hashes, ptr_set_allocator(), capacity); - array_init(&s->entries, ptr_set_allocator(), 0, capacity); - for (isize i = 0; i < capacity; i++) { - s->hashes.data[i] = MAP_SENTINEL; - } + s->count = 0; + s->capacity = capacity; } template gb_internal void ptr_set_destroy(PtrSet *s) { - if (s->entries.allocator.proc == nullptr) { - s->entries.allocator = ptr_set_allocator(); - } - slice_free(&s->hashes, s->entries.allocator); - array_free(&s->entries); + gb_free(ptr_set_allocator(), s->keys); + s->keys = nullptr; + s->count = 0; + s->capacity = 0; } template -gb_internal MapIndex ptr_set__add_entry(PtrSet 
*s, T ptr) { - PtrSetEntry e = {}; - e.ptr = ptr; - e.next = MAP_SENTINEL; - array_add(&s->entries, e); - return cast(MapIndex)(s->entries.count-1); -} - - -template -gb_internal MapFindResult ptr_set__find(PtrSet *s, T ptr) { - MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; - if (s->hashes.count != 0) { - u32 hash = ptr_map_hash_key(ptr); - fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1)); - fr.entry_index = s->hashes.data[fr.hash_index]; - while (fr.entry_index != MAP_SENTINEL) { - if (s->entries.data[fr.entry_index].ptr == ptr) { - return fr; +gb_internal isize ptr_set__find(PtrSet *s, T ptr) { + GB_ASSERT(ptr != nullptr); + if (s->count != 0) { + #if 0 + for (usize i = 0; i < s->capacity; i++) { + if (s->keys[i] == ptr) { + return i; } - fr.entry_prev = fr.entry_index; - fr.entry_index = s->entries.data[fr.entry_index].next; } - } - return fr; -} - -template -gb_internal MapFindResult ptr_set__find_from_entry(PtrSet *s, PtrSetEntry *e) { - MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; - if (s->hashes.count != 0) { - u32 hash = ptr_map_hash_key(e->ptr); - fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1)); - fr.entry_index = s->hashes.data[fr.hash_index]; - while (fr.entry_index != MAP_SENTINEL) { - if (&s->entries.data[fr.entry_index] == e) { - return fr; + #else + u32 hash = ptr_map_hash_key(ptr); + usize mask = s->capacity-1; + usize hash_index = cast(usize)hash & mask; + for (usize i = 0; i < s->capacity; i++) { + T key = s->keys[hash_index]; + if (key == ptr) { + return hash_index; + } else if (key == nullptr) { + return -1; } - fr.entry_prev = fr.entry_index; - fr.entry_index = s->entries.data[fr.entry_index].next; + hash_index = (hash_index+1)&mask; } + #endif } - return fr; + return -1; } template gb_internal bool ptr_set__full(PtrSet *s) { - return 0.75f * s->hashes.count <= s->entries.count; + return 0.75f * s->capacity <= s->count; } template -gb_internal void ptr_set_reset_entries(PtrSet *s) { - for (isize i = 0; i < s->hashes.count; i++) { - s->hashes.data[i] = MAP_SENTINEL; - } - for (isize i = 0; i < s->entries.count; i++) { - MapFindResult fr; - PtrSetEntry *e = &s->entries.data[i]; - e->next = MAP_SENTINEL; - fr = ptr_set__find_from_entry(s, e); - if (fr.entry_prev == MAP_SENTINEL) { - s->hashes[fr.hash_index] = cast(MapIndex)i; - } else { - s->entries[fr.entry_prev].next = cast(MapIndex)i; - } +gb_internal gb_inline void ptr_set_grow(PtrSet *old_set) { + if (old_set->capacity == 0) { + ptr_set_init(old_set); + return; } -} -template -gb_internal void ptr_set_reserve(PtrSet *s, isize cap) { - if (s->entries.allocator.proc == nullptr) { - s->entries.allocator = ptr_set_allocator(); - } - array_reserve(&s->entries, cap); - if (s->entries.count*2 < s->hashes.count) { - return; + PtrSet new_set = {}; + ptr_set_init(&new_set, gb_max(old_set->capacity<<1, 16)); + + for (T ptr : *old_set) { + bool was_new = ptr_set_update(&new_set, ptr); + GB_ASSERT(!was_new); } - slice_resize(&s->hashes, s->entries.allocator, cap*2); - ptr_set_reset_entries(s); -} + GB_ASSERT(old_set->count == new_set.count); -template -gb_internal gb_inline void ptr_set_grow(PtrSet *s) { - isize new_count = gb_max(s->hashes.count<<1, 16); - ptr_set_reserve(s, new_count); + ptr_set_destroy(old_set); + + *old_set = new_set; } template gb_internal gb_inline bool ptr_set_exists(PtrSet *s, T ptr) { - isize index = ptr_set__find(s, ptr).entry_index; - return index != MAP_SENTINEL; + return ptr_set__find(s, ptr) >= 0; } -// Returns true if it already exists 
-template -gb_internal T ptr_set_add(PtrSet *s, T ptr) { - MapIndex index; - MapFindResult fr; - if (s->hashes.count == 0) { - ptr_set_grow(s); - } - fr = ptr_set__find(s, ptr); - if (fr.entry_index == MAP_SENTINEL) { - index = ptr_set__add_entry(s, ptr); - if (fr.entry_prev != MAP_SENTINEL) { - s->entries.data[fr.entry_prev].next = index; - } else { - s->hashes.data[fr.hash_index] = index; - } - } - if (ptr_set__full(s)) { - ptr_set_grow(s); - } - return ptr; -} template gb_internal bool ptr_set_update(PtrSet *s, T ptr) { // returns true if it previously existsed - bool exists = false; - MapIndex index; - MapFindResult fr; - if (s->hashes.count == 0) { + if (ptr_set_exists(s, ptr)) { + return true; + } + + if (s->keys == nullptr) { + ptr_set_init(s); + } else if (ptr_set__full(s)) { ptr_set_grow(s); } - fr = ptr_set__find(s, ptr); - if (fr.entry_index != MAP_SENTINEL) { - exists = true; - } else { - index = ptr_set__add_entry(s, ptr); - if (fr.entry_prev != MAP_SENTINEL) { - s->entries.data[fr.entry_prev].next = index; - } else { - s->hashes.data[fr.hash_index] = index; + GB_ASSERT(s->count < s->capacity); + GB_ASSERT(s->capacity >= 0); + + usize mask = s->capacity-1; + u32 hash = ptr_map_hash_key(ptr); + usize hash_index = (cast(usize)hash) & mask; + GB_ASSERT(hash_index < s->capacity); + for (usize i = 0; i < s->capacity; i++) { + T *key = &s->keys[hash_index]; + GB_ASSERT(*key != ptr); + if (*key == PtrSet::TOMBSTONE || *key == nullptr) { + *key = ptr; + s->count++; + return false; } + hash_index = (hash_index+1)&mask; } - if (ptr_set__full(s)) { - ptr_set_grow(s); - } - return exists; -} - + GB_PANIC("ptr set out of memory"); + return false; +} template -gb_internal void ptr_set__erase(PtrSet *s, MapFindResult fr) { - MapFindResult last; - if (fr.entry_prev == MAP_SENTINEL) { - s->hashes.data[fr.hash_index] = s->entries.data[fr.entry_index].next; - } else { - s->entries.data[fr.entry_prev].next = s->entries.data[fr.entry_index].next; - } - if (cast(isize)fr.entry_index == s->entries.count-1) { - array_pop(&s->entries); - return; - } - s->entries.data[fr.entry_index] = s->entries.data[s->entries.count-1]; - last = ptr_set__find(s, s->entries.data[fr.entry_index].ptr); - if (last.entry_prev != MAP_SENTINEL) { - s->entries.data[last.entry_prev].next = fr.entry_index; - } else { - s->hashes.data[last.hash_index] = fr.entry_index; - } +gb_internal T ptr_set_add(PtrSet *s, T ptr) { + ptr_set_update(s, ptr); + return ptr; } + template gb_internal void ptr_set_remove(PtrSet *s, T ptr) { - MapFindResult fr = ptr_set__find(s, ptr); - if (fr.entry_index != MAP_SENTINEL) { - ptr_set__erase(s, fr); + isize index = ptr_set__find(s, ptr); + if (index >= 0) { + GB_ASSERT(s->count > 0); + s->keys[index] = PtrSet::TOMBSTONE; + s->count--; } } template gb_internal gb_inline void ptr_set_clear(PtrSet *s) { - array_clear(&s->entries); - for (isize i = 0; i < s->hashes.count; i++) { - s->hashes.data[i] = MAP_SENTINEL; - } + s->count = 0; + gb_zero_size(s->keys, s->capacity*gb_size_of(T)); } - -template -gb_internal PtrSetEntry *begin(PtrSet &m) noexcept { - return m.entries.data; -} template -gb_internal PtrSetEntry const *begin(PtrSet const &m) noexcept { - return m.entries.data; -} +struct PtrSetIterator { + PtrSet *set; + usize index; + + PtrSetIterator &operator++() noexcept { + for (;;) { + ++index; + if (set->capacity == index) { + return *this; + } + T key = set->keys[index]; + if (key != nullptr && key != PtrSet::TOMBSTONE) { + return *this; + } + } + } + + bool operator==(PtrSetIterator const 
&other) const noexcept { + return this->set == other.set && this->index == other.index; + } + + + operator T *() const { + return &set->keys[index]; + } +}; template -gb_internal PtrSetEntry *end(PtrSet &m) noexcept { - return m.entries.data + m.entries.count; +gb_internal PtrSetIterator begin(PtrSet &set) noexcept { + usize index = 0; + while (index < set.capacity) { + T key = set.keys[index]; + if (key != nullptr && key != PtrSet::TOMBSTONE) { + break; + } + index++; + } + return PtrSetIterator{&set, index}; } - template -gb_internal PtrSetEntry const *end(PtrSet const &m) noexcept { - return m.entries.data + m.entries.count; +gb_internal PtrSetIterator end(PtrSet &set) noexcept { + return PtrSetIterator{&set, set.capacity}; } \ No newline at end of file diff --git a/src/threading.cpp b/src/threading.cpp index 27a17112e..bf298e024 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -699,13 +699,13 @@ extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value) gb_internal void futex_signal(Futex *f) { for (;;) { int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, 0); - if (ret >= 0) { + if (ret == 0) { return; } - if (ret == EINTR || ret == EFAULT) { + if (ret == -EINTR || ret == -EFAULT) { continue; } - if (ret == ENOENT) { + if (ret == -ENOENT) { return; } GB_PANIC("Failed in futex wake!\n"); @@ -716,13 +716,13 @@ gb_internal void futex_broadcast(Futex *f) { for (;;) { enum { ULF_WAKE_ALL = 0x00000100 }; int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, f, 0); - if (ret >= 0) { + if (ret == 0) { return; } - if (ret == EINTR || ret == EFAULT) { + if (ret == -EINTR || ret == -EFAULT) { continue; } - if (ret == ENOENT) { + if (ret == -ENOENT) { return; } GB_PANIC("Failed in futex wake!\n"); @@ -732,16 +732,16 @@ gb_internal void futex_broadcast(Futex *f) { gb_internal void futex_wait(Futex *f, Footex val) { for (;;) { int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, val, 0); - if (ret >= 0) { + if (ret == 0) { if (*f != val) { return; } continue; } - if (ret == EINTR || ret == EFAULT) { - continue; + if (ret == -EINTR || ret == -EFAULT) { + -continue; } - if (ret == ENOENT) { + if (ret == -ENOENT) { return; } -- cgit v1.2.3
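The ptr_set.cpp rewrite in the patch above replaces the old hashes/entries pair with a single open-addressed key array: nullptr marks an empty slot, an all-ones pointer value (TOMBSTONE) marks a slot vacated by removal, lookups probe linearly from `hash & (capacity - 1)`, and the table grows by rehashing every live key once it is roughly 75% full. What follows is a minimal standalone sketch of that scheme, not the compiler's own code; DemoPtrSet, its hash mix, and the helper names are illustrative assumptions, and the real implementation uses its own allocator and ptr_map_hash_key.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct DemoPtrSet {
	void **keys     = nullptr;  // flat array of keys: the only allocation
	size_t count    = 0;        // live keys (tombstones are not counted)
	size_t capacity = 0;        // always zero or a power of two

	static void *tombstone() { return reinterpret_cast<void *>(~uintptr_t(0)); }

	static size_t hash(void const *p) {
		// Any reasonably mixing pointer hash works; this one is illustrative.
		uintptr_t x = reinterpret_cast<uintptr_t>(p);
		x ^= x >> 33; x *= 0xff51afd7ed558ccdULL; x ^= x >> 33;
		return static_cast<size_t>(x);
	}

	void init(size_t cap) {
		capacity = cap;  // power of two, so `hash & (capacity - 1)` wraps the probe
		count    = 0;
		keys     = static_cast<void **>(calloc(capacity, sizeof(void *)));  // zeroed, so every slot starts empty
	}

	// Linear probing: stop on a match or on a truly empty slot; step over tombstones.
	ptrdiff_t find(void const *p) const {
		if (count == 0) return -1;
		size_t mask = capacity - 1;
		size_t i = hash(p) & mask;
		for (size_t n = 0; n < capacity; n++) {
			void *k = keys[i];
			if (k == p)       return static_cast<ptrdiff_t>(i);
			if (k == nullptr) return -1;
			i = (i + 1) & mask;
		}
		return -1;
	}

	// Returns true if the key was already present (mirrors ptr_set_update's contract).
	bool insert(void *p) {
		if (find(p) >= 0) return true;
		if (keys == nullptr)                     init(16);
		else if (4 * (count + 1) > 3 * capacity) grow();  // keep load factor <= 0.75

		size_t mask = capacity - 1;
		for (size_t i = hash(p) & mask; ; i = (i + 1) & mask) {
			if (keys[i] == nullptr || keys[i] == tombstone()) {  // tombstones are reusable
				keys[i] = p;
				count++;
				return false;
			}
		}
	}

	// Removal leaves a tombstone so probe chains passing through this slot stay intact.
	void remove(void *p) {
		ptrdiff_t i = find(p);
		if (i >= 0) {
			keys[i] = tombstone();
			count--;
		}
	}

	// Growing rehashes every live key into a table twice the size,
	// which also discards any accumulated tombstones.
	void grow() {
		DemoPtrSet bigger;
		bigger.init(capacity ? capacity * 2 : 16);
		for (size_t i = 0; i < capacity; i++) {
			void *k = keys[i];
			if (k != nullptr && k != tombstone()) bigger.insert(k);
		}
		free(keys);
		*this = bigger;
	}
};

int main() {
	DemoPtrSet set;
	int a, b, c;
	assert(!set.insert(&a));   // freshly added
	assert(!set.insert(&b));
	assert(set.insert(&a));    // already present
	set.remove(&b);
	assert(set.find(&b) < 0);
	assert(!set.insert(&c));
	printf("live keys: %zu\n", set.count);
}

Rehashing into a fresh table on growth is also what throws away accumulated tombstones, which matches how the patch's ptr_set_grow builds a new set from the old one and asserts that the live counts agree before swapping it in place.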
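The patch also replaces the old entry-pointer begin/end with a PtrSetIterator so a set can still be walked with a range-based for loop even though live keys are now scattered through the array: begin fast-forwards to the first real key, and operator++ skips empty and tombstone slots. The sketch below shows the same skipping pattern over a plain pointer array; the LiveKeyRange/LiveKeyIter names and the operator!=/operator* shape are simplified assumptions, whereas the patch's iterator also compares the owning set and converts to T *.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static void *const TOMBSTONE = reinterpret_cast<void *>(~uintptr_t(0));

struct LiveKeyIter {
	void **keys;
	size_t capacity;
	size_t index;

	// Advance to the next slot holding a real key, skipping empties and tombstones.
	void skip_dead() {
		while (index < capacity && (keys[index] == nullptr || keys[index] == TOMBSTONE)) {
			index++;
		}
	}
	LiveKeyIter &operator++() { index++; skip_dead(); return *this; }
	bool operator!=(LiveKeyIter const &other) const { return index != other.index; }
	void *operator*() const { return keys[index]; }
};

struct LiveKeyRange {
	void **keys;
	size_t capacity;
	LiveKeyIter begin() const { LiveKeyIter it{keys, capacity, 0}; it.skip_dead(); return it; }
	LiveKeyIter end()   const { return LiveKeyIter{keys, capacity, capacity}; }
};

int main() {
	int a, b;
	void *keys[8] = {};   // open-addressed key array: unset slots are nullptr
	keys[2] = &a;
	keys[5] = TOMBSTONE;  // a removed key's slot, which iteration must skip
	keys[6] = &b;
	for (void *p : LiveKeyRange{keys, 8}) {
		printf("live key: %p\n", p);
	}
}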