author    gingerBill <gingerBill@users.noreply.github.com>  2023-01-11 22:14:53 +0000
committer GitHub <noreply@github.com>                       2023-01-11 22:14:53 +0000
commit    320062157f06d979db926fcbf407bbbdcc3028c1 (patch)
tree      770bb60802ff24cc66c8e5e5837819969dc84cd6 /src
parent    86511d44e46b6271b01df2cd1ebb83b5496e143c (diff)
parent    d7d6608142c8e169a7856c9e5965619809653903 (diff)
Merge pull request #2288 from odin-lang/compiler-improvements-2023-01
Multithreading Compiler Improvements 2023-01
Diffstat (limited to 'src')
-rw-r--r--  src/build_settings.cpp        |  24
-rw-r--r--  src/check_builtin.cpp         |  24
-rw-r--r--  src/check_decl.cpp            | 113
-rw-r--r--  src/check_expr.cpp            | 327
-rw-r--r--  src/check_stmt.cpp            |  89
-rw-r--r--  src/check_type.cpp            | 124
-rw-r--r--  src/checker.cpp               | 850
-rw-r--r--  src/checker.hpp               |  82
-rw-r--r--  src/common.cpp                |   4
-rw-r--r--  src/common_memory.cpp         |  41
-rw-r--r--  src/docs_writer.cpp           |  11
-rw-r--r--  src/entity.cpp                |   6
-rw-r--r--  src/error.cpp                 |   4
-rw-r--r--  src/llvm_backend.cpp          | 977
-rw-r--r--  src/llvm_backend.hpp          |  22
-rw-r--r--  src/llvm_backend_debug.cpp    |   7
-rw-r--r--  src/llvm_backend_expr.cpp     |  18
-rw-r--r--  src/llvm_backend_general.cpp  |  70
-rw-r--r--  src/llvm_backend_opt.cpp      |   3
-rw-r--r--  src/llvm_backend_proc.cpp     |  12
-rw-r--r--  src/llvm_backend_stmt.cpp     |  90
-rw-r--r--  src/llvm_backend_type.cpp     |   9
-rw-r--r--  src/main.cpp                  |  65
-rw-r--r--  src/parser.cpp                | 404
-rw-r--r--  src/parser.hpp                |   6
-rw-r--r--  src/path.cpp                  |  12
-rw-r--r--  src/ptr_map.cpp               | 115
-rw-r--r--  src/ptr_set.cpp               | 313
-rw-r--r--  src/queue.cpp                 |   2
-rw-r--r--  src/string.cpp                |   5
-rw-r--r--  src/string_map.cpp            |  46
-rw-r--r--  src/string_set.cpp            |  33
-rw-r--r--  src/thread_pool.cpp           | 130
-rw-r--r--  src/threading.cpp             | 524
-rw-r--r--  src/types.cpp                 | 189
35 files changed, 2594 insertions(+), 2157 deletions(-)
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index a8abcba67..609a010de 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -291,13 +291,14 @@ struct BuildContext {
bool show_error_line;
bool ignore_lazy;
+ bool ignore_llvm_build;
bool use_subsystem_windows;
bool ignore_microsoft_magic;
bool linker_map_file;
bool use_separate_modules;
- bool threaded_checker;
+ bool no_threaded_checker;
bool show_debug_messages;
@@ -936,16 +937,20 @@ gb_global BlockingMutex fullpath_mutex;
#if defined(GB_SYSTEM_WINDOWS)
gb_internal String path_to_fullpath(gbAllocator a, String s) {
String result = {};
- mutex_lock(&fullpath_mutex);
- defer (mutex_unlock(&fullpath_mutex));
String16 string16 = string_to_string16(heap_allocator(), s);
defer (gb_free(heap_allocator(), string16.text));
- DWORD len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr);
+ DWORD len;
+
+ mutex_lock(&fullpath_mutex);
+
+ len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr);
if (len != 0) {
wchar_t *text = gb_alloc_array(permanent_allocator(), wchar_t, len+1);
GetFullPathNameW(&string16[0], len, text, nullptr);
+ mutex_unlock(&fullpath_mutex);
+
text[len] = 0;
result = string16_to_string(a, make_string16(text, len));
result = string_trim_whitespace(result);
@@ -956,6 +961,8 @@ gb_internal String path_to_fullpath(gbAllocator a, String s) {
result.text[i] = '/';
}
}
+ } else {
+ mutex_unlock(&fullpath_mutex);
}
return result;
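
The two hunks above shrink the critical section in path_to_fullpath: the mutex now covers only the GetFullPathNameW calls instead of the whole function, and is released explicitly on both the success and failure paths. A minimal sketch of the same narrowing, with std::mutex and POSIX realpath standing in for the compiler's BlockingMutex and the Win32 call (an illustrative analogue, not the actual implementation):

// Sketch only: std::mutex and realpath stand in for BlockingMutex and
// the GetFullPathNameW pair used in the hunk above.
#include <algorithm>
#include <mutex>
#include <string>
#include <stdlib.h>

static std::mutex fullpath_mutex;

std::string to_fullpath(const std::string &s) {
    char *text = nullptr;
    {
        // Serialize only the OS call, not the post-processing.
        std::lock_guard<std::mutex> lock(fullpath_mutex);
        text = realpath(s.c_str(), nullptr);
    }
    if (text == nullptr) return {};
    std::string result = text;
    free(text);
    // Normalization happens outside the lock, as in the hunk above.
    std::replace(result.begin(), result.end(), '\\', '/');
    return result;
}

A scoped guard also sidesteps the hunk's manual unlock-on-both-branches pattern, which is easy to get wrong when new early returns are added later.
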
@@ -1325,11 +1332,10 @@ gb_internal void enable_target_feature(TokenPos pos, String const &target_featur
gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bool with_quotes) {
isize len = 0;
isize i = 0;
- for (auto const &entry : build_context.target_features_set) {
+ for (String const &feature : build_context.target_features_set) {
if (i != 0) {
len += 1;
}
- String feature = entry.value;
len += feature.len;
if (with_quotes) len += 2;
i += 1;
@@ -1337,13 +1343,12 @@ gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bo
char *features = gb_alloc_array(allocator, char, len+1);
len = 0;
i = 0;
- for (auto const &entry : build_context.target_features_set) {
+ for (String const &feature : build_context.target_features_set) {
if (i != 0) {
features[len++] = ',';
}
if (with_quotes) features[len++] = '"';
- String feature = entry.value;
gb_memmove(features + len, feature.text, feature.len);
len += feature.len;
if (with_quotes) features[len++] = '"';
@@ -1362,8 +1367,7 @@ gb_internal bool init_build_paths(String init_filename) {
// NOTE(Jeroen): We're pre-allocating BuildPathCOUNT slots so that certain paths are always at the same enumerated index.
array_init(&bc->build_paths, permanent_allocator(), BuildPathCOUNT);
- string_set_init(&bc->target_features_set, heap_allocator(), 1024);
- mutex_init(&bc->target_features_mutex);
+ string_set_init(&bc->target_features_set, 1024);
// [BuildPathMainPackage] Turn given init path into a `Path`, which includes normalizing it into a full path.
bc->build_paths[BuildPath_Main_Package] = path_from_string(ha, init_filename);
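
The rewritten loops in target_features_set_to_cstring now range directly over String values instead of unpacking entry.value, which implies StringSet's iterator was changed to yield the stored string itself. A minimal sketch of that iterator shape (the storage and field names here are assumptions, not the compiler's actual layout):

#include <string>
#include <unordered_set>

// Sketch: begin()/end() forward to the underlying storage, so call sites
// can write `for (const std::string &feature : set)` directly.
struct StringSet {
    std::unordered_set<std::string> entries;  // assumed storage
    auto begin() const { return entries.begin(); }
    auto end() const { return entries.end(); }
};
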
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 99d956f5e..294bc7da8 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -96,8 +96,7 @@ gb_internal void check_or_else_expr_no_value_error(CheckerContext *c, String con
gbString th = nullptr;
if (type_hint != nullptr) {
GB_ASSERT(bsrc->kind == Type_Union);
- for_array(i, bsrc->Union.variants) {
- Type *vt = bsrc->Union.variants[i];
+ for (Type *vt : bsrc->Union.variants) {
if (are_types_identical(vt, type_hint)) {
th = type_to_string(type_hint);
break;
@@ -198,8 +197,7 @@ gb_internal void add_objc_proc_type(CheckerContext *c, Ast *call, Type *return_t
{
auto variables = array_make<Entity *>(permanent_allocator(), 0, param_types.count);
- for_array(i, param_types) {
- Type *type = param_types[i];
+ for (Type *type : param_types) {
Entity *param = alloc_entity_param(scope, blank_token, type, false, true);
array_add(&variables, param);
}
@@ -1110,7 +1108,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
new_cache->path = path;
new_cache->data = data;
new_cache->file_error = file_error;
- string_map_init(&new_cache->hashes, heap_allocator(), 32);
+ string_map_init(&new_cache->hashes, 32);
string_map_set(&c->info->load_file_cache, path, new_cache);
if (cache_) *cache_ = new_cache;
} else {
@@ -1120,8 +1118,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
}
});
- char *c_str = alloc_cstring(heap_allocator(), path);
- defer (gb_free(heap_allocator(), c_str));
+ char *c_str = alloc_cstring(temporary_allocator(), path);
gbFile f = {};
if (cache == nullptr) {
@@ -3071,8 +3068,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
bool fail = false;
- for_array(i, ce->args) {
- Ast *arg = ce->args[i];
+ for (Ast *arg : ce->args) {
bool mix = false;
if (first_is_field_value) {
mix = arg->kind != Ast_FieldValue;
@@ -3086,11 +3082,10 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
}
}
StringSet name_set = {};
- string_set_init(&name_set, heap_allocator(), 2*ce->args.count);
+ string_set_init(&name_set, 2*ce->args.count);
- for_array(i, ce->args) {
+ for (Ast *arg : ce->args) {
String name = {};
- Ast *arg = ce->args[i];
if (arg->kind == Ast_FieldValue) {
Ast *ename = arg->FieldValue.field;
if (!fail && ename->kind != Ast_Ident) {
@@ -3577,7 +3572,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
Entity *base_type_entity = alloc_entity_type_name(scope, token, elem, EntityState_Resolved);
add_entity(c, scope, nullptr, base_type_entity);
- add_type_info_type(c, soa_struct);
+ // add_type_info_type(c, soa_struct);
operand->type = soa_struct;
break;
@@ -4987,8 +4982,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
bool is_variant = false;
- for_array(i, u->Union.variants) {
- Type *vt = u->Union.variants[i];
+ for (Type *vt : u->Union.variants) {
if (are_types_identical(v, vt)) {
is_variant = true;
break;
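
Throughout this file (and the rest of the merge), string_map_init, string_set_init, and ptr_set_init lose their explicit allocator argument, and one alloc_cstring call moves from the heap allocator plus a deferred free to a temporary allocator. A capacity-only init sketch, under the assumption that the containers now bind a default allocator internally rather than taking one per call site:

#include <cstdlib>

// Sketch (assumption): init takes only a capacity and the container picks
// its allocator itself, so callers no longer thread one through.
struct StringSet {
    const char **slots = nullptr;
    std::size_t cap = 0;
    void init(std::size_t capacity) {
        cap = capacity < 16 ? 16 : capacity;
        slots = (const char **)std::calloc(cap, sizeof(*slots));
    }
    void destroy() {
        std::free(slots);
        slots = nullptr;
        cap = 0;
    }
};
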
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 59beae56d..d4ae9c59d 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -45,7 +45,7 @@ gb_internal Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *o
if (operand->mode == Addressing_Type) {
if (e->type != nullptr && is_type_typeid(e->type)) {
add_type_info_type(ctx, operand->type);
- add_type_and_value(ctx->info, operand->expr, Addressing_Value, e->type, exact_value_typeid(operand->type));
+ add_type_and_value(ctx, operand->expr, Addressing_Value, e->type, exact_value_typeid(operand->type));
return e->type;
} else {
gbString t = type_to_string(operand->type);
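
One change repeated throughout the merge is visible here: add_type_and_value now takes the CheckerContext rather than the shared CheckerInfo. A plausible reading (an assumption, not stated in the diff) is that the callee needs per-context state, so results can be recorded without contending on the global structure:

#include <unordered_map>

struct CheckerInfo { /* shared, mutex-guarded tables */ };

struct CheckerContext {
    CheckerInfo *info = nullptr;
    // Hypothetical per-context buffer a context-taking callee could use.
    std::unordered_map<const void *, int> local_types;
};

// was: add_type_and_value(CheckerInfo *info, const void *expr, int mode)
void add_type_and_value(CheckerContext *c, const void *expr, int mode) {
    c->local_types[expr] = mode;  // record locally; merge into c->info later
}
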
@@ -354,8 +354,7 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr,
Type *t = base_type(e->type);
if (t->kind == Type_Enum) {
- for_array(i, t->Enum.fields) {
- Entity *f = t->Enum.fields[i];
+ for (Entity *f : t->Enum.fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -382,8 +381,8 @@ gb_internal void override_entity_in_scope(Entity *original_entity, Entity *new_e
if (found_scope == nullptr) {
return;
}
- mutex_lock(&found_scope->mutex);
- defer (mutex_unlock(&found_scope->mutex));
+ rw_mutex_lock(&found_scope->mutex);
+ defer (rw_mutex_unlock(&found_scope->mutex));
// IMPORTANT NOTE(bill, 2021-04-10): Overriding behaviour was flawed in that the
// original entity was still used when checked, but the checking was only
@@ -986,7 +985,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
GB_ASSERT(pl->body->kind == Ast_BlockStmt);
if (!pt->is_polymorphic) {
- check_procedure_later(ctx, ctx->file, e->token, d, proc_type, pl->body, pl->tags);
+ check_procedure_later(ctx->checker, ctx->file, e->token, d, proc_type, pl->body, pl->tags);
}
} else if (!is_foreign) {
if (e->Procedure.is_export) {
@@ -1235,10 +1234,9 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity,
pg_entity->type = t_invalid;
PtrSet<Entity *> entity_set = {};
- ptr_set_init(&entity_set, heap_allocator(), 2*pg->args.count);
+ ptr_set_init(&entity_set, 2*pg->args.count);
- for_array(i, pg->args) {
- Ast *arg = pg->args[i];
+ for (Ast *arg : pg->args) {
Entity *e = nullptr;
Operand o = {};
if (arg->kind == Ast_Ident) {
@@ -1271,7 +1269,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity,
ptr_set_destroy(&entity_set);
- for_array(j, pge->entities) {
+ for (isize j = 0; j < pge->entities.count; j++) {
Entity *p = pge->entities[j];
if (p->type == t_invalid) {
// NOTE(bill): This invalid overload has already been handled
@@ -1413,15 +1411,46 @@ end:;
}
+gb_internal void add_deps_from_child_to_parent(DeclInfo *decl) {
+ if (decl && decl->parent) {
+ Scope *ps = decl->parent->scope;
+ if (ps->flags & (ScopeFlag_File | ScopeFlag_Pkg | ScopeFlag_Global)) {
+ return;
+ } else {
+ // NOTE(bill): Add the dependencies from the procedure literal (lambda)
+ // But only at the procedure level
+ rw_mutex_shared_lock(&decl->deps_mutex);
+ rw_mutex_lock(&decl->parent->deps_mutex);
+
+ for (Entity *e : decl->deps) {
+ ptr_set_add(&decl->parent->deps, e);
+ }
+
+ rw_mutex_unlock(&decl->parent->deps_mutex);
+ rw_mutex_shared_unlock(&decl->deps_mutex);
+
+ rw_mutex_shared_lock(&decl->type_info_deps_mutex);
+ rw_mutex_lock(&decl->parent->type_info_deps_mutex);
+
+ for (Type *t : decl->type_info_deps) {
+ ptr_set_add(&decl->parent->type_info_deps, t);
+ }
+
+ rw_mutex_unlock(&decl->parent->type_info_deps_mutex);
+ rw_mutex_shared_unlock(&decl->type_info_deps_mutex);
+ }
+ }
+}
+
struct ProcUsingVar {
Entity *e;
Entity *uvar;
};
-gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *type, Ast *body) {
+gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *type, Ast *body) {
if (body == nullptr) {
- return;
+ return false;
}
GB_ASSERT(body->kind == Ast_BlockStmt);
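
add_deps_from_child_to_parent above encodes a consistent lock discipline: a shared (reader) lock on the child's set being copied from, an exclusive (writer) lock on the parent's set being copied into, with unlocks in reverse order. The same discipline with standard types (a sketch; the compiler's RwMutex and PtrSet are internal):

#include <shared_mutex>
#include <unordered_set>

struct Entity;

struct DepSet {
    std::shared_mutex mutex;
    std::unordered_set<Entity *> deps;
};

// Reader lock on the source, writer lock on the destination, acquired
// child-then-parent so every caller uses the same ordering.
void merge_deps(DepSet &child, DepSet &parent) {
    std::shared_lock<std::shared_mutex> read_child(child.mutex);
    std::unique_lock<std::shared_mutex> write_parent(parent.mutex);
    parent.deps.insert(child.deps.begin(), child.deps.end());
}
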
@@ -1462,8 +1491,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
{
if (type->Proc.param_count > 0) {
TypeTuple *params = &type->Proc.params->Tuple;
- for_array(i, params->variables) {
- Entity *e = params->variables[i];
+ for (Entity *e : params->variables) {
if (e->kind != Entity_Variable) {
continue;
}
@@ -1471,7 +1499,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
continue;
}
if (is_blank_ident(e->token)) {
- error(e->token, "'using' a procedure parameter requires a non blank identifier");
+ error(e->token, "'using' a procedure parameter requires a non blank identifier");
break;
}
@@ -1481,7 +1509,8 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
if (t->kind == Type_Struct) {
Scope *scope = t->Struct.scope;
GB_ASSERT(scope != nullptr);
- MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
+ rw_mutex_lock(&scope->mutex);
+ for (auto const &entry : scope->elements) {
Entity *f = entry.value;
if (f->kind == Entity_Variable) {
Entity *uvar = alloc_entity_using_variable(e, f->token, f->type, nullptr);
@@ -1491,6 +1520,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
array_add(&using_entities, puv);
}
}
+ rw_mutex_unlock(&scope->mutex);
} else {
error(e->token, "'using' can only be applied to variables of type struct");
break;
@@ -1499,45 +1529,50 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
}
}
- MUTEX_GUARD_BLOCK(ctx->scope->mutex) for_array(i, using_entities) {
- Entity *e = using_entities[i].e;
- Entity *uvar = using_entities[i].uvar;
- Entity *prev = scope_insert(ctx->scope, uvar, false);
+ rw_mutex_lock(&ctx->scope->mutex);
+ for (auto const &entry : using_entities) {
+ Entity *e = entry.e;
+ Entity *uvar = entry.uvar;
+ Entity *prev = scope_insert_no_mutex(ctx->scope, uvar);
if (prev != nullptr) {
error(e->token, "Namespace collision while 'using' procedure argument '%.*s' of: %.*s", LIT(e->token.string), LIT(prev->token.string));
error_line("%.*s != %.*s\n", LIT(uvar->token.string), LIT(prev->token.string));
break;
}
}
+ rw_mutex_unlock(&ctx->scope->mutex);
bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated);
if (!where_clause_ok) {
// NOTE(bill, 2019-08-31): Don't check the body as the where clauses failed
- return;
+ return false;
}
check_open_scope(ctx, body);
{
- for_array(i, using_entities) {
- Entity *uvar = using_entities[i].uvar;
+ for (auto const &entry : using_entities) {
+ Entity *uvar = entry.uvar;
Entity *prev = scope_insert(ctx->scope, uvar);
gb_unused(prev);
// NOTE(bill): Don't err here
}
- GB_ASSERT(decl->defer_use_checked == false);
+ GB_ASSERT(decl->proc_checked_state != ProcCheckedState_Checked);
+ if (decl->defer_use_checked) {
+ GB_ASSERT(is_type_polymorphic(type, true));
+ error(token, "Defer Use Checked: %.*s", LIT(decl->entity->token.string));
+ GB_ASSERT(decl->defer_use_checked == false);
+ }
check_stmt_list(ctx, bs->stmts, Stmt_CheckScopeDecls);
decl->defer_use_checked = true;
- for_array(i, bs->stmts) {
- Ast *stmt = bs->stmts[i];
+ for (Ast *stmt : bs->stmts) {
if (stmt->kind == Ast_ValueDecl) {
ast_node(vd, ValueDecl, stmt);
- for_array(j, vd->names) {
- Ast *name = vd->names[j];
+ for (Ast *name : vd->names) {
if (!is_blank_ident(name)) {
if (name->kind == Ast_Ident) {
GB_ASSERT(name->Ident.entity != nullptr);
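
The using_entities hunk above swaps scope_insert for scope_insert_no_mutex once the caller already holds scope->mutex exclusively; with a non-recursive lock, a self-locking insert would deadlock. A sketch of the caller-locks variant (the names mirror the diff, the bodies are assumptions):

#include <shared_mutex>
#include <string>
#include <unordered_map>

struct Entity;

struct Scope {
    std::shared_mutex mutex;
    std::unordered_map<std::string, Entity *> elements;
};

// Caller already holds scope->mutex exclusively; returns any prior entry.
Entity *scope_insert_no_mutex(Scope *s, const std::string &name, Entity *e) {
    auto [it, inserted] = s->elements.try_emplace(name, e);
    return inserted ? nullptr : it->second;
}

// Self-locking variant for callers that do not hold the lock.
Entity *scope_insert(Scope *s, const std::string &name, Entity *e) {
    std::unique_lock<std::shared_mutex> lock(s->mutex);
    return scope_insert_no_mutex(s, name, e);
}
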
@@ -1572,25 +1607,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
check_scope_usage(ctx->checker, ctx->scope);
- if (decl->parent != nullptr) {
- Scope *ps = decl->parent->scope;
- if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) {
- return;
- } else {
- mutex_lock(&ctx->info->deps_mutex);
+ add_deps_from_child_to_parent(decl);
- // NOTE(bill): Add the dependencies from the procedure literal (lambda)
- // But only at the procedure level
- for (auto const &entry : decl->deps) {
- Entity *e = entry.ptr;
- ptr_set_add(&decl->parent->deps, e);
- }
- for (auto const &entry : decl->type_info_deps) {
- Type *t = entry.ptr;
- ptr_set_add(&decl->parent->type_info_deps, t);
- }
-
- mutex_unlock(&ctx->info->deps_mutex);
- }
- }
+ return true;
}
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index ed1ddd1f1..e0519d26b 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -86,7 +86,6 @@ gb_internal Entity * find_polymorphic_record_entity (CheckerContext *c, Type *or
gb_internal void check_not_tuple (CheckerContext *c, Operand *operand);
gb_internal void convert_to_typed (CheckerContext *c, Operand *operand, Type *target_type);
gb_internal gbString expr_to_string (Ast *expression);
-gb_internal void check_proc_body (CheckerContext *c, Token token, DeclInfo *decl, Type *type, Ast *body);
gb_internal void update_untyped_expr_type (CheckerContext *c, Ast *e, Type *type, bool final);
gb_internal bool check_is_terminating (Ast *node, String const &label);
gb_internal bool check_has_break (Ast *stmt, String const &label, bool implicit);
@@ -147,8 +146,8 @@ gb_internal void check_did_you_mean_print(DidYouMeanAnswers *d, char const *pref
auto results = did_you_mean_results(d);
if (results.count != 0) {
error_line("\tSuggestion: Did you mean?\n");
- for_array(i, results) {
- String const &target = results[i].target;
+ for (auto const &result : results) {
+ String const &target = result.target;
error_line("\t\t%s%.*s\n", prefix, LIT(target));
// error_line("\t\t%.*s %td\n", LIT(target), results[i].distance);
}
@@ -167,19 +166,16 @@ gb_internal void populate_check_did_you_mean_objc_entity(StringSet *set, Entity
GB_ASSERT(t->kind == Type_Struct);
if (is_type) {
- for_array(i, objc_metadata->type_entries) {
- String name = objc_metadata->type_entries[i].name;
- string_set_add(set, name);
+ for (auto const &entry : objc_metadata->type_entries) {
+ string_set_add(set, entry.name);
}
} else {
- for_array(i, objc_metadata->value_entries) {
- String name = objc_metadata->value_entries[i].name;
- string_set_add(set, name);
+ for (auto const &entry : objc_metadata->value_entries) {
+ string_set_add(set, entry.name);
}
}
- for_array(i, t->Struct.fields) {
- Entity *f = t->Struct.fields[i];
+ for (Entity *f : t->Struct.fields) {
if (f->flags & EntityFlag_Using && f->type != nullptr) {
if (f->type->kind == Type_Named && f->type->Named.type_name) {
populate_check_did_you_mean_objc_entity(set, f->type->Named.type_name, is_type);
@@ -194,19 +190,17 @@ gb_internal void check_did_you_mean_objc_entity(String const &name, Entity *e, b
GB_ASSERT(e->kind == Entity_TypeName);
GB_ASSERT(e->TypeName.objc_metadata != nullptr);
auto *objc_metadata = e->TypeName.objc_metadata;
- mutex_lock(objc_metadata->mutex);
- defer (mutex_unlock(objc_metadata->mutex));
+ MUTEX_GUARD(objc_metadata->mutex);
StringSet set = {};
- string_set_init(&set, heap_allocator());
defer (string_set_destroy(&set));
populate_check_did_you_mean_objc_entity(&set, e, is_type);
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), set.entries.count, name);
defer (did_you_mean_destroy(&d));
- for (auto const &entry : set) {
- did_you_mean_append(&d, entry.value);
+ for (String const &target : set) {
+ did_you_mean_append(&d, target);
}
check_did_you_mean_print(&d, prefix);
}
@@ -217,8 +211,8 @@ gb_internal void check_did_you_mean_type(String const &name, Array<Entity *> con
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
defer (did_you_mean_destroy(&d));
- for_array(i, fields) {
- did_you_mean_append(&d, fields[i]->token.string);
+ for (Entity *e : fields) {
+ did_you_mean_append(&d, e->token.string);
}
check_did_you_mean_print(&d, prefix);
}
@@ -230,8 +224,8 @@ gb_internal void check_did_you_mean_type(String const &name, Slice<Entity *> con
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
defer (did_you_mean_destroy(&d));
- for_array(i, fields) {
- did_you_mean_append(&d, fields[i]->token.string);
+ for (Entity *e : fields) {
+ did_you_mean_append(&d, e->token.string);
}
check_did_you_mean_print(&d, prefix);
}
@@ -242,10 +236,12 @@ gb_internal void check_did_you_mean_scope(String const &name, Scope *scope, char
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
defer (did_you_mean_destroy(&d));
- MUTEX_GUARD_BLOCK(&scope->mutex) for (auto const &entry : scope->elements) {
+ rw_mutex_shared_lock(&scope->mutex);
+ for (auto const &entry : scope->elements) {
Entity *e = entry.value;
did_you_mean_append(&d, e->token.string);
}
+ rw_mutex_shared_unlock(&scope->mutex);
check_did_you_mean_print(&d, prefix);
}
@@ -370,9 +366,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
GB_ASSERT(dst == nullptr);
}
- mutex_lock(&info->gen_procs_mutex);
- defer (mutex_unlock(&info->gen_procs_mutex));
-
if (!src->Proc.is_polymorphic || src->Proc.is_poly_specialized) {
return false;
}
@@ -417,8 +410,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
CheckerContext nctx = *old_c;
- nctx.procs_to_check_queue = old_c->procs_to_check_queue;
-
Scope *scope = create_scope(info, base_entity->scope);
scope->flags |= ScopeFlag_Proc;
nctx.scope = scope;
@@ -439,21 +430,39 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
return false;
}
- auto *found_gen_procs = map_get(&info->gen_procs, base_entity->identifier.load());
- if (found_gen_procs) {
- auto procs = *found_gen_procs;
- for_array(i, procs) {
- Entity *other = procs[i];
+ GenProcsData *gen_procs = nullptr;
+
+ GB_ASSERT(base_entity->identifier.load()->kind == Ast_Ident);
+ GB_ASSERT(base_entity->kind == Entity_Procedure);
+
+ mutex_lock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex
+ gen_procs = base_entity->Procedure.gen_procs;
+ if (gen_procs) {
+ rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex
+
+ mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex
+
+ for (Entity *other : gen_procs->procs) {
Type *pt = base_type(other->type);
if (are_types_identical(pt, final_proc_type)) {
+ rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
+
if (poly_proc_data) {
poly_proc_data->gen_entity = other;
}
return true;
}
}
+
+ rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
+ } else {
+ gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData);
+ gen_procs->procs.allocator = heap_allocator();
+ base_entity->Procedure.gen_procs = gen_procs;
+ mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex
}
+
{
// LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice
bool prev_no_polymorphic_errors = nctx.no_polymorphic_errors;
@@ -466,24 +475,39 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
// LEAK TODO(bill): Cloning this AST may be leaky
Ast *cloned_proc_type_node = clone_ast(pt->node);
success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands);
-
if (!success) {
return false;
}
- if (found_gen_procs) {
- auto procs = *found_gen_procs;
- for_array(i, procs) {
- Entity *other = procs[i];
- Type *pt = base_type(other->type);
- if (are_types_identical(pt, final_proc_type)) {
- if (poly_proc_data) {
- poly_proc_data->gen_entity = other;
- }
- return true;
+ rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex
+ for (Entity *other : gen_procs->procs) {
+ Type *pt = base_type(other->type);
+ if (are_types_identical(pt, final_proc_type)) {
+ rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
+
+ if (poly_proc_data) {
+ poly_proc_data->gen_entity = other;
}
+
+ DeclInfo *decl = other->decl_info;
+ if (decl->proc_checked_state != ProcCheckedState_Checked) {
+ ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo);
+ proc_info->file = other->file;
+ proc_info->token = other->token;
+ proc_info->decl = decl;
+ proc_info->type = other->type;
+ proc_info->body = decl->proc_lit->ProcLit.body;
+ proc_info->tags = other->Procedure.tags;
+ proc_info->generated_from_polymorphic = true;
+ proc_info->poly_def_node = poly_def_node;
+
+ check_procedure_later(nctx.checker, proc_info);
+ }
+
+ return true;
}
}
+ rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
}
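
These hunks replace the global info->gen_procs map (and its single mutex) with a GenProcsData cached on the procedure entity itself: a small entity-level mutex guards only the pointer, and a per-cache rw mutex guards the list of generated specializations. A sketch of that two-lock, lazily-published shape (standard types stand in for the internal BlockingMutex/RwMutex, and the GenProcsData fields are assumptions):

#include <mutex>
#include <shared_mutex>
#include <vector>

struct Entity;

struct GenProcsData {
    std::shared_mutex mutex;      // guards procs; shared locks for scans
    std::vector<Entity *> procs;  // generated specializations
};

struct Procedure {
    std::mutex gen_procs_mutex;   // guards only the pointer below
    GenProcsData *gen_procs = nullptr;
};

GenProcsData *get_or_create_gen_procs(Procedure &p) {
    std::lock_guard<std::mutex> lock(p.gen_procs_mutex);
    if (p.gen_procs == nullptr) {
        p.gen_procs = new GenProcsData{};  // published once, never replaced
    }
    return p.gen_procs;  // subsequent readers take gen_procs->mutex
}

Because the pointer is written once and the list is only appended to afterwards, scans can hold the shared lock after the entity mutex has already been released, which is exactly the hand-off the @entity-mutex and @local-mutex comments annotate.
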
@@ -520,7 +544,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
d->gen_proc_type = final_proc_type;
d->type_expr = pl->type;
d->proc_lit = proc_lit;
- d->proc_checked = false;
+ d->proc_checked_state = ProcCheckedState_Unchecked;
+ d->defer_use_checked = false;
Entity *entity = alloc_entity_procedure(nullptr, token, final_proc_type, tags);
entity->identifier = ident;
@@ -530,7 +555,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
entity->scope = scope->parent;
entity->file = base_entity->file;
entity->pkg = base_entity->pkg;
- entity->flags &= ~EntityFlag_ProcBodyChecked;
+ entity->flags = 0;
+ d->entity = entity;
AstFile *file = nullptr;
{
@@ -541,6 +567,10 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
}
}
+ rw_mutex_lock(&gen_procs->mutex); // @local-mutex
+ array_add(&gen_procs->procs, entity);
+ rw_mutex_unlock(&gen_procs->mutex); // @local-mutex
+
ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo);
proc_info->file = file;
proc_info->token = token;
@@ -551,13 +581,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
proc_info->generated_from_polymorphic = true;
proc_info->poly_def_node = poly_def_node;
- if (found_gen_procs) {
- array_add(found_gen_procs, entity);
- } else {
- auto array = array_make<Entity *>(heap_allocator());
- array_add(&array, entity);
- map_set(&info->gen_procs, base_entity->identifier.load(), array);
- }
if (poly_proc_data) {
poly_proc_data->gen_entity = entity;
@@ -566,7 +589,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
}
// NOTE(bill): Check the newly generated procedure body
- check_procedure_later(&nctx, proc_info);
+ check_procedure_later(nctx.checker, proc_info);
return true;
}
@@ -755,8 +778,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
}
if (is_type_union(dst)) {
- for_array(i, dst->Union.variants) {
- Type *vt = dst->Union.variants[i];
+ for (Type *vt : dst->Union.variants) {
if (are_types_identical(vt, s)) {
return 1;
}
@@ -772,8 +794,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
} else if (is_type_untyped(src)) {
i64 prev_lowest_score = -1;
i64 lowest_score = -1;
- for_array(i, dst->Union.variants) {
- Type *vt = dst->Union.variants[i];
+ for (Type *vt : dst->Union.variants) {
i64 score = check_distance_between_types(c, operand, vt);
if (score >= 0) {
if (lowest_score < 0) {
@@ -817,7 +838,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
PolyProcData poly_proc_data = {};
if (check_polymorphic_procedure_assignment(c, operand, type, operand->expr, &poly_proc_data)) {
Entity *e = poly_proc_data.gen_entity;
- add_type_and_value(c->info, operand->expr, Addressing_Value, e->type, {});
+ add_type_and_value(c, operand->expr, Addressing_Value, e->type, {});
add_entity_use(c, operand->expr, e);
return 4;
}
@@ -1005,8 +1026,8 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
if (type != nullptr && is_type_proc(type)) {
Array<Entity *> procs = proc_group_entities(c, *operand);
// NOTE(bill): These should be done
- for_array(i, procs) {
- Type *t = base_type(procs[i]->type);
+ for (Entity *e : procs) {
+ Type *t = base_type(e->type);
if (t == t_invalid) {
continue;
}
@@ -1014,7 +1035,6 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
x.mode = Addressing_Value;
x.type = t;
if (check_is_assignable_to(c, &x, type)) {
- Entity *e = procs[i];
add_entity_use(c, operand->expr, e);
good = true;
break;
@@ -1047,7 +1067,7 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
if (check_is_assignable_to(c, operand, type)) {
if (operand->mode == Addressing_Type && is_type_typeid(type)) {
add_type_info_type(c, operand->type);
- add_type_and_value(c->info, operand->expr, Addressing_Value, type, exact_value_typeid(operand->type));
+ add_type_and_value(c, operand->expr, Addressing_Value, type, exact_value_typeid(operand->type));
}
} else {
gbString expr_str = expr_to_string(operand->expr);
@@ -1444,7 +1464,7 @@ gb_internal bool check_cycle(CheckerContext *c, Entity *curr, bool report) {
return false;
}
for_array(i, *c->type_path) {
- Entity *prev = (*c->type_path)[i];
+ Entity *prev = c->type_path->data[i];
if (prev == curr) {
if (report) {
error(curr->token, "Illegal declaration cycle of `%.*s`", LIT(curr->token.string));
@@ -1509,8 +1529,8 @@ gb_internal Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *nam
if (type_hint != nullptr && is_type_proc(type_hint)) {
// NOTE(bill): These should be done
- for_array(i, procs) {
- Type *t = base_type(procs[i]->type);
+ for (Entity *proc : procs) {
+ Type *t = base_type(proc->type);
if (t == t_invalid) {
continue;
}
@@ -1518,7 +1538,7 @@ gb_internal Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *nam
x.mode = Addressing_Value;
x.type = t;
if (check_is_assignable_to(c, &x, type_hint)) {
- e = procs[i];
+ e = proc;
add_entity_use(c, n, e);
skip = true;
break;
@@ -2339,7 +2359,7 @@ gb_internal void check_comparison(CheckerContext *c, Operand *x, Operand *y, Tok
if (x->mode == Addressing_Type && is_type_typeid(y->type)) {
add_type_info_type(c, x->type);
add_type_info_type(c, y->type);
- add_type_and_value(c->info, x->expr, Addressing_Value, y->type, exact_value_typeid(x->type));
+ add_type_and_value(c, x->expr, Addressing_Value, y->type, exact_value_typeid(x->type));
x->mode = Addressing_Value;
x->type = t_untyped_bool;
@@ -2347,7 +2367,7 @@ gb_internal void check_comparison(CheckerContext *c, Operand *x, Operand *y, Tok
} else if (is_type_typeid(x->type) && y->mode == Addressing_Type) {
add_type_info_type(c, x->type);
add_type_info_type(c, y->type);
- add_type_and_value(c->info, y->expr, Addressing_Value, x->type, exact_value_typeid(y->type));
+ add_type_and_value(c, y->expr, Addressing_Value, x->type, exact_value_typeid(y->type));
x->mode = Addressing_Value;
x->type = t_untyped_bool;
@@ -3580,7 +3600,7 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type,
if (old == nullptr) {
if (type != nullptr && type != t_invalid) {
if (e->tav.type == nullptr || e->tav.type == t_invalid) {
- add_type_and_value(c->info, e, e->tav.mode, type ? type : e->tav.type, e->tav.value);
+ add_type_and_value(c, e, e->tav.mode, type ? type : e->tav.type, e->tav.value);
if (e->kind == Ast_TernaryIfExpr) {
update_untyped_expr_type(c, e->TernaryIfExpr.x, type, final);
update_untyped_expr_type(c, e->TernaryIfExpr.y, type, final);
@@ -3686,7 +3706,7 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type,
return;
}
- add_type_and_value(c->info, e, old->mode, type, old->value);
+ add_type_and_value(c, e, old->mode, type, old->value);
}
gb_internal void update_untyped_expr_value(CheckerContext *c, Ast *e, ExactValue value) {
@@ -4148,8 +4168,7 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v
if (cl->elems[0]->kind == Ast_FieldValue) {
if (is_type_struct(node->tav.type)) {
bool found = false;
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
continue;
}
@@ -4168,8 +4187,7 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v
value = {};
}
} else if (is_type_array(node->tav.type) || is_type_enumerated_array(node->tav.type)) {
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
continue;
}
@@ -4534,7 +4552,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
operand->mode = Addressing_ProcGroup;
operand->proc_group = entity;
- add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
+ add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
GB_ASSERT_MSG(entity->type != nullptr, "%.*s (%.*s)", LIT(entity->token.string), LIT(entity_strings[entity->kind]));
@@ -4552,8 +4570,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
if (entity->kind == Entity_ProcGroup) {
Array<Entity *> procs = entity->ProcGroup.entities;
bool skip = false;
- for_array(i, procs) {
- Entity *p = procs[i];
+ for (Entity *p : procs) {
Type *t = base_type(p->type);
if (t == t_invalid) {
continue;
@@ -4703,7 +4720,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
}
Entity *swizzle_entity = alloc_entity_variable(nullptr, make_token_ident(field_name), operand->type, EntityState_Resolved);
- add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
+ add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return swizzle_entity;
}
end_of_array_selector_swizzle:;
@@ -4747,7 +4764,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
operand->value = field_value;
operand->type = entity->type;
add_entity_use(c, selector, entity);
- add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
+ add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
@@ -4772,7 +4789,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
operand->value = field_value;
operand->type = entity->type;
add_entity_use(c, selector, entity);
- add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
+ add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
@@ -4860,7 +4877,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
break;
}
- add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
+ add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
@@ -4909,22 +4926,21 @@ gb_internal bool check_identifier_exists(Scope *s, Ast *node, bool nested = fals
gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lhs, isize lhs_count, isize tuple_index, isize tuple_count) {
if (lhs != nullptr && c->decl != nullptr) {
- mutex_lock(&c->info->deps_mutex);
-
for (isize j = 0; (tuple_index + j) < lhs_count && j < tuple_count; j++) {
Entity *e = lhs[tuple_index + j];
if (e != nullptr) {
DeclInfo *decl = decl_info_of_entity(e);
if (decl != nullptr) {
- for (auto const &entry : decl->deps) {
- Entity *dep = entry.ptr;
+ rw_mutex_shared_lock(&decl->deps_mutex);
+ rw_mutex_lock(&c->decl->deps_mutex);
+ for (Entity *dep : decl->deps) {
ptr_set_add(&c->decl->deps, dep);
}
+ rw_mutex_unlock(&c->decl->deps_mutex);
+ rw_mutex_shared_unlock(&decl->deps_mutex);
}
}
}
-
- mutex_unlock(&c->info->deps_mutex);
}
return tuple_count;
}
@@ -4933,7 +4949,7 @@ gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lh
gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array<Operand> const &lhs, Array<Operand> *operands, Slice<Ast *> const &rhs) {
bool optional_ok = false;
isize tuple_index = 0;
- for_array(i, rhs) {
+ for (Ast *rhs_expr : rhs) {
CheckerContext c_ = *ctx;
CheckerContext *c = &c_;
@@ -4945,7 +4961,7 @@ gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array<Operand>
type_hint = lhs[tuple_index].type;
}
- check_expr_base(c, &o, rhs[i], type_hint);
+ check_expr_base(c, &o, rhs_expr, type_hint);
if (o.mode == Addressing_NoValue) {
error_operand_no_value(&o);
o.mode = Addressing_Invalid;
@@ -4997,8 +5013,8 @@ gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array<Operand>
}
} else {
TypeTuple *tuple = &o.type->Tuple;
- for_array(j, tuple->variables) {
- o.type = tuple->variables[j]->type;
+ for (Entity *e : tuple->variables) {
+ o.type = e->type;
array_add(operands, o);
}
@@ -5090,8 +5106,8 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize
}
} else {
TypeTuple *tuple = &o.type->Tuple;
- for_array(j, tuple->variables) {
- o.type = tuple->variables[j]->type;
+ for (Entity *e : tuple->variables) {
+ o.type = e->type;
array_add(operands, o);
}
@@ -5326,7 +5342,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
if (o.mode == Addressing_Type && is_type_typeid(e->type)) {
add_type_info_type(c, o.type);
- add_type_and_value(c->info, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
+ add_type_and_value(c, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
} else if (show_error && is_type_untyped(o.type)) {
update_untyped_expr_type(c, o.expr, t, true);
}
@@ -5377,7 +5393,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
}
if (o.mode == Addressing_Type && is_type_typeid(t)) {
add_type_info_type(c, o.type);
- add_type_and_value(c->info, o.expr, Addressing_Value, t, exact_value_typeid(o.type));
+ add_type_and_value(c, o.expr, Addressing_Value, t, exact_value_typeid(o.type));
} else if (show_error && is_type_untyped(o.type)) {
update_untyped_expr_type(c, o.expr, t, true);
}
@@ -5390,7 +5406,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
data->score = score;
data->result_type = final_proc_type->Proc.results;
data->gen_entity = gen_entity;
- add_type_and_value(c->info, ce->proc, Addressing_Value, final_proc_type, {});
+ add_type_and_value(c, ce->proc, Addressing_Value, final_proc_type, {});
}
return err;
@@ -5434,8 +5450,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
bool *visited = gb_alloc_array(temporary_allocator(), bool, param_count);
auto ordered_operands = array_make<Operand>(temporary_allocator(), param_count);
defer ({
- for_array(i, ordered_operands) {
- Operand const &o = ordered_operands[i];
+ for (Operand const &o : ordered_operands) {
if (o.expr != nullptr) {
call->viral_state_flags |= o.expr->viral_state_flags;
}
@@ -5590,7 +5605,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
if (o->mode == Addressing_Type && is_type_typeid(e->type)) {
add_type_info_type(c, o->type);
- add_type_and_value(c->info, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type));
+ add_type_and_value(c, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type));
}
}
@@ -5598,7 +5613,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
data->score = score;
data->result_type = pt->results;
data->gen_entity = gen_entity;
- add_type_and_value(c->info, ce->proc, Addressing_Value, proc_type, {});
+ add_type_and_value(c, ce->proc, Addressing_Value, proc_type, {});
}
return err;
@@ -5727,7 +5742,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
// in order to improve the type inference system
StringMap<Type *> type_hint_map = {}; // Key: String
- string_map_init(&type_hint_map, heap_allocator(), 2*args.count);
+ string_map_init(&type_hint_map, 2*args.count);
defer (string_map_destroy(&type_hint_map));
Type *ptype = nullptr;
@@ -5753,8 +5768,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
param_tuple = &pt->params->Tuple;
}
if (param_tuple != nullptr) {
- for_array(i, param_tuple->variables) {
- Entity *e = param_tuple->variables[i];
+ for (Entity *e : param_tuple->variables) {
if (is_blank_ident(e->token)) {
continue;
}
@@ -5764,8 +5778,8 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
}
} else {
Array<Entity *> procs = proc_group_entities(c, *operand);
- for_array(j, procs) {
- Type *proc_type = base_type(procs[j]->type);
+ for (Entity *proc : procs) {
+ Type *proc_type = base_type(proc->type);
if (is_type_proc(proc_type)) {
TypeProc *pt = &proc_type->Proc;
TypeTuple *param_tuple = nullptr;
@@ -5775,8 +5789,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
if (param_tuple == nullptr) {
continue;
}
- for_array(i, param_tuple->variables) {
- Entity *e = param_tuple->variables[i];
+ for (Entity *e : param_tuple->variables) {
if (is_blank_ident(e->token)) {
continue;
}
@@ -5840,10 +5853,10 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
if (procs.count > 1) {
isize max_arg_count = args.count;
- for_array(i, args) {
+ for (Ast *arg : args) {
// NOTE(bill): The only thing that may have multiple values
// will be a call expression (assuming `or_return` and `()` will be stripped)
- Ast *arg = strip_or_return_expr(args[i]);
+ arg = strip_or_return_expr(arg);
if (arg && arg->kind == Ast_CallExpr) {
max_arg_count = ISIZE_MAX;
break;
@@ -5906,8 +5919,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
// where the same positional parameter has the same type value (and ellipsis)
bool proc_arg_count_all_equal = true;
isize proc_arg_count = -1;
- for_array(i, procs) {
- Entity *p = procs[i];
+ for (Entity *p : procs) {
Type *pt = base_type(p->type);
if (pt != nullptr && is_type_proc(pt)) {
if (proc_arg_count < 0) {
@@ -5929,8 +5941,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
lhs = gb_alloc_array(heap_allocator(), Entity *, lhs_count);
for (isize param_index = 0; param_index < lhs_count; param_index++) {
Entity *e = nullptr;
- for_array(j, procs) {
- Entity *p = procs[j];
+ for (Entity *p : procs) {
Type *pt = base_type(p->type);
if (pt != nullptr && is_type_proc(pt)) {
if (e == nullptr) {
@@ -5971,8 +5982,8 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
auto proc_entities = array_make<Entity *>(heap_allocator(), 0, procs.count*2 + 1);
defer (array_free(&proc_entities));
- for_array(i, procs) {
- array_add(&proc_entities, procs[i]);
+ for (Entity *proc : procs) {
+ array_add(&proc_entities, proc);
}
@@ -6062,8 +6073,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
if (procs.count > 0) {
error_line("Did you mean to use one of the following:\n");
}
- for_array(i, procs) {
- Entity *proc = procs[i];
+ for (Entity *proc : procs) {
TokenPos pos = proc->token.pos;
Type *t = base_type(proc->type);
if (t == t_invalid) continue;
@@ -6187,7 +6197,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
decl->where_clauses_evaluated = true;
if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
- check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
+ check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
}
}
return data;
@@ -6225,7 +6235,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
decl->where_clauses_evaluated = true;
if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
- check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
+ check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
}
}
return data;
@@ -6600,7 +6610,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
operand->builtin_id = BuiltinProc_DIRECTIVE;
operand->expr = proc;
operand->type = t_invalid;
- add_type_and_value(c->info, proc, operand->mode, operand->type, operand->value);
+ add_type_and_value(c, proc, operand->mode, operand->type, operand->value);
} else {
error(proc, "Unknown directive: #%.*s", LIT(name));
operand->expr = proc;
@@ -6622,8 +6632,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
if (args.count > 0) {
bool fail = false;
bool first_is_field_value = (args[0]->kind == Ast_FieldValue);
- for_array(i, args) {
- Ast *arg = args[i];
+ for (Ast *arg : args) {
bool mix = false;
if (first_is_field_value) {
mix = arg->kind != Ast_FieldValue;
@@ -6644,8 +6653,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
if (operand->mode == Addressing_Invalid) {
- for_array(i, args) {
- Ast *arg = args[i];
+ for (Ast *arg : args) {
if (arg->kind == Ast_FieldValue) {
arg = arg->FieldValue.value;
}
@@ -6678,7 +6686,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
GB_ASSERT(ot->kind == Type_Named);
Entity *e = ot->Named.type_name;
add_entity_use(c, ident, e);
- add_type_and_value(c->info, call, Addressing_Type, ot, empty_exact_value);
+ add_type_and_value(c, call, Addressing_Type, ot, empty_exact_value);
} else {
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
@@ -6850,7 +6858,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
}
- // add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
+ // add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return Expr_Expr;
}
@@ -7094,8 +7102,8 @@ gb_internal bool check_range(CheckerContext *c, Ast *node, Operand *x, Operand *
return false;
}
- add_type_and_value(c->info, ie->left, x->mode, x->type, x->value);
- add_type_and_value(c->info, ie->right, y->mode, y->type, y->value);
+ add_type_and_value(c, ie->left, x->mode, x->type, x->value);
+ add_type_and_value(c, ie->right, y->mode, y->type, y->value);
return true;
}
@@ -7111,7 +7119,7 @@ gb_internal bool check_is_operand_compound_lit_constant(CheckerContext *c, Opera
return true;
}
if (expr->kind == Ast_ProcLit) {
- add_type_and_value(c->info, expr, Addressing_Constant, type_of_expr(expr), exact_value_procedure(expr));
+ add_type_and_value(c, expr, Addressing_Constant, type_of_expr(expr), exact_value_procedure(expr));
return true;
}
}
@@ -7141,9 +7149,7 @@ gb_internal bool attempt_implicit_selector_expr(CheckerContext *c, Operand *o, A
Type *union_type = base_type(th);
auto operands = array_make<Operand>(temporary_allocator(), 0, union_type->Union.variants.count);
- for_array(i, union_type->Union.variants) {
- Type *vt = union_type->Union.variants[i];
-
+ for (Type *vt : union_type->Union.variants) {
Operand x = {};
if (attempt_implicit_selector_expr(c, &x, ise, vt)) {
array_add(&operands, x);
@@ -7220,7 +7226,7 @@ gb_internal void check_promote_optional_ok(CheckerContext *c, Operand *x, Type *
Type *pt = base_type(type_of_expr(expr->CallExpr.proc));
if (is_type_proc(pt)) {
Type *tuple = pt->Proc.results;
- add_type_and_value(c->info, x->expr, x->mode, tuple, x->value);
+ add_type_and_value(c, x->expr, x->mode, tuple, x->value);
if (pt->Proc.result_count >= 2) {
if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type;
@@ -7233,7 +7239,7 @@ gb_internal void check_promote_optional_ok(CheckerContext *c, Operand *x, Type *
Type *tuple = make_optional_ok_type(x->type);
if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type;
- add_type_and_value(c->info, x->expr, x->mode, tuple, x->value);
+ add_type_and_value(c, x->expr, x->mode, tuple, x->value);
x->type = tuple;
GB_ASSERT(is_type_tuple(type_of_expr(x->expr)));
}
@@ -7373,8 +7379,7 @@ gb_internal void add_to_seen_map(CheckerContext *ctx, SeenMap *seen, TokenKind u
}
bool found = false;
- for_array(j, bt->Enum.fields) {
- Entity *f = bt->Enum.fields[j];
+ for (Entity *f : bt->Enum.fields) {
GB_ASSERT(f->kind == Entity_Constant);
i64 fv = exact_value_to_i64(f->Constant.value);
@@ -7653,7 +7658,7 @@ gb_internal ExprKind check_or_else_expr(CheckerContext *c, Operand *o, Ast *node
Type *left_type = nullptr;
Type *right_type = nullptr;
check_or_else_split_types(c, &x, name, &left_type, &right_type);
- add_type_and_value(&c->checker->info, arg, x.mode, x.type, x.value);
+ add_type_and_value(c, arg, x.mode, x.type, x.value);
if (left_type != nullptr) {
if (!y_is_diverging) {
@@ -7688,7 +7693,7 @@ gb_internal ExprKind check_or_return_expr(CheckerContext *c, Operand *o, Ast *no
Type *left_type = nullptr;
Type *right_type = nullptr;
check_or_return_split_types(c, &x, name, &left_type, &right_type);
- add_type_and_value(&c->checker->info, re->expr, x.mode, x.type, x.value);
+ add_type_and_value(c, re->expr, x.mode, x.type, x.value);
if (right_type == nullptr) {
check_or_else_expr_no_value_error(c, name, x, type_hint);
@@ -7892,8 +7897,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
if (cl->elems[0]->kind == Ast_FieldValue) {
bool *fields_visited = gb_alloc_array(temporary_allocator(), bool, field_count);
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed");
continue;
@@ -8045,8 +8049,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
RangeCache rc = range_cache_make(heap_allocator());
defer (range_cache_destroy(&rc));
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed");
continue;
@@ -8114,7 +8117,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
error(elem, "Expected a constant integer as an array field");
continue;
}
- // add_type_and_value(c->info, op_index.expr, op_index.mode, op_index.type, op_index.value);
+ // add_type_and_value(c, op_index.expr, op_index.mode, op_index.type, op_index.value);
i64 index = exact_value_to_i64(op_index.value);
@@ -8227,8 +8230,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
{
Type *bt = base_type(index_type);
GB_ASSERT(bt->kind == Type_Enum);
- for_array(i, bt->Enum.fields) {
- Entity *f = bt->Enum.fields[i];
+ for (Entity *f : bt->Enum.fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -8257,15 +8259,13 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
bool is_partial = cl->tag && (cl->tag->BasicDirective.name.string == "partial");
SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue
- map_init(&seen, heap_allocator());
defer (map_destroy(&seen));
if (cl->elems.count > 0 && cl->elems[0]->kind == Ast_FieldValue) {
RangeCache rc = range_cache_make(heap_allocator());
defer (range_cache_destroy(&rc));
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed");
continue;
@@ -8429,8 +8429,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
auto unhandled = array_make<Entity *>(temporary_allocator(), 0, fields.count);
- for_array(i, fields) {
- Entity *f = fields[i];
+ for (Entity *f : fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -8556,8 +8555,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
bool key_is_typeid = is_type_typeid(t->Map.key);
bool value_is_typeid = is_type_typeid(t->Map.value);
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Only 'field = value' elements are allowed in a map literal");
continue;
@@ -8606,8 +8604,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
error(cl->elems[0], "'field = value' in a bit_set literal is not allowed");
is_constant = false;
} else {
- for_array(index, cl->elems) {
- Ast *elem = cl->elems[index];
+ for (Ast *elem : cl->elems) {
if (elem->kind == Ast_FieldValue) {
error(elem, "'field = value' in a bit_set literal is not allowed");
continue;
@@ -8659,8 +8656,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
BigInt one = {};
big_int_from_u64(&one, 1);
- for_array(i, cl->elems) {
- Ast *e = cl->elems[i];
+ for (Ast *e : cl->elems) {
GB_ASSERT(e->kind != Ast_FieldValue);
TypeAndValue tav = e->tav;
@@ -8759,8 +8755,7 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no
if (bsrc->Union.variants.count != 1 && type_hint != nullptr) {
bool allowed = false;
- for_array(i, bsrc->Union.variants) {
- Type *vt = bsrc->Union.variants[i];
+ for (Type *vt : bsrc->Union.variants) {
if (are_types_identical(vt, type_hint)) {
allowed = true;
add_type_info_type(c, vt);
@@ -8793,8 +8788,7 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no
if (is_type_union(src)) {
bool ok = false;
- for_array(i, bsrc->Union.variants) {
- Type *vt = bsrc->Union.variants[i];
+ for (Type *vt : bsrc->Union.variants) {
if (are_types_identical(vt, dst)) {
ok = true;
break;
@@ -8954,8 +8948,7 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast
if (ce->args.count > 0) {
bool fail = false;
bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
- for_array(i, ce->args) {
- Ast *arg = ce->args[i];
+ for (Ast *arg : ce->args) {
bool mix = false;
if (first_is_field_value) {
mix = arg->kind != Ast_FieldValue;
@@ -9447,7 +9440,7 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast
}
pl->decl = decl;
- check_procedure_later(&ctx, ctx.file, empty_token, decl, type, pl->body, pl->tags);
+ check_procedure_later(ctx.checker, ctx.file, empty_token, decl, type, pl->body, pl->tags);
}
check_close_scope(&ctx);
@@ -9748,7 +9741,7 @@ gb_internal ExprKind check_expr_base(CheckerContext *c, Operand *o, Ast *node, T
}
check_rtti_type_disallowed(node, o->type, "An expression is using a type, %s, which has been disallowed");
- add_type_and_value(c->info, node, o->mode, o->type, o->value);
+ add_type_and_value(c, node, o->mode, o->type, o->value);
return kind;
}
@@ -9857,12 +9850,9 @@ gb_internal bool is_exact_value_zero(ExactValue const &v) {
if (cl->elems.count == 0) {
return true;
} else {
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
if (elem->tav.mode != Addressing_Constant) {
- // if (elem->tav.value.kind != ExactValue_Invalid) {
return false;
- // }
}
if (!is_exact_value_zero(elem->tav.value)) {
return false;
@@ -10342,8 +10332,7 @@ gb_internal gbString write_expr_to_string(gbString str, Ast *node, bool shorthan
bool parens_needed = false;
if (pt->results && pt->results->kind == Ast_FieldList) {
- for_array(i, pt->results->FieldList.list) {
- Ast *field = pt->results->FieldList.list[i];
+ for (Ast *field : pt->results->FieldList.list) {
ast_node(f, Field, field);
if (f->names.count != 0) {
parens_needed = true;
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index cf111e84c..b4dd4cd7d 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -622,7 +622,10 @@ gb_internal bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us,
case Entity_ImportName: {
Scope *scope = e->ImportName.scope;
- MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
+ rw_mutex_lock(&scope->mutex);
+ defer (rw_mutex_unlock(&scope->mutex));
+
+ for (auto const &entry : scope->elements) {
String name = entry.key.string;
Entity *decl = entry.value;
if (!is_entity_exported(decl)) continue;
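
Here, as in several other hunks, the MUTEX_GUARD_BLOCK macro gives way to an explicit lock paired with defer. A minimal defer-style guard in standard C++, approximating the codebase's defer() macro (an internal facility, so this is only a sketch):

#include <utility>

// Runs a callable at scope exit, like the defer() used above.
template <typename F>
struct Deferred {
    F f;
    ~Deferred() { f(); }
};

template <typename F>
Deferred<F> make_defer(F f) { return Deferred<F>{std::move(f)}; }

// Usage mirroring the hunk:
//   rw_mutex_lock(&scope->mutex);
//   auto unlock = make_defer([&] { rw_mutex_unlock(&scope->mutex); });
//   for (auto const &entry : scope->elements) { ... }
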
@@ -929,19 +932,17 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
}
SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue
- map_init(&seen, heap_allocator());
defer (map_destroy(&seen));
- for_array(stmt_index, bs->stmts) {
- Ast *stmt = bs->stmts[stmt_index];
+ for (Ast *stmt : bs->stmts) {
if (stmt->kind != Ast_CaseClause) {
// NOTE(bill): error handled by above multiple default checker
continue;
}
ast_node(cc, CaseClause, stmt);
- for_array(j, cc->list) {
- Ast *expr = unparen_expr(cc->list[j]);
+ for (Ast *expr : cc->list) {
+ expr = unparen_expr(expr);
if (is_ast_range(expr)) {
ast_node(be, BinaryExpr, expr);
@@ -1053,8 +1054,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
auto unhandled = array_make<Entity *>(temporary_allocator(), 0, fields.count);
- for_array(i, fields) {
- Entity *f = fields[i];
+ for (Entity *f : fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -1073,8 +1073,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
error_no_newline(node, "Unhandled switch case: %.*s", LIT(unhandled[0]->token.string));
} else {
error(node, "Unhandled switch cases:");
- for_array(i, unhandled) {
- Entity *f = unhandled[i];
+ for (Entity *f : unhandled) {
error_line("\t%.*s\n", LIT(f->token.string));
}
}
@@ -1133,7 +1132,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
check_expr(ctx, &x, rhs);
check_assignment(ctx, &x, nullptr, str_lit("type switch expression"));
- add_type_info_type(ctx, x.type);
+ // add_type_info_type(ctx, x.type);
TypeSwitchKind switch_kind = check_valid_type_switch_type(x.type);
if (switch_kind == TypeSwitch_Invalid) {
@@ -1155,8 +1154,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
// NOTE(bill): Check for multiple defaults
Ast *first_default = nullptr;
ast_node(bs, BlockStmt, ss->body);
- for_array(i, bs->stmts) {
- Ast *stmt = bs->stmts[i];
+ for (Ast *stmt : bs->stmts) {
Ast *default_stmt = nullptr;
if (stmt->kind == Ast_CaseClause) {
ast_node(cc, CaseClause, stmt);
@@ -1185,11 +1183,9 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
}
PtrSet<Type *> seen = {};
- ptr_set_init(&seen, heap_allocator());
defer (ptr_set_destroy(&seen));
- for_array(i, bs->stmts) {
- Ast *stmt = bs->stmts[i];
+ for (Ast *stmt : bs->stmts) {
if (stmt->kind != Ast_CaseClause) {
// NOTE(bill): error handled by above multiple default checker
continue;
@@ -1200,8 +1196,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
Type *bt = base_type(type_deref(x.type));
Type *case_type = nullptr;
- for_array(type_index, cc->list) {
- Ast *type_expr = cc->list[type_index];
+ for (Ast *type_expr : cc->list) {
if (type_expr != nullptr) { // Otherwise it's a default expression
Operand y = {};
check_expr_or_type(ctx, &y, type_expr);
@@ -1215,8 +1210,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
if (switch_kind == TypeSwitch_Union) {
GB_ASSERT(is_type_union(bt));
bool tag_type_found = false;
- for_array(j, bt->Union.variants) {
- Type *vt = bt->Union.variants[j];
+ for (Type *vt : bt->Union.variants) {
if (are_types_identical(vt, y.type)) {
tag_type_found = true;
break;
@@ -1229,7 +1223,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
continue;
}
case_type = y.type;
- add_type_info_type(ctx, y.type);
+ // add_type_info_type(ctx, y.type);
} else if (switch_kind == TypeSwitch_Any) {
case_type = y.type;
add_type_info_type(ctx, y.type);
@@ -1265,7 +1259,9 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
if (case_type == nullptr) {
case_type = x.type;
}
- add_type_info_type(ctx, case_type);
+ if (switch_kind == TypeSwitch_Any) {
+ add_type_info_type(ctx, case_type);
+ }
check_open_scope(ctx, stmt);
{
@@ -1290,10 +1286,10 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
auto unhandled = array_make<Type *>(temporary_allocator(), 0, variants.count);
- for_array(i, variants) {
- Type *t = variants[i];
+ for (Type *t : variants) {
if (!type_ptr_set_exists(&seen, t)) {
array_add(&unhandled, t);
+ gb_printf_err("HERE: %p %s\n", t, type_to_string(t));
}
}
@@ -1304,8 +1300,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
gb_string_free(s);
} else {
error_no_newline(node, "Unhandled switch cases:\n");
- for_array(i, unhandled) {
- Type *t = unhandled[i];
+ for (Type *t : unhandled) {
gbString s = type_to_string(t);
error_line("\t%s\n", s);
gb_string_free(s);
@@ -1342,8 +1337,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) {
isize stmt_count = 0;
Ast *the_stmt = nullptr;
- for_array(i, bs->stmts) {
- Ast *stmt = bs->stmts[i];
+ for (Ast *stmt : bs->stmts) {
GB_ASSERT(stmt != nullptr);
switch (stmt->kind) {
case_ast_node(es, EmptyStmt, stmt);
@@ -1361,8 +1355,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) {
if (stmt_count == 1) {
if (the_stmt->kind == Ast_ValueDecl) {
- for_array(i, the_stmt->ValueDecl.names) {
- Ast *name = the_stmt->ValueDecl.names[i];
+ for (Ast *name : the_stmt->ValueDecl.names) {
if (name->kind != Ast_Ident) {
continue;
}
@@ -1378,8 +1371,8 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) {
gb_internal bool all_operands_valid(Array<Operand> const &operands) {
if (any_errors()) {
- for_array(i, operands) {
- if (operands[i].type == t_invalid) {
+ for (Operand const &o : operands) {
+ if (o.type == t_invalid) {
return false;
}
}
@@ -1550,16 +1543,9 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
check_assignment_arguments(ctx, lhs_operands, &rhs_operands, as->rhs);
- isize rhs_count = rhs_operands.count;
- for_array(i, rhs_operands) {
- if (rhs_operands[i].mode == Addressing_Invalid) {
- // TODO(bill): Should I ignore invalid parameters?
- // rhs_count--;
- }
- }
-
auto lhs_to_ignore = array_make<bool>(temporary_allocator(), lhs_count);
+ isize rhs_count = rhs_operands.count;
isize max = gb_min(lhs_count, rhs_count);
for (isize i = 0; i < max; i++) {
if (lhs_to_ignore[i]) {
@@ -1858,8 +1844,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
break;
}
- for_array(ti, t->Tuple.variables) {
- array_add(&vals, t->Tuple.variables[ti]->type);
+ for (Entity *e : t->Tuple.variables) {
+ array_add(&vals, e->type);
}
if (rs->vals.count > 1 && rs->vals[1] != nullptr && count < 3) {
@@ -1978,8 +1964,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
}
}
- for_array(i, entities) {
- Entity *e = entities[i];
+ for (Entity *e : entities) {
DeclInfo *d = decl_info_of_entity(e);
GB_ASSERT(d == nullptr);
add_entity(ctx, ctx->scope, e->identifier, e);
@@ -2093,8 +2078,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
error(us->token, "Empty 'using' list");
return;
}
- for_array(i, us->list) {
- Ast *expr = unparen_expr(us->list[i]);
+ for (Ast *expr : us->list) {
+ expr = unparen_expr(expr);
Entity *e = nullptr;
bool is_selector = false;
@@ -2134,8 +2119,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
check_decl_attributes(&c, fb->attributes, foreign_block_decl_attribute, nullptr);
ast_node(block, BlockStmt, fb->body);
- for_array(i, block->stmts) {
- Ast *decl = block->stmts[i];
+ for (Ast *decl : block->stmts) {
if (decl->kind == Ast_ValueDecl && decl->ValueDecl.is_mutable) {
check_stmt(&c, decl, flags);
}
@@ -2148,8 +2132,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
isize entity_count = 0;
isize new_name_count = 0;
- for_array(i, vd->names) {
- Ast *name = vd->names[i];
+ for (Ast *name : vd->names) {
Entity *entity = nullptr;
if (name->kind != Ast_Ident) {
error(name, "A variable declaration must be an identifier");
@@ -2195,8 +2178,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
begin_error_block();
error(node, "No new declarations on the left hand side");
bool all_underscore = true;
- for_array(i, vd->names) {
- Ast *name = vd->names[i];
+ for (Ast *name : vd->names) {
if (name->kind == Ast_Ident) {
if (!is_blank_ident(name)) {
all_underscore = false;
@@ -2390,8 +2372,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
} else {
// constant value declaration
// NOTE(bill): Check `_` declarations
- for_array(i, vd->names) {
- Ast *name = vd->names[i];
+ for (Ast *name : vd->names) {
if (is_blank_ident(name)) {
Entity *e = name->Ident.entity;
DeclInfo *d = decl_info_of_entity(e);
diff --git a/src/check_type.cpp b/src/check_type.cpp
index 4634e1fbe..0863af967 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -257,63 +257,67 @@ gb_internal bool check_custom_align(CheckerContext *ctx, Ast *node, i64 *align_)
gb_internal Entity *find_polymorphic_record_entity(CheckerContext *ctx, Type *original_type, isize param_count, Array<Operand> const &ordered_operands, bool *failure) {
- mutex_lock(&ctx->info->gen_types_mutex);
- defer (mutex_unlock(&ctx->info->gen_types_mutex));
+ rw_mutex_shared_lock(&ctx->info->gen_types_mutex); // @@global
auto *found_gen_types = map_get(&ctx->info->gen_types, original_type);
- if (found_gen_types != nullptr) {
- // GB_ASSERT_MSG(ordered_operands.count >= param_count, "%td >= %td", ordered_operands.count, param_count);
-
- for_array(i, *found_gen_types) {
- Entity *e = (*found_gen_types)[i];
- Type *t = base_type(e->type);
- TypeTuple *tuple = get_record_polymorphic_params(t);
- GB_ASSERT(param_count == tuple->variables.count);
-
- bool skip = false;
-
- for (isize j = 0; j < param_count; j++) {
- Entity *p = tuple->variables[j];
- Operand o = {};
- if (j < ordered_operands.count) {
- o = ordered_operands[j];
+ if (found_gen_types == nullptr) {
+ rw_mutex_shared_unlock(&ctx->info->gen_types_mutex); // @@global
+ return nullptr;
+ }
+
+ rw_mutex_shared_lock(&found_gen_types->mutex); // @@local
+ defer (rw_mutex_shared_unlock(&found_gen_types->mutex)); // @@local
+
+ rw_mutex_shared_unlock(&ctx->info->gen_types_mutex); // @@global
+
+ for (Entity *e : found_gen_types->types) {
+ Type *t = base_type(e->type);
+ TypeTuple *tuple = get_record_polymorphic_params(t);
+ GB_ASSERT(param_count == tuple->variables.count);
+
+ bool skip = false;
+
+ for (isize j = 0; j < param_count; j++) {
+ Entity *p = tuple->variables[j];
+ Operand o = {};
+ if (j < ordered_operands.count) {
+ o = ordered_operands[j];
+ }
+ if (o.expr == nullptr) {
+ continue;
+ }
+ Entity *oe = entity_of_node(o.expr);
+ if (p == oe) {
+ // NOTE(bill): This is the same type, make sure that it will be the same thing and use that
+ // Saves on a lot of checking too below
+ continue;
+ }
+
+ if (p->kind == Entity_TypeName) {
+ if (is_type_polymorphic(o.type)) {
+ // NOTE(bill): Do not add polymorphic version to the gen_types
+ skip = true;
+ break;
}
- if (o.expr == nullptr) {
- continue;
+ if (!are_types_identical(o.type, p->type)) {
+ skip = true;
+ break;
}
- Entity *oe = entity_of_node(o.expr);
- if (p == oe) {
- // NOTE(bill): This is the same type, make sure that it will be be same thing and use that
- // Saves on a lot of checking too below
- continue;
+ } else if (p->kind == Entity_Constant) {
+ if (!compare_exact_values(Token_CmpEq, o.value, p->Constant.value)) {
+ skip = true;
+ break;
}
-
- if (p->kind == Entity_TypeName) {
- if (is_type_polymorphic(o.type)) {
- // NOTE(bill): Do not add polymorphic version to the gen_types
- skip = true;
- break;
- }
- if (!are_types_identical(o.type, p->type)) {
- skip = true;
- break;
- }
- } else if (p->kind == Entity_Constant) {
- if (!compare_exact_values(Token_CmpEq, o.value, p->Constant.value)) {
- skip = true;
- break;
- }
- if (!are_types_identical(o.type, p->type)) {
- skip = true;
- break;
- }
- } else {
- GB_PANIC("Unknown entity kind");
+ if (!are_types_identical(o.type, p->type)) {
+ skip = true;
+ break;
}
+ } else {
+ GB_PANIC("Unknown entity kind");
}
- if (!skip) {
- return e;
- }
+ }
+ if (!skip) {
+ return e;
}
}
return nullptr;
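
    // [Editorial sketch, not part of the patch] The rewritten lookup above
    // holds the global gen_types lock only long enough to find the per-type
    // bucket, takes the bucket's own reader lock, and then drops the global
    // one (hand-over-hand locking), so unrelated instantiations no longer
    // serialize on one mutex. The pattern with std:: stand-ins; the shapes
    // of GenTypesData and the map are simplified:
    //
    //     #include <shared_mutex>
    //     #include <unordered_map>
    //     #include <vector>
    //
    //     struct Entity;
    //     struct GenTypesData {
    //         std::shared_mutex mutex;        // @@local, guards `types` only
    //         std::vector<Entity *> types;
    //     };
    //
    //     static std::shared_mutex gen_types_mutex; // @@global, guards the map
    //     static std::unordered_map<void *, GenTypesData> gen_types;
    //
    //     Entity *find_first_gen_type(void *original_type) {
    //         gen_types_mutex.lock_shared();          // take @@global first
    //         auto it = gen_types.find(original_type);
    //         if (it == gen_types.end()) {
    //             gen_types_mutex.unlock_shared();
    //             return nullptr;
    //         }
    //         GenTypesData *found = &it->second;
    //         found->mutex.lock_shared();     // take @@local while @@global held
    //         gen_types_mutex.unlock_shared();        // then release @@global early
    //         Entity *e = found->types.empty() ? nullptr : found->types.front();
    //         found->mutex.unlock_shared();
    //         return e;
    //     }
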
@@ -346,16 +350,19 @@ gb_internal void add_polymorphic_record_entity(CheckerContext *ctx, Ast *node, T
// TODO(bill): Is this even correct? Or should the metadata be copied?
e->TypeName.objc_metadata = original_type->Named.type_name->TypeName.objc_metadata;
- mutex_lock(&ctx->info->gen_types_mutex);
+ rw_mutex_lock(&ctx->info->gen_types_mutex);
auto *found_gen_types = map_get(&ctx->info->gen_types, original_type);
if (found_gen_types) {
- array_add(found_gen_types, e);
+ rw_mutex_lock(&found_gen_types->mutex);
+ array_add(&found_gen_types->types, e);
+ rw_mutex_unlock(&found_gen_types->mutex);
} else {
- auto array = array_make<Entity *>(heap_allocator());
- array_add(&array, e);
- map_set(&ctx->info->gen_types, original_type, array);
+ GenTypesData gen_types = {};
+ gen_types.types = array_make<Entity *>(heap_allocator());
+ array_add(&gen_types.types, e);
+ map_set(&ctx->info->gen_types, original_type, gen_types);
}
- mutex_unlock(&ctx->info->gen_types_mutex);
+ rw_mutex_unlock(&ctx->info->gen_types_mutex);
}
gb_internal Type *check_record_polymorphic_params(CheckerContext *ctx, Ast *polymorphic_params,
@@ -2398,7 +2405,8 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e
}
soa_struct->Struct.soa_count = cast(i32)count;
- scope = create_scope(ctx->info, ctx->scope, 8);
+ scope = create_scope(ctx->info, ctx->scope);
+ string_map_init(&scope->elements, 8);
soa_struct->Struct.scope = scope;
String params_xyzw[4] = {
@@ -3045,7 +3053,7 @@ gb_internal Type *check_type_expr(CheckerContext *ctx, Ast *e, Type *named_type)
#endif
if (is_type_typed(type)) {
- add_type_and_value(ctx->info, e, Addressing_Type, type, empty_exact_value);
+ add_type_and_value(ctx, e, Addressing_Type, type, empty_exact_value);
} else {
gbString name = type_to_string(type);
error(e, "Invalid type definition of %s", name);
diff --git a/src/checker.cpp b/src/checker.cpp
index b78da2827..053bb0e17 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -1,3 +1,5 @@
+#define DEBUG_CHECK_ALL_PROCEDURES 1
+
#include "entity.cpp"
#include "types.cpp"
@@ -49,9 +51,11 @@ gb_internal bool check_rtti_type_disallowed(Ast *expr, Type *type, char const *f
gb_internal void scope_reset(Scope *scope) {
if (scope == nullptr) return;
+ rw_mutex_lock(&scope->mutex);
scope->head_child.store(nullptr, std::memory_order_relaxed);
string_map_clear(&scope->elements);
ptr_set_clear(&scope->imported);
+ rw_mutex_unlock(&scope->mutex);
}
gb_internal void scope_reserve(Scope *scope, isize capacity) {
@@ -62,15 +66,10 @@ gb_internal void scope_reserve(Scope *scope, isize capacity) {
}
gb_internal void entity_graph_node_set_destroy(EntityGraphNodeSet *s) {
- if (s->hashes.data != nullptr) {
- ptr_set_destroy(s);
- }
+ ptr_set_destroy(s);
}
gb_internal void entity_graph_node_set_add(EntityGraphNodeSet *s, EntityGraphNode *n) {
- if (s->hashes.data == nullptr) {
- ptr_set_init(s, heap_allocator());
- }
ptr_set_add(s, n);
}
@@ -115,15 +114,10 @@ gb_internal void entity_graph_node_swap(EntityGraphNode **data, isize i, isize j
gb_internal void import_graph_node_set_destroy(ImportGraphNodeSet *s) {
- if (s->hashes.data != nullptr) {
- ptr_set_destroy(s);
- }
+ ptr_set_destroy(s);
}
gb_internal void import_graph_node_set_add(ImportGraphNodeSet *s, ImportGraphNode *n) {
- if (s->hashes.data == nullptr) {
- ptr_set_init(s, heap_allocator());
- }
ptr_set_add(s, n);
}
@@ -179,12 +173,18 @@ gb_internal void import_graph_node_swap(ImportGraphNode **data, isize i, isize j
gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) {
+ gb_zero_item(d);
+ if (parent) {
+ mutex_lock(&parent->next_mutex);
+ d->next_sibling = parent->next_child;
+ parent->next_child = d;
+ mutex_unlock(&parent->next_mutex);
+ }
d->parent = parent;
d->scope = scope;
- ptr_set_init(&d->deps, heap_allocator());
- ptr_set_init(&d->type_info_deps, heap_allocator());
- array_init (&d->labels, heap_allocator());
- mutex_init(&d->proc_checked_mutex);
+ ptr_set_init(&d->deps, 0);
+ ptr_set_init(&d->type_info_deps, 0);
+ d->labels.allocator = heap_allocator();
}
gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) {
@@ -220,12 +220,9 @@ gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) {
-gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) {
+gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent) {
Scope *s = gb_alloc_item(permanent_allocator(), Scope);
s->parent = parent;
- string_map_init(&s->elements, heap_allocator(), init_elements_capacity);
- ptr_set_init(&s->imported, heap_allocator(), 0);
- mutex_init(&s->mutex);
if (parent != nullptr && parent != builtin_pkg->scope) {
Scope *prev_head_child = parent->head_child.exchange(s, std::memory_order_acq_rel);
@@ -247,7 +244,8 @@ gb_internal Scope *create_scope_from_file(CheckerInfo *info, AstFile *f) {
GB_ASSERT(f->pkg->scope != nullptr);
isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*f->total_file_decl_count);
- Scope *s = create_scope(info, f->pkg->scope, init_elements_capacity);
+ Scope *s = create_scope(info, f->pkg->scope);
+ string_map_init(&s->elements, init_elements_capacity);
s->flags |= ScopeFlag_File;
@@ -266,7 +264,8 @@ gb_internal Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg)
}
isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*total_pkg_decl_count);
- Scope *s = create_scope(c->info, builtin_pkg->scope, init_elements_capacity);
+ Scope *s = create_scope(c->info, builtin_pkg->scope);
+ string_map_init(&s->elements, init_elements_capacity);
s->flags |= ScopeFlag_Pkg;
s->pkg = pkg;
@@ -306,7 +305,6 @@ gb_internal void destroy_scope(Scope *scope) {
string_map_destroy(&scope->elements);
ptr_set_destroy(&scope->imported);
- mutex_destroy(&scope->mutex);
// NOTE(bill): No need to free scope as it "should" be allocated in an arena (except for the global scope)
}
@@ -398,9 +396,9 @@ gb_internal void scope_lookup_parent(Scope *scope, String const &name, Scope **s
StringHashKey key = string_hash_string(name);
for (Scope *s = scope; s != nullptr; s = s->parent) {
Entity **found = nullptr;
- mutex_lock(&s->mutex);
+ rw_mutex_shared_lock(&s->mutex);
found = string_map_get(&s->elements, key);
- mutex_unlock(&s->mutex);
+ rw_mutex_shared_unlock(&s->mutex);
if (found) {
Entity *e = *found;
if (gone_thru_proc) {
@@ -441,9 +439,44 @@ gb_internal Entity *scope_lookup(Scope *s, String const &name) {
return entity;
}
+gb_internal Entity *scope_insert_with_name_no_mutex(Scope *s, String const &name, Entity *entity) {
+ if (name == "") {
+ return nullptr;
+ }
+ StringHashKey key = string_hash_string(name);
+ Entity **found = nullptr;
+ Entity *result = nullptr;
+ found = string_map_get(&s->elements, key);
+ if (found) {
+ if (entity != *found) {
+ result = *found;
+ }
+ goto end;
+ }
+ if (s->parent != nullptr && (s->parent->flags & ScopeFlag_Proc) != 0) {
+ found = string_map_get(&s->parent->elements, key);
+ if (found) {
+ if ((*found)->flags & EntityFlag_Result) {
+ if (entity != *found) {
+ result = *found;
+ }
+ goto end;
+ }
+ }
+ }
+
+ string_map_set(&s->elements, key, entity);
+ if (entity->scope == nullptr) {
+ entity->scope = s;
+ }
+end:;
+ return result;
+}
+
+
-gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity, bool use_mutex=true) {
+gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity) {
if (name == "") {
return nullptr;
}
@@ -451,9 +484,8 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity
Entity **found = nullptr;
Entity *result = nullptr;
- if (use_mutex) mutex_lock(&s->mutex);
- defer (if (use_mutex) mutex_unlock(&s->mutex));
-
+ rw_mutex_lock(&s->mutex);
+
found = string_map_get(&s->elements, key);
if (found) {
@@ -479,12 +511,19 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity
entity->scope = s;
}
end:;
+ rw_mutex_unlock(&s->mutex);
+
return result;
}
-gb_internal Entity *scope_insert(Scope *s, Entity *entity, bool use_mutex) {
+gb_internal Entity *scope_insert(Scope *s, Entity *entity) {
String name = entity->token.string;
- return scope_insert_with_name(s, name, entity, use_mutex);
+ return scope_insert_with_name(s, name, entity);
+}
+
+gb_internal Entity *scope_insert_no_mutex(Scope *s, Entity *entity) {
+ String name = entity->token.string;
+ return scope_insert_with_name_no_mutex(s, name, entity);
}
@@ -634,7 +673,8 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
Array<VettedEntity> vetted_entities = {};
array_init(&vetted_entities, heap_allocator());
- MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
+ rw_mutex_shared_lock(&scope->mutex);
+ for (auto const &entry : scope->elements) {
Entity *e = entry.value;
if (e == nullptr) continue;
VettedEntity ve_unused = {};
@@ -651,6 +691,7 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
array_add(&vetted_entities, ve_shadowed);
}
}
+ rw_mutex_shared_unlock(&scope->mutex);
gb_sort(vetted_entities.data, vetted_entities.count, gb_size_of(VettedEntity), vetted_entity_variable_pos_cmp);
@@ -704,21 +745,17 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
gb_internal void add_dependency(CheckerInfo *info, DeclInfo *d, Entity *e) {
- mutex_lock(&info->deps_mutex);
+ rw_mutex_lock(&d->deps_mutex);
ptr_set_add(&d->deps, e);
- mutex_unlock(&info->deps_mutex);
+ rw_mutex_unlock(&d->deps_mutex);
}
-gb_internal void add_type_info_dependency(CheckerInfo *info, DeclInfo *d, Type *type, bool require_mutex) {
+gb_internal void add_type_info_dependency(CheckerInfo *info, DeclInfo *d, Type *type) {
if (d == nullptr) {
return;
}
- if (require_mutex) {
- mutex_lock(&info->deps_mutex);
- }
+ rw_mutex_lock(&d->type_info_deps_mutex);
ptr_set_add(&d->type_info_deps, type);
- if (require_mutex) {
- mutex_unlock(&info->deps_mutex);
- }
+ rw_mutex_unlock(&d->type_info_deps_mutex);
}
gb_internal AstPackage *get_core_package(CheckerInfo *info, String name) {
@@ -1104,26 +1141,22 @@ gb_internal void init_checker_info(CheckerInfo *i) {
array_init(&i->definitions, a);
array_init(&i->entities, a);
- map_init(&i->global_untyped, a);
- string_map_init(&i->foreigns, a);
- map_init(&i->gen_procs, a);
- map_init(&i->gen_types, a);
+ map_init(&i->global_untyped);
+ string_map_init(&i->foreigns);
+ // map_init(&i->gen_procs);
+ map_init(&i->gen_types);
array_init(&i->type_info_types, a);
- map_init(&i->type_info_map, a);
- string_map_init(&i->files, a);
- string_map_init(&i->packages, a);
+ map_init(&i->type_info_map);
+ string_map_init(&i->files);
+ string_map_init(&i->packages);
array_init(&i->variable_init_order, a);
array_init(&i->testing_procedures, a, 0, 0);
array_init(&i->init_procedures, a, 0, 0);
array_init(&i->required_foreign_imports_through_force, a, 0, 0);
-
-
- i->allow_identifier_uses = false;
- if (i->allow_identifier_uses) {
- array_init(&i->identifier_uses, a);
- }
-
+ map_init(&i->objc_msgSend_types);
+ string_map_init(&i->load_file_cache);
+ array_init(&i->all_procedures, heap_allocator());
TIME_SECTION("checker info: mpmc queues");
@@ -1131,28 +1164,7 @@ gb_internal void init_checker_info(CheckerInfo *i) {
mpmc_init(&i->definition_queue, a, 1<<20);
mpmc_init(&i->required_global_variable_queue, a, 1<<10);
mpmc_init(&i->required_foreign_imports_through_force_queue, a, 1<<10);
-
- TIME_SECTION("checker info: mutexes");
-
- mutex_init(&i->gen_procs_mutex);
- mutex_init(&i->gen_types_mutex);
- mutex_init(&i->lazy_mutex);
- mutex_init(&i->builtin_mutex);
- mutex_init(&i->global_untyped_mutex);
- mutex_init(&i->type_info_mutex);
- mutex_init(&i->deps_mutex);
- mutex_init(&i->type_and_value_mutex);
- mutex_init(&i->identifier_uses_mutex);
- mutex_init(&i->foreign_mutex);
-
- semaphore_init(&i->collect_semaphore);
-
mpmc_init(&i->intrinsics_entry_point_usage, a, 1<<10); // just waste some memory here, even if it probably never used
-
- mutex_init(&i->objc_types_mutex);
- map_init(&i->objc_msgSend_types, a);
- mutex_init(&i->load_file_mutex);
- string_map_init(&i->load_file_cache, a);
}
gb_internal void destroy_checker_info(CheckerInfo *i) {
@@ -1160,14 +1172,13 @@ gb_internal void destroy_checker_info(CheckerInfo *i) {
array_free(&i->entities);
map_destroy(&i->global_untyped);
string_map_destroy(&i->foreigns);
- map_destroy(&i->gen_procs);
+ // map_destroy(&i->gen_procs);
map_destroy(&i->gen_types);
array_free(&i->type_info_types);
map_destroy(&i->type_info_map);
string_map_destroy(&i->files);
string_map_destroy(&i->packages);
array_free(&i->variable_init_order);
- array_free(&i->identifier_uses);
array_free(&i->required_foreign_imports_through_force);
mpmc_destroy(&i->entity_queue);
@@ -1175,20 +1186,7 @@ gb_internal void destroy_checker_info(CheckerInfo *i) {
mpmc_destroy(&i->required_global_variable_queue);
mpmc_destroy(&i->required_foreign_imports_through_force_queue);
- mutex_destroy(&i->gen_procs_mutex);
- mutex_destroy(&i->gen_types_mutex);
- mutex_destroy(&i->lazy_mutex);
- mutex_destroy(&i->builtin_mutex);
- mutex_destroy(&i->global_untyped_mutex);
- mutex_destroy(&i->type_info_mutex);
- mutex_destroy(&i->deps_mutex);
- mutex_destroy(&i->type_and_value_mutex);
- mutex_destroy(&i->identifier_uses_mutex);
- mutex_destroy(&i->foreign_mutex);
-
- mutex_destroy(&i->objc_types_mutex);
map_destroy(&i->objc_msgSend_types);
- mutex_init(&i->load_file_mutex);
string_map_destroy(&i->load_file_cache);
}
@@ -1201,11 +1199,9 @@ gb_internal CheckerContext make_checker_context(Checker *c) {
ctx.type_path = new_checker_type_path();
ctx.type_level = 0;
- mutex_init(&ctx.mutex);
return ctx;
}
gb_internal void destroy_checker_context(CheckerContext *ctx) {
- mutex_destroy(&ctx->mutex);
destroy_checker_type_path(ctx->type_path);
}
@@ -1226,7 +1222,6 @@ gb_internal void reset_checker_context(CheckerContext *ctx, AstFile *file, Untyp
GB_ASSERT(ctx->checker != nullptr);
mutex_lock(&ctx->mutex);
- auto *queue = ctx->procs_to_check_queue;
auto type_path = ctx->type_path;
array_clear(type_path);
@@ -1242,7 +1237,6 @@ gb_internal void reset_checker_context(CheckerContext *ctx, AstFile *file, Untyp
add_curr_ast_file(ctx, file);
- ctx->procs_to_check_queue = queue;
ctx->untyped = untyped;
mutex_unlock(&ctx->mutex);
@@ -1263,8 +1257,7 @@ gb_internal void init_checker(Checker *c) {
mpmc_init(&c->procs_with_deferred_to_check, a, 1<<10);
// NOTE(bill): 1 Mi elements should be enough on average
- mpmc_init(&c->procs_to_check_queue, heap_allocator(), 1<<20);
- semaphore_init(&c->procs_to_check_semaphore);
+ array_init(&c->procs_to_check, heap_allocator(), 0, 1<<20);
mpmc_init(&c->global_untyped_queue, a, 1<<20);
@@ -1276,9 +1269,7 @@ gb_internal void destroy_checker(Checker *c) {
destroy_checker_context(&c->builtin_ctx);
- mpmc_destroy(&c->procs_to_check_queue);
- semaphore_destroy(&c->procs_to_check_semaphore);
-
+ array_free(&c->procs_to_check);
mpmc_destroy(&c->global_untyped_queue);
}
@@ -1360,9 +1351,9 @@ gb_internal ExprInfo *check_get_expr_info(CheckerContext *c, Ast *expr) {
}
return nullptr;
} else {
- mutex_lock(&c->info->global_untyped_mutex);
- defer (mutex_unlock(&c->info->global_untyped_mutex));
+ rw_mutex_shared_lock(&c->info->global_untyped_mutex);
ExprInfo **found = map_get(&c->info->global_untyped, expr);
+ rw_mutex_shared_unlock(&c->info->global_untyped_mutex);
if (found) {
return *found;
}
@@ -1374,9 +1365,9 @@ gb_internal void check_set_expr_info(CheckerContext *c, Ast *expr, AddressingMod
if (c->untyped != nullptr) {
map_set(c->untyped, expr, make_expr_info(mode, type, value, false));
} else {
- mutex_lock(&c->info->global_untyped_mutex);
+ rw_mutex_lock(&c->info->global_untyped_mutex);
map_set(&c->info->global_untyped, expr, make_expr_info(mode, type, value, false));
- mutex_unlock(&c->info->global_untyped_mutex);
+ rw_mutex_unlock(&c->info->global_untyped_mutex);
}
}
@@ -1386,10 +1377,10 @@ gb_internal void check_remove_expr_info(CheckerContext *c, Ast *e) {
GB_ASSERT(map_get(c->untyped, e) == nullptr);
} else {
auto *untyped = &c->info->global_untyped;
- mutex_lock(&c->info->global_untyped_mutex);
+ rw_mutex_lock(&c->info->global_untyped_mutex);
map_remove(untyped, e);
GB_ASSERT(map_get(untyped, e) == nullptr);
- mutex_unlock(&c->info->global_untyped_mutex);
+ rw_mutex_unlock(&c->info->global_untyped_mutex);
}
}
@@ -1445,7 +1436,7 @@ gb_internal void add_untyped(CheckerContext *c, Ast *expr, AddressingMode mode,
check_set_expr_info(c, expr, mode, type, value);
}
-gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mode, Type *type, ExactValue value) {
+gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMode mode, Type *type, ExactValue value) {
if (expr == nullptr) {
return;
}
@@ -1456,7 +1447,15 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo
return;
}
- mutex_lock(&i->type_and_value_mutex);
+ BlockingMutex *mutex = &ctx->info->type_and_value_mutex;
+ if (ctx->decl) {
+ mutex = &ctx->decl->type_and_value_mutex;
+ } else if (ctx->pkg) {
+ // TODO(bill): is a per-package mutex a good idea here?
+ mutex = &ctx->pkg->type_and_value_mutex;
+ }
+
+ mutex_lock(mutex);
Ast *prev_expr = nullptr;
while (prev_expr != expr) {
prev_expr = expr;
@@ -1478,7 +1477,7 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo
expr = unparen_expr(expr);
}
- mutex_unlock(&i->type_and_value_mutex);
+ mutex_unlock(mutex);
}
gb_internal void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) {
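
    // [Editorial sketch, not part of the patch] add_type_and_value now takes
    // a CheckerContext instead of a CheckerInfo so it can pick the narrowest
    // available type_and_value_mutex: per declaration if one is set, else per
    // package, else the global fallback. The selection condensed, with types
    // reduced to what the lock choice needs and a helper name of our own:
    //
    //     #include <mutex>
    //
    //     struct DeclInfo    { std::mutex type_and_value_mutex; };
    //     struct AstPackage  { std::mutex type_and_value_mutex; };
    //     struct CheckerInfo { std::mutex type_and_value_mutex; };
    //
    //     struct CheckerContext {
    //         CheckerInfo *info;
    //         DeclInfo    *decl; // null outside a declaration body
    //         AstPackage  *pkg;  // null outside a package
    //     };
    //
    //     std::mutex *tav_mutex_for(CheckerContext *ctx) {
    //         std::mutex *mutex = &ctx->info->type_and_value_mutex; // coarsest
    //         if (ctx->decl) {
    //             mutex = &ctx->decl->type_and_value_mutex;  // one declaration
    //         } else if (ctx->pkg) {
    //             mutex = &ctx->pkg->type_and_value_mutex;   // one package
    //         }
    //         return mutex;
    //     }
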
@@ -1590,12 +1589,6 @@ gb_internal void add_entity_use(CheckerContext *c, Ast *identifier, Entity *enti
identifier->Ident.entity = entity;
- if (c->info->allow_identifier_uses) {
- mutex_lock(&c->info->identifier_uses_mutex);
- array_add(&c->info->identifier_uses, identifier);
- mutex_unlock(&c->info->identifier_uses_mutex);
- }
-
String dmsg = entity->deprecated_message;
if (dmsg.len > 0) {
warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg));
@@ -1742,13 +1735,6 @@ gb_internal void add_type_info_type(CheckerContext *c, Type *t) {
if (build_context.disallow_rtti) {
return;
}
-
- mutex_lock(&c->info->type_info_mutex);
- add_type_info_type_internal(c, t);
- mutex_unlock(&c->info->type_info_mutex);
-}
-
-gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) {
if (t == nullptr) {
return;
}
@@ -1756,39 +1742,52 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) {
if (is_type_untyped(t)) {
return; // Could be nil
}
- if (is_type_polymorphic(base_type(t))) {
+ if (is_type_polymorphic(t)) {
return;
}
- add_type_info_dependency(c->info, c->decl, t, false);
+ add_type_info_type_internal(c, t);
+}
- auto found = map_get(&c->info->type_info_map, t);
- if (found != nullptr) {
- // Types have already been added
+gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) {
+ if (t == nullptr) {
return;
}
- bool prev = false;
- isize ti_index = -1;
- for (auto const &e : c->info->type_info_map) {
- if (are_types_identical_unique_tuples(t, e.key)) {
- // Duplicate entry
- ti_index = e.value;
- prev = true;
- break;
+ add_type_info_dependency(c->info, c->decl, t);
+
+ MUTEX_GUARD_BLOCK(&c->info->type_info_mutex) {
+ MapFindResult fr;
+ auto found = map_try_get(&c->info->type_info_map, t, &fr);
+ if (found != nullptr) {
+ // Types have already been added
+ return;
}
- }
- if (ti_index < 0) {
- // Unique entry
- // NOTE(bill): map entries grow linearly and in order
- ti_index = c->info->type_info_types.count;
- array_add(&c->info->type_info_types, t);
- }
- map_set(&c->checker->info.type_info_map, t, ti_index);
- if (prev) {
- // NOTE(bill): If a previous one exists already, no need to continue
- return;
+ bool prev = false;
+ isize ti_index = -1;
+ // NOTE(bill): this is a linear lookup, and is most likely very costly
+ // as this map keeps growing linearly
+ for (auto const &e : c->info->type_info_map) {
+ if (are_types_identical_unique_tuples(t, e.key)) {
+ // Duplicate entry
+ ti_index = e.value;
+ prev = true;
+ break;
+ }
+ }
+ if (ti_index < 0) {
+ // Unique entry
+ // NOTE(bill): map entries grow linearly and in order
+ ti_index = c->info->type_info_types.count;
+ array_add(&c->info->type_info_types, t);
+ }
+ map_set_internal_from_try_get(&c->checker->info.type_info_map, t, ti_index, fr);
+
+ if (prev) {
+ // NOTE(bill): If a previous one exists already, no need to continue
+ return;
+ }
}
// Add nested types
@@ -1971,21 +1970,36 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) {
-gb_global bool global_procedure_body_in_worker_queue = false;
+gb_global std::atomic<bool> global_procedure_body_in_worker_queue;
+gb_global std::atomic<bool> global_after_checking_procedure_bodies;
+
+gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc);
-gb_internal void check_procedure_later(CheckerContext *c, ProcInfo *info) {
+gb_internal void check_procedure_later(Checker *c, ProcInfo *info) {
GB_ASSERT(info != nullptr);
GB_ASSERT(info->decl != nullptr);
- if (build_context.threaded_checker && global_procedure_body_in_worker_queue) {
- GB_ASSERT(c->procs_to_check_queue != nullptr);
+ if (global_after_checking_procedure_bodies) {
+ Entity *e = info->decl->entity;
+ debugf("CHECK PROCEDURE LATER! %.*s :: %s {...}\n", LIT(e->token.string), type_to_string(e->type));
+ }
+
+ if (global_procedure_body_in_worker_queue.load()) {
+ thread_pool_add_task(check_proc_info_worker_proc, info);
+ } else {
+ array_add(&c->procs_to_check, info);
}
- auto *queue = c->procs_to_check_queue ? c->procs_to_check_queue : &c->checker->procs_to_check_queue;
- mpmc_enqueue(queue, info);
+ if (DEBUG_CHECK_ALL_PROCEDURES) {
+ MUTEX_GUARD_BLOCK(&c->info.all_procedures_mutex) {
+ GB_ASSERT(info != nullptr);
+ GB_ASSERT(info->decl != nullptr);
+ array_add(&c->info.all_procedures, info);
+ }
+ }
}
-gb_internal void check_procedure_later(CheckerContext *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) {
+gb_internal void check_procedure_later(Checker *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) {
ProcInfo *info = gb_alloc_item(permanent_allocator(), ProcInfo);
info->file = file;
info->token = token;
@@ -2017,8 +2031,11 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) {
ti_index = type_info_index(&c->info, t, false);
}
GB_ASSERT(ti_index >= 0);
- if (ptr_set_update(set, ti_index)) {
- // Type Already exists
+ // IMPORTANT NOTE(bill): this must be copied as `map_set` takes a const ref
+ // and effectively assigns the `+1` of the value
+ isize const count = set->entries.count;
+ if (map_set_if_not_previously_exists(set, ti_index, count)) {
+ // Type already exists
return;
}
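
    // [Editorial sketch, not part of the patch] map_set_if_not_previously_exists
    // replaces ptr_set_update here: insert the next slot index only when the
    // key is absent, and learn in the same call whether it was already there.
    // std::unordered_map::try_emplace has the same shape; copying the count
    // before the call mirrors the IMPORTANT NOTE above:
    //
    //     #include <unordered_map>
    //
    //     using isize = long long;
    //
    //     // returns true when ti_index was already in the set, so the
    //     // caller can return early
    //     bool add_min_dep_index(std::unordered_map<isize, isize> &set, isize ti_index) {
    //         isize const count = (isize)set.size(); // copied first; insertion changes size
    //         auto [it, inserted] = set.try_emplace(ti_index, count);
    //         (void)it;
    //         return !inserted;
    //     }
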
@@ -2218,12 +2235,11 @@ gb_internal void add_dependency_to_set(Checker *c, Entity *entity) {
return;
}
- for (auto const &entry : decl->type_info_deps) {
- add_min_dep_type_info(c, entry.ptr);
+ for (Type *t : decl->type_info_deps) {
+ add_min_dep_type_info(c, t);
}
- for (auto const &entry : decl->deps) {
- Entity *e = entry.ptr;
+ for (Entity *e : decl->deps) {
add_dependency_to_set(c, e);
if (e->kind == Entity_Procedure && e->Procedure.is_foreign) {
Entity *fl = e->Procedure.foreign_library;
@@ -2261,8 +2277,8 @@ gb_internal void generate_minimum_dependency_set(Checker *c, Entity *start) {
isize entity_count = c->info.entities.count;
isize min_dep_set_cap = next_pow2_isize(entity_count*4); // empirically determined factor
- ptr_set_init(&c->info.minimum_dependency_set, heap_allocator(), min_dep_set_cap);
- ptr_set_init(&c->info.minimum_dependency_type_info_set, heap_allocator());
+ ptr_set_init(&c->info.minimum_dependency_set, min_dep_set_cap);
+ map_init(&c->info.minimum_dependency_type_info_set);
#define FORCE_ADD_RUNTIME_ENTITIES(condition, ...) do { \
if (condition) { \
@@ -2486,7 +2502,7 @@ gb_internal bool is_entity_a_dependency(Entity *e) {
gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInfo *info, gbAllocator allocator) {
PtrMap<Entity *, EntityGraphNode *> M = {};
- map_init(&M, allocator, info->entities.count);
+ map_init(&M, info->entities.count);
defer (map_destroy(&M));
for_array(i, info->entities) {
Entity *e = info->entities[i];
@@ -2506,8 +2522,7 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
DeclInfo *decl = decl_info_of_entity(e);
GB_ASSERT(decl != nullptr);
- for (auto const &entry : decl->deps) {
- Entity *dep = entry.ptr;
+ for (Entity *dep : decl->deps) {
if (dep->flags & EntityFlag_Field) {
continue;
}
@@ -2533,15 +2548,12 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
if (e->kind == Entity_Procedure) {
// Connect each pred 'p' of 'n' with each succ 's' and from
// the procedure node
- for (auto const &p_entry : n->pred) {
- EntityGraphNode *p = p_entry.ptr;
-
+ for (EntityGraphNode *p : n->pred) {
// Ignore self-cycles
if (p != n) {
// Each succ 's' of 'n' becomes a succ of 'p', and
// each pred 'p' of 'n' becomes a pred of 's'
- for (auto const &s_entry : n->succ) {
- EntityGraphNode *s = s_entry.ptr;
+ for (EntityGraphNode *s : n->succ) {
// Ignore self-cycles
if (s != n) {
if (p->entity->kind == Entity_Procedure &&
@@ -2552,7 +2564,6 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
}
// IMPORTANT NOTE/TODO(bill, 2020-11-15): These three calls take the majority of the
// the time to process
-
entity_graph_node_set_add(&p->succ, s);
entity_graph_node_set_add(&s->pred, p);
// Remove edge to 'n'
@@ -2573,7 +2584,7 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
for_array(i, G) {
EntityGraphNode *n = G[i];
n->index = i;
- n->dep_count = n->succ.entries.count;
+ n->dep_count = n->succ.count;
GB_ASSERT(n->dep_count >= 0);
}
@@ -3379,7 +3390,6 @@ gb_internal void check_decl_attributes(CheckerContext *c, Array<Ast *> const &at
}
StringSet set = {};
- string_set_init(&set, heap_allocator());
defer (string_set_destroy(&set));
for_array(i, attributes) {
@@ -4197,7 +4207,7 @@ gb_internal void add_import_dependency_node(Checker *c, Ast *decl, PtrMap<AstPac
gb_internal Array<ImportGraphNode *> generate_import_dependency_graph(Checker *c) {
PtrMap<AstPackage *, ImportGraphNode *> M = {};
- map_init(&M, heap_allocator(), 2*c->parser->packages.count);
+ map_init(&M, 2*c->parser->packages.count);
defer (map_destroy(&M));
for_array(i, c->parser->packages) {
@@ -4225,7 +4235,7 @@ gb_internal Array<ImportGraphNode *> generate_import_dependency_graph(Checker *c
for (auto const &entry : M) {
auto n = entry.value;
n->index = i++;
- n->dep_count = n->succ.entries.count;
+ n->dep_count = n->succ.count;
GB_ASSERT(n->dep_count >= 0);
array_add(&G, n);
}
@@ -4651,85 +4661,48 @@ gb_internal void check_create_file_scopes(Checker *c) {
}
}
-struct ThreadProcCheckerSection {
- Checker *checker;
- isize offset;
- isize count;
+struct CollectEntityWorkerData {
+ Checker *c;
+ CheckerContext ctx;
+ UntypedExprInfoMap untyped;
};
+gb_global CollectEntityWorkerData *collect_entity_worker_data;
-gb_internal void check_with_workers(Checker *c, WorkerTaskProc *proc, isize total_count) {
- isize thread_count = gb_max(build_context.thread_count, 1);
- isize worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work
- if (!build_context.threaded_checker) {
- worker_count = 0;
- }
+gb_internal WORKER_TASK_PROC(check_collect_entities_all_worker_proc) {
+ CollectEntityWorkerData *wd = &collect_entity_worker_data[current_thread_index()];
- semaphore_post(&c->info.collect_semaphore, cast(i32)thread_count);
- if (worker_count == 0) {
- ThreadProcCheckerSection section_all = {};
- section_all.checker = c;
- section_all.offset = 0;
- section_all.count = total_count;
- proc(&section_all);
- return;
- }
+ Checker *c = wd->c;
+ CheckerContext *ctx = &wd->ctx;
+ UntypedExprInfoMap *untyped = &wd->untyped;
- isize file_load_count = (total_count+thread_count-1)/thread_count;
- isize remaining_count = total_count;
+ AstFile *f = cast(AstFile *)data;
+ reset_checker_context(ctx, f, untyped);
- ThreadProcCheckerSection *thread_data = gb_alloc_array(permanent_allocator(), ThreadProcCheckerSection, thread_count);
- for (isize i = 0; i < thread_count; i++) {
- ThreadProcCheckerSection *data = thread_data + i;
- data->checker = c;
- data->offset = total_count-remaining_count;
- data->count = file_load_count;
- remaining_count -= file_load_count;
- }
- GB_ASSERT(remaining_count <= 0);
+ check_collect_entities(ctx, f->decls);
+ GB_ASSERT(ctx->collect_delayed_decls == false);
+ add_untyped_expressions(&c->info, ctx->untyped);
- for (isize i = 0; i < thread_count; i++) {
- global_thread_pool_add_task(proc, thread_data+i);
- }
- global_thread_pool_wait();
- semaphore_wait(&c->info.collect_semaphore);
+ return 0;
}
+gb_internal void check_collect_entities_all(Checker *c) {
+ isize thread_count = global_thread_pool.threads.count;
-gb_internal WORKER_TASK_PROC(thread_proc_collect_entities) {
- auto *cs = cast(ThreadProcCheckerSection *)data;
- Checker *c = cs->checker;
- CheckerContext collect_entity_ctx = make_checker_context(c);
- defer (destroy_checker_context(&collect_entity_ctx));
-
- CheckerContext *ctx = &collect_entity_ctx;
-
- UntypedExprInfoMap untyped = {};
- map_init(&untyped, heap_allocator());
-
- isize offset = cs->offset;
- isize file_end = gb_min(offset+cs->count, c->info.files.entries.count);
-
- for (isize i = offset; i < file_end; i++) {
- AstFile *f = c->info.files.entries[i].value;
- reset_checker_context(ctx, f, &untyped);
-
- check_collect_entities(ctx, f->decls);
- GB_ASSERT(ctx->collect_delayed_decls == false);
-
- add_untyped_expressions(&c->info, ctx->untyped);
+ collect_entity_worker_data = gb_alloc_array(permanent_allocator(), CollectEntityWorkerData, thread_count);
+ for (isize i = 0; i < thread_count; i++) {
+ auto *wd = &collect_entity_worker_data[i];
+ wd->c = c;
+ wd->ctx = make_checker_context(c);
+ map_init(&wd->untyped);
}
- map_destroy(&untyped);
-
- semaphore_release(&c->info.collect_semaphore);
- return 0;
-}
-
-
-gb_internal void check_collect_entities_all(Checker *c) {
- check_with_workers(c, thread_proc_collect_entities, c->info.files.entries.count);
+ for (auto const &entry : c->info.files.entries) {
+ AstFile *f = entry.value;
+ thread_pool_add_task(check_collect_entities_all_worker_proc, f);
+ }
+ thread_pool_wait();
}
gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *pkg, UntypedExprInfoMap *untyped) {
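
    // [Editorial sketch, not part of the patch] The rewritten collectors give
    // each pool thread one preallocated context and untyped map, looked up by
    // current_thread_index() inside the task, so tasks share no mutable
    // scratch state and need no locks for it. A runnable sketch of the
    // indexing scheme; the pool calls are inline stand-ins:
    //
    //     #include <cstdio>
    //     #include <vector>
    //
    //     struct WorkerData {
    //         int items_seen = 0; // stands in for CheckerContext + untyped map
    //     };
    //
    //     static std::vector<WorkerData> worker_data;
    //
    //     // stand-in; a real pool hands each worker a stable index in
    //     // [0, thread_count)
    //     static int current_thread_index() { return 0; }
    //
    //     static int collect_task(void *item) {
    //         WorkerData *wd = &worker_data[current_thread_index()];
    //         wd->items_seen++;  // thread-private slot: no synchronization needed
    //         (void)item;
    //         return 0;
    //     }
    //
    //     int main() {
    //         worker_data.resize(4); // one slot per pool thread, set up front
    //         collect_task(nullptr);
    //         std::printf("%d\n", worker_data[0].items_seen);
    //     }
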
@@ -4746,30 +4719,30 @@ gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *p
}
}
-gb_internal WORKER_TASK_PROC(thread_proc_check_export_entities) {
- auto cs = cast(ThreadProcCheckerSection *)data;
- Checker *c = cs->checker;
+gb_internal WORKER_TASK_PROC(check_export_entities_worker_proc) {
+ AstPackage *pkg = (AstPackage *)data;
+ auto *wd = &collect_entity_worker_data[current_thread_index()];
+ check_export_entities_in_pkg(&wd->ctx, pkg, &wd->untyped);
+ return 0;
+}
- CheckerContext ctx = make_checker_context(c);
- defer (destroy_checker_context(&ctx));
- UntypedExprInfoMap untyped = {};
- map_init(&untyped, heap_allocator());
-
- isize end = gb_min(cs->offset + cs->count, c->info.packages.entries.count);
- for (isize i = cs->offset; i < end; i++) {
- AstPackage *pkg = c->info.packages.entries[i].value;
- check_export_entities_in_pkg(&ctx, pkg, &untyped);
- }
+gb_internal void check_export_entities(Checker *c) {
+ isize thread_count = global_thread_pool.threads.count;
- map_destroy(&untyped);
+ // NOTE(bill): reuse `collect_entity_worker_data`
- semaphore_release(&c->info.collect_semaphore);
- return 0;
-}
+ for (isize i = 0; i < thread_count; i++) {
+ auto *wd = &collect_entity_worker_data[i];
+ map_clear(&wd->untyped);
+ wd->ctx = make_checker_context(c);
+ }
-gb_internal void check_export_entities(Checker *c) {
- check_with_workers(c, thread_proc_check_export_entities, c->info.packages.entries.count);
+ for (auto const &entry : c->info.packages.entries) {
+ AstPackage *pkg = entry.value;
+ thread_pool_add_task(check_export_entities_worker_proc, pkg);
+ }
+ thread_pool_wait();
}
gb_internal void check_import_entities(Checker *c) {
@@ -4787,7 +4760,6 @@ gb_internal void check_import_entities(Checker *c) {
auto pq = priority_queue_create(dep_graph, import_graph_node_cmp, import_graph_node_swap);
PtrSet<AstPackage *> emitted = {};
- ptr_set_init(&emitted, heap_allocator());
defer (ptr_set_destroy(&emitted));
Array<ImportGraphNode *> package_order = {};
@@ -4801,7 +4773,6 @@ gb_internal void check_import_entities(Checker *c) {
if (n->dep_count > 0) {
PtrSet<AstPackage *> visited = {};
- ptr_set_init(&visited, heap_allocator());
defer (ptr_set_destroy(&visited));
auto path = find_import_path(c, pkg, pkg, &visited);
@@ -4820,8 +4791,7 @@ gb_internal void check_import_entities(Checker *c) {
}
}
- for (auto const &entry : n->pred) {
- ImportGraphNode *p = entry.ptr;
+ for (ImportGraphNode *p : n->pred) {
p->dep_count = gb_max(p->dep_count-1, 0);
priority_queue_fix(&pq, p->index);
}
@@ -4840,7 +4810,6 @@ gb_internal void check_import_entities(Checker *c) {
CheckerContext ctx = make_checker_context(c);
UntypedExprInfoMap untyped = {};
- map_init(&untyped, heap_allocator());
defer (map_destroy(&untyped));
isize min_pkg_index = 0;
@@ -4930,8 +4899,7 @@ gb_internal bool find_entity_path_tuple(Type *tuple, Entity *end, PtrSet<Entity
if (var_decl == nullptr) {
continue;
}
- for (auto const &entry : var_decl->deps) {
- Entity *dep = entry.ptr;
+ for (Entity *dep : var_decl->deps) {
if (dep == end) {
auto path = array_make<Entity *>(heap_allocator());
array_add(&path, dep);
@@ -4955,7 +4923,6 @@ gb_internal Array<Entity *> find_entity_path(Entity *start, Entity *end, PtrSet<
bool made_visited = false;
if (visited == nullptr) {
made_visited = true;
- ptr_set_init(&visited_, heap_allocator());
visited = &visited_;
}
defer (if (made_visited) {
@@ -4982,8 +4949,7 @@ gb_internal Array<Entity *> find_entity_path(Entity *start, Entity *end, PtrSet<
return path;
}
} else {
- for (auto const &entry : decl->deps) {
- Entity *dep = entry.ptr;
+ for (Entity *dep : decl->deps) {
if (dep == end) {
auto path = array_make<Entity *>(heap_allocator());
array_add(&path, dep);
@@ -5018,7 +4984,6 @@ gb_internal void calculate_global_init_order(Checker *c) {
auto pq = priority_queue_create(dep_graph, entity_graph_node_cmp, entity_graph_node_swap);
PtrSet<DeclInfo *> emitted = {};
- ptr_set_init(&emitted, heap_allocator());
defer (ptr_set_destroy(&emitted));
TIME_SECTION("calculate_global_init_order: queue sort");
@@ -5041,8 +5006,7 @@ gb_internal void calculate_global_init_order(Checker *c) {
}
}
- for (auto const &entry : n->pred) {
- EntityGraphNode *p = entry.ptr;
+ for (EntityGraphNode *p : n->pred) {
p->dep_count -= 1;
p->dep_count = gb_max(p->dep_count, 0);
priority_queue_fix(&pq, p->index);
@@ -5076,30 +5040,36 @@ gb_internal void calculate_global_init_order(Checker *c) {
}
-gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped, ProcBodyQueue *procs_to_check_queue) {
+gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped) {
if (pi == nullptr) {
return false;
}
if (pi->type == nullptr) {
return false;
}
- Entity *e = pi->decl->entity;
- MUTEX_GUARD_BLOCK(&pi->decl->proc_checked_mutex) {
- if (pi->decl->proc_checked) {
- if (e != nullptr) {
- GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked);
- }
- return true;
- }
+ if (!mutex_try_lock(&pi->decl->proc_checked_mutex)) {
+ return false;
}
+ defer (mutex_unlock(&pi->decl->proc_checked_mutex));
- CheckerContext ctx = make_checker_context(c);
- defer (destroy_checker_context(&ctx));
- reset_checker_context(&ctx, pi->file, untyped);
- ctx.decl = pi->decl;
- ctx.procs_to_check_queue = procs_to_check_queue;
- GB_ASSERT(procs_to_check_queue != nullptr);
+ Entity *e = pi->decl->entity;
+ switch (pi->decl->proc_checked_state.load()) {
+ case ProcCheckedState_InProgress:
+ if (e) {
+ GB_ASSERT(global_procedure_body_in_worker_queue.load());
+ }
+ return false;
+ case ProcCheckedState_Checked:
+ if (e != nullptr) {
+ GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked);
+ }
+ return true;
+ case ProcCheckedState_Unchecked:
+ // okay
+ break;
+ }
+ pi->decl->proc_checked_state.store(ProcCheckedState_InProgress);
GB_ASSERT(pi->type->kind == Type_Proc);
TypeProc *pt = &pi->type->Proc;
@@ -5111,16 +5081,26 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u
token = ast_token(pi->poly_def_node);
}
error(token, "Unspecialized polymorphic procedure '%.*s'", LIT(name));
+ pi->decl->proc_checked_state.store(ProcCheckedState_Unchecked);
return false;
}
if (pt->is_polymorphic && pt->is_poly_specialized) {
+ Entity *e = pi->decl->entity;
+ GB_ASSERT(e != nullptr);
if ((e->flags & EntityFlag_Used) == 0) {
// NOTE(bill, 2019-08-31): It was never used, don't check
+ // NOTE(bill, 2023-01-02): This may need to be checked again if it is used elsewhere?
+ pi->decl->proc_checked_state.store(ProcCheckedState_Unchecked);
return false;
}
}
+ CheckerContext ctx = make_checker_context(c);
+ defer (destroy_checker_context(&ctx));
+ reset_checker_context(&ctx, pi->file, untyped);
+ ctx.decl = pi->decl;
+
bool bounds_check = (pi->tags & ProcTag_bounds_check) != 0;
bool no_bounds_check = (pi->tags & ProcTag_no_bounds_check) != 0;
@@ -5143,24 +5123,34 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u
ctx.state_flags &= ~StateFlag_type_assert;
}
- if (pi->body != nullptr && e != nullptr) {
- GB_ASSERT((e->flags & EntityFlag_ProcBodyChecked) == 0);
- }
+ bool body_was_checked = check_proc_body(&ctx, pi->token, pi->decl, pi->type, pi->body);
- check_proc_body(&ctx, pi->token, pi->decl, pi->type, pi->body);
- MUTEX_GUARD_BLOCK(&pi->decl->proc_checked_mutex) {
- if (e != nullptr) {
- e->flags |= EntityFlag_ProcBodyChecked;
+ if (body_was_checked) {
+ pi->decl->proc_checked_state.store(ProcCheckedState_Checked);
+ if (pi->body) {
+ Entity *e = pi->decl->entity;
+ if (e != nullptr) {
+ e->flags |= EntityFlag_ProcBodyChecked;
+ }
+ }
+ } else {
+ pi->decl->proc_checked_state.store(ProcCheckedState_Unchecked);
+ if (pi->body) {
+ Entity *e = pi->decl->entity;
+ if (e != nullptr) {
+ e->flags &= ~EntityFlag_ProcBodyChecked;
+ }
}
- pi->decl->proc_checked = true;
}
+
add_untyped_expressions(&c->info, ctx.untyped);
+
return true;
}
GB_STATIC_ASSERT(sizeof(isize) == sizeof(void *));
-gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, ProcBodyQueue *q, UntypedExprInfoMap *untyped);
+gb_internal bool consume_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped);
gb_internal void check_unchecked_bodies(Checker *c) {
// NOTE(2021-02-26, bill): Sanity checker
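
    // [Editorial sketch, not part of the patch] check_proc_info above now
    // treats each body as a small state machine: mutex_try_lock skips, rather
    // than blocks on, a body another worker holds, and the atomic
    // Unchecked -> InProgress -> Checked state rolls back to Unchecked on
    // failure so the body can be retried. Condensed with std:: stand-ins:
    //
    //     #include <atomic>
    //     #include <mutex>
    //
    //     enum ProcCheckedState { Unchecked, InProgress, Checked };
    //
    //     struct DeclInfo {
    //         std::mutex proc_checked_mutex;
    //         std::atomic<ProcCheckedState> proc_checked_state{Unchecked};
    //     };
    //
    //     bool check_proc_body_once(DeclInfo *decl, bool (*do_check)(DeclInfo *)) {
    //         if (!decl->proc_checked_mutex.try_lock()) {
    //             return false;              // held elsewhere: skip, retry later
    //         }
    //         std::lock_guard<std::mutex> g(decl->proc_checked_mutex, std::adopt_lock);
    //         switch (decl->proc_checked_state.load()) {
    //         case InProgress: return false; // concurrent check in flight
    //         case Checked:    return true;  // already done
    //         case Unchecked:  break;
    //         }
    //         decl->proc_checked_state.store(InProgress);
    //         bool ok = do_check(decl);
    //         // roll back on failure so the body can be queued again
    //         decl->proc_checked_state.store(ok ? Checked : Unchecked);
    //         return ok;
    //     }
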
@@ -5168,12 +5158,15 @@ gb_internal void check_unchecked_bodies(Checker *c) {
// even ones which should not exist, due to the multithreaded nature of the parser
// HACK TODO(2021-02-26, bill): Actually fix this race condition
+ GB_ASSERT(c->procs_to_check.count == 0);
+
UntypedExprInfoMap untyped = {};
- map_init(&untyped, heap_allocator());
defer (map_destroy(&untyped));
- for (auto const &entry : c->info.minimum_dependency_set) {
- Entity *e = entry.ptr;
+ // use the `procs_to_check` array
+ global_procedure_body_in_worker_queue = false;
+
+ for (Entity *e : c->info.minimum_dependency_set) {
if (e == nullptr || e->kind != Entity_Procedure) {
continue;
}
@@ -5198,20 +5191,46 @@ gb_internal void check_unchecked_bodies(Checker *c) {
}
debugf("unchecked: %.*s\n", LIT(e->token.string));
- mpmc_enqueue(&c->procs_to_check_queue, pi);
+ check_procedure_later(c, pi);
}
}
- auto *q = &c->procs_to_check_queue;
- ProcInfo *pi = nullptr;
- while (mpmc_dequeue(q, &pi)) {
- Entity *e = pi->decl->entity;
- if (consume_proc_info_queue(c, pi, q, &untyped)) {
- add_dependency_to_set(c, e);
- GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked);
+ if (!global_procedure_body_in_worker_queue) {
+ for_array(i, c->procs_to_check) {
+ ProcInfo *pi = c->procs_to_check[i];
+ consume_proc_info(c, pi, &untyped);
}
+ array_clear(&c->procs_to_check);
+ } else {
+ thread_pool_wait();
}
+ global_procedure_body_in_worker_queue = false;
+ global_after_checking_procedure_bodies = true;
+}
+
+gb_internal void check_safety_all_procedures_for_unchecked(Checker *c) {
+ GB_ASSERT(DEBUG_CHECK_ALL_PROCEDURES);
+ UntypedExprInfoMap untyped = {};
+ defer (map_destroy(&untyped));
+
+ for_array(i, c->info.all_procedures) {
+ ProcInfo *pi = c->info.all_procedures[i];
+ GB_ASSERT(pi != nullptr);
+ GB_ASSERT(pi->decl != nullptr);
+ Entity *e = pi->decl->entity;
+ auto proc_checked_state = pi->decl->proc_checked_state.load();
+ if (e && ((e->flags & EntityFlag_ProcBodyChecked) == 0)) {
+ if ((e->flags & EntityFlag_Used) != 0) {
+ debugf("%.*s :: %s\n", LIT(e->token.string), type_to_string(e->type));
+ debugf("proc body unchecked\n");
+ debugf("Checked State: %s\n\n", ProcCheckedState_strings[proc_checked_state]);
+
+ consume_proc_info(c, pi, &untyped);
+ }
+ }
+ }
}
gb_internal void check_test_procedures(Checker *c) {
@@ -5222,8 +5241,7 @@ gb_internal void check_test_procedures(Checker *c) {
AstPackage *pkg = c->info.init_package;
Scope *s = pkg->scope;
- for (auto const &entry : build_context.test_names) {
- String name = entry.value;
+ for (String const &name : build_context.test_names) {
Entity *e = scope_lookup(s, name);
if (e == nullptr) {
Token tok = {};
@@ -5249,73 +5267,93 @@ gb_internal void check_test_procedures(Checker *c) {
gb_global std::atomic<isize> total_bodies_checked;
-gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, ProcBodyQueue *q, UntypedExprInfoMap *untyped) {
+gb_internal bool consume_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped) {
GB_ASSERT(pi->decl != nullptr);
+ switch (pi->decl->proc_checked_state.load()) {
+ case ProcCheckedState_InProgress:
+ return false;
+ case ProcCheckedState_Checked:
+ return true;
+ }
+
if (pi->decl->parent && pi->decl->parent->entity) {
Entity *parent = pi->decl->parent->entity;
// NOTE(bill): Only check a nested procedure if its parent's body has been checked first
// This is to prevent any possible race conditions in evaluation when multithreaded
// NOTE(bill): In single threaded mode, this should never happen
if (parent->kind == Entity_Procedure && (parent->flags & EntityFlag_ProcBodyChecked) == 0) {
- mpmc_enqueue(q, pi);
+ check_procedure_later(c, pi);
return false;
}
}
if (untyped) {
map_clear(untyped);
}
- bool ok = check_proc_info(c, pi, untyped, q);
- total_bodies_checked.fetch_add(1, std::memory_order_relaxed);
- return ok;
+ if (check_proc_info(c, pi, untyped)) {
+ total_bodies_checked.fetch_add(1, std::memory_order_relaxed);
+ return true;
+ }
+ return false;
}
-struct ThreadProcBodyData {
- Checker *checker;
- ProcBodyQueue *queue;
- u32 thread_index;
- u32 thread_count;
- ThreadProcBodyData *all_data;
+struct CheckProcedureBodyWorkerData {
+ Checker *c;
+ UntypedExprInfoMap untyped;
};
-gb_internal WORKER_TASK_PROC(thread_proc_body) {
- ThreadProcBodyData *bd = cast(ThreadProcBodyData *)data;
- Checker *c = bd->checker;
- GB_ASSERT(c != nullptr);
- ProcBodyQueue *this_queue = bd->queue;
+gb_global CheckProcedureBodyWorkerData *check_procedure_bodies_worker_data;
- UntypedExprInfoMap untyped = {};
- map_init(&untyped, heap_allocator());
+gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc) {
+ auto *wd = &check_procedure_bodies_worker_data[current_thread_index()];
+ UntypedExprInfoMap *untyped = &wd->untyped;
+ Checker *c = wd->c;
- for (ProcInfo *pi; mpmc_dequeue(this_queue, &pi); /**/) {
- consume_proc_info_queue(c, pi, this_queue, &untyped);
+ ProcInfo *pi = cast(ProcInfo *)data;
+
+ GB_ASSERT(pi->decl != nullptr);
+ if (pi->decl->parent && pi->decl->parent->entity) {
+ Entity *parent = pi->decl->parent->entity;
+ // NOTE(bill): Only check a nested procedure if its parent's body has been checked first
+ // This is prevent any possible race conditions in evaluation when multithreaded
+ // NOTE(bill): In single threaded mode, this should never happen
+ if (parent->kind == Entity_Procedure && (parent->flags & EntityFlag_ProcBodyChecked) == 0) {
+ thread_pool_add_task(check_proc_info_worker_proc, pi);
+ return 1;
+ }
+ }
+ map_clear(untyped);
+ if (check_proc_info(c, pi, untyped)) {
+ total_bodies_checked.fetch_add(1, std::memory_order_relaxed);
+ return 0;
}
+ return 1;
+}
- map_destroy(&untyped);
+gb_internal void check_init_worker_data(Checker *c) {
+ u32 thread_count = cast(u32)global_thread_pool.threads.count;
- semaphore_release(&c->procs_to_check_semaphore);
+ check_procedure_bodies_worker_data = gb_alloc_array(permanent_allocator(), CheckProcedureBodyWorkerData, thread_count);
- return 0;
+ for (isize i = 0; i < thread_count; i++) {
+ check_procedure_bodies_worker_data[i].c = c;
+ map_init(&check_procedure_bodies_worker_data[i].untyped);
+ }
}
gb_internal void check_procedure_bodies(Checker *c) {
GB_ASSERT(c != nullptr);
- u32 thread_count = cast(u32)gb_max(build_context.thread_count, 1);
- u32 worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work
- if (!build_context.threaded_checker) {
- worker_count = 0;
+ u32 thread_count = cast(u32)global_thread_pool.threads.count;
+ if (build_context.no_threaded_checker) {
+ thread_count = 1;
}
- if (worker_count == 0) {
- auto *this_queue = &c->procs_to_check_queue;
- UntypedExprInfoMap untyped = {};
- map_init(&untyped, heap_allocator());
-
- for (ProcInfo *pi = nullptr; mpmc_dequeue(this_queue, &pi); /**/) {
- consume_proc_info_queue(c, pi, this_queue, &untyped);
+ if (thread_count == 1) {
+ UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[0].untyped;
+ for_array(i, c->procs_to_check) {
+ consume_proc_info(c, c->procs_to_check[i], untyped);
}
-
- map_destroy(&untyped);
+ array_clear(&c->procs_to_check);
debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed));
return;
@@ -5323,53 +5361,14 @@ gb_internal void check_procedure_bodies(Checker *c) {
global_procedure_body_in_worker_queue = true;
- isize original_queue_count = c->procs_to_check_queue.count.load(std::memory_order_relaxed);
- isize load_count = (original_queue_count+thread_count-1)/thread_count;
-
- ThreadProcBodyData *thread_data = gb_alloc_array(permanent_allocator(), ThreadProcBodyData, thread_count);
- for (u32 i = 0; i < thread_count; i++) {
- ThreadProcBodyData *data = thread_data + i;
- data->checker = c;
- data->queue = gb_alloc_item(permanent_allocator(), ProcBodyQueue);
- data->thread_index = i;
- data->thread_count = thread_count;
- data->all_data = thread_data;
- // NOTE(bill) 2x the amount assumes on average only 1 nested procedure
- // TODO(bill): Determine a good heuristic
- mpmc_init(data->queue, heap_allocator(), next_pow2_isize(load_count*2));
- }
-
- // Distibute the work load into multiple queues
- for (isize j = 0; j < load_count; j++) {
- for (isize i = 0; i < thread_count; i++) {
- ProcBodyQueue *queue = thread_data[i].queue;
- ProcInfo *pi = nullptr;
- if (!mpmc_dequeue(&c->procs_to_check_queue, &pi)) {
- break;
- }
- mpmc_enqueue(queue, pi);
- }
- }
- isize total_queued = 0;
- for (isize i = 0; i < thread_count; i++) {
- ProcBodyQueue *queue = thread_data[i].queue;
- total_queued += queue->count.load();
- }
- GB_ASSERT(total_queued == original_queue_count);
-
- semaphore_post(&c->procs_to_check_semaphore, cast(i32)thread_count);
-
- for (isize i = 0; i < thread_count; i++) {
- global_thread_pool_add_task(thread_proc_body, thread_data+i);
+ isize prev_procs_to_check_count = c->procs_to_check.count;
+ for_array(i, c->procs_to_check) {
+ thread_pool_add_task(check_proc_info_worker_proc, c->procs_to_check[i]);
}
- global_thread_pool_wait();
- semaphore_wait(&c->procs_to_check_semaphore);
-
- isize global_remaining = c->procs_to_check_queue.count.load(std::memory_order_relaxed);
- GB_ASSERT(global_remaining == 0);
-
- debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed));
+ GB_ASSERT(prev_procs_to_check_count == c->procs_to_check.count);
+ array_clear(&c->procs_to_check);
+ thread_pool_wait();
global_procedure_body_in_worker_queue = false;
}
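check_procedure_bodies now submits each ProcInfo as an independent pool task over a single shared array, instead of pre-sharding the work into one queue per thread with a guessed load factor. A sketch of the same idea using std::thread and an atomic cursor as stand-ins for thread_pool_add_task/thread_pool_wait:

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main(void) {
    std::vector<int> procs_to_check(100);
    for (int i = 0; i < 100; i++) procs_to_check[i] = i;

    std::atomic<size_t> cursor{0};
    std::atomic<int>    bodies_checked{0};

    auto worker = [&] {
        for (;;) {
            size_t i = cursor.fetch_add(1);
            if (i >= procs_to_check.size()) return;
            // check_proc_info(...) would run here
            bodies_checked.fetch_add(1, std::memory_order_relaxed);
        }
    };

    std::vector<std::thread> threads;
    unsigned n = std::max(1u, std::thread::hardware_concurrency());
    for (unsigned t = 0; t < n; t++) threads.emplace_back(worker);
    for (auto &t : threads) t.join(); // the analogue of thread_pool_wait()

    std::printf("Total Procedure Bodies Checked: %d\n", bodies_checked.load());
    return 0;
}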
@@ -5542,7 +5541,7 @@ gb_internal void check_deferred_procedures(Checker *c) {
gb_internal void check_unique_package_names(Checker *c) {
StringMap<AstPackage *> pkgs = {}; // Key: package name
- string_map_init(&pkgs, heap_allocator(), 2*c->info.packages.entries.count);
+ string_map_init(&pkgs, 2*c->info.packages.entries.count);
defer (string_map_destroy(&pkgs));
for (auto const &entry : c->info.packages) {
@@ -5648,6 +5647,23 @@ gb_internal void add_type_info_for_type_definitions(Checker *c) {
}
}
+gb_internal void check_walk_all_dependencies(DeclInfo *decl) {
+ if (decl == nullptr) {
+ return;
+ }
+ for (DeclInfo *child = decl->next_child; child != nullptr; child = child->next_sibling) {
+ check_walk_all_dependencies(child);
+ }
+ add_deps_from_child_to_parent(decl);
+}
+
+gb_internal void check_update_dependency_tree_for_procedures(Checker *c) {
+ for (Entity *e : c->info.entities) {
+ DeclInfo *decl = e->decl_info;
+ check_walk_all_dependencies(decl);
+ }
+}
+
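check_walk_all_dependencies is a post-order walk: each child subtree is handled first via the next_child/next_sibling links, then add_deps_from_child_to_parent folds the child's dependency set upward. A self-contained sketch with a stand-in Decl node (the real DeclInfo stores entity sets, not ints):

#include <cstdio>
#include <set>

struct Decl {
    Decl *parent       = nullptr;
    Decl *next_child   = nullptr;
    Decl *next_sibling = nullptr;
    std::set<int> deps;
};

static void walk_all_dependencies(Decl *decl) {
    if (decl == nullptr) return;
    for (Decl *c = decl->next_child; c != nullptr; c = c->next_sibling) {
        walk_all_dependencies(c);
    }
    if (decl->parent) { // add_deps_from_child_to_parent
        decl->parent->deps.insert(decl->deps.begin(), decl->deps.end());
    }
}

int main(void) {
    Decl parent, a, b;
    a.parent = &parent; b.parent = &parent;
    parent.next_child = &a; a.next_sibling = &b;
    a.deps = {1}; b.deps = {2};
    walk_all_dependencies(&parent);
    std::printf("parent has %zu deps\n", parent.deps.size()); // 2
    return 0;
}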
gb_internal void check_parsed_files(Checker *c) {
TIME_SECTION("map full filepaths to scope");
add_type_info_type(&c->builtin_ctx, t_invalid);
@@ -5669,6 +5685,9 @@ gb_internal void check_parsed_files(Checker *c) {
}
}
+ TIME_SECTION("init worker data");
+ check_init_worker_data(c);
+
TIME_SECTION("create file scopes");
check_create_file_scopes(c);
@@ -5712,17 +5731,6 @@ gb_internal void check_parsed_files(Checker *c) {
check_scope_usage(c, f->scope);
}
- TIME_SECTION("add untyped expression values");
- // Add untyped expression values
- for (UntypedExprInfo u = {}; mpmc_dequeue(&c->global_untyped_queue, &u); /**/) {
- GB_ASSERT(u.expr != nullptr && u.info != nullptr);
- if (is_type_typed(u.info->type)) {
- compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type));
- }
- add_type_and_value(&c->info, u.expr, u.info->mode, u.info->type, u.info->value);
- }
-
-
TIME_SECTION("add basic type information");
// Add "Basic" type information
for (isize i = 0; i < Basic_COUNT; i++) {
@@ -5744,8 +5752,7 @@ gb_internal void check_parsed_files(Checker *c) {
DeclInfo *decl = e->decl_info;
ast_node(pl, ProcLit, decl->proc_lit);
if (pl->inlining == ProcInlining_inline) {
- for (auto const &entry : decl->deps) {
- Entity *dep = entry.ptr;
+ for (Entity *dep : decl->deps) {
if (dep == e) {
error(e->token, "Cannot inline recursive procedure '%.*s'", LIT(e->token.string));
break;
@@ -5764,16 +5771,22 @@ gb_internal void check_parsed_files(Checker *c) {
TIME_SECTION("check test procedures");
check_test_procedures(c);
- TIME_SECTION("check bodies have all been checked");
- check_unchecked_bodies(c);
-
TIME_SECTION("add type info for type definitions");
add_type_info_for_type_definitions(c);
check_merge_queues_into_arrays(c);
+ TIME_SECTION("update dependency tree for procedures");
+ check_update_dependency_tree_for_procedures(c);
+
TIME_SECTION("generate minimum dependency set");
generate_minimum_dependency_set(c, c->info.entry_point);
+ TIME_SECTION("check bodies have all been checked");
+ check_unchecked_bodies(c);
+
+ check_merge_queues_into_arrays(c);
+
+
TIME_SECTION("check entry point");
if (build_context.build_mode == BuildMode_Executable && !build_context.no_entry_point && build_context.command_kind != Command_test) {
Scope *s = c->info.init_scope;
@@ -5796,14 +5809,34 @@ gb_internal void check_parsed_files(Checker *c) {
}
}
+ thread_pool_wait();
+ GB_ASSERT(c->procs_to_check.count == 0);
+
+ if (DEBUG_CHECK_ALL_PROCEDURES) {
+ TIME_SECTION("check unchecked (safety measure)");
+ check_safety_all_procedures_for_unchecked(c);
+ }
+
+ debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed));
+
TIME_SECTION("check unique package names");
check_unique_package_names(c);
-
TIME_SECTION("sanity checks");
+ check_merge_queues_into_arrays(c);
GB_ASSERT(c->info.entity_queue.count.load(std::memory_order_relaxed) == 0);
GB_ASSERT(c->info.definition_queue.count.load(std::memory_order_relaxed) == 0);
+ TIME_SECTION("add untyped expression values");
+ // Add untyped expression values
+ for (UntypedExprInfo u = {}; mpmc_dequeue(&c->global_untyped_queue, &u); /**/) {
+ GB_ASSERT(u.expr != nullptr && u.info != nullptr);
+ if (is_type_typed(u.info->type)) {
+ compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type));
+ }
+ add_type_and_value(&c->builtin_ctx, u.expr, u.info->mode, u.info->type, u.info->value);
+ }
+
TIME_SECTION("sort init procedures");
check_sort_init_procedures(c);
@@ -5819,5 +5852,6 @@ gb_internal void check_parsed_files(Checker *c) {
}
}
+
TIME_SECTION("type check finish");
}
diff --git a/src/checker.hpp b/src/checker.hpp
index 1d6019b79..806eb2e51 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -142,9 +142,28 @@ typedef DECL_ATTRIBUTE_PROC(DeclAttributeProc);
gb_internal void check_decl_attributes(CheckerContext *c, Array<Ast *> const &attributes, DeclAttributeProc *proc, AttributeContext *ac);
+enum ProcCheckedState : u8 {
+ ProcCheckedState_Unchecked,
+ ProcCheckedState_InProgress,
+ ProcCheckedState_Checked,
+
+ ProcCheckedState_COUNT
+};
+
+gb_global char const *ProcCheckedState_strings[ProcCheckedState_COUNT] = {
+ "Unchecked",
+ "In Progress",
+ "Checked",
+};
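Replacing the old proc_checked bool with this three-state enum lets a compare-and-swap decide which thread gets to check a body exactly once. A minimal model under assumed semantics, not the checker's actual control flow:

#include <atomic>
#include <cstdio>

enum ProcCheckedState : unsigned char {
    ProcCheckedState_Unchecked,
    ProcCheckedState_InProgress,
    ProcCheckedState_Checked,
};

static std::atomic<ProcCheckedState> state{ProcCheckedState_Unchecked};

static bool try_check_body(int thread_id) {
    ProcCheckedState expected = ProcCheckedState_Unchecked;
    if (!state.compare_exchange_strong(expected, ProcCheckedState_InProgress)) {
        return false; // another thread is checking it, or it is already done
    }
    std::printf("thread %d checks the body\n", thread_id);
    state.store(ProcCheckedState_Checked);
    return true;
}

int main(void) {
    try_check_body(0); // wins the CAS and does the work
    try_check_body(1); // loses: state is already Checked
    return 0;
}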
+
// DeclInfo is used to store information of certain declarations to allow for "any order" usage
struct DeclInfo {
DeclInfo * parent; // NOTE(bill): only used for procedure literals at the moment
+
+ BlockingMutex next_mutex;
+ DeclInfo * next_child;
+ DeclInfo * next_sibling;
+
Scope * scope;
Entity *entity;
@@ -157,7 +176,7 @@ struct DeclInfo {
Type * gen_proc_type; // Precalculated
bool is_using;
bool where_clauses_evaluated;
- bool proc_checked;
+ std::atomic<ProcCheckedState> proc_checked_state;
BlockingMutex proc_checked_mutex;
isize defer_used;
bool defer_use_checked;
@@ -165,8 +184,14 @@ struct DeclInfo {
CommentGroup *comment;
CommentGroup *docs;
- PtrSet<Entity *> deps;
+ RwMutex deps_mutex;
+ PtrSet<Entity *> deps;
+
+ RwMutex type_info_deps_mutex;
PtrSet<Type *> type_info_deps;
+
+ BlockingMutex type_and_value_mutex;
+
Array<BlockLabel> labels;
};
@@ -198,7 +223,7 @@ enum ScopeFlag : i32 {
ScopeFlag_ContextDefined = 1<<16,
};
-enum { DEFAULT_SCOPE_CAPACITY = 29 };
+enum { DEFAULT_SCOPE_CAPACITY = 32 };
struct Scope {
Ast * node;
@@ -206,7 +231,7 @@ struct Scope {
std::atomic<Scope *> next;
std::atomic<Scope *> head_child;
- BlockingMutex mutex;
+ RwMutex mutex;
StringMap<Entity *> elements;
PtrSet<Scope *> imported;
@@ -297,6 +322,16 @@ struct LoadFileCache {
StringMap<u64> hashes;
};
+struct GenProcsData {
+ Array<Entity *> procs;
+ RwMutex mutex;
+};
+
+struct GenTypesData {
+ Array<Entity *> types;
+ RwMutex mutex;
+};
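GenProcsData/GenTypesData pair each generated-entity list with its own reader/writer lock, so scans for an existing instantiation can run concurrently and only insertion is exclusive. A sketch with std::shared_mutex standing in for RwMutex; note that a real implementation would re-check for the entry under the writer lock before inserting:

#include <cstdio>
#include <shared_mutex>
#include <string>
#include <vector>

struct GenTypesData {
    std::vector<std::string> types;
    std::shared_mutex        mutex;
};

static bool find_existing(GenTypesData *gd, std::string const &name) {
    std::shared_lock<std::shared_mutex> lock(gd->mutex); // many readers
    for (auto const &t : gd->types) if (t == name) return true;
    return false;
}

static void add_generated(GenTypesData *gd, std::string const &name) {
    std::unique_lock<std::shared_mutex> lock(gd->mutex); // one writer
    gd->types.push_back(name);
}

int main(void) {
    GenTypesData gd;
    if (!find_existing(&gd, "Foo(int)")) add_generated(&gd, "Foo(int)");
    std::printf("cached: %d\n", (int)find_existing(&gd, "Foo(int)"));
    return 0;
}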
+
// CheckerInfo stores all the symbol information for a type-checked program
struct CheckerInfo {
Checker *checker;
@@ -311,7 +346,7 @@ struct CheckerInfo {
Scope * init_scope;
Entity * entry_point;
PtrSet<Entity *> minimum_dependency_set;
- PtrSet<isize> minimum_dependency_type_info_set;
+ PtrMap</*type info index*/isize, /*min dep index*/isize> minimum_dependency_type_info_set;
@@ -324,30 +359,17 @@ struct CheckerInfo {
// Below are accessed within procedures
- // NOTE(bill): If the semantic checker (check_proc_body) is to ever to be multithreaded,
- // these variables will be of contention
-
- Semaphore collect_semaphore;
-
+ RwMutex global_untyped_mutex;
UntypedExprInfoMap global_untyped; // NOTE(bill): This needs to be a map and not on the Ast
// as it needs to be iterated across afterwards
- BlockingMutex global_untyped_mutex;
BlockingMutex builtin_mutex;
- // NOT recursive & only used at the end of `check_proc_body`
- // and in `add_dependency`.
- // This is a possible source of contention but probably not
- // too much of a problem in practice
- BlockingMutex deps_mutex;
-
BlockingMutex type_and_value_mutex;
RecursiveMutex lazy_mutex; // Mutex required for lazy type checking of specific files
- RecursiveMutex gen_procs_mutex;
- RecursiveMutex gen_types_mutex;
- PtrMap<Ast *, Array<Entity *> > gen_procs; // Key: Ast * | Identifier -> Entity
- PtrMap<Type *, Array<Entity *> > gen_types;
+ RwMutex gen_types_mutex;
+ PtrMap<Type *, GenTypesData > gen_types;
BlockingMutex type_info_mutex; // NOT recursive
Array<Type *> type_info_types;
@@ -356,11 +378,6 @@ struct CheckerInfo {
BlockingMutex foreign_mutex; // NOT recursive
StringMap<Entity *> foreigns;
- // only used by 'odin query'
- bool allow_identifier_uses;
- BlockingMutex identifier_uses_mutex;
- Array<Ast *> identifier_uses;
-
// NOTE(bill): These are actually MPSC queues
// TODO(bill): Convert them to be MPSC queues
MPMCQueue<Entity *> definition_queue;
@@ -375,6 +392,9 @@ struct CheckerInfo {
BlockingMutex load_file_mutex;
StringMap<LoadFileCache *> load_file_cache;
+
+ BlockingMutex all_procedures_mutex;
+ Array<ProcInfo *> all_procedures;
};
struct CheckerContext {
@@ -418,8 +438,6 @@ struct CheckerContext {
Scope * polymorphic_scope;
Ast *assignment_lhs_hint;
-
- ProcBodyQueue *procs_to_check_queue;
};
@@ -430,9 +448,7 @@ struct Checker {
CheckerContext builtin_ctx;
MPMCQueue<Entity *> procs_with_deferred_to_check;
-
- ProcBodyQueue procs_to_check_queue;
- Semaphore procs_to_check_semaphore;
+ Array<ProcInfo *> procs_to_check;
// TODO(bill): Technically MPSC queue
MPMCQueue<UntypedExprInfo> global_untyped_queue;
@@ -462,10 +478,10 @@ gb_internal Entity *entity_of_node(Ast *expr);
gb_internal Entity *scope_lookup_current(Scope *s, String const &name);
gb_internal Entity *scope_lookup (Scope *s, String const &name);
gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **scope_, Entity **entity_);
-gb_internal Entity *scope_insert (Scope *s, Entity *entity, bool use_mutex=true);
+gb_internal Entity *scope_insert (Scope *s, Entity *entity);
-gb_internal void add_type_and_value (CheckerInfo *i, Ast *expression, AddressingMode mode, Type *type, ExactValue value);
+gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue value);
gb_internal ExprInfo *check_get_expr_info (CheckerContext *c, Ast *expr);
gb_internal void add_untyped (CheckerContext *c, Ast *expression, AddressingMode mode, Type *basic_type, ExactValue value);
gb_internal void add_entity_use (CheckerContext *c, Ast *identifier, Entity *entity);
diff --git a/src/common.cpp b/src/common.cpp
index 3b6ea59e8..988a992d0 100644
--- a/src/common.cpp
+++ b/src/common.cpp
@@ -43,9 +43,9 @@ gb_internal void debugf(char const *fmt, ...);
#error Odin on Windows requires a 64-bit build-system. The 'Developer Command Prompt' for VS still defaults to 32-bit shell. The 64-bit shell can be found under the name 'x64 Native Tools Command Prompt' for VS. For more information, please see https://odin-lang.org/docs/install/#for-windows
#endif
-#include "threading.cpp"
#include "unicode.cpp"
#include "array.cpp"
+#include "threading.cpp"
#include "queue.cpp"
#include "common_memory.cpp"
#include "string.cpp"
@@ -373,7 +373,7 @@ gb_internal char const *string_intern(String const &string) {
}
gb_internal void init_string_interner(void) {
- map_init(&string_intern_map, heap_allocator());
+ map_init(&string_intern_map);
}
diff --git a/src/common_memory.cpp b/src/common_memory.cpp
index c8a62756a..4c77521e1 100644
--- a/src/common_memory.cpp
+++ b/src/common_memory.cpp
@@ -14,36 +14,24 @@ gb_internal gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U
gb_internal gb_inline i64 align_formula(i64 size, i64 align) {
- if (align > 0) {
- i64 result = size + align-1;
- return result - result%align;
- }
- return size;
+ i64 result = size + align-1;
+ return result - (i64)((u64)result%(u64)align);
}
gb_internal gb_inline isize align_formula_isize(isize size, isize align) {
- if (align > 0) {
- isize result = size + align-1;
- return result - result%align;
- }
- return size;
+ isize result = size + align-1;
+ return result - (isize)((usize)result%(usize)align);
}
gb_internal gb_inline void *align_formula_ptr(void *ptr, isize align) {
- if (align > 0) {
- uintptr result = (cast(uintptr)ptr) + align-1;
- return (void *)(result - result%align);
- }
- return ptr;
+ uintptr result = (cast(uintptr)ptr) + align-1;
+ return (void *)(result - result%align);
}
gb_global BlockingMutex global_memory_block_mutex;
-gb_global BlockingMutex global_memory_allocator_mutex;
gb_internal void platform_virtual_memory_init(void);
gb_internal void virtual_memory_init(void) {
- mutex_init(&global_memory_block_mutex);
- mutex_init(&global_memory_allocator_mutex);
platform_virtual_memory_init();
}
@@ -57,9 +45,9 @@ struct MemoryBlock {
};
struct Arena {
- MemoryBlock *curr_block;
- isize minimum_block_size;
- bool ignore_mutex;
+ MemoryBlock * curr_block;
+ isize minimum_block_size;
+ BlockingMutex mutex;
};
enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll };
@@ -85,10 +73,7 @@ gb_internal isize arena_align_forward_offset(Arena *arena, isize alignment) {
gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
GB_ASSERT(gb_is_power_of_two(alignment));
- BlockingMutex *mutex = &global_memory_allocator_mutex;
- if (!arena->ignore_mutex) {
- mutex_lock(mutex);
- }
+ mutex_lock(&arena->mutex);
isize size = 0;
if (arena->curr_block != nullptr) {
@@ -115,9 +100,7 @@ gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
curr_block->used += size;
GB_ASSERT(curr_block->used <= curr_block->size);
- if (!arena->ignore_mutex) {
- mutex_unlock(mutex);
- }
+ mutex_unlock(&arena->mutex);
// NOTE(bill): memory will be zeroed by default due to virtual memory
return ptr;
@@ -306,7 +289,7 @@ gb_internal GB_ALLOCATOR_PROC(arena_allocator_proc) {
}
-gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE, true};
+gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE};
gb_internal gbAllocator permanent_allocator() {
return arena_allocator(&permanent_arena);
}
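Two changes combine here: the arena carries its own mutex instead of funnelling every allocation through global_memory_allocator_mutex, and permanent_arena is thread-local, so the common allocation path never contends at all. A sketch with plain calloc standing in for the virtual-memory block allocator:

#include <cstdio>
#include <cstdlib>
#include <mutex>

struct Arena {
    unsigned char *block = nullptr;
    size_t         used  = 0;
    size_t         size  = 0;
    std::mutex     mutex; // per-arena, replaces the single global lock
};

static void *arena_alloc(Arena *a, size_t n) {
    std::lock_guard<std::mutex> lock(a->mutex);
    if (a->block == nullptr) {
        a->size  = 8u * 1024u * 1024u;
        a->block = (unsigned char *)std::calloc(1, a->size);
    }
    if (a->used + n > a->size) return nullptr; // real code chains a new block
    void *ptr = a->block + a->used;
    a->used += n;
    return ptr;
}

static thread_local Arena permanent_arena; // one arena per thread

int main(void) {
    void *p = arena_alloc(&permanent_arena, 64);
    std::printf("allocated at %p\n", p);
    return 0;
}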
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index bab97158d..2aefe29eb 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -53,13 +53,12 @@ gb_internal void odin_doc_writer_item_tracker_init(OdinDocWriterItemTracker<T> *
gb_internal void odin_doc_writer_prepare(OdinDocWriter *w) {
w->state = OdinDocWriterState_Preparing;
- gbAllocator a = heap_allocator();
- string_map_init(&w->string_cache, a);
+ string_map_init(&w->string_cache);
- map_init(&w->file_cache, a);
- map_init(&w->pkg_cache, a);
- map_init(&w->entity_cache, a);
- map_init(&w->type_cache, a);
+ map_init(&w->file_cache);
+ map_init(&w->pkg_cache);
+ map_init(&w->entity_cache);
+ map_init(&w->type_cache);
odin_doc_writer_item_tracker_init(&w->files, 1);
odin_doc_writer_item_tracker_init(&w->pkgs, 1);
diff --git a/src/entity.cpp b/src/entity.cpp
index 0605a293a..b92ba825f 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -130,7 +130,7 @@ enum EntityConstantFlags : u32 {
EntityConstantFlag_ImplicitEnumValue = 1<<0,
};
-enum ProcedureOptimizationMode : u32 {
+enum ProcedureOptimizationMode : u8 {
ProcedureOptimizationMode_Default,
ProcedureOptimizationMode_None,
ProcedureOptimizationMode_Minimal,
@@ -154,7 +154,6 @@ struct TypeNameObjCMetadata {
gb_internal TypeNameObjCMetadata *create_type_name_obj_c_metadata() {
TypeNameObjCMetadata *md = gb_alloc_item(permanent_allocator(), TypeNameObjCMetadata);
md->mutex = gb_alloc_item(permanent_allocator(), BlockingMutex);
- mutex_init(md->mutex);
array_init(&md->type_entries, heap_allocator());
array_init(&md->value_entries, heap_allocator());
return md;
@@ -234,6 +233,9 @@ struct Entity {
String link_name;
String link_prefix;
DeferredProcedure deferred_procedure;
+
+ struct GenProcsData *gen_procs;
+ BlockingMutex gen_procs_mutex;
ProcedureOptimizationMode optimization_mode;
bool is_foreign : 1;
bool is_export : 1;
diff --git a/src/error.cpp b/src/error.cpp
index 085e1a8dd..a0bb4ad5b 100644
--- a/src/error.cpp
+++ b/src/error.cpp
@@ -22,10 +22,6 @@ gb_internal bool any_errors(void) {
}
gb_internal void init_global_error_collector(void) {
- mutex_init(&global_error_collector.mutex);
- mutex_init(&global_error_collector.block_mutex);
- mutex_init(&global_error_collector.error_out_mutex);
- mutex_init(&global_error_collector.string_mutex);
array_init(&global_error_collector.errors, heap_allocator());
array_init(&global_error_collector.error_buffer, heap_allocator());
array_init(&global_file_path_strings, heap_allocator(), 1, 4096);
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 146cb2944..fef222817 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -240,11 +240,10 @@ gb_internal lbValue lb_equal_proc_for_type(lbModule *m, Type *type) {
LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, left_tag.value, block_false->block, cast(unsigned)type->Union.variants.count);
- for_array(i, type->Union.variants) {
+ for (Type *v : type->Union.variants) {
lbBlock *case_block = lb_create_block(p, "bcase");
lb_start_block(p, case_block);
- Type *v = type->Union.variants[i];
lbValue case_tag = lb_const_union_tag(p->module, type, v);
Type *vp = alloc_type_pointer(v);
@@ -374,11 +373,10 @@ gb_internal lbValue lb_hasher_proc_for_type(lbModule *m, Type *type) {
LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, tag.value, end_block->block, cast(unsigned)type->Union.variants.count);
- for_array(i, type->Union.variants) {
+ for (Type *v : type->Union.variants) {
lbBlock *case_block = lb_create_block(p, "bcase");
lb_start_block(p, case_block);
- Type *v = type->Union.variants[i];
lbValue case_tag = lb_const_union_tag(p->module, type, v);
lbValue variant_hasher = lb_hasher_proc_for_type(m, v);
@@ -732,6 +730,8 @@ gb_internal lbValue lb_map_set_proc_for_type(lbModule *m, Type *type) {
gb_internal lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, Ast *expr, lbProcedure *parent) {
+ MUTEX_GUARD(&m->gen->anonymous_proc_lits_mutex);
+
lbProcedure **found = map_get(&m->gen->anonymous_proc_lits, expr);
if (found) {
return lb_find_procedure_value_from_entity(m, (*found)->entity);
@@ -990,10 +990,6 @@ gb_internal lbProcedure *lb_create_startup_type_info(lbModule *m) {
if (build_context.disallow_rtti) {
return nullptr;
}
- LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
- lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level);
- LLVMFinalizeFunctionPassManager(default_function_pass_manager);
-
Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl);
lbProcedure *p = lb_create_dummy_procedure(m, str_lit(LB_STARTUP_TYPE_INFO_PROC_NAME), proc_type);
@@ -1014,9 +1010,6 @@ gb_internal lbProcedure *lb_create_startup_type_info(lbModule *m) {
gb_printf_err("\n\n\n\n");
LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
}
-
- lb_run_function_pass_manager(default_function_pass_manager, p);
-
return p;
}
@@ -1037,11 +1030,6 @@ gb_internal void lb_finalize_objc_names(lbProcedure *p) {
}
lbModule *m = p->module;
- LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
- lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level);
- LLVMFinalizeFunctionPassManager(default_function_pass_manager);
-
-
auto args = array_make<lbValue>(permanent_allocator(), 1);
LLVMSetLinkage(p->value, LLVMInternalLinkage);
@@ -1061,16 +1049,9 @@ gb_internal void lb_finalize_objc_names(lbProcedure *p) {
}
lb_end_procedure_body(p);
-
- lb_run_function_pass_manager(default_function_pass_manager, p);
-
}
gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProcedure *startup_type_info, lbProcedure *objc_names, Array<lbGlobalVariable> &global_variables) { // Startup Runtime
- LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(main_module->mod);
- lb_populate_function_pass_manager(main_module, default_function_pass_manager, false, build_context.optimization_level);
- LLVMFinalizeFunctionPassManager(default_function_pass_manager);
-
Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_Odin);
lbProcedure *p = lb_create_dummy_procedure(main_module, str_lit(LB_STARTUP_RUNTIME_PROC_NAME), proc_type);
@@ -1175,11 +1156,504 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc
LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
}
- lb_run_function_pass_manager(default_function_pass_manager, p);
-
return p;
}
+gb_internal WORKER_TASK_PROC(lb_generate_procedures_and_types_per_module) {
+ lbModule *m = cast(lbModule *)data;
+ for (Entity *e : m->global_procedures_and_types_to_create) {
+ String mangled_name = lb_get_entity_name(m, e);
+
+ switch (e->kind) {
+ case Entity_TypeName:
+ lb_type(m, e->type);
+ break;
+ case Entity_Procedure:
+ array_add(&m->procedures_to_generate, lb_create_procedure(m, e));
+ break;
+ }
+ }
+ return 0;
+}
+
+gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, CheckerInfo *info, bool do_threading) {
+ auto *min_dep_set = &info->minimum_dependency_set;
+
+ for (Entity *e : info->entities) {
+ String name = e->token.string;
+ Scope * scope = e->scope;
+
+ if ((scope->flags & ScopeFlag_File) == 0) {
+ continue;
+ }
+
+ Scope *package_scope = scope->parent;
+ GB_ASSERT(package_scope->flags & ScopeFlag_Pkg);
+
+ switch (e->kind) {
+ case Entity_Variable:
+ // NOTE(bill): Handled above as it requires a specific load order
+ continue;
+ case Entity_ProcGroup:
+ continue;
+
+ case Entity_TypeName:
+ case Entity_Procedure:
+ break;
+ case Entity_Constant:
+ if (build_context.ODIN_DEBUG) {
+ add_debug_info_for_global_constant_from_entity(gen, e);
+ }
+ break;
+ }
+
+ bool polymorphic_struct = false;
+ if (e->type != nullptr && e->kind == Entity_TypeName) {
+ Type *bt = base_type(e->type);
+ if (bt->kind == Type_Struct) {
+ polymorphic_struct = is_type_polymorphic(bt);
+ }
+ }
+
+ if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) {
+ // NOTE(bill): Nothing depends upon it so doesn't need to be built
+ continue;
+ }
+
+ lbModule *m = &gen->default_module;
+ if (USE_SEPARATE_MODULES) {
+ m = lb_pkg_module(gen, e->pkg);
+ }
+
+ array_add(&m->global_procedures_and_types_to_create, e);
+ }
+
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (do_threading) {
+ thread_pool_add_task(lb_generate_procedures_and_types_per_module, m);
+ } else {
+ lb_generate_procedures_and_types_per_module(m);
+ }
+ }
+
+ thread_pool_wait();
+}
+
+gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p);
+
+
+gb_internal bool lb_is_module_empty(lbModule *m) {
+ if (LLVMGetFirstFunction(m->mod) == nullptr &&
+ LLVMGetFirstGlobal(m->mod) == nullptr) {
+ return true;
+ }
+ for (auto fn = LLVMGetFirstFunction(m->mod); fn != nullptr; fn = LLVMGetNextFunction(fn)) {
+ if (LLVMGetFirstBasicBlock(fn) != nullptr) {
+ return false;
+ }
+ }
+
+ for (auto g = LLVMGetFirstGlobal(m->mod); g != nullptr; g = LLVMGetNextGlobal(g)) {
+ if (LLVMGetLinkage(g) == LLVMExternalLinkage) {
+ continue;
+ }
+ if (!LLVMIsExternallyInitialized(g)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+struct lbLLVMEmitWorker {
+ LLVMTargetMachineRef target_machine;
+ LLVMCodeGenFileType code_gen_file_type;
+ String filepath_obj;
+ lbModule *m;
+};
+
+gb_internal WORKER_TASK_PROC(lb_llvm_emit_worker_proc) {
+ GB_ASSERT(MULTITHREAD_OBJECT_GENERATION);
+
+ char *llvm_error = nullptr;
+
+ auto wd = cast(lbLLVMEmitWorker *)data;
+
+ if (LLVMTargetMachineEmitToFile(wd->target_machine, wd->m->mod, cast(char *)wd->filepath_obj.text, wd->code_gen_file_type, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ gb_exit(1);
+ }
+
+ return 0;
+}
+
+
+gb_internal void lb_llvm_function_pass_per_function_internal(lbModule *module, lbProcedure *p, lbFunctionPassManagerKind pass_manager_kind = lbFunctionPassManager_default) {
+ LLVMPassManagerRef pass_manager = module->function_pass_managers[pass_manager_kind];
+ lb_run_function_pass_manager(pass_manager, p);
+}
+
+gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_per_module) {
+ lbModule *m = cast(lbModule *)data;
+ {
+ GB_ASSERT(m->function_pass_managers[lbFunctionPassManager_default] == nullptr);
+
+ m->function_pass_managers[lbFunctionPassManager_default] = LLVMCreateFunctionPassManagerForModule(m->mod);
+ m->function_pass_managers[lbFunctionPassManager_default_without_memcpy] = LLVMCreateFunctionPassManagerForModule(m->mod);
+ m->function_pass_managers[lbFunctionPassManager_minimal] = LLVMCreateFunctionPassManagerForModule(m->mod);
+ m->function_pass_managers[lbFunctionPassManager_size] = LLVMCreateFunctionPassManagerForModule(m->mod);
+ m->function_pass_managers[lbFunctionPassManager_speed] = LLVMCreateFunctionPassManagerForModule(m->mod);
+
+ LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default]);
+ LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default_without_memcpy]);
+ LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_minimal]);
+ LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_size]);
+ LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_speed]);
+
+ lb_populate_function_pass_manager(m, m->function_pass_managers[lbFunctionPassManager_default], false, build_context.optimization_level);
+ lb_populate_function_pass_manager(m, m->function_pass_managers[lbFunctionPassManager_default_without_memcpy], true, build_context.optimization_level);
+ lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_minimal], 0);
+ lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_size], 1);
+ lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_speed], 2);
+
+ LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default]);
+ LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default_without_memcpy]);
+ LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_minimal]);
+ LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_size]);
+ LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_speed]);
+ }
+
+ if (m == &m->gen->default_module) {
+ lb_llvm_function_pass_per_function_internal(m, m->gen->startup_type_info);
+ lb_llvm_function_pass_per_function_internal(m, m->gen->startup_runtime);
+ lb_llvm_function_pass_per_function_internal(m, m->gen->objc_names);
+ }
+
+ for (lbProcedure *p : m->procedures_to_generate) {
+ if (p->body != nullptr) { // Build Procedure
+ lbFunctionPassManagerKind pass_manager_kind = lbFunctionPassManager_default;
+ if (p->flags & lbProcedureFlag_WithoutMemcpyPass) {
+ pass_manager_kind = lbFunctionPassManager_default_without_memcpy;
+ } else {
+ if (p->entity && p->entity->kind == Entity_Procedure) {
+ switch (p->entity->Procedure.optimization_mode) {
+ case ProcedureOptimizationMode_None:
+ case ProcedureOptimizationMode_Minimal:
+ pass_manager_kind = lbFunctionPassManager_minimal;
+ break;
+ case ProcedureOptimizationMode_Size:
+ pass_manager_kind = lbFunctionPassManager_size;
+ break;
+ case ProcedureOptimizationMode_Speed:
+ pass_manager_kind = lbFunctionPassManager_speed;
+ break;
+ }
+ }
+ }
+
+ lb_llvm_function_pass_per_function_internal(m, p, pass_manager_kind);
+ }
+ }
+
+ for (auto const &entry : m->equal_procs) {
+ lbProcedure *p = entry.value;
+ lb_llvm_function_pass_per_function_internal(m, p);
+ }
+ for (auto const &entry : m->hasher_procs) {
+ lbProcedure *p = entry.value;
+ lb_llvm_function_pass_per_function_internal(m, p);
+ }
+ for (auto const &entry : m->map_get_procs) {
+ lbProcedure *p = entry.value;
+ lb_llvm_function_pass_per_function_internal(m, p);
+ }
+ for (auto const &entry : m->map_set_procs) {
+ lbProcedure *p = entry.value;
+ lb_llvm_function_pass_per_function_internal(m, p);
+ }
+
+ return 0;
+}
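Each module now builds its five pass managers once, up front, and every procedure selects one by lbFunctionPassManagerKind instead of constructing managers at each call site. A sketch of the selection logic with plain ints standing in for LLVMPassManagerRef handles:

#include <cstdio>

enum PassManagerKind {
    PassManager_default,
    PassManager_default_without_memcpy,
    PassManager_minimal,
    PassManager_size,
    PassManager_speed,
    PassManager_COUNT
};

enum OptimizationMode { Opt_Default, Opt_None, Opt_Minimal, Opt_Size, Opt_Speed };

static PassManagerKind kind_for(OptimizationMode mode, bool without_memcpy) {
    if (without_memcpy) return PassManager_default_without_memcpy;
    switch (mode) {
    case Opt_None:
    case Opt_Minimal: return PassManager_minimal;
    case Opt_Size:    return PassManager_size;
    case Opt_Speed:   return PassManager_speed;
    default:          return PassManager_default;
    }
}

int main(void) {
    int pass_managers[PassManager_COUNT] = {10, 11, 12, 13, 14}; // stand-in handles
    std::printf("speed proc uses manager %d\n",
                pass_managers[kind_for(Opt_Speed, false)]);
    return 0;
}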
+
+
+struct lbLLVMModulePassWorkerData {
+ lbModule *m;
+ LLVMTargetMachineRef target_machine;
+};
+
+gb_internal WORKER_TASK_PROC(lb_llvm_module_pass_worker_proc) {
+ auto wd = cast(lbLLVMModulePassWorkerData *)data;
+
+ lb_run_remove_unused_function_pass(wd->m);
+ lb_run_remove_unused_globals_pass(wd->m);
+
+ LLVMPassManagerRef module_pass_manager = LLVMCreatePassManager();
+ lb_populate_module_pass_manager(wd->target_machine, module_pass_manager, build_context.optimization_level);
+ LLVMRunPassManager(module_pass_manager, wd->m->mod);
+ return 0;
+}
+
+
+
+gb_internal WORKER_TASK_PROC(lb_generate_procedures_worker_proc) {
+ lbModule *m = cast(lbModule *)data;
+ for (isize i = 0; i < m->procedures_to_generate.count; i++) {
+ lbProcedure *p = m->procedures_to_generate[i];
+ lb_generate_procedure(p->module, p);
+ }
+ return 0;
+}
+
+gb_internal void lb_generate_procedures(lbGenerator *gen, bool do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (do_threading) {
+ thread_pool_add_task(lb_generate_procedures_worker_proc, m);
+ } else {
+ lb_generate_procedures_worker_proc(m);
+ }
+ }
+
+ thread_pool_wait();
+}
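This fan-out shape recurs through the backend: run the worker as a pool task per module when threading is enabled, call the same worker inline otherwise, then wait. A sketch using std::async as a stand-in for thread_pool_add_task/thread_pool_wait:

#include <cstdio>
#include <future>
#include <vector>

static int process_module(void *data) {
    std::printf("module %d\n", *(int *)data);
    return 0;
}

static void for_each_module(std::vector<int> &modules, bool do_threading) {
    std::vector<std::future<int>> pending;
    for (int &m : modules) {
        if (do_threading) {
            pending.push_back(std::async(std::launch::async, process_module, &m));
        } else {
            process_module(&m);
        }
    }
    for (auto &f : pending) f.get(); // the analogue of thread_pool_wait()
}

int main(void) {
    std::vector<int> modules = {0, 1, 2};
    for_each_module(modules, true);
    return 0;
}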
+
+gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc) {
+ lbModule *m = cast(lbModule *)data;
+ for (isize i = 0; i < m->missing_procedures_to_check.count; i++) {
+ lbProcedure *p = m->missing_procedures_to_check[i];
+ debugf("Generate missing procedure: %.*s\n", LIT(p->name));
+ lb_generate_procedure(m, p);
+ }
+ return 0;
+}
+
+gb_internal void lb_generate_missing_procedures(lbGenerator *gen, bool do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ // NOTE(bill): procedures may be added during generation
+ if (do_threading) {
+ thread_pool_add_task(lb_generate_missing_procedures_to_check_worker_proc, m);
+ } else {
+ lb_generate_missing_procedures_to_check_worker_proc(m);
+ }
+ }
+
+ thread_pool_wait();
+}
+
+gb_internal void lb_debug_info_complete_types_and_finalize(lbGenerator *gen) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (m->debug_builder != nullptr) {
+ lb_debug_complete_types(m);
+ LLVMDIBuilderFinalize(m->debug_builder);
+ }
+ }
+}
+
+gb_internal void lb_llvm_function_passes(lbGenerator *gen, bool do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (do_threading) {
+ thread_pool_add_task(lb_llvm_function_pass_per_module, m);
+ } else {
+ lb_llvm_function_pass_per_module(m);
+ }
+ }
+ thread_pool_wait();
+}
+
+
+gb_internal void lb_llvm_module_passes(lbGenerator *gen, bool do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData);
+ wd->m = m;
+ wd->target_machine = m->target_machine;
+
+ if (do_threading) {
+ thread_pool_add_task(lb_llvm_module_pass_worker_proc, wd);
+ } else {
+ lb_llvm_module_pass_worker_proc(wd);
+ }
+ }
+ thread_pool_wait();
+}
+
+
+gb_internal String lb_filepath_ll_for_module(lbModule *m) {
+ String path = concatenate3_strings(permanent_allocator(),
+ build_context.build_paths[BuildPath_Output].basename,
+ STR_LIT("/"),
+ build_context.build_paths[BuildPath_Output].name
+ );
+
+ if (m->pkg) {
+ path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
+ } else if (USE_SEPARATE_MODULES) {
+ path = concatenate_strings(permanent_allocator(), path, STR_LIT("-builtin"));
+ }
+ path = concatenate_strings(permanent_allocator(), path, STR_LIT(".ll"));
+
+ return path;
+}
+gb_internal String lb_filepath_obj_for_module(lbModule *m) {
+ String path = concatenate3_strings(permanent_allocator(),
+ build_context.build_paths[BuildPath_Output].basename,
+ STR_LIT("/"),
+ build_context.build_paths[BuildPath_Output].name
+ );
+
+ if (m->pkg) {
+ path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
+ }
+
+ String ext = {};
+
+ if (build_context.build_mode == BuildMode_Assembly) {
+ ext = STR_LIT(".S");
+ } else {
+ if (is_arch_wasm()) {
+ ext = STR_LIT(".wasm.o");
+ } else {
+ switch (build_context.metrics.os) {
+ case TargetOs_windows:
+ ext = STR_LIT(".obj");
+ break;
+ default:
+ case TargetOs_darwin:
+ case TargetOs_linux:
+ case TargetOs_essence:
+ ext = STR_LIT(".o");
+ break;
+
+ case TargetOs_freestanding:
+ switch (build_context.metrics.abi) {
+ default:
+ case TargetABI_Default:
+ case TargetABI_SysV:
+ ext = STR_LIT(".o");
+ break;
+ case TargetABI_Win64:
+ ext = STR_LIT(".obj");
+ break;
+ }
+ break;
+ }
+ }
+ }
+
+ return concatenate_strings(permanent_allocator(), path, ext);
+}
+
+gb_internal WORKER_TASK_PROC(lb_llvm_module_verification_worker_proc) {
+ char *llvm_error = nullptr;
+ defer (LLVMDisposeMessage(llvm_error));
+ lbModule *m = cast(lbModule *)data;
+ if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) {
+ gb_printf_err("LLVM Error:\n%s\n", llvm_error);
+ if (build_context.keep_temp_files) {
+ TIME_SECTION("LLVM Print Module to File");
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ gb_exit(1);
+ return false;
+ }
+ }
+ gb_exit(1);
+ return 1;
+ }
+ return 0;
+}
+
+
+gb_internal bool lb_llvm_module_verification(lbGenerator *gen, bool do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (do_threading) {
+ thread_pool_add_task(lb_llvm_module_verification_worker_proc, m);
+ } else {
+ if (lb_llvm_module_verification_worker_proc(m)) {
+ return false;
+ }
+ }
+ }
+ thread_pool_wait();
+
+ return true;
+}
+
+gb_internal void lb_add_foreign_library_paths(lbGenerator *gen) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ for (Entity *e : m->info->required_foreign_imports_through_force) {
+ lb_add_foreign_library_path(m, e);
+ }
+
+ if (lb_is_module_empty(m)) {
+ continue;
+ }
+ }
+}
+
+gb_internal bool lb_llvm_object_generation(lbGenerator *gen, bool do_threading) {
+ LLVMCodeGenFileType code_gen_file_type = LLVMObjectFile;
+ if (build_context.build_mode == BuildMode_Assembly) {
+ code_gen_file_type = LLVMAssemblyFile;
+ }
+
+ char *llvm_error = nullptr;
+ defer (LLVMDisposeMessage(llvm_error));
+
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (lb_is_module_empty(m)) {
+ continue;
+ }
+
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ String filepath_obj = lb_filepath_obj_for_module(m);
+ array_add(&gen->output_object_paths, filepath_obj);
+ array_add(&gen->output_temp_paths, filepath_ll);
+
+ auto *wd = gb_alloc_item(permanent_allocator(), lbLLVMEmitWorker);
+ wd->target_machine = m->target_machine;
+ wd->code_gen_file_type = code_gen_file_type;
+ wd->filepath_obj = filepath_obj;
+ wd->m = m;
+ thread_pool_add_task(lb_llvm_emit_worker_proc, wd);
+ }
+
+ thread_pool_wait(&global_thread_pool);
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ if (lb_is_module_empty(m)) {
+ continue;
+ }
+
+ String filepath_obj = lb_filepath_obj_for_module(m);
+ array_add(&gen->output_object_paths, filepath_obj);
+
+ String short_name = remove_directory_from_path(filepath_obj);
+ gbString section_name = gb_string_make(heap_allocator(), "LLVM Generate Object: ");
+ section_name = gb_string_append_length(section_name, short_name.text, short_name.len);
+
+ TIME_SECTION_WITH_LEN(section_name, gb_string_length(section_name));
+
+ if (LLVMTargetMachineEmitToFile(m->target_machine, m->mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ gb_exit(1);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *startup_runtime) {
LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
@@ -1190,7 +1664,7 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
Type *results = alloc_type_tuple();
Type *t_ptr_cstring = alloc_type_pointer(t_cstring);
-
+
bool call_cleanup = true;
bool has_args = false;
@@ -1238,7 +1712,7 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
lbAddr args = lb_addr(lb_find_runtime_value(p->module, str_lit("args__")));
lb_fill_slice(p, args, argv, argc);
}
-
+
lbValue startup_runtime_value = {startup_runtime->value, startup_runtime->type};
lb_emit_call(p, startup_runtime_value, {}, ProcInlining_none);
@@ -1298,12 +1772,12 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
}
}
-
+
if (call_cleanup) {
lbValue cleanup_runtime_value = lb_find_runtime_value(m, str_lit("_cleanup_runtime"));
lb_emit_call(p, cleanup_runtime_value, {}, ProcInlining_none);
}
-
+
if (is_dll_main) {
LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 1, false));
@@ -1312,13 +1786,13 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
}
lb_end_procedure_body(p);
-
+
LLVMSetLinkage(p->value, LLVMExternalLinkage);
if (is_arch_wasm()) {
lb_set_wasm_export_attributes(p->value, p->name);
}
-
+
if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
@@ -1331,210 +1805,6 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
return p;
}
-gb_internal String lb_filepath_ll_for_module(lbModule *m) {
- String path = concatenate3_strings(permanent_allocator(),
- build_context.build_paths[BuildPath_Output].basename,
- STR_LIT("/"),
- build_context.build_paths[BuildPath_Output].name
- );
-
- if (m->pkg) {
- path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
- } else if (USE_SEPARATE_MODULES) {
- path = concatenate_strings(permanent_allocator(), path, STR_LIT("-builtin"));
- }
- path = concatenate_strings(permanent_allocator(), path, STR_LIT(".ll"));
-
- return path;
-}
-gb_internal String lb_filepath_obj_for_module(lbModule *m) {
- String path = concatenate3_strings(permanent_allocator(),
- build_context.build_paths[BuildPath_Output].basename,
- STR_LIT("/"),
- build_context.build_paths[BuildPath_Output].name
- );
-
- if (m->pkg) {
- path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
- }
-
- String ext = {};
-
- if (build_context.build_mode == BuildMode_Assembly) {
- ext = STR_LIT(".S");
- } else {
- if (is_arch_wasm()) {
- ext = STR_LIT(".wasm.o");
- } else {
- switch (build_context.metrics.os) {
- case TargetOs_windows:
- ext = STR_LIT(".obj");
- break;
- default:
- case TargetOs_darwin:
- case TargetOs_linux:
- case TargetOs_essence:
- ext = STR_LIT(".o");
- break;
-
- case TargetOs_freestanding:
- switch (build_context.metrics.abi) {
- default:
- case TargetABI_Default:
- case TargetABI_SysV:
- ext = STR_LIT(".o");
- break;
- case TargetABI_Win64:
- ext = STR_LIT(".obj");
- break;
- }
- break;
- }
- }
- }
-
- return concatenate_strings(permanent_allocator(), path, ext);
-}
-
-
-gb_internal bool lb_is_module_empty(lbModule *m) {
- if (LLVMGetFirstFunction(m->mod) == nullptr &&
- LLVMGetFirstGlobal(m->mod) == nullptr) {
- return true;
- }
- for (auto fn = LLVMGetFirstFunction(m->mod); fn != nullptr; fn = LLVMGetNextFunction(fn)) {
- if (LLVMGetFirstBasicBlock(fn) != nullptr) {
- return false;
- }
- }
-
- for (auto g = LLVMGetFirstGlobal(m->mod); g != nullptr; g = LLVMGetNextGlobal(g)) {
- if (LLVMGetLinkage(g) == LLVMExternalLinkage) {
- continue;
- }
- if (!LLVMIsExternallyInitialized(g)) {
- return false;
- }
- }
- return true;
-}
-
-struct lbLLVMEmitWorker {
- LLVMTargetMachineRef target_machine;
- LLVMCodeGenFileType code_gen_file_type;
- String filepath_obj;
- lbModule *m;
-};
-
-gb_internal WORKER_TASK_PROC(lb_llvm_emit_worker_proc) {
- GB_ASSERT(MULTITHREAD_OBJECT_GENERATION);
-
- char *llvm_error = nullptr;
-
- auto wd = cast(lbLLVMEmitWorker *)data;
-
- if (LLVMTargetMachineEmitToFile(wd->target_machine, wd->m->mod, cast(char *)wd->filepath_obj.text, wd->code_gen_file_type, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- gb_exit(1);
- }
-
- return 0;
-}
-
-gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_worker_proc) {
- GB_ASSERT(MULTITHREAD_OBJECT_GENERATION);
-
- auto m = cast(lbModule *)data;
-
- LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
- LLVMPassManagerRef function_pass_manager_minimal = LLVMCreateFunctionPassManagerForModule(m->mod);
- LLVMPassManagerRef function_pass_manager_size = LLVMCreateFunctionPassManagerForModule(m->mod);
- LLVMPassManagerRef function_pass_manager_speed = LLVMCreateFunctionPassManagerForModule(m->mod);
-
- LLVMInitializeFunctionPassManager(default_function_pass_manager);
- LLVMInitializeFunctionPassManager(function_pass_manager_minimal);
- LLVMInitializeFunctionPassManager(function_pass_manager_size);
- LLVMInitializeFunctionPassManager(function_pass_manager_speed);
-
- lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level);
- lb_populate_function_pass_manager_specific(m, function_pass_manager_minimal, 0);
- lb_populate_function_pass_manager_specific(m, function_pass_manager_size, 1);
- lb_populate_function_pass_manager_specific(m, function_pass_manager_speed, 2);
-
- LLVMFinalizeFunctionPassManager(default_function_pass_manager);
- LLVMFinalizeFunctionPassManager(function_pass_manager_minimal);
- LLVMFinalizeFunctionPassManager(function_pass_manager_size);
- LLVMFinalizeFunctionPassManager(function_pass_manager_speed);
-
-
- LLVMPassManagerRef default_function_pass_manager_without_memcpy = LLVMCreateFunctionPassManagerForModule(m->mod);
- LLVMInitializeFunctionPassManager(default_function_pass_manager_without_memcpy);
- lb_populate_function_pass_manager(m, default_function_pass_manager_without_memcpy, true, build_context.optimization_level);
- LLVMFinalizeFunctionPassManager(default_function_pass_manager_without_memcpy);
-
- for (lbProcedure *p : m->procedures_to_generate) {
- if (p->body != nullptr) { // Build Procedure
- if (p->flags & lbProcedureFlag_WithoutMemcpyPass) {
- lb_run_function_pass_manager(default_function_pass_manager_without_memcpy, p);
- } else {
- if (p->entity && p->entity->kind == Entity_Procedure) {
- switch (p->entity->Procedure.optimization_mode) {
- case ProcedureOptimizationMode_None:
- case ProcedureOptimizationMode_Minimal:
- lb_run_function_pass_manager(function_pass_manager_minimal, p);
- break;
- case ProcedureOptimizationMode_Size:
- lb_run_function_pass_manager(function_pass_manager_size, p);
- break;
- case ProcedureOptimizationMode_Speed:
- lb_run_function_pass_manager(function_pass_manager_speed, p);
- break;
- default:
- lb_run_function_pass_manager(default_function_pass_manager, p);
- break;
- }
- } else {
- lb_run_function_pass_manager(default_function_pass_manager, p);
- }
- }
- }
- }
-
- for (auto const &entry : m->equal_procs) {
- lbProcedure *p = entry.value;
- lb_run_function_pass_manager(default_function_pass_manager, p);
- }
- for (auto const &entry : m->hasher_procs) {
- lbProcedure *p = entry.value;
- lb_run_function_pass_manager(default_function_pass_manager, p);
- }
- for (auto const &entry : m->map_get_procs) {
- lbProcedure *p = entry.value;
- lb_run_function_pass_manager(default_function_pass_manager, p);
- }
- for (auto const &entry : m->map_set_procs) {
- lbProcedure *p = entry.value;
- lb_run_function_pass_manager(default_function_pass_manager, p);
- }
-
- return 0;
-}
-
-
-struct lbLLVMModulePassWorkerData {
- lbModule *m;
- LLVMTargetMachineRef target_machine;
-};
-
-gb_internal WORKER_TASK_PROC(lb_llvm_module_pass_worker_proc) {
- auto wd = cast(lbLLVMModulePassWorkerData *)data;
- LLVMPassManagerRef module_pass_manager = LLVMCreatePassManager();
- lb_populate_module_pass_manager(wd->target_machine, module_pass_manager, build_context.optimization_level);
- LLVMRunPassManager(module_pass_manager, wd->m->mod);
- return 0;
-}
-
-
gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p) {
if (p->is_done) {
return;
@@ -1575,13 +1845,13 @@ gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p) {
}
-gb_internal void lb_generate_code(lbGenerator *gen) {
+gb_internal bool lb_generate_code(lbGenerator *gen) {
TIME_SECTION("LLVM Initializtion");
isize thread_count = gb_max(build_context.thread_count, 1);
isize worker_count = thread_count-1;
- LLVMBool do_threading = (LLVMIsMultithreaded() && USE_SEPARATE_MODULES && MULTITHREAD_OBJECT_GENERATION && worker_count > 0);
+ bool do_threading = !!(LLVMIsMultithreaded() && USE_SEPARATE_MODULES && MULTITHREAD_OBJECT_GENERATION && worker_count > 0);
lbModule *default_module = &gen->default_module;
CheckerInfo *info = gen->info;
@@ -1922,6 +2192,7 @@ gb_internal void lb_generate_code(lbGenerator *gen) {
if (!ptr_set_exists(min_dep_set, e)) {
continue;
}
+
DeclInfo *decl = decl_info_of_entity(e);
if (decl == nullptr) {
continue;
@@ -2041,13 +2312,11 @@ gb_internal void lb_generate_code(lbGenerator *gen) {
}
TIME_SECTION("LLVM Runtime Type Information Creation");
- lbProcedure *startup_type_info = lb_create_startup_type_info(default_module);
-
- lbProcedure *objc_names = lb_create_objc_names(default_module);
+ gen->startup_type_info = lb_create_startup_type_info(default_module);
+ gen->objc_names = lb_create_objc_names(default_module);
TIME_SECTION("LLVM Runtime Startup Creation (Global Variables)");
- lbProcedure *startup_runtime = lb_create_startup_runtime(default_module, startup_type_info, objc_names, global_variables);
- gb_unused(startup_runtime);
+ gen->startup_runtime = lb_create_startup_runtime(default_module, gen->startup_type_info, gen->objc_names, global_variables);
if (build_context.ODIN_DEBUG) {
for (auto const &entry : builtin_pkg->scope->elements) {
@@ -2056,156 +2325,65 @@ gb_internal void lb_generate_code(lbGenerator *gen) {
}
}
- TIME_SECTION("LLVM Global Procedures and Types");
- for (Entity *e : info->entities) {
- String name = e->token.string;
- Scope * scope = e->scope;
-
- if ((scope->flags & ScopeFlag_File) == 0) {
- continue;
- }
-
- Scope *package_scope = scope->parent;
- GB_ASSERT(package_scope->flags & ScopeFlag_Pkg);
-
- switch (e->kind) {
- case Entity_Variable:
- // NOTE(bill): Handled above as it requires a specific load order
- continue;
- case Entity_ProcGroup:
- continue;
-
- case Entity_TypeName:
- case Entity_Procedure:
- break;
- case Entity_Constant:
- if (build_context.ODIN_DEBUG) {
- add_debug_info_for_global_constant_from_entity(gen, e);
- }
- break;
- }
-
- bool polymorphic_struct = false;
- if (e->type != nullptr && e->kind == Entity_TypeName) {
- Type *bt = base_type(e->type);
- if (bt->kind == Type_Struct) {
- polymorphic_struct = is_type_polymorphic(bt);
- }
- }
-
- if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) {
- // NOTE(bill): Nothing depends upon it so doesn't need to be built
- continue;
- }
-
- lbModule *m = &gen->default_module;
- if (USE_SEPARATE_MODULES) {
- m = lb_pkg_module(gen, e->pkg);
- }
-
- String mangled_name = lb_get_entity_name(m, e);
-
- switch (e->kind) {
- case Entity_TypeName:
- lb_type(m, e->type);
- break;
- case Entity_Procedure:
- {
- lbProcedure *p = lb_create_procedure(m, e);
- array_add(&m->procedures_to_generate, p);
- }
- break;
- }
+ if (gen->modules.entries.count <= 1) {
+ do_threading = false;
}
+ TIME_SECTION("LLVM Global Procedures and Types");
+ lb_create_global_procedures_and_types(gen, info, do_threading);
+
TIME_SECTION("LLVM Procedure Generation");
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- // NOTE(bill): procedures may be added during generation
- for (isize i = 0; i < m->procedures_to_generate.count; i++) {
- lbProcedure *p = m->procedures_to_generate[i];
- lb_generate_procedure(m, p);
- }
- }
+ lb_generate_procedures(gen, do_threading);
if (build_context.command_kind == Command_test && !already_has_entry_point) {
TIME_SECTION("LLVM main");
- lb_create_main_procedure(default_module, startup_runtime);
+ lb_create_main_procedure(default_module, gen->startup_runtime);
}
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- // NOTE(bill): procedures may be added during generation
- for (isize i = 0; i < m->missing_procedures_to_check.count; i++) {
- lbProcedure *p = m->missing_procedures_to_check[i];
- debugf("Generate missing procedure: %.*s\n", LIT(p->name));
- lb_generate_procedure(m, p);
- }
- }
+ TIME_SECTION("LLVM Procedure Generation (missing)");
+ lb_generate_missing_procedures(gen, do_threading);
- lb_finalize_objc_names(objc_names);
+ if (gen->objc_names) {
+ TIME_SECTION("Finalize objc names");
+ lb_finalize_objc_names(gen->objc_names);
+ }
if (build_context.ODIN_DEBUG) {
TIME_SECTION("LLVM Debug Info Complete Types and Finalize");
+ lb_debug_info_complete_types_and_finalize(gen);
+ }
+
+ if (do_threading) {
+ isize non_empty_module_count = 0;
for (auto const &entry : gen->modules) {
lbModule *m = entry.value;
- if (m->debug_builder != nullptr) {
- lb_debug_complete_types(m);
- LLVMDIBuilderFinalize(m->debug_builder);
+ if (!lb_is_module_empty(m)) {
+ non_empty_module_count += 1;
}
}
+ if (non_empty_module_count <= 1) {
+ do_threading = false;
+ }
}
-
TIME_SECTION("LLVM Function Pass");
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- lb_llvm_function_pass_worker_proc(m);
- }
+ lb_llvm_function_passes(gen, do_threading);
TIME_SECTION("LLVM Module Pass");
+ lb_llvm_module_passes(gen, do_threading);
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- lb_run_remove_unused_function_pass(m);
- lb_run_remove_unused_globals_pass(m);
- auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData);
- wd->m = m;
- wd->target_machine = m->target_machine;
+ TIME_SECTION("LLVM Module Verification");
- lb_llvm_module_pass_worker_proc(wd);
- }
+ if (!lb_llvm_module_verification(gen, do_threading)) {
+ return false;
+ }
llvm_error = nullptr;
defer (LLVMDisposeMessage(llvm_error));
- LLVMCodeGenFileType code_gen_file_type = LLVMObjectFile;
- if (build_context.build_mode == BuildMode_Assembly) {
- code_gen_file_type = LLVMAssemblyFile;
- }
-
-
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) {
- gb_printf_err("LLVM Error:\n%s\n", llvm_error);
- if (build_context.keep_temp_files) {
- TIME_SECTION("LLVM Print Module to File");
- String filepath_ll = lb_filepath_ll_for_module(m);
- if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- gb_exit(1);
- return;
- }
- }
- gb_exit(1);
- return;
- }
- }
- llvm_error = nullptr;
if (build_context.keep_temp_files ||
build_context.build_mode == BuildMode_LLVM_IR) {
TIME_SECTION("LLVM Print Module to File");
@@ -2220,85 +2398,30 @@ gb_internal void lb_generate_code(lbGenerator *gen) {
if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
gb_printf_err("LLVM Error: %s\n", llvm_error);
gb_exit(1);
- return;
+ return false;
}
array_add(&gen->output_temp_paths, filepath_ll);
}
if (build_context.build_mode == BuildMode_LLVM_IR) {
- gb_exit(0);
- return;
+ return true;
}
}
TIME_SECTION("LLVM Add Foreign Library Paths");
-
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- for_array(i, m->info->required_foreign_imports_through_force) {
- Entity *e = m->info->required_foreign_imports_through_force[i];
- lb_add_foreign_library_path(m, e);
- }
-
- if (lb_is_module_empty(m)) {
- continue;
- }
- }
+ lb_add_foreign_library_paths(gen);
TIME_SECTION("LLVM Object Generation");
- isize non_empty_module_count = 0;
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (!lb_is_module_empty(m)) {
- non_empty_module_count += 1;
- }
+ if (build_context.ignore_llvm_build) {
+ gb_printf_err("LLVM object generation has been ignored!\n");
+ return false;
}
-
- if (do_threading && non_empty_module_count > 1) {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (lb_is_module_empty(m)) {
- continue;
- }
-
- String filepath_ll = lb_filepath_ll_for_module(m);
- String filepath_obj = lb_filepath_obj_for_module(m);
- array_add(&gen->output_object_paths, filepath_obj);
- array_add(&gen->output_temp_paths, filepath_ll);
-
- auto *wd = gb_alloc_item(permanent_allocator(), lbLLVMEmitWorker);
- wd->target_machine = m->target_machine;
- wd->code_gen_file_type = code_gen_file_type;
- wd->filepath_obj = filepath_obj;
- wd->m = m;
- global_thread_pool_add_task(lb_llvm_emit_worker_proc, wd);
- }
-
- thread_pool_wait(&global_thread_pool);
- } else {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (lb_is_module_empty(m)) {
- continue;
- }
-
- String filepath_obj = lb_filepath_obj_for_module(m);
- array_add(&gen->output_object_paths, filepath_obj);
-
- String short_name = remove_directory_from_path(filepath_obj);
- gbString section_name = gb_string_make(heap_allocator(), "LLVM Generate Object: ");
- section_name = gb_string_append_length(section_name, short_name.text, short_name.len);
-
- TIME_SECTION_WITH_LEN(section_name, gb_string_length(section_name));
-
- if (LLVMTargetMachineEmitToFile(m->target_machine, m->mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- gb_exit(1);
- return;
- }
- }
+ if (!lb_llvm_object_generation(gen, do_threading)) {
+ return false;
}
gb_sort_array(gen->foreign_libraries.data, gen->foreign_libraries.count, foreign_library_cmp);
+
+ return true;
}
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index 9f7caa3bb..de4deffd4 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -117,6 +117,16 @@ struct lbIncompleteDebugType {
typedef Slice<i32> lbStructFieldRemapping;
+enum lbFunctionPassManagerKind {
+ lbFunctionPassManager_default,
+ lbFunctionPassManager_default_without_memcpy,
+ lbFunctionPassManager_minimal,
+ lbFunctionPassManager_size,
+ lbFunctionPassManager_speed,
+
+ lbFunctionPassManager_COUNT
+};
+
struct lbModule {
LLVMModuleRef mod;
LLVMContextRef ctx;
@@ -132,6 +142,8 @@ struct lbModule {
PtrMap<void *, lbStructFieldRemapping> struct_field_remapping; // Key: LLVMTypeRef or Type *
i32 internal_type_level;
+ RecursiveMutex values_mutex;
+
PtrMap<Entity *, lbValue> values;
PtrMap<Entity *, lbAddr> soa_values;
StringMap<lbValue> members;
@@ -151,11 +163,14 @@ struct lbModule {
u32 nested_type_name_guid;
Array<lbProcedure *> procedures_to_generate;
+ Array<Entity *> global_procedures_and_types_to_create;
lbProcedure *curr_procedure;
LLVMDIBuilderRef debug_builder;
LLVMMetadataRef debug_compile_unit;
+
+ RecursiveMutex debug_values_mutex;
PtrMap<void *, LLVMMetadataRef> debug_values;
Array<lbIncompleteDebugType> debug_incomplete_types;
@@ -165,6 +180,8 @@ struct lbModule {
PtrMap<Type *, lbAddr> map_cell_info_map; // address of runtime.Map_Info
PtrMap<Type *, lbAddr> map_info_map; // address of runtime.Map_Cell_Info
+
+ LLVMPassManagerRef function_pass_managers[lbFunctionPassManager_COUNT];
};
struct lbGenerator {
@@ -178,6 +195,7 @@ struct lbGenerator {
PtrMap<LLVMContextRef, lbModule *> modules_through_ctx;
lbModule default_module;
+ BlockingMutex anonymous_proc_lits_mutex;
PtrMap<Ast *, lbProcedure *> anonymous_proc_lits;
BlockingMutex foreign_mutex;
@@ -186,6 +204,10 @@ struct lbGenerator {
std::atomic<u32> global_array_index;
std::atomic<u32> global_generated_index;
+
+ lbProcedure *startup_type_info;
+ lbProcedure *startup_runtime;
+ lbProcedure *objc_names;
};
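
Annotation: the new lbFunctionPassManagerKind enum ends in a _COUNT sentinel that sizes the per-module function_pass_managers array, so each pass-manager flavour is created once per module and then reused by kind. A sketch of that enum-indexed cache pattern, with PassManager as a hypothetical stand-in for LLVMPassManagerRef:

enum PassManagerKind {
    PassManager_default,
    PassManager_minimal,
    PassManager_size,
    PassManager_speed,

    PassManager_COUNT, // sentinel: number of variants, never a valid kind
};

struct PassManager { PassManagerKind kind; };

struct Module {
    PassManager *pass_managers[PassManager_COUNT] = {};
};

PassManager *get_pass_manager(Module *m, PassManagerKind kind) {
    if (m->pass_managers[kind] == nullptr) {
        m->pass_managers[kind] = new PassManager{kind}; // created lazily, once per kind
    }
    return m->pass_managers[kind];
}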
diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp
index 55c4370a2..9bf4063d6 100644
--- a/src/llvm_backend_debug.cpp
+++ b/src/llvm_backend_debug.cpp
@@ -2,7 +2,9 @@ gb_internal LLVMMetadataRef lb_get_llvm_metadata(lbModule *m, void *key) {
if (key == nullptr) {
return nullptr;
}
+ mutex_lock(&m->debug_values_mutex);
auto found = map_get(&m->debug_values, key);
+ mutex_unlock(&m->debug_values_mutex);
if (found) {
return *found;
}
@@ -10,7 +12,9 @@ gb_internal LLVMMetadataRef lb_get_llvm_metadata(lbModule *m, void *key) {
}
gb_internal void lb_set_llvm_metadata(lbModule *m, void *key, LLVMMetadataRef value) {
if (key != nullptr) {
+ mutex_lock(&m->debug_values_mutex);
map_set(&m->debug_values, key, value);
+ mutex_unlock(&m->debug_values_mutex);
}
}
@@ -491,6 +495,9 @@ gb_internal LLVMMetadataRef lb_get_base_scope_metadata(lbModule *m, Scope *scope
}
gb_internal LLVMMetadataRef lb_debug_type(lbModule *m, Type *type) {
+ mutex_lock(&m->debug_values_mutex);
+ defer (mutex_unlock(&m->debug_values_mutex));
+
GB_ASSERT(type != nullptr);
LLVMMetadataRef found = lb_get_llvm_metadata(m, type);
if (found != nullptr) {
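
Annotation: note why debug_values_mutex is a *recursive* mutex: lb_debug_type now holds the lock for its whole body, and the lb_get_llvm_metadata/lb_set_llvm_metadata helpers it calls lock the same mutex again on the same thread. A sketch of that shape in standard C++, with std::recursive_mutex standing in for the compiler's RecursiveMutex and int standing in for LLVMMetadataRef:

#include <mutex>
#include <unordered_map>

struct DebugCache {
    std::recursive_mutex            mutex;
    std::unordered_map<void *, int> values;
};

int *cache_get(DebugCache *c, void *key) {
    std::lock_guard<std::recursive_mutex> lock(c->mutex); // re-entrant lock
    auto it = c->values.find(key);
    return it == c->values.end() ? nullptr : &it->second;
}

int build_debug_type(DebugCache *c, void *type) {
    std::lock_guard<std::recursive_mutex> lock(c->mutex); // outer lock held for the whole build
    if (int *found = cache_get(c, type)) { // inner lock on the same thread is fine
        return *found;
    }
    int made = 42; // stand-in for constructing the metadata
    c->values[type] = made;
    return made;
}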
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index d574caf4c..c28e9fb2b 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -61,8 +61,7 @@ gb_internal lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, As
GB_ASSERT(incoming_values.count > 0);
LLVMTypeRef phi_type = nullptr;
- for_array(i, incoming_values) {
- LLVMValueRef incoming_value = incoming_values[i];
+ for (LLVMValueRef incoming_value : incoming_values) {
if (!LLVMIsConstant(incoming_value)) {
phi_type = LLVMTypeOf(incoming_value);
break;
@@ -1921,8 +1920,7 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) {
}
if (is_type_union(dst)) {
- for_array(i, dst->Union.variants) {
- Type *vt = dst->Union.variants[i];
+ for (Type *vt : dst->Union.variants) {
if (are_types_identical(vt, src_type)) {
lbAddr parent = lb_add_local_generated(p, t, true);
lb_emit_store_union_variant(p, parent.addr, value, vt);
@@ -3596,8 +3594,7 @@ gb_internal void lb_build_addr_compound_lit_populate(lbProcedure *p, Slice<Ast *
}
}
gb_internal void lb_build_addr_compound_lit_assign_array(lbProcedure *p, Array<lbCompoundLitElemTempData> const &temp_data) {
- for_array(i, temp_data) {
- auto td = temp_data[i];
+ for (auto const &td : temp_data) {
if (td.value.value != nullptr) {
if (td.elem_length > 0) {
auto loop_data = lb_loop_start(p, cast(isize)td.elem_length, t_i32);
@@ -4129,8 +4126,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
lbValue err = lb_dynamic_map_reserve(p, v.addr, 2*cl->elems.count, pos);
gb_unused(err);
- for_array(field_index, cl->elems) {
- Ast *elem = cl->elems[field_index];
+ for (Ast *elem : cl->elems) {
ast_node(fv, FieldValue, elem);
lbValue key = lb_build_expr(p, fv->field);
@@ -4304,8 +4300,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr)));
lbValue lower = lb_const_value(p->module, t_int, exact_value_i64(bt->BitSet.lower));
- for_array(i, cl->elems) {
- Ast *elem = cl->elems[i];
+ for (Ast *elem : cl->elems) {
GB_ASSERT(elem->kind != Ast_FieldValue);
if (lb_is_elem_const(elem, et)) {
@@ -4359,8 +4354,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
// TODO(bill): reduce the need for individual `insertelement` if a `shufflevector`
// might be a better option
- for_array(i, temp_data) {
- auto td = temp_data[i];
+ for (auto const &td : temp_data) {
if (td.value.value != nullptr) {
if (td.elem_length > 0) {
for (i64 k = 0; k < td.elem_length; k++) {
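
Annotation: the llvm_backend_expr.cpp hunks are a mechanical cleanup, replacing index-based for_array(i, xs) loops with range-based for loops; behaviour is unchanged. Range-for only requires begin()/end() on the container, as this simplified model of the compiler's Slice<T> shows (a sketch, not the real definition):

#include <cstdio>

template <typename T>
struct Slice {
    T    *data;
    long  count;
    T *begin() const { return data; }
    T *end()   const { return data + count; }
};

int main() {
    int xs[3] = {1, 2, 3};
    Slice<int> s = {xs, 3};
    for (long i = 0; i < s.count; i++) { // old for_array shape
        std::printf("%d ", s.data[i]);
    }
    for (int x : s) {                    // new range-for shape
        std::printf("%d ", x);
    }
    return 0;
}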
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index e5aa95f10..dca8c829d 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -55,30 +55,31 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) {
}
gbAllocator a = heap_allocator();
- map_init(&m->types, a);
- map_init(&m->func_raw_types, a);
- map_init(&m->struct_field_remapping, a);
- map_init(&m->values, a);
- map_init(&m->soa_values, a);
- string_map_init(&m->members, a);
- map_init(&m->procedure_values, a);
- string_map_init(&m->procedures, a);
- string_map_init(&m->const_strings, a);
- map_init(&m->function_type_map, a);
- map_init(&m->equal_procs, a);
- map_init(&m->hasher_procs, a);
- map_init(&m->map_get_procs, a);
- map_init(&m->map_set_procs, a);
+ map_init(&m->types);
+ map_init(&m->func_raw_types);
+ map_init(&m->struct_field_remapping);
+ map_init(&m->values);
+ map_init(&m->soa_values);
+ string_map_init(&m->members);
+ map_init(&m->procedure_values);
+ string_map_init(&m->procedures);
+ string_map_init(&m->const_strings);
+ map_init(&m->function_type_map);
+ map_init(&m->equal_procs);
+ map_init(&m->hasher_procs);
+ map_init(&m->map_get_procs);
+ map_init(&m->map_set_procs);
array_init(&m->procedures_to_generate, a, 0, 1024);
+ array_init(&m->global_procedures_and_types_to_create, a, 0, 1024);
array_init(&m->missing_procedures_to_check, a, 0, 16);
- map_init(&m->debug_values, a);
+ map_init(&m->debug_values);
array_init(&m->debug_incomplete_types, a, 0, 1024);
- string_map_init(&m->objc_classes, a);
- string_map_init(&m->objc_selectors, a);
+ string_map_init(&m->objc_classes);
+ string_map_init(&m->objc_selectors);
- map_init(&m->map_info_map, a, 0);
- map_init(&m->map_cell_info_map, a, 0);
+ map_init(&m->map_info_map, 0);
+ map_init(&m->map_cell_info_map, 0);
}
@@ -127,14 +128,13 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) {
gen->info = &c->info;
- map_init(&gen->modules, permanent_allocator(), gen->info->packages.entries.count*2);
- map_init(&gen->modules_through_ctx, permanent_allocator(), gen->info->packages.entries.count*2);
- map_init(&gen->anonymous_proc_lits, heap_allocator(), 1024);
+ map_init(&gen->modules, gen->info->packages.entries.count*2);
+ map_init(&gen->modules_through_ctx, gen->info->packages.entries.count*2);
+ map_init(&gen->anonymous_proc_lits, 1024);
- mutex_init(&gen->foreign_mutex);
array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024);
- ptr_set_init(&gen->foreign_libraries_set, heap_allocator(), 1024);
+ ptr_set_init(&gen->foreign_libraries_set, 1024);
if (USE_SEPARATE_MODULES) {
for (auto const &entry : gen->info->packages) {
@@ -317,6 +317,7 @@ gb_internal bool lb_is_instr_terminating(LLVMValueRef instr) {
gb_internal lbModule *lb_pkg_module(lbGenerator *gen, AstPackage *pkg) {
+ // NOTE(bill): no need for a mutex since it's immutable
auto *found = map_get(&gen->modules, pkg);
if (found) {
return *found;
@@ -1355,7 +1356,7 @@ gb_internal String lb_mangle_name(lbModule *m, Entity *e) {
return mangled_name;
}
-gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedure *p) {
+gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedure *p, lbModule *module) {
// NOTE(bill, 2020-03-08): A polymorphic procedure may take a nested type declaration
// and as a result, the declaration does not have time to determine what it should be
@@ -1422,7 +1423,7 @@ gb_internal String lb_get_entity_name(lbModule *m, Entity *e, String default_nam
}
if (e->kind == Entity_TypeName && (e->scope->flags & ScopeFlag_File) == 0) {
- return lb_set_nested_type_name_ir_mangled_name(e, nullptr);
+ return lb_set_nested_type_name_ir_mangled_name(e, nullptr, m);
}
String name = {};
@@ -2165,19 +2166,25 @@ gb_internal void lb_ensure_abi_function_type(lbModule *m, lbProcedure *p) {
gb_internal void lb_add_entity(lbModule *m, Entity *e, lbValue val) {
if (e != nullptr) {
+ mutex_lock(&m->values_mutex);
map_set(&m->values, e, val);
+ mutex_unlock(&m->values_mutex);
}
}
gb_internal void lb_add_member(lbModule *m, String const &name, lbValue val) {
if (name.len > 0) {
+ mutex_lock(&m->values_mutex);
string_map_set(&m->members, name, val);
+ mutex_unlock(&m->values_mutex);
}
}
gb_internal void lb_add_procedure_value(lbModule *m, lbProcedure *p) {
+ mutex_lock(&m->values_mutex);
if (p->entity != nullptr) {
map_set(&m->procedure_values, p->value, p->entity);
}
string_map_set(&m->procedures, p->name, p);
+ mutex_unlock(&m->values_mutex);
}
@@ -2520,6 +2527,8 @@ gb_internal lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e
return *found;
}
}
+ mutex_lock(&m->values_mutex);
+ defer (mutex_unlock(&m->values_mutex));
auto *found = map_get(&m->values, e);
if (found) {
@@ -2539,7 +2548,6 @@ gb_internal lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e
if (USE_SEPARATE_MODULES) {
lbModule *other_module = lb_pkg_module(m->gen, e->pkg);
if (other_module != m) {
-
String name = lb_get_entity_name(other_module, e);
lb_set_entity_from_other_modules_linkage_correctly(other_module, e, name);
@@ -2570,6 +2578,9 @@ gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e)
e = strip_entity_wrapping(e);
GB_ASSERT(e != nullptr);
+ mutex_lock(&m->values_mutex);
+ defer (mutex_unlock(&m->values_mutex));
+
auto *found = map_get(&m->values, e);
if (found) {
return *found;
@@ -2658,6 +2669,10 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
return lb_find_procedure_value_from_entity(m, e);
}
+ mutex_lock(&m->values_mutex);
+ defer (mutex_unlock(&m->values_mutex));
+
+
auto *found = map_get(&m->values, e);
if (found) {
return *found;
@@ -2715,7 +2730,6 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
return g;
}
}
-
GB_PANIC("\n\tError in: %s, missing value '%.*s'\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
return {};
}
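
Annotation: the values/members/procedure maps in lbModule are now shared between backend worker threads, so every read or write path above takes values_mutex, with defer used for the unlock. A sketch of that lock-around-map shape in standard C++ (std::lock_guard behaves like the mutex_lock/defer(mutex_unlock) pair):

#include <mutex>
#include <string>
#include <unordered_map>

struct Value { int id; };

struct Module {
    std::mutex values_mutex;
    std::unordered_map<std::string, Value> values;
};

void add_entity(Module *m, std::string const &name, Value val) {
    std::lock_guard<std::mutex> lock(m->values_mutex); // unlocks on scope exit
    m->values[name] = val;
}

bool find_value(Module *m, std::string const &name, Value *out) {
    std::lock_guard<std::mutex> lock(m->values_mutex);
    auto it = m->values.find(name);
    if (it == m->values.end()) return false;
    *out = it->second;
    return true;
}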
diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp
index fd6d94361..d7a34d82a 100644
--- a/src/llvm_backend_opt.cpp
+++ b/src/llvm_backend_opt.cpp
@@ -359,6 +359,9 @@ gb_internal void lb_run_remove_dead_instruction_pass(lbProcedure *p) {
gb_internal void lb_run_function_pass_manager(LLVMPassManagerRef fpm, lbProcedure *p) {
+ if (p == nullptr) {
+ return;
+ }
LLVMRunFunctionPassManager(fpm, p->value);
// NOTE(bill): LLVMAddDCEPass doesn't seem to be exported in the official DLL's for LLVM
// which means we cannot rely upon it
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 384d29ca7..c66462bc1 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -68,7 +68,7 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
GB_ASSERT(entity != nullptr);
GB_ASSERT(entity->kind == Entity_Procedure);
if (!entity->Procedure.is_foreign) {
- GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s", LIT(entity->token.string), type_to_string(entity->type));
+ GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s (was parapoly: %d)", LIT(entity->token.string), type_to_string(entity->type), is_type_polymorphic(entity->type, true));
}
String link_name = {};
@@ -119,9 +119,9 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
p->branch_blocks.allocator = a;
p->context_stack.allocator = a;
p->scope_stack.allocator = a;
- map_init(&p->selector_values, a, 0);
- map_init(&p->selector_addr, a, 0);
- map_init(&p->tuple_fix_map, a, 0);
+ map_init(&p->selector_values, 0);
+ map_init(&p->selector_addr, 0);
+ map_init(&p->tuple_fix_map, 0);
if (p->is_foreign) {
lb_add_foreign_library_path(p->module, entity->Procedure.foreign_library);
@@ -345,7 +345,7 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name
p->blocks.allocator = a;
p->branch_blocks.allocator = a;
p->context_stack.allocator = a;
- map_init(&p->tuple_fix_map, a, 0);
+ map_init(&p->tuple_fix_map, 0);
char *c_link_name = alloc_cstring(permanent_allocator(), p->name);
@@ -486,7 +486,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) {
p->entry_block = lb_create_block(p, "entry", true);
lb_start_block(p, p->entry_block);
- map_init(&p->direct_parameters, heap_allocator());
+ map_init(&p->direct_parameters);
GB_ASSERT(p->type != nullptr);
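
Annotation: the enriched assertion above folds extra context (whether the entity's type was polymorphic) into the failure message, which matters when a parapoly instantiation reaches codegen without a checked body. A sketch of a formatted assert in the same spirit; ASSERT_MSG here is a hypothetical stand-in for GB_ASSERT_MSG:

#include <cstdio>
#include <cstdlib>

#define ASSERT_MSG(cond, fmt, ...)                                    \
    do {                                                               \
        if (!(cond)) {                                                 \
            std::fprintf(stderr, "assert failed: %s\n\t" fmt "\n",     \
                         #cond, __VA_ARGS__);                          \
            std::abort();                                              \
        }                                                              \
    } while (0)

void check_body_checked(const char *name, bool body_checked, bool parapoly) {
    ASSERT_MSG(body_checked, "%s (was parapoly: %d)", name, (int)parapoly);
}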
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 6400a8a9d..73b4e251f 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -7,8 +7,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
static i32 global_guid = 0;
- for_array(i, vd->names) {
- Ast *ident = vd->names[i];
+ for (Ast *ident : vd->names) {
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
GB_ASSERT(e != nullptr);
@@ -33,7 +32,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
continue;
}
- lb_set_nested_type_name_ir_mangled_name(e, p);
+ lb_set_nested_type_name_ir_mangled_name(e, p, p->module);
}
for_array(i, vd->names) {
@@ -51,21 +50,20 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
continue; // It's an alias
}
- CheckerInfo *info = p->module->info;
DeclInfo *decl = decl_info_of_entity(e);
ast_node(pl, ProcLit, decl->proc_lit);
if (pl->body != nullptr) {
- auto *found = map_get(&info->gen_procs, ident);
- if (found) {
- auto procs = *found;
- for_array(i, procs) {
- Entity *e = procs[i];
+ GenProcsData *gpd = e->Procedure.gen_procs;
+ if (gpd) {
+ rw_mutex_shared_lock(&gpd->mutex);
+ for (Entity *e : gpd->procs) {
if (!ptr_set_exists(min_dep_set, e)) {
continue;
}
DeclInfo *d = decl_info_of_entity(e);
lb_build_nested_proc(p, &d->proc_lit->ProcLit, e);
}
+ rw_mutex_shared_unlock(&gpd->mutex);
} else {
lb_build_nested_proc(p, pl, e);
}
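
Annotation: the gen_procs list of polymorphic instantiations replaces the old info->gen_procs map, and it is read here under a *shared* lock: many backend threads may walk the list at once, while whoever appends new instantiations takes the exclusive side. Sketched with std::shared_mutex standing in for the compiler's rw mutex:

#include <mutex>
#include <shared_mutex>
#include <vector>

struct Entity;

struct GenProcsData {
    std::shared_mutex     mutex;
    std::vector<Entity *> procs;
};

void append_instantiation(GenProcsData *gpd, Entity *e) {
    std::unique_lock<std::shared_mutex> lock(gpd->mutex); // writer: exclusive
    gpd->procs.push_back(e);
}

template <typename F>
void for_each_instantiation(GenProcsData *gpd, F f) {
    std::shared_lock<std::shared_mutex> lock(gpd->mutex); // readers: concurrent
    for (Entity *e : gpd->procs) {
        f(e);
    }
}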
@@ -106,8 +104,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
gb_internal void lb_build_stmt_list(lbProcedure *p, Slice<Ast *> const &stmts) {
- for_array(i, stmts) {
- Ast *stmt = stmts[i];
+ for (Ast *stmt : stmts) {
switch (stmt->kind) {
case_ast_node(vd, ValueDecl, stmt);
lb_build_constant_value_decl(p, vd);
@@ -118,8 +115,8 @@ gb_internal void lb_build_stmt_list(lbProcedure *p, Slice<Ast *> const &stmts) {
case_end;
}
}
- for_array(i, stmts) {
- lb_build_stmt(p, stmts[i]);
+ for (Ast *stmt : stmts) {
+ lb_build_stmt(p, stmt);
}
}
@@ -129,10 +126,9 @@ gb_internal lbBranchBlocks lb_lookup_branch_blocks(lbProcedure *p, Ast *ident) {
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
GB_ASSERT(e->kind == Entity_Label);
- for_array(i, p->branch_blocks) {
- lbBranchBlocks *b = &p->branch_blocks[i];
- if (b->label == e->Label.node) {
- return *b;
+ for (lbBranchBlocks const &b : p->branch_blocks) {
+ if (b.label == e->Label.node) {
+ return b;
}
}
@@ -153,13 +149,12 @@ gb_internal lbTargetList *lb_push_target_list(lbProcedure *p, Ast *label, lbBloc
if (label != nullptr) { // Set label blocks
GB_ASSERT(label->kind == Ast_Label);
- for_array(i, p->branch_blocks) {
- lbBranchBlocks *b = &p->branch_blocks[i];
- GB_ASSERT(b->label != nullptr && label != nullptr);
- GB_ASSERT(b->label->kind == Ast_Label);
- if (b->label == label) {
- b->break_ = break_;
- b->continue_ = continue_;
+ for (lbBranchBlocks &b : p->branch_blocks) {
+ GB_ASSERT(b.label != nullptr && label != nullptr);
+ GB_ASSERT(b.label->kind == Ast_Label);
+ if (b.label == label) {
+ b.break_ = break_;
+ b.continue_ = continue_;
return tl;
}
}
@@ -1095,8 +1090,7 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo
}
ast_node(body, BlockStmt, ss->body);
- for_array(i, body->stmts) {
- Ast *clause = body->stmts[i];
+ for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
if (cc->list.count == 0) {
@@ -1104,8 +1098,8 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo
continue;
}
- for_array(j, cc->list) {
- Ast *expr = unparen_expr(cc->list[j]);
+ for (Ast *expr : cc->list) {
+ expr = unparen_expr(expr);
if (is_ast_range(expr)) {
return false;
}
@@ -1166,8 +1160,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
LLVMValueRef switch_instr = nullptr;
if (is_trivial) {
isize num_cases = 0;
- for_array(i, body->stmts) {
- Ast *clause = body->stmts[i];
+ for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
num_cases += cc->list.count;
}
@@ -1204,8 +1197,8 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
}
lbBlock *next_cond = nullptr;
- for_array(j, cc->list) {
- Ast *expr = unparen_expr(cc->list[j]);
+ for (Ast *expr : cc->list) {
+ expr = unparen_expr(expr);
if (switch_instr != nullptr) {
lbValue on_val = {};
@@ -1384,8 +1377,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
lbBlock *default_block = nullptr;
isize num_cases = 0;
- for_array(i, body->stmts) {
- Ast *clause = body->stmts[i];
+ for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
num_cases += cc->list.count;
if (cc->list.count == 0) {
@@ -1405,8 +1397,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
switch_instr = LLVMBuildSwitch(p->builder, tag.value, else_block->block, cast(unsigned)num_cases);
}
- for_array(i, body->stmts) {
- Ast *clause = body->stmts[i];
+ for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
lb_open_scope(p, cc->scope);
if (cc->list.count == 0) {
@@ -1420,9 +1411,8 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
if (p->debug_info != nullptr) {
LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, clause));
}
- Type *case_type = nullptr;
- for_array(type_index, cc->list) {
- case_type = type_of_expr(cc->list[type_index]);
+ for (Ast *type_expr : cc->list) {
+ Type *case_type = type_of_expr(type_expr);
lbValue on_val = {};
if (switch_kind == TypeSwitch_Union) {
Type *ut = base_type(type_deref(parent.type));
@@ -1538,8 +1528,8 @@ gb_internal void lb_append_tuple_values(lbProcedure *p, Array<lbValue> *dst_valu
if (t->kind == Type_Tuple) {
lbTupleFix *tf = map_get(&p->tuple_fix_map, src_value.value);
if (tf) {
- for_array(j, tf->values) {
- array_add(dst_values, tf->values[j]);
+ for (lbValue const &value : tf->values) {
+ array_add(dst_values, value);
}
} else {
for_array(i, t->Tuple.variables) {
@@ -1560,8 +1550,7 @@ gb_internal void lb_build_assignment(lbProcedure *p, Array<lbAddr> &lvals, Slice
auto inits = array_make<lbValue>(permanent_allocator(), 0, lvals.count);
- for_array(i, values) {
- Ast *rhs = values[i];
+ for (Ast *rhs : values) {
lbValue init = lb_build_expr(p, rhs);
lb_append_tuple_values(p, &inits, init);
}
@@ -1971,8 +1960,7 @@ gb_internal void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr
auto indices_handled = slice_make<bool>(temporary_allocator(), bt->Array.count);
auto indices = slice_make<i32>(temporary_allocator(), bt->Array.count);
i32 index_count = 0;
- for_array(i, lhs.swizzle_large.indices) {
- i32 index = lhs.swizzle_large.indices[i];
+ for (i32 index : lhs.swizzle_large.indices) {
if (indices_handled[index]) {
continue;
}
@@ -2049,8 +2037,7 @@ gb_internal void lb_build_assign_stmt(lbProcedure *p, AstAssignStmt *as) {
if (as->op.kind == Token_Eq) {
auto lvals = array_make<lbAddr>(permanent_allocator(), 0, as->lhs.count);
- for_array(i, as->lhs) {
- Ast *lhs = as->lhs[i];
+ for (Ast *lhs : as->lhs) {
lbAddr lval = {};
if (!is_blank_ident(lhs)) {
lval = lb_build_addr(p, lhs);
@@ -2185,12 +2172,12 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
bool is_static = false;
if (vd->names.count > 0) {
- for_array(i, vd->names) {
- Ast *name = vd->names[i];
+ for (Ast *name : vd->names) {
if (!is_blank_ident(name)) {
+ GB_ASSERT(name->kind == Ast_Ident);
Entity *e = entity_of_node(name);
TokenPos pos = ast_token(name).pos;
- GB_ASSERT_MSG(e != nullptr, "%s", token_pos_to_string(pos));
+ GB_ASSERT_MSG(e != nullptr, "\n%s missing entity for %.*s", token_pos_to_string(pos), LIT(name->Ident.token.string));
if (e->flags & EntityFlag_Static) {
// NOTE(bill): If one of the entities is static, they all are
is_static = true;
@@ -2207,8 +2194,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
auto lvals = array_make<lbAddr>(permanent_allocator(), 0, vd->names.count);
- for_array(i, vd->names) {
- Ast *name = vd->names[i];
+ for (Ast *name : vd->names) {
lbAddr lval = {};
if (!is_blank_ident(name)) {
Entity *e = entity_of_node(name);
diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp
index 26bb614e6..e2b5c9dd0 100644
--- a/src/llvm_backend_type.cpp
+++ b/src/llvm_backend_type.cpp
@@ -2,9 +2,10 @@ gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_
auto *set = &info->minimum_dependency_type_info_set;
isize index = type_info_index(info, type, err_on_not_found);
if (index >= 0) {
- isize i = ptr_entry_index(set, index);
- if (i >= 0) {
- return i+1;
+ auto *found = map_get(set, index);
+ if (found) {
+ GB_ASSERT(*found >= 0);
+ return *found + 1;
}
}
if (err_on_not_found) {
@@ -185,7 +186,7 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
if (entry_index <= 0) {
continue;
}
-
+
if (entries_handled[entry_index]) {
continue;
}
diff --git a/src/main.cpp b/src/main.cpp
index 6d910c7bf..4e8dfaf75 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -13,17 +13,16 @@
#endif
#include "exact_value.cpp"
#include "build_settings.cpp"
-
gb_global ThreadPool global_thread_pool;
gb_internal void init_global_thread_pool(void) {
isize thread_count = gb_max(build_context.thread_count, 1);
- isize worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work
+ isize worker_count = thread_count-1;
thread_pool_init(&global_thread_pool, permanent_allocator(), worker_count, "ThreadPoolWorker");
}
-gb_internal bool global_thread_pool_add_task(WorkerTaskProc *proc, void *data) {
+gb_internal bool thread_pool_add_task(WorkerTaskProc *proc, void *data) {
return thread_pool_add_task(&global_thread_pool, proc, data);
}
-gb_internal void global_thread_pool_wait(void) {
+gb_internal void thread_pool_wait(void) {
thread_pool_wait(&global_thread_pool);
}
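
Annotation: renaming the helpers to thread_pool_add_task/thread_pool_wait turns them into overloads of the pool-taking functions: call sites simply drop the explicit pool argument but resolve to the same machinery. The overload-forwarding shape, with trivial inline stand-ins for the real pool primitives:

typedef long WorkerTask(void *data);

struct ThreadPool { /* queue, workers, ... */ };

static ThreadPool global_pool; // stand-in for global_thread_pool

// Pool-taking primitives (trivial stand-ins; a real pool would enqueue):
long thread_pool_add_task(ThreadPool *pool, WorkerTask *proc, void *data) {
    (void)pool;
    return proc(data);
}
void thread_pool_wait(ThreadPool *pool) { (void)pool; }

// The new argument-free overloads forward to the global instance:
long thread_pool_add_task(WorkerTask *proc, void *data) {
    return thread_pool_add_task(&global_pool, proc, data);
}
void thread_pool_wait() { thread_pool_wait(&global_pool); }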
@@ -213,11 +212,11 @@ gb_internal i32 linker_stage(lbGenerator *gen) {
StringSet libs = {};
- string_set_init(&libs, heap_allocator(), 64);
+ string_set_init(&libs, 64);
defer (string_set_destroy(&libs));
StringSet asm_files = {};
- string_set_init(&asm_files, heap_allocator(), 64);
+ string_set_init(&asm_files, 64);
defer (string_set_destroy(&asm_files));
for_array(j, gen->foreign_libraries) {
@@ -372,7 +371,7 @@ gb_internal i32 linker_stage(lbGenerator *gen) {
defer (gb_string_free(lib_str));
StringSet libs = {};
- string_set_init(&libs, heap_allocator(), 64);
+ string_set_init(&libs, 64);
defer (string_set_destroy(&libs));
for_array(j, gen->foreign_libraries) {
@@ -618,7 +617,6 @@ enum BuildFlagKind {
BuildFlag_NoEntryPoint,
BuildFlag_UseLLD,
BuildFlag_UseSeparateModules,
- BuildFlag_ThreadedChecker,
BuildFlag_NoThreadedChecker,
BuildFlag_ShowDebugMessages,
BuildFlag_Vet,
@@ -660,6 +658,7 @@ enum BuildFlagKind {
// internal use only
BuildFlag_InternalIgnoreLazy,
+ BuildFlag_InternalIgnoreLLVMBuild,
#if defined(GB_SYSTEM_WINDOWS)
BuildFlag_IgnoreVsSearch,
@@ -793,7 +792,6 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_NoEntryPoint, str_lit("no-entry-point"), BuildFlagParam_None, Command__does_check &~ Command_test);
add_flag(&build_flags, BuildFlag_UseLLD, str_lit("lld"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_UseSeparateModules, str_lit("use-separate-modules"), BuildFlagParam_None, Command__does_build);
- add_flag(&build_flags, BuildFlag_ThreadedChecker, str_lit("threaded-checker"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_NoThreadedChecker, str_lit("no-threaded-checker"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ShowDebugMessages, str_lit("show-debug-messages"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None, Command__does_check);
@@ -832,6 +830,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_ErrorPosStyle, str_lit("error-pos-style"), BuildFlagParam_String, Command_all);
add_flag(&build_flags, BuildFlag_InternalIgnoreLazy, str_lit("internal-ignore-lazy"), BuildFlagParam_None, Command_all);
+ add_flag(&build_flags, BuildFlag_InternalIgnoreLLVMBuild, str_lit("internal-ignore-llvm-build"),BuildFlagParam_None, Command_all);
#if defined(GB_SYSTEM_WINDOWS)
add_flag(&build_flags, BuildFlag_IgnoreVsSearch, str_lit("ignore-vs-search"), BuildFlagParam_None, Command__does_build);
@@ -1310,20 +1309,8 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_UseSeparateModules:
build_context.use_separate_modules = true;
break;
- case BuildFlag_ThreadedChecker: {
- #if defined(DEFAULT_TO_THREADED_CHECKER)
- gb_printf_err("-threaded-checker is the default on this platform\n");
- bad_flags = true;
- #endif
- build_context.threaded_checker = true;
- break;
- }
case BuildFlag_NoThreadedChecker: {
- #if !defined(DEFAULT_TO_THREADED_CHECKER)
- gb_printf_err("-no-threaded-checker is the default on this platform\n");
- bad_flags = true;
- #endif
- build_context.threaded_checker = false;
+ build_context.no_threaded_checker = true;
break;
}
case BuildFlag_ShowDebugMessages:
@@ -1491,6 +1478,9 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_InternalIgnoreLazy:
build_context.ignore_lazy = true;
break;
+ case BuildFlag_InternalIgnoreLLVMBuild:
+ build_context.ignore_llvm_build = true;
+ break;
#if defined(GB_SYSTEM_WINDOWS)
case BuildFlag_IgnoreVsSearch: {
GB_ASSERT(value.kind == ExactValue_Invalid);
@@ -2498,15 +2488,10 @@ int main(int arg_count, char const **arg_ptr) {
MAIN_TIME_SECTION("initialization");
virtual_memory_init();
- mutex_init(&fullpath_mutex);
- mutex_init(&hash_exact_value_mutex);
- mutex_init(&global_type_name_objc_metadata_mutex);
- init_string_buffer_memory();
init_string_interner();
init_global_error_collector();
init_keyword_hash_table();
- init_type_mutex();
if (!check_env()) {
return 1;
@@ -2517,9 +2502,9 @@ int main(int arg_count, char const **arg_ptr) {
add_library_collection(str_lit("core"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("core")));
add_library_collection(str_lit("vendor"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("vendor")));
- map_init(&build_context.defined_values, heap_allocator());
+ map_init(&build_context.defined_values);
build_context.extra_packages.allocator = heap_allocator();
- string_set_init(&build_context.test_names, heap_allocator());
+ string_set_init(&build_context.test_names);
Array<String> args = setup_args(arg_count, arg_ptr);
@@ -2785,19 +2770,19 @@ int main(int arg_count, char const **arg_ptr) {
if (!lb_init_generator(gen, checker)) {
return 1;
}
- lb_generate_code(gen);
-
- switch (build_context.build_mode) {
- case BuildMode_Executable:
- case BuildMode_DynamicLibrary:
- i32 result = linker_stage(gen);
- if (result) {
- if (build_context.show_timings) {
- show_timings(checker, &global_timings);
+ if (lb_generate_code(gen)) {
+ switch (build_context.build_mode) {
+ case BuildMode_Executable:
+ case BuildMode_DynamicLibrary:
+ i32 result = linker_stage(gen);
+ if (result) {
+ if (build_context.show_timings) {
+ show_timings(checker, &global_timings);
+ }
+ return result;
}
- return result;
+ break;
}
- break;
}
remove_temp_files(gen);
diff --git a/src/parser.cpp b/src/parser.cpp
index e07f26004..0eb7e5fc1 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -64,11 +64,9 @@ gb_global std::atomic<isize> global_total_node_memory_allocated;
// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) {
- gbAllocator a = ast_allocator(f);
-
isize size = ast_node_size(kind);
- Ast *node = cast(Ast *)gb_alloc(a, size);
+ Ast *node = cast(Ast *)arena_alloc(&global_thread_local_ast_arena, size, 16);
node->kind = kind;
node->file_id = f ? f->id : 0;
@@ -77,33 +75,35 @@ gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) {
return node;
}
-gb_internal Ast *clone_ast(Ast *node);
-gb_internal Array<Ast *> clone_ast_array(Array<Ast *> const &array) {
+gb_internal Ast *clone_ast(Ast *node, AstFile *f = nullptr);
+gb_internal Array<Ast *> clone_ast_array(Array<Ast *> const &array, AstFile *f) {
Array<Ast *> result = {};
if (array.count > 0) {
result = array_make<Ast *>(ast_allocator(nullptr), array.count);
for_array(i, array) {
- result[i] = clone_ast(array[i]);
+ result[i] = clone_ast(array[i], f);
}
}
return result;
}
-gb_internal Slice<Ast *> clone_ast_array(Slice<Ast *> const &array) {
+gb_internal Slice<Ast *> clone_ast_array(Slice<Ast *> const &array, AstFile *f) {
Slice<Ast *> result = {};
if (array.count > 0) {
result = slice_clone(permanent_allocator(), array);
for_array(i, array) {
- result[i] = clone_ast(array[i]);
+ result[i] = clone_ast(array[i], f);
}
}
return result;
}
-gb_internal Ast *clone_ast(Ast *node) {
+gb_internal Ast *clone_ast(Ast *node, AstFile *f) {
if (node == nullptr) {
return nullptr;
}
- AstFile *f = node->thread_safe_file();
+ if (f == nullptr) {
+ f = node->thread_safe_file();
+ }
Ast *n = alloc_ast_node(f, node->kind);
gb_memmove(n, node, ast_node_size(node->kind));
@@ -120,279 +120,279 @@ gb_internal Ast *clone_ast(Ast *node) {
case Ast_BasicDirective: break;
case Ast_PolyType:
- n->PolyType.type = clone_ast(n->PolyType.type);
- n->PolyType.specialization = clone_ast(n->PolyType.specialization);
+ n->PolyType.type = clone_ast(n->PolyType.type, f);
+ n->PolyType.specialization = clone_ast(n->PolyType.specialization, f);
break;
case Ast_Ellipsis:
- n->Ellipsis.expr = clone_ast(n->Ellipsis.expr);
+ n->Ellipsis.expr = clone_ast(n->Ellipsis.expr, f);
break;
case Ast_ProcGroup:
- n->ProcGroup.args = clone_ast_array(n->ProcGroup.args);
+ n->ProcGroup.args = clone_ast_array(n->ProcGroup.args, f);
break;
case Ast_ProcLit:
- n->ProcLit.type = clone_ast(n->ProcLit.type);
- n->ProcLit.body = clone_ast(n->ProcLit.body);
- n->ProcLit.where_clauses = clone_ast_array(n->ProcLit.where_clauses);
+ n->ProcLit.type = clone_ast(n->ProcLit.type, f);
+ n->ProcLit.body = clone_ast(n->ProcLit.body, f);
+ n->ProcLit.where_clauses = clone_ast_array(n->ProcLit.where_clauses, f);
break;
case Ast_CompoundLit:
- n->CompoundLit.type = clone_ast(n->CompoundLit.type);
- n->CompoundLit.elems = clone_ast_array(n->CompoundLit.elems);
+ n->CompoundLit.type = clone_ast(n->CompoundLit.type, f);
+ n->CompoundLit.elems = clone_ast_array(n->CompoundLit.elems, f);
break;
case Ast_BadExpr: break;
case Ast_TagExpr:
- n->TagExpr.expr = clone_ast(n->TagExpr.expr);
+ n->TagExpr.expr = clone_ast(n->TagExpr.expr, f);
break;
case Ast_UnaryExpr:
- n->UnaryExpr.expr = clone_ast(n->UnaryExpr.expr);
+ n->UnaryExpr.expr = clone_ast(n->UnaryExpr.expr, f);
break;
case Ast_BinaryExpr:
- n->BinaryExpr.left = clone_ast(n->BinaryExpr.left);
- n->BinaryExpr.right = clone_ast(n->BinaryExpr.right);
+ n->BinaryExpr.left = clone_ast(n->BinaryExpr.left, f);
+ n->BinaryExpr.right = clone_ast(n->BinaryExpr.right, f);
break;
case Ast_ParenExpr:
- n->ParenExpr.expr = clone_ast(n->ParenExpr.expr);
+ n->ParenExpr.expr = clone_ast(n->ParenExpr.expr, f);
break;
case Ast_SelectorExpr:
- n->SelectorExpr.expr = clone_ast(n->SelectorExpr.expr);
- n->SelectorExpr.selector = clone_ast(n->SelectorExpr.selector);
+ n->SelectorExpr.expr = clone_ast(n->SelectorExpr.expr, f);
+ n->SelectorExpr.selector = clone_ast(n->SelectorExpr.selector, f);
break;
case Ast_ImplicitSelectorExpr:
- n->ImplicitSelectorExpr.selector = clone_ast(n->ImplicitSelectorExpr.selector);
+ n->ImplicitSelectorExpr.selector = clone_ast(n->ImplicitSelectorExpr.selector, f);
break;
case Ast_SelectorCallExpr:
- n->SelectorCallExpr.expr = clone_ast(n->SelectorCallExpr.expr);
- n->SelectorCallExpr.call = clone_ast(n->SelectorCallExpr.call);
+ n->SelectorCallExpr.expr = clone_ast(n->SelectorCallExpr.expr, f);
+ n->SelectorCallExpr.call = clone_ast(n->SelectorCallExpr.call, f);
break;
case Ast_IndexExpr:
- n->IndexExpr.expr = clone_ast(n->IndexExpr.expr);
- n->IndexExpr.index = clone_ast(n->IndexExpr.index);
+ n->IndexExpr.expr = clone_ast(n->IndexExpr.expr, f);
+ n->IndexExpr.index = clone_ast(n->IndexExpr.index, f);
break;
case Ast_MatrixIndexExpr:
- n->MatrixIndexExpr.expr = clone_ast(n->MatrixIndexExpr.expr);
- n->MatrixIndexExpr.row_index = clone_ast(n->MatrixIndexExpr.row_index);
- n->MatrixIndexExpr.column_index = clone_ast(n->MatrixIndexExpr.column_index);
+ n->MatrixIndexExpr.expr = clone_ast(n->MatrixIndexExpr.expr, f);
+ n->MatrixIndexExpr.row_index = clone_ast(n->MatrixIndexExpr.row_index, f);
+ n->MatrixIndexExpr.column_index = clone_ast(n->MatrixIndexExpr.column_index, f);
break;
case Ast_DerefExpr:
- n->DerefExpr.expr = clone_ast(n->DerefExpr.expr);
+ n->DerefExpr.expr = clone_ast(n->DerefExpr.expr, f);
break;
case Ast_SliceExpr:
- n->SliceExpr.expr = clone_ast(n->SliceExpr.expr);
- n->SliceExpr.low = clone_ast(n->SliceExpr.low);
- n->SliceExpr.high = clone_ast(n->SliceExpr.high);
+ n->SliceExpr.expr = clone_ast(n->SliceExpr.expr, f);
+ n->SliceExpr.low = clone_ast(n->SliceExpr.low, f);
+ n->SliceExpr.high = clone_ast(n->SliceExpr.high, f);
break;
case Ast_CallExpr:
- n->CallExpr.proc = clone_ast(n->CallExpr.proc);
- n->CallExpr.args = clone_ast_array(n->CallExpr.args);
+ n->CallExpr.proc = clone_ast(n->CallExpr.proc, f);
+ n->CallExpr.args = clone_ast_array(n->CallExpr.args, f);
break;
case Ast_FieldValue:
- n->FieldValue.field = clone_ast(n->FieldValue.field);
- n->FieldValue.value = clone_ast(n->FieldValue.value);
+ n->FieldValue.field = clone_ast(n->FieldValue.field, f);
+ n->FieldValue.value = clone_ast(n->FieldValue.value, f);
break;
case Ast_EnumFieldValue:
- n->EnumFieldValue.name = clone_ast(n->EnumFieldValue.name);
- n->EnumFieldValue.value = clone_ast(n->EnumFieldValue.value);
+ n->EnumFieldValue.name = clone_ast(n->EnumFieldValue.name, f);
+ n->EnumFieldValue.value = clone_ast(n->EnumFieldValue.value, f);
break;
case Ast_TernaryIfExpr:
- n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x);
- n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond);
- n->TernaryIfExpr.y = clone_ast(n->TernaryIfExpr.y);
+ n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x, f);
+ n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond, f);
+ n->TernaryIfExpr.y = clone_ast(n->TernaryIfExpr.y, f);
break;
case Ast_TernaryWhenExpr:
- n->TernaryWhenExpr.x = clone_ast(n->TernaryWhenExpr.x);
- n->TernaryWhenExpr.cond = clone_ast(n->TernaryWhenExpr.cond);
- n->TernaryWhenExpr.y = clone_ast(n->TernaryWhenExpr.y);
+ n->TernaryWhenExpr.x = clone_ast(n->TernaryWhenExpr.x, f);
+ n->TernaryWhenExpr.cond = clone_ast(n->TernaryWhenExpr.cond, f);
+ n->TernaryWhenExpr.y = clone_ast(n->TernaryWhenExpr.y, f);
break;
case Ast_OrElseExpr:
- n->OrElseExpr.x = clone_ast(n->OrElseExpr.x);
- n->OrElseExpr.y = clone_ast(n->OrElseExpr.y);
+ n->OrElseExpr.x = clone_ast(n->OrElseExpr.x, f);
+ n->OrElseExpr.y = clone_ast(n->OrElseExpr.y, f);
break;
case Ast_OrReturnExpr:
- n->OrReturnExpr.expr = clone_ast(n->OrReturnExpr.expr);
+ n->OrReturnExpr.expr = clone_ast(n->OrReturnExpr.expr, f);
break;
case Ast_TypeAssertion:
- n->TypeAssertion.expr = clone_ast(n->TypeAssertion.expr);
- n->TypeAssertion.type = clone_ast(n->TypeAssertion.type);
+ n->TypeAssertion.expr = clone_ast(n->TypeAssertion.expr, f);
+ n->TypeAssertion.type = clone_ast(n->TypeAssertion.type, f);
break;
case Ast_TypeCast:
- n->TypeCast.type = clone_ast(n->TypeCast.type);
- n->TypeCast.expr = clone_ast(n->TypeCast.expr);
+ n->TypeCast.type = clone_ast(n->TypeCast.type, f);
+ n->TypeCast.expr = clone_ast(n->TypeCast.expr, f);
break;
case Ast_AutoCast:
- n->AutoCast.expr = clone_ast(n->AutoCast.expr);
+ n->AutoCast.expr = clone_ast(n->AutoCast.expr, f);
break;
case Ast_InlineAsmExpr:
- n->InlineAsmExpr.param_types = clone_ast_array(n->InlineAsmExpr.param_types);
- n->InlineAsmExpr.return_type = clone_ast(n->InlineAsmExpr.return_type);
- n->InlineAsmExpr.asm_string = clone_ast(n->InlineAsmExpr.asm_string);
- n->InlineAsmExpr.constraints_string = clone_ast(n->InlineAsmExpr.constraints_string);
+ n->InlineAsmExpr.param_types = clone_ast_array(n->InlineAsmExpr.param_types, f);
+ n->InlineAsmExpr.return_type = clone_ast(n->InlineAsmExpr.return_type, f);
+ n->InlineAsmExpr.asm_string = clone_ast(n->InlineAsmExpr.asm_string, f);
+ n->InlineAsmExpr.constraints_string = clone_ast(n->InlineAsmExpr.constraints_string, f);
break;
case Ast_BadStmt: break;
case Ast_EmptyStmt: break;
case Ast_ExprStmt:
- n->ExprStmt.expr = clone_ast(n->ExprStmt.expr);
+ n->ExprStmt.expr = clone_ast(n->ExprStmt.expr, f);
break;
case Ast_AssignStmt:
- n->AssignStmt.lhs = clone_ast_array(n->AssignStmt.lhs);
- n->AssignStmt.rhs = clone_ast_array(n->AssignStmt.rhs);
+ n->AssignStmt.lhs = clone_ast_array(n->AssignStmt.lhs, f);
+ n->AssignStmt.rhs = clone_ast_array(n->AssignStmt.rhs, f);
break;
case Ast_BlockStmt:
- n->BlockStmt.label = clone_ast(n->BlockStmt.label);
- n->BlockStmt.stmts = clone_ast_array(n->BlockStmt.stmts);
+ n->BlockStmt.label = clone_ast(n->BlockStmt.label, f);
+ n->BlockStmt.stmts = clone_ast_array(n->BlockStmt.stmts, f);
break;
case Ast_IfStmt:
- n->IfStmt.label = clone_ast(n->IfStmt.label);
- n->IfStmt.init = clone_ast(n->IfStmt.init);
- n->IfStmt.cond = clone_ast(n->IfStmt.cond);
- n->IfStmt.body = clone_ast(n->IfStmt.body);
- n->IfStmt.else_stmt = clone_ast(n->IfStmt.else_stmt);
+ n->IfStmt.label = clone_ast(n->IfStmt.label, f);
+ n->IfStmt.init = clone_ast(n->IfStmt.init, f);
+ n->IfStmt.cond = clone_ast(n->IfStmt.cond, f);
+ n->IfStmt.body = clone_ast(n->IfStmt.body, f);
+ n->IfStmt.else_stmt = clone_ast(n->IfStmt.else_stmt, f);
break;
case Ast_WhenStmt:
- n->WhenStmt.cond = clone_ast(n->WhenStmt.cond);
- n->WhenStmt.body = clone_ast(n->WhenStmt.body);
- n->WhenStmt.else_stmt = clone_ast(n->WhenStmt.else_stmt);
+ n->WhenStmt.cond = clone_ast(n->WhenStmt.cond, f);
+ n->WhenStmt.body = clone_ast(n->WhenStmt.body, f);
+ n->WhenStmt.else_stmt = clone_ast(n->WhenStmt.else_stmt, f);
break;
case Ast_ReturnStmt:
- n->ReturnStmt.results = clone_ast_array(n->ReturnStmt.results);
+ n->ReturnStmt.results = clone_ast_array(n->ReturnStmt.results, f);
break;
case Ast_ForStmt:
- n->ForStmt.label = clone_ast(n->ForStmt.label);
- n->ForStmt.init = clone_ast(n->ForStmt.init);
- n->ForStmt.cond = clone_ast(n->ForStmt.cond);
- n->ForStmt.post = clone_ast(n->ForStmt.post);
- n->ForStmt.body = clone_ast(n->ForStmt.body);
+ n->ForStmt.label = clone_ast(n->ForStmt.label, f);
+ n->ForStmt.init = clone_ast(n->ForStmt.init, f);
+ n->ForStmt.cond = clone_ast(n->ForStmt.cond, f);
+ n->ForStmt.post = clone_ast(n->ForStmt.post, f);
+ n->ForStmt.body = clone_ast(n->ForStmt.body, f);
break;
case Ast_RangeStmt:
- n->RangeStmt.label = clone_ast(n->RangeStmt.label);
- n->RangeStmt.vals = clone_ast_array(n->RangeStmt.vals);
- n->RangeStmt.expr = clone_ast(n->RangeStmt.expr);
- n->RangeStmt.body = clone_ast(n->RangeStmt.body);
+ n->RangeStmt.label = clone_ast(n->RangeStmt.label, f);
+ n->RangeStmt.vals = clone_ast_array(n->RangeStmt.vals, f);
+ n->RangeStmt.expr = clone_ast(n->RangeStmt.expr, f);
+ n->RangeStmt.body = clone_ast(n->RangeStmt.body, f);
break;
case Ast_UnrollRangeStmt:
- n->UnrollRangeStmt.val0 = clone_ast(n->UnrollRangeStmt.val0);
- n->UnrollRangeStmt.val1 = clone_ast(n->UnrollRangeStmt.val1);
- n->UnrollRangeStmt.expr = clone_ast(n->UnrollRangeStmt.expr);
- n->UnrollRangeStmt.body = clone_ast(n->UnrollRangeStmt.body);
+ n->UnrollRangeStmt.val0 = clone_ast(n->UnrollRangeStmt.val0, f);
+ n->UnrollRangeStmt.val1 = clone_ast(n->UnrollRangeStmt.val1, f);
+ n->UnrollRangeStmt.expr = clone_ast(n->UnrollRangeStmt.expr, f);
+ n->UnrollRangeStmt.body = clone_ast(n->UnrollRangeStmt.body, f);
break;
case Ast_CaseClause:
- n->CaseClause.list = clone_ast_array(n->CaseClause.list);
- n->CaseClause.stmts = clone_ast_array(n->CaseClause.stmts);
+ n->CaseClause.list = clone_ast_array(n->CaseClause.list, f);
+ n->CaseClause.stmts = clone_ast_array(n->CaseClause.stmts, f);
n->CaseClause.implicit_entity = nullptr;
break;
case Ast_SwitchStmt:
- n->SwitchStmt.label = clone_ast(n->SwitchStmt.label);
- n->SwitchStmt.init = clone_ast(n->SwitchStmt.init);
- n->SwitchStmt.tag = clone_ast(n->SwitchStmt.tag);
- n->SwitchStmt.body = clone_ast(n->SwitchStmt.body);
+ n->SwitchStmt.label = clone_ast(n->SwitchStmt.label, f);
+ n->SwitchStmt.init = clone_ast(n->SwitchStmt.init, f);
+ n->SwitchStmt.tag = clone_ast(n->SwitchStmt.tag, f);
+ n->SwitchStmt.body = clone_ast(n->SwitchStmt.body, f);
break;
case Ast_TypeSwitchStmt:
- n->TypeSwitchStmt.label = clone_ast(n->TypeSwitchStmt.label);
- n->TypeSwitchStmt.tag = clone_ast(n->TypeSwitchStmt.tag);
- n->TypeSwitchStmt.body = clone_ast(n->TypeSwitchStmt.body);
+ n->TypeSwitchStmt.label = clone_ast(n->TypeSwitchStmt.label, f);
+ n->TypeSwitchStmt.tag = clone_ast(n->TypeSwitchStmt.tag, f);
+ n->TypeSwitchStmt.body = clone_ast(n->TypeSwitchStmt.body, f);
break;
case Ast_DeferStmt:
- n->DeferStmt.stmt = clone_ast(n->DeferStmt.stmt);
+ n->DeferStmt.stmt = clone_ast(n->DeferStmt.stmt, f);
break;
case Ast_BranchStmt:
- n->BranchStmt.label = clone_ast(n->BranchStmt.label);
+ n->BranchStmt.label = clone_ast(n->BranchStmt.label, f);
break;
case Ast_UsingStmt:
- n->UsingStmt.list = clone_ast_array(n->UsingStmt.list);
+ n->UsingStmt.list = clone_ast_array(n->UsingStmt.list, f);
break;
case Ast_BadDecl: break;
case Ast_ForeignBlockDecl:
- n->ForeignBlockDecl.foreign_library = clone_ast(n->ForeignBlockDecl.foreign_library);
- n->ForeignBlockDecl.body = clone_ast(n->ForeignBlockDecl.body);
- n->ForeignBlockDecl.attributes = clone_ast_array(n->ForeignBlockDecl.attributes);
+ n->ForeignBlockDecl.foreign_library = clone_ast(n->ForeignBlockDecl.foreign_library, f);
+ n->ForeignBlockDecl.body = clone_ast(n->ForeignBlockDecl.body, f);
+ n->ForeignBlockDecl.attributes = clone_ast_array(n->ForeignBlockDecl.attributes, f);
break;
case Ast_Label:
- n->Label.name = clone_ast(n->Label.name);
+ n->Label.name = clone_ast(n->Label.name, f);
break;
case Ast_ValueDecl:
- n->ValueDecl.names = clone_ast_array(n->ValueDecl.names);
- n->ValueDecl.type = clone_ast(n->ValueDecl.type);
- n->ValueDecl.values = clone_ast_array(n->ValueDecl.values);
- n->ValueDecl.attributes = clone_ast_array(n->ValueDecl.attributes);
+ n->ValueDecl.names = clone_ast_array(n->ValueDecl.names, f);
+ n->ValueDecl.type = clone_ast(n->ValueDecl.type, f);
+ n->ValueDecl.values = clone_ast_array(n->ValueDecl.values, f);
+ n->ValueDecl.attributes = clone_ast_array(n->ValueDecl.attributes, f);
break;
case Ast_Attribute:
- n->Attribute.elems = clone_ast_array(n->Attribute.elems);
+ n->Attribute.elems = clone_ast_array(n->Attribute.elems, f);
break;
case Ast_Field:
- n->Field.names = clone_ast_array(n->Field.names);
- n->Field.type = clone_ast(n->Field.type);
+ n->Field.names = clone_ast_array(n->Field.names, f);
+ n->Field.type = clone_ast(n->Field.type, f);
break;
case Ast_FieldList:
- n->FieldList.list = clone_ast_array(n->FieldList.list);
+ n->FieldList.list = clone_ast_array(n->FieldList.list, f);
break;
case Ast_TypeidType:
- n->TypeidType.specialization = clone_ast(n->TypeidType.specialization);
+ n->TypeidType.specialization = clone_ast(n->TypeidType.specialization, f);
break;
case Ast_HelperType:
- n->HelperType.type = clone_ast(n->HelperType.type);
+ n->HelperType.type = clone_ast(n->HelperType.type, f);
break;
case Ast_DistinctType:
- n->DistinctType.type = clone_ast(n->DistinctType.type);
+ n->DistinctType.type = clone_ast(n->DistinctType.type, f);
break;
case Ast_ProcType:
- n->ProcType.params = clone_ast(n->ProcType.params);
- n->ProcType.results = clone_ast(n->ProcType.results);
+ n->ProcType.params = clone_ast(n->ProcType.params, f);
+ n->ProcType.results = clone_ast(n->ProcType.results, f);
break;
case Ast_RelativeType:
- n->RelativeType.tag = clone_ast(n->RelativeType.tag);
- n->RelativeType.type = clone_ast(n->RelativeType.type);
+ n->RelativeType.tag = clone_ast(n->RelativeType.tag, f);
+ n->RelativeType.type = clone_ast(n->RelativeType.type, f);
break;
case Ast_PointerType:
- n->PointerType.type = clone_ast(n->PointerType.type);
- n->PointerType.tag = clone_ast(n->PointerType.tag);
+ n->PointerType.type = clone_ast(n->PointerType.type, f);
+ n->PointerType.tag = clone_ast(n->PointerType.tag, f);
break;
case Ast_MultiPointerType:
- n->MultiPointerType.type = clone_ast(n->MultiPointerType.type);
+ n->MultiPointerType.type = clone_ast(n->MultiPointerType.type, f);
break;
case Ast_ArrayType:
- n->ArrayType.count = clone_ast(n->ArrayType.count);
- n->ArrayType.elem = clone_ast(n->ArrayType.elem);
- n->ArrayType.tag = clone_ast(n->ArrayType.tag);
+ n->ArrayType.count = clone_ast(n->ArrayType.count, f);
+ n->ArrayType.elem = clone_ast(n->ArrayType.elem, f);
+ n->ArrayType.tag = clone_ast(n->ArrayType.tag, f);
break;
case Ast_DynamicArrayType:
- n->DynamicArrayType.elem = clone_ast(n->DynamicArrayType.elem);
+ n->DynamicArrayType.elem = clone_ast(n->DynamicArrayType.elem, f);
break;
case Ast_StructType:
- n->StructType.fields = clone_ast_array(n->StructType.fields);
- n->StructType.polymorphic_params = clone_ast(n->StructType.polymorphic_params);
- n->StructType.align = clone_ast(n->StructType.align);
- n->StructType.where_clauses = clone_ast_array(n->StructType.where_clauses);
+ n->StructType.fields = clone_ast_array(n->StructType.fields, f);
+ n->StructType.polymorphic_params = clone_ast(n->StructType.polymorphic_params, f);
+ n->StructType.align = clone_ast(n->StructType.align, f);
+ n->StructType.where_clauses = clone_ast_array(n->StructType.where_clauses, f);
break;
case Ast_UnionType:
- n->UnionType.variants = clone_ast_array(n->UnionType.variants);
- n->UnionType.polymorphic_params = clone_ast(n->UnionType.polymorphic_params);
- n->UnionType.where_clauses = clone_ast_array(n->UnionType.where_clauses);
+ n->UnionType.variants = clone_ast_array(n->UnionType.variants, f);
+ n->UnionType.polymorphic_params = clone_ast(n->UnionType.polymorphic_params, f);
+ n->UnionType.where_clauses = clone_ast_array(n->UnionType.where_clauses, f);
break;
case Ast_EnumType:
- n->EnumType.base_type = clone_ast(n->EnumType.base_type);
- n->EnumType.fields = clone_ast_array(n->EnumType.fields);
+ n->EnumType.base_type = clone_ast(n->EnumType.base_type, f);
+ n->EnumType.fields = clone_ast_array(n->EnumType.fields, f);
break;
case Ast_BitSetType:
- n->BitSetType.elem = clone_ast(n->BitSetType.elem);
- n->BitSetType.underlying = clone_ast(n->BitSetType.underlying);
+ n->BitSetType.elem = clone_ast(n->BitSetType.elem, f);
+ n->BitSetType.underlying = clone_ast(n->BitSetType.underlying, f);
break;
case Ast_MapType:
- n->MapType.count = clone_ast(n->MapType.count);
- n->MapType.key = clone_ast(n->MapType.key);
- n->MapType.value = clone_ast(n->MapType.value);
+ n->MapType.count = clone_ast(n->MapType.count, f);
+ n->MapType.key = clone_ast(n->MapType.key, f);
+ n->MapType.value = clone_ast(n->MapType.value, f);
break;
case Ast_MatrixType:
- n->MatrixType.row_count = clone_ast(n->MatrixType.row_count);
- n->MatrixType.column_count = clone_ast(n->MatrixType.column_count);
- n->MatrixType.elem = clone_ast(n->MatrixType.elem);
+ n->MatrixType.row_count = clone_ast(n->MatrixType.row_count, f);
+ n->MatrixType.column_count = clone_ast(n->MatrixType.column_count, f);
+ n->MatrixType.elem = clone_ast(n->MatrixType.elem, f);
break;
}
@@ -1905,13 +1905,11 @@ gb_internal void check_polymorphic_params_for_type(AstFile *f, Ast *polymorphic_
return;
}
ast_node(fl, FieldList, polymorphic_params);
- for_array(fi, fl->list) {
- Ast *field = fl->list[fi];
+ for (Ast *field : fl->list) {
if (field->kind != Ast_Field) {
continue;
}
- for_array(i, field->Field.names) {
- Ast *name = field->Field.names[i];
+ for (Ast *name : field->Field.names) {
if (name->kind != field->Field.names[0]->kind) {
syntax_error(name, "Mixture of polymorphic names using both $ and not for %.*s parameters", LIT(token.string));
return;
@@ -3473,16 +3471,14 @@ gb_internal Ast *parse_proc_type(AstFile *f, Token proc_token) {
u64 tags = 0;
bool is_generic = false;
- for_array(i, params->FieldList.list) {
- Ast *param = params->FieldList.list[i];
+ for (Ast *param : params->FieldList.list) {
ast_node(field, Field, param);
if (field->type != nullptr) {
if (field->type->kind == Ast_PolyType) {
is_generic = true;
goto end;
}
- for_array(j, field->names) {
- Ast *name = field->names[j];
+ for (Ast *name : field->names) {
if (name->kind == Ast_PolyType) {
is_generic = true;
goto end;
@@ -3646,8 +3642,9 @@ struct AstAndFlags {
gb_internal Array<Ast *> convert_to_ident_list(AstFile *f, Array<AstAndFlags> list, bool ignore_flags, bool allow_poly_names) {
auto idents = array_make<Ast *>(heap_allocator(), 0, list.count);
// Convert to ident list
- for_array(i, list) {
- Ast *ident = list[i].node;
+ isize i = 0;
+ for (AstAndFlags const &item : list) {
+ Ast *ident = item.node;
if (!ignore_flags) {
if (i != 0) {
@@ -3678,6 +3675,7 @@ gb_internal Array<Ast *> convert_to_ident_list(AstFile *f, Array<AstAndFlags> li
break;
}
array_add(&idents, ident);
+ i += 1;
}
return idents;
}
@@ -3919,8 +3917,8 @@ gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_fl
return ast_field_list(f, start_token, params);
}
- for_array(i, list) {
- Ast *type = list[i].node;
+ for (AstAndFlags const &item : list) {
+ Ast *type = item.node;
Token token = blank_token;
if (allowed_flags&FieldFlag_Results) {
// NOTE(bill): Make this nothing and not `_`
@@ -3930,9 +3928,9 @@ gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_fl
auto names = array_make<Ast *>(heap_allocator(), 1);
token.pos = ast_token(type).pos;
names[0] = ast_ident(f, token);
- u32 flags = check_field_prefixes(f, list.count, allowed_flags, list[i].flags);
+ u32 flags = check_field_prefixes(f, list.count, allowed_flags, item.flags);
Token tag = {};
- Ast *param = ast_field(f, names, list[i].node, nullptr, flags, tag, docs, f->line_comment);
+ Ast *param = ast_field(f, names, item.node, nullptr, flags, tag, docs, f->line_comment);
array_add(&params, param);
}
@@ -4856,40 +4854,31 @@ gb_internal void destroy_ast_file(AstFile *f) {
gb_internal bool init_parser(Parser *p) {
GB_ASSERT(p != nullptr);
- string_set_init(&p->imported_files, heap_allocator());
+ string_set_init(&p->imported_files);
array_init(&p->packages, heap_allocator());
- mutex_init(&p->imported_files_mutex);
- mutex_init(&p->file_decl_mutex);
- mutex_init(&p->packages_mutex);
- mutex_init(&p->file_error_mutex);
return true;
}
gb_internal void destroy_parser(Parser *p) {
GB_ASSERT(p != nullptr);
// TODO(bill): Fix memory leak
- for_array(i, p->packages) {
- AstPackage *pkg = p->packages[i];
- for_array(j, pkg->files) {
- destroy_ast_file(pkg->files[j]);
+ for (AstPackage *pkg : p->packages) {
+ for (AstFile *file : pkg->files) {
+ destroy_ast_file(file);
}
array_free(&pkg->files);
array_free(&pkg->foreign_files);
}
array_free(&p->packages);
string_set_destroy(&p->imported_files);
- mutex_destroy(&p->imported_files_mutex);
- mutex_destroy(&p->file_decl_mutex);
- mutex_destroy(&p->packages_mutex);
- mutex_destroy(&p->file_error_mutex);
}
gb_internal void parser_add_package(Parser *p, AstPackage *pkg) {
- mutex_lock(&p->packages_mutex);
- pkg->id = p->packages.count+1;
- array_add(&p->packages, pkg);
- mutex_unlock(&p->packages_mutex);
+ MUTEX_GUARD_BLOCK(&p->packages_mutex) {
+ pkg->id = p->packages.count+1;
+ array_add(&p->packages, pkg);
+ }
}
gb_internal ParseFileError process_imported_file(Parser *p, ImportedFile imported_file);
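
Annotation: MUTEX_GUARD_BLOCK lets a locked critical section read like a statement with a body. One plausible implementation (an assumption; the compiler defines its own version) is a for-loop that runs its body exactly once while an RAII guard holds the lock:

#include <mutex>

struct MutexGuard {
    std::mutex *m;
    bool done = false;
    explicit MutexGuard(std::mutex *m_) : m(m_) { m->lock(); }
    ~MutexGuard() { m->unlock(); } // released when the block ends
};

#define MUTEX_GUARD_BLOCK(m) \
    for (MutexGuard guard{m}; !guard.done; guard.done = true)

std::mutex packages_mutex;
int package_count = 0;

void add_package() {
    MUTEX_GUARD_BLOCK(&packages_mutex) {
        package_count += 1; // body runs exactly once with the mutex held
    }
}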
@@ -4901,15 +4890,15 @@ gb_internal WORKER_TASK_PROC(parser_worker_proc) {
auto *node = gb_alloc_item(permanent_allocator(), ParseFileErrorNode);
node->err = err;
- mutex_lock(&wd->parser->file_error_mutex);
- if (wd->parser->file_error_tail != nullptr) {
- wd->parser->file_error_tail->next = node;
- }
- wd->parser->file_error_tail = node;
- if (wd->parser->file_error_head == nullptr) {
- wd->parser->file_error_head = node;
+ MUTEX_GUARD_BLOCK(&wd->parser->file_error_mutex) {
+ if (wd->parser->file_error_tail != nullptr) {
+ wd->parser->file_error_tail->next = node;
+ }
+ wd->parser->file_error_tail = node;
+ if (wd->parser->file_error_head == nullptr) {
+ wd->parser->file_error_head = node;
+ }
}
- mutex_unlock(&wd->parser->file_error_mutex);
}
return cast(isize)err;
}
@@ -4921,7 +4910,7 @@ gb_internal void parser_add_file_to_process(Parser *p, AstPackage *pkg, FileInfo
auto wd = gb_alloc_item(permanent_allocator(), ParserWorkerData);
wd->parser = p;
wd->imported_file = f;
- global_thread_pool_add_task(parser_worker_proc, wd);
+ thread_pool_add_task(parser_worker_proc, wd);
}
gb_internal WORKER_TASK_PROC(foreign_file_worker_proc) {
@@ -4945,9 +4934,9 @@ gb_internal WORKER_TASK_PROC(foreign_file_worker_proc) {
// TODO(bill): Actually do something with it
break;
}
- mutex_lock(&pkg->foreign_files_mutex);
- array_add(&pkg->foreign_files, foreign_file);
- mutex_unlock(&pkg->foreign_files_mutex);
+ MUTEX_GUARD_BLOCK(&pkg->foreign_files_mutex) {
+ array_add(&pkg->foreign_files, foreign_file);
+ }
return 0;
}
@@ -4959,7 +4948,7 @@ gb_internal void parser_add_foreign_file_to_process(Parser *p, AstPackage *pkg,
wd->parser = p;
wd->imported_file = f;
wd->foreign_kind = kind;
- global_thread_pool_add_task(foreign_file_worker_proc, wd);
+ thread_pool_add_task(foreign_file_worker_proc, wd);
}
@@ -4978,19 +4967,16 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin
pkg->fullpath = path;
array_init(&pkg->files, heap_allocator());
pkg->foreign_files.allocator = heap_allocator();
- mutex_init(&pkg->files_mutex);
- mutex_init(&pkg->foreign_files_mutex);
-
// NOTE(bill): Single file initial package
if (kind == Package_Init && string_ends_with(path, FILE_EXT)) {
-
FileInfo fi = {};
fi.name = filename_from_path(path);
fi.fullpath = path;
fi.size = get_file_size(path);
fi.is_dir = false;
+ array_reserve(&pkg->files, 1);
pkg->is_single_file = true;
parser_add_package(p, pkg);
parser_add_file_to_process(p, pkg, fi, pos);
@@ -5028,8 +5014,17 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin
return nullptr;
}
- for_array(list_index, list) {
- FileInfo fi = list[list_index];
+ isize files_to_reserve = 1; // always reserve 1
+ for (FileInfo fi : list) {
+ String name = fi.name;
+ String ext = path_extension(name);
+ if (ext == FILE_EXT && !is_excluded_target_filename(name)) {
+ files_to_reserve += 1;
+ }
+ }
+
+ array_reserve(&pkg->files, files_to_reserve);
+ for (FileInfo fi : list) {
String name = fi.name;
String ext = path_extension(name);
if (ext == FILE_EXT) {
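
Annotation: counting the matching files first and reserving once means pkg->files never reallocates while parse workers are appending to it. The two-pass shape with std::vector, where matches() is a hypothetical stand-in for the FILE_EXT and excluded-target checks:

#include <string>
#include <vector>

bool matches(std::string const &name) {
    return name.size() > 5 && name.compare(name.size() - 5, 5, ".odin") == 0;
}

void collect(std::vector<std::string> const &list, std::vector<std::string> *files) {
    size_t to_reserve = 1; // always reserve at least one slot
    for (std::string const &name : list) {
        if (matches(name)) to_reserve += 1;
    }
    files->reserve(to_reserve); // single allocation up front
    for (std::string const &name : list) {
        if (matches(name)) files->push_back(name);
    }
}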
@@ -5322,14 +5317,14 @@ gb_internal void parse_setup_file_decls(Parser *p, AstFile *f, String const &bas
auto fullpaths = array_make<String>(permanent_allocator(), 0, fl->filepaths.count);
- for_array(fp_idx, fl->filepaths) {
- String file_str = string_trim_whitespace(string_value_from_token(f, fl->filepaths[fp_idx]));
+ for (Token const &fp : fl->filepaths) {
+ String file_str = string_trim_whitespace(string_value_from_token(f, fp));
String fullpath = file_str;
if (allow_check_foreign_filepath()) {
String foreign_path = {};
bool ok = determine_path_from_string(&p->file_decl_mutex, node, base_dir, file_str, &foreign_path);
if (!ok) {
- decls[i] = ast_bad_decl(f, fl->filepaths[fp_idx], fl->filepaths[fl->filepaths.count-1]);
+ decls[i] = ast_bad_decl(f, fp, fl->filepaths[fl->filepaths.count-1]);
goto end;
}
fullpath = foreign_path;
@@ -5454,8 +5449,8 @@ gb_internal isize calc_decl_count(Ast *decl) {
isize count = 0;
switch (decl->kind) {
case Ast_BlockStmt:
- for_array(i, decl->BlockStmt.stmts) {
- count += calc_decl_count(decl->BlockStmt.stmts.data[i]);
+ for (Ast *stmt : decl->BlockStmt.stmts) {
+ count += calc_decl_count(stmt);
}
break;
case Ast_WhenStmt:
@@ -5575,8 +5570,8 @@ gb_internal bool parse_file(Parser *p, AstFile *f) {
f->package_name = package_name.string;
if (!f->pkg->is_single_file && docs != nullptr && docs->list.count > 0) {
- for_array(i, docs->list) {
- Token tok = docs->list[i]; GB_ASSERT(tok.kind == Token_Comment);
+ for (Token const &tok : docs->list) {
+ GB_ASSERT(tok.kind == Token_Comment);
String str = tok.string;
if (string_starts_with(str, str_lit("//"))) {
String lc = string_trim_whitespace(substring(str, 2, str.len));
@@ -5589,6 +5584,8 @@ gb_internal bool parse_file(Parser *p, AstFile *f) {
if (!parse_build_tag(tok, lc)) {
return false;
}
+ } else if (string_starts_with(lc, str_lit("+ignore"))) {
+ return false;
} else if (string_starts_with(lc, str_lit("+private"))) {
f->flags |= AstFile_IsPrivatePkg;
String command = string_trim_starts_with(lc, str_lit("+private "));
@@ -5787,8 +5784,7 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) {
}
- for_array(i, build_context.extra_packages) {
- String path = build_context.extra_packages[i];
+ for (String const &path : build_context.extra_packages) {
String fullpath = path_to_full_path(heap_allocator(), path); // LEAK?
if (!path_is_directory(fullpath)) {
String const ext = str_lit(".odin");
@@ -5804,7 +5800,7 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) {
}
}
- global_thread_pool_wait();
+ thread_pool_wait();
for (ParseFileErrorNode *node = p->file_error_head; node != nullptr; node = node->next) {
if (node->err != ParseFile_None) {
diff --git a/src/parser.hpp b/src/parser.hpp
index c33739ebe..d81194831 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -174,6 +174,7 @@ struct AstPackage {
BlockingMutex files_mutex;
BlockingMutex foreign_files_mutex;
+ BlockingMutex type_and_value_mutex;
MPMCQueue<AstPackageExportedEntity> exported_entity_queue;
@@ -820,9 +821,8 @@ gb_internal gb_inline bool is_ast_when_stmt(Ast *node) {
gb_global gb_thread_local Arena global_thread_local_ast_arena = {};
-gb_internal gbAllocator ast_allocator(AstFile *f) {
- Arena *arena = &global_thread_local_ast_arena;
- return arena_allocator(arena);
+gb_internal gb_inline gbAllocator ast_allocator(AstFile *f) {
+ return arena_allocator(&global_thread_local_ast_arena);
}
gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind);
diff --git a/src/path.cpp b/src/path.cpp
index ad1d7f0af..3b359a269 100644
--- a/src/path.cpp
+++ b/src/path.cpp
@@ -225,7 +225,6 @@ gb_internal i64 get_file_size(String path) {
gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi) {
GB_ASSERT(fi != nullptr);
- gbAllocator a = heap_allocator();
while (path.len > 0) {
Rune end = path[path.len-1];
@@ -242,9 +241,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
return ReadDirectory_InvalidPath;
}
{
- char *c_str = alloc_cstring(a, path);
- defer (gb_free(a, c_str));
-
+ char *c_str = alloc_cstring(temporary_allocator(), path);
gbFile f = {};
gbFileError file_err = gb_file_open(&f, c_str);
defer (gb_file_close(&f));
@@ -261,6 +258,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
}
+ gbAllocator a = heap_allocator();
char *new_path = gb_alloc_array(a, char, path.len+3);
defer (gb_free(a, new_path));
@@ -283,8 +281,8 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
do {
wchar_t *filename_w = file_data.cFileName;
- i64 size = cast(i64)file_data.nFileSizeLow;
- size |= (cast(i64)file_data.nFileSizeHigh) << 32;
+ u64 size = cast(u64)file_data.nFileSizeLow;
+ size |= (cast(u64)file_data.nFileSizeHigh) << 32;
String name = string16_to_string(a, make_string16_c(filename_w));
if (name == "." || name == "..") {
gb_free(a, name.text);
@@ -302,7 +300,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
FileInfo info = {};
info.name = name;
info.fullpath = path_to_full_path(a, filepath);
- info.size = size;
+ info.size = cast(i64)size;
info.is_dir = (file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
array_add(fi, info);
} while (FindNextFileW(find_file, &file_data));
diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp
index 434680e91..598904906 100644
--- a/src/ptr_map.cpp
+++ b/src/ptr_map.cpp
@@ -27,6 +27,7 @@ struct PtrMap {
gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) {
+ u32 res;
#if defined(GB_ARCH_64_BIT)
key = (~key) + (key << 21);
key = key ^ (key >> 24);
@@ -34,22 +35,24 @@ gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) {
key = key ^ (key >> 14);
key = (key + (key << 2)) + (key << 4);
key = key ^ (key << 28);
- return cast(u32)key;
+ res = cast(u32)key;
#elif defined(GB_ARCH_32_BIT)
u32 state = ((u32)key) * 747796405u + 2891336453u;
u32 word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;
- return (word >> 22u) ^ word;
+ res = (word >> 22u) ^ word;
#endif
+ return res;
}
gb_internal gb_inline u32 ptr_map_hash_key(void const *key) {
return ptr_map_hash_key((uintptr)key);
}
-template <typename K, typename V> gb_internal void map_init (PtrMap<K, V> *h, gbAllocator a, isize capacity = 16);
+template <typename K, typename V> gb_internal void map_init (PtrMap<K, V> *h, isize capacity = 16);
template <typename K, typename V> gb_internal void map_destroy (PtrMap<K, V> *h);
template <typename K, typename V> gb_internal V * map_get (PtrMap<K, V> *h, K key);
template <typename K, typename V> gb_internal void map_set (PtrMap<K, V> *h, K key, V const &value);
+template <typename K, typename V> gb_internal bool map_set_if_not_previously_exists(PtrMap<K, V> *h, K key, V const &value); // returns true if it previously existed
template <typename K, typename V> gb_internal void map_remove (PtrMap<K, V> *h, K key);
template <typename K, typename V> gb_internal void map_clear (PtrMap<K, V> *h);
template <typename K, typename V> gb_internal void map_grow (PtrMap<K, V> *h);
@@ -68,11 +71,15 @@ template <typename K, typename V> gb_internal void multi_map_remove (PtrMap<
template <typename K, typename V> gb_internal void multi_map_remove_all(PtrMap<K, V> *h, K key);
#endif
+gb_internal gbAllocator map_allocator(void) {
+ return heap_allocator();
+}
+
template <typename K, typename V>
-gb_internal gb_inline void map_init(PtrMap<K, V> *h, gbAllocator a, isize capacity) {
+gb_internal gb_inline void map_init(PtrMap<K, V> *h, isize capacity) {
capacity = next_pow2_isize(capacity);
- slice_init(&h->hashes, a, capacity);
- array_init(&h->entries, a, 0, capacity);
+ slice_init(&h->hashes, map_allocator(), capacity);
+ array_init(&h->entries, map_allocator(), 0, capacity);
for (isize i = 0; i < capacity; i++) {
h->hashes.data[i] = MAP_SENTINEL;
}
@@ -80,6 +87,9 @@ gb_internal gb_inline void map_init(PtrMap<K, V> *h, gbAllocator a, isize capaci
template <typename K, typename V>
gb_internal gb_inline void map_destroy(PtrMap<K, V> *h) {
+ if (h->entries.allocator.proc == nullptr) {
+ h->entries.allocator = map_allocator();
+ }
slice_free(&h->hashes, h->entries.allocator);
array_free(&h->entries);
}
@@ -103,11 +113,12 @@ gb_internal MapFindResult map__find(PtrMap<K, V> *h, K key) {
fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
fr.entry_index = h->hashes.data[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
- if (h->entries.data[fr.entry_index].key == key) {
+ auto *entry = &h->entries.data[fr.entry_index];
+ if (entry->key == key) {
return fr;
}
fr.entry_prev = fr.entry_index;
- fr.entry_index = h->entries.data[fr.entry_index].next;
+ fr.entry_index = entry->next;
}
return fr;
}
@@ -162,6 +173,9 @@ gb_internal void map_reset_entries(PtrMap<K, V> *h) {
template <typename K, typename V>
gb_internal void map_reserve(PtrMap<K, V> *h, isize cap) {
+ if (h->entries.allocator.proc == nullptr) {
+ h->entries.allocator = map_allocator();
+ }
array_reserve(&h->entries, cap);
if (h->entries.count*2 < h->hashes.count) {
return;
@@ -178,18 +192,64 @@ gb_internal void map_rehash(PtrMap<K, V> *h, isize new_count) {
template <typename K, typename V>
gb_internal V *map_get(PtrMap<K, V> *h, K key) {
- MapIndex index = map__find(h, key).entry_index;
- if (index != MAP_SENTINEL) {
- return &h->entries.data[index].value;
+ MapIndex hash_index = MAP_SENTINEL;
+ MapIndex entry_prev = MAP_SENTINEL;
+ MapIndex entry_index = MAP_SENTINEL;
+ if (h->hashes.count != 0) {
+ u32 hash = ptr_map_hash_key(key);
+ hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
+ entry_index = h->hashes.data[hash_index];
+ while (entry_index != MAP_SENTINEL) {
+ auto *entry = &h->entries.data[entry_index];
+ if (entry->key == key) {
+ return &entry->value;
+ }
+ entry_prev = entry_index;
+ entry_index = entry->next;
+ }
+ }
+ return nullptr;
+}
+template <typename K, typename V>
+gb_internal V *map_try_get(PtrMap<K, V> *h, K key, MapFindResult *fr_) {
+ MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
+ if (h->hashes.count != 0) {
+ u32 hash = ptr_map_hash_key(key);
+ fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
+ fr.entry_index = h->hashes.data[fr.hash_index];
+ while (fr.entry_index != MAP_SENTINEL) {
+ auto *entry = &h->entries.data[fr.entry_index];
+ if (entry->key == key) {
+ return &entry->value;
+ }
+ fr.entry_prev = fr.entry_index;
+ fr.entry_index = entry->next;
+ }
+ }
+	if (h->hashes.count == 0 || map__full(h)) {
+		map_grow(h);
+		fr = map__find(h, key); // growing rehashes the table, so the find result from above is stale
}
+ if (fr_) *fr_ = fr;
return nullptr;
}
+
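+// Completes an insertion started by map_try_get: fr must come from that call with no intervening map mutation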
+template <typename K, typename V>
+gb_internal void map_set_internal_from_try_get(PtrMap<K, V> *h, K key, V const &value, MapFindResult const &fr) {
+ MapIndex index = map__add_entry(h, key);
+ if (fr.entry_prev != MAP_SENTINEL) {
+ h->entries.data[fr.entry_prev].next = index;
+ } else {
+ h->hashes.data[fr.hash_index] = index;
+ }
+ h->entries.data[index].value = value;
+}
+
template <typename K, typename V>
gb_internal V &map_must_get(PtrMap<K, V> *h, K key) {
- MapIndex index = map__find(h, key).entry_index;
- GB_ASSERT(index != MAP_SENTINEL);
- return h->entries.data[index].value;
+ V *ptr = map_get(h, key);
+ GB_ASSERT(ptr != nullptr);
+ return *ptr;
}
template <typename K, typename V>
@@ -217,6 +277,33 @@ gb_internal void map_set(PtrMap<K, V> *h, K key, V const &value) {
}
}
+// returns true if it previously existed
+template <typename K, typename V>
+gb_internal bool map_set_if_not_previously_exists(PtrMap<K, V> *h, K key, V const &value) {
+ MapIndex index;
+ MapFindResult fr;
+ if (h->hashes.count == 0) {
+ map_grow(h);
+ }
+ fr = map__find(h, key);
+ if (fr.entry_index != MAP_SENTINEL) {
+ return true;
+ } else {
+ index = map__add_entry(h, key);
+ if (fr.entry_prev != MAP_SENTINEL) {
+ h->entries.data[fr.entry_prev].next = index;
+ } else {
+ h->hashes.data[fr.hash_index] = index;
+ }
+ }
+ h->entries.data[index].value = value;
+
+ if (map__full(h)) {
+ map_grow(h);
+ }
+ return false;
+}
+
template <typename K, typename V>
gb_internal void map__erase(PtrMap<K, V> *h, MapFindResult const &fr) {
diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp
index 9ecf1043e..019ede8a5 100644
--- a/src/ptr_set.cpp
+++ b/src/ptr_set.cpp
@@ -1,256 +1,215 @@
template <typename T>
-struct PtrSetEntry {
- T ptr;
- MapIndex next;
+struct TypeIsPointer {
+ enum {value = false};
};
template <typename T>
+struct TypeIsPointer<T *> {
+ enum {value = true};
+};
+
+
+template <typename T>
struct PtrSet {
- Slice<MapIndex> hashes;
- Array<PtrSetEntry<T>> entries;
+ static_assert(TypeIsPointer<T>::value, "PtrSet::T must be a pointer");
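+	// open addressing: nullptr marks a never-used slot, TOMBSTONE marks a deleted one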
+ static constexpr uintptr TOMBSTONE = ~(uintptr)(0ull);
+
+ T * keys;
+ usize count;
+ usize capacity;
};
-template <typename T> gb_internal void ptr_set_init (PtrSet<T> *s, gbAllocator a, isize capacity = 16);
+template <typename T> gb_internal void ptr_set_init (PtrSet<T> *s, isize capacity = 16);
template <typename T> gb_internal void ptr_set_destroy(PtrSet<T> *s);
template <typename T> gb_internal T ptr_set_add (PtrSet<T> *s, T ptr);
template <typename T> gb_internal bool ptr_set_update (PtrSet<T> *s, T ptr); // returns true if it previously existed
template <typename T> gb_internal bool ptr_set_exists (PtrSet<T> *s, T ptr);
template <typename T> gb_internal void ptr_set_remove (PtrSet<T> *s, T ptr);
template <typename T> gb_internal void ptr_set_clear (PtrSet<T> *s);
-template <typename T> gb_internal void ptr_set_grow (PtrSet<T> *s);
-template <typename T> gb_internal void ptr_set_rehash (PtrSet<T> *s, isize new_count);
-template <typename T> gb_internal void ptr_set_reserve(PtrSet<T> *h, isize cap);
+gb_internal gbAllocator ptr_set_allocator(void) {
+ return heap_allocator();
+}
template <typename T>
-gb_internal void ptr_set_init(PtrSet<T> *s, gbAllocator a, isize capacity) {
+gb_internal void ptr_set_init(PtrSet<T> *s, isize capacity) {
+ GB_ASSERT(s->keys == nullptr);
if (capacity != 0) {
capacity = next_pow2_isize(gb_max(16, capacity));
+ s->keys = gb_alloc_array(ptr_set_allocator(), T, capacity);
+ // This memory will be zeroed, no need to explicitly zero it
}
-
- slice_init(&s->hashes, a, capacity);
- array_init(&s->entries, a, 0, capacity);
- for (isize i = 0; i < capacity; i++) {
- s->hashes.data[i] = MAP_SENTINEL;
- }
+ s->count = 0;
+ s->capacity = capacity;
}
template <typename T>
gb_internal void ptr_set_destroy(PtrSet<T> *s) {
- slice_free(&s->hashes, s->entries.allocator);
- array_free(&s->entries);
+ gb_free(ptr_set_allocator(), s->keys);
+ s->keys = nullptr;
+ s->count = 0;
+ s->capacity = 0;
}
template <typename T>
-gb_internal MapIndex ptr_set__add_entry(PtrSet<T> *s, T ptr) {
- PtrSetEntry<T> e = {};
- e.ptr = ptr;
- e.next = MAP_SENTINEL;
- array_add(&s->entries, e);
- return cast(MapIndex)(s->entries.count-1);
-}
-
-
-template <typename T>
-gb_internal MapFindResult ptr_set__find(PtrSet<T> *s, T ptr) {
- MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
- if (s->hashes.count != 0) {
- u32 hash = ptr_map_hash_key(ptr);
- fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1));
- fr.entry_index = s->hashes.data[fr.hash_index];
- while (fr.entry_index != MAP_SENTINEL) {
- if (s->entries.data[fr.entry_index].ptr == ptr) {
- return fr;
+gb_internal isize ptr_set__find(PtrSet<T> *s, T ptr) {
+ GB_ASSERT(ptr != nullptr);
+ if (s->count != 0) {
+ #if 0
+ for (usize i = 0; i < s->capacity; i++) {
+ if (s->keys[i] == ptr) {
+ return i;
}
- fr.entry_prev = fr.entry_index;
- fr.entry_index = s->entries.data[fr.entry_index].next;
}
- }
- return fr;
-}
-
-template <typename T>
-gb_internal MapFindResult ptr_set__find_from_entry(PtrSet<T> *s, PtrSetEntry<T> *e) {
- MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
- if (s->hashes.count != 0) {
- u32 hash = ptr_map_hash_key(e->ptr);
- fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1));
- fr.entry_index = s->hashes.data[fr.hash_index];
- while (fr.entry_index != MAP_SENTINEL) {
- if (&s->entries.data[fr.entry_index] == e) {
- return fr;
+ #else
+ u32 hash = ptr_map_hash_key(ptr);
+ usize mask = s->capacity-1;
+ usize hash_index = cast(usize)hash & mask;
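+	// linear probe: the key or an empty (nullptr) slot ends the search; tombstones match neither, so probing walks past deleted slots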
+ for (usize i = 0; i < s->capacity; i++) {
+ T key = s->keys[hash_index];
+ if (key == ptr) {
+ return hash_index;
+ } else if (key == nullptr) {
+ return -1;
}
- fr.entry_prev = fr.entry_index;
- fr.entry_index = s->entries.data[fr.entry_index].next;
+ hash_index = (hash_index+1)&mask;
}
+ #endif
}
- return fr;
+ return -1;
}
template <typename T>
gb_internal bool ptr_set__full(PtrSet<T> *s) {
- return 0.75f * s->hashes.count <= s->entries.count;
+ return 0.75f * s->capacity <= s->count;
}
template <typename T>
-gb_internal gb_inline void ptr_set_grow(PtrSet<T> *s) {
- isize new_count = gb_max(s->hashes.count<<1, 16);
- ptr_set_rehash(s, new_count);
-}
-
-template <typename T>
-gb_internal void ptr_set_reset_entries(PtrSet<T> *s) {
- for (isize i = 0; i < s->hashes.count; i++) {
- s->hashes.data[i] = MAP_SENTINEL;
- }
- for (isize i = 0; i < s->entries.count; i++) {
- MapFindResult fr;
- PtrSetEntry<T> *e = &s->entries.data[i];
- e->next = MAP_SENTINEL;
- fr = ptr_set__find_from_entry(s, e);
- if (fr.entry_prev == MAP_SENTINEL) {
- s->hashes[fr.hash_index] = cast(MapIndex)i;
- } else {
- s->entries[fr.entry_prev].next = cast(MapIndex)i;
- }
+gb_internal gb_inline void ptr_set_grow(PtrSet<T> *old_set) {
+ if (old_set->capacity == 0) {
+ ptr_set_init(old_set);
+ return;
}
-}
-template <typename T>
-gb_internal void ptr_set_reserve(PtrSet<T> *s, isize cap) {
- array_reserve(&s->entries, cap);
- if (s->entries.count*2 < s->hashes.count) {
- return;
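+	// grow by rebuilding: reinsert every live key into a set of twice the capacity, then adopt it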
+ PtrSet<T> new_set = {};
+ ptr_set_init(&new_set, gb_max(old_set->capacity<<1, 16));
+
+ for (T ptr : *old_set) {
+		bool already_present = ptr_set_update(&new_set, ptr);
+		GB_ASSERT(!already_present);
}
- slice_resize(&s->hashes, s->entries.allocator, cap*2);
- ptr_set_reset_entries(s);
-}
+ GB_ASSERT(old_set->count == new_set.count);
+ ptr_set_destroy(old_set);
-template <typename T>
-gb_internal void ptr_set_rehash(PtrSet<T> *s, isize new_count) {
- ptr_set_reserve(s, new_count);
+ *old_set = new_set;
}
+
template <typename T>
gb_internal gb_inline bool ptr_set_exists(PtrSet<T> *s, T ptr) {
- isize index = ptr_set__find(s, ptr).entry_index;
- return index != MAP_SENTINEL;
+ return ptr_set__find(s, ptr) >= 0;
}
+
template <typename T>
-gb_internal gb_inline isize ptr_entry_index(PtrSet<T> *s, T ptr) {
- isize index = ptr_set__find(s, ptr).entry_index;
- if (index != MAP_SENTINEL) {
- return index;
+gb_internal bool ptr_set_update(PtrSet<T> *s, T ptr) { // returns true if it previously existed
+ if (ptr_set_exists(s, ptr)) {
+ return true;
}
- return -1;
-}
-// Returns true if it already exists
-template <typename T>
-gb_internal T ptr_set_add(PtrSet<T> *s, T ptr) {
- MapIndex index;
- MapFindResult fr;
- if (s->hashes.count == 0) {
+ if (s->keys == nullptr) {
+ ptr_set_init(s);
+ } else if (ptr_set__full(s)) {
ptr_set_grow(s);
}
- fr = ptr_set__find(s, ptr);
- if (fr.entry_index == MAP_SENTINEL) {
- index = ptr_set__add_entry(s, ptr);
- if (fr.entry_prev != MAP_SENTINEL) {
- s->entries.data[fr.entry_prev].next = index;
- } else {
- s->hashes.data[fr.hash_index] = index;
+ GB_ASSERT(s->count < s->capacity);
+ GB_ASSERT(s->capacity >= 0);
+
+ usize mask = s->capacity-1;
+ u32 hash = ptr_map_hash_key(ptr);
+ usize hash_index = (cast(usize)hash) & mask;
+ GB_ASSERT(hash_index < s->capacity);
+ for (usize i = 0; i < s->capacity; i++) {
+ T *key = &s->keys[hash_index];
+ GB_ASSERT(*key != ptr);
+ if (*key == (T)PtrSet<T>::TOMBSTONE || *key == nullptr) {
+ *key = ptr;
+ s->count++;
+ return false;
}
+ hash_index = (hash_index+1)&mask;
}
- if (ptr_set__full(s)) {
- ptr_set_grow(s);
- }
- return ptr;
+
+ GB_PANIC("ptr set out of memory");
+ return false;
}
template <typename T>
-gb_internal bool ptr_set_update(PtrSet<T> *s, T ptr) { // returns true if it previously existed
- bool exists = false;
- MapIndex index;
- MapFindResult fr;
- if (s->hashes.count == 0) {
- ptr_set_grow(s);
- }
- fr = ptr_set__find(s, ptr);
- if (fr.entry_index != MAP_SENTINEL) {
- exists = true;
- } else {
- index = ptr_set__add_entry(s, ptr);
- if (fr.entry_prev != MAP_SENTINEL) {
- s->entries.data[fr.entry_prev].next = index;
- } else {
- s->hashes.data[fr.hash_index] = index;
- }
- }
- if (ptr_set__full(s)) {
- ptr_set_grow(s);
- }
- return exists;
+gb_internal T ptr_set_add(PtrSet<T> *s, T ptr) {
+ ptr_set_update(s, ptr);
+ return ptr;
}
-
template <typename T>
-gb_internal void ptr_set__erase(PtrSet<T> *s, MapFindResult fr) {
- MapFindResult last;
- if (fr.entry_prev == MAP_SENTINEL) {
- s->hashes.data[fr.hash_index] = s->entries.data[fr.entry_index].next;
- } else {
- s->entries.data[fr.entry_prev].next = s->entries.data[fr.entry_index].next;
- }
- if (cast(isize)fr.entry_index == s->entries.count-1) {
- array_pop(&s->entries);
- return;
- }
- s->entries.data[fr.entry_index] = s->entries.data[s->entries.count-1];
- last = ptr_set__find(s, s->entries.data[fr.entry_index].ptr);
- if (last.entry_prev != MAP_SENTINEL) {
- s->entries.data[last.entry_prev].next = fr.entry_index;
- } else {
- s->hashes.data[last.hash_index] = fr.entry_index;
+gb_internal void ptr_set_remove(PtrSet<T> *s, T ptr) {
+ isize index = ptr_set__find(s, ptr);
+ if (index >= 0) {
+ GB_ASSERT(s->count > 0);
+ s->keys[index] = (T)PtrSet<T>::TOMBSTONE;
+ s->count--;
}
}
template <typename T>
-gb_internal void ptr_set_remove(PtrSet<T> *s, T ptr) {
- MapFindResult fr = ptr_set__find(s, ptr);
- if (fr.entry_index != MAP_SENTINEL) {
- ptr_set__erase(s, fr);
- }
+gb_internal gb_inline void ptr_set_clear(PtrSet<T> *s) {
+ s->count = 0;
+ gb_zero_size(s->keys, s->capacity*gb_size_of(T));
}
template <typename T>
-gb_internal gb_inline void ptr_set_clear(PtrSet<T> *s) {
- array_clear(&s->entries);
- for (isize i = 0; i < s->hashes.count; i++) {
- s->hashes.data[i] = MAP_SENTINEL;
+struct PtrSetIterator {
+ PtrSet<T> *set;
+ usize index;
+
+ PtrSetIterator<T> &operator++() noexcept {
+ for (;;) {
+ ++index;
+ if (set->capacity == index) {
+ return *this;
+ }
+ T key = set->keys[index];
+ if (key != nullptr && key != (T)PtrSet<T>::TOMBSTONE) {
+ return *this;
+ }
+ }
}
-}
+	bool operator==(PtrSetIterator<T> const &other) const noexcept {
+		return this->set == other.set && this->index == other.index;
+	}
+	// operator!= is not synthesized from operator== before C++20, and range-for needs it
+	bool operator!=(PtrSetIterator<T> const &other) const noexcept {
+		return !(*this == other);
+	}
-template <typename T>
-gb_internal PtrSetEntry<T> *begin(PtrSet<T> &m) {
- return m.entries.data;
-}
-template <typename T>
-gb_internal PtrSetEntry<T> const *begin(PtrSet<T> const &m) {
- return m.entries.data;
-}
+
+ operator T *() const {
+ return &set->keys[index];
+ }
+};
template <typename T>
-gb_internal PtrSetEntry<T> *end(PtrSet<T> &m) {
- return m.entries.data + m.entries.count;
+gb_internal PtrSetIterator<T> begin(PtrSet<T> &set) noexcept {
+ usize index = 0;
+ while (index < set.capacity) {
+ T key = set.keys[index];
+ if (key != nullptr && key != (T)PtrSet<T>::TOMBSTONE) {
+ break;
+ }
+ index++;
+ }
+ return PtrSetIterator<T>{&set, index};
}
-
template <typename T>
-gb_internal PtrSetEntry<T> const *end(PtrSet<T> const &m) {
- return m.entries.data + m.entries.count;
+gb_internal PtrSetIterator<T> end(PtrSet<T> &set) noexcept {
+ return PtrSetIterator<T>{&set, set.capacity};
} \ No newline at end of file
diff --git a/src/queue.cpp b/src/queue.cpp
index 4de5ac5e5..8f279bb21 100644
--- a/src/queue.cpp
+++ b/src/queue.cpp
@@ -52,7 +52,6 @@ gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
size = next_pow2(size);
GB_ASSERT(gb_is_power_of_two(size));
- mutex_init(&q->mutex);
q->mask = size-1;
q->allocator = a;
q->nodes = gb_alloc_array(a, T, size);
@@ -65,7 +64,6 @@ gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
template <typename T>
gb_internal void mpmc_destroy(MPMCQueue<T> *q) {
- mutex_destroy(&q->mutex);
gb_free(q->allocator, q->nodes);
gb_free(q->allocator, q->indices);
}
diff --git a/src/string.cpp b/src/string.cpp
index 8cce0f1ef..a2254d100 100644
--- a/src/string.cpp
+++ b/src/string.cpp
@@ -1,10 +1,5 @@
gb_global BlockingMutex string_buffer_mutex = {};
-gb_internal void init_string_buffer_memory(void) {
- mutex_init(&string_buffer_mutex);
-}
-
-
// NOTE(bill): Used for UTF-8 strings
struct String {
u8 * text;
diff --git a/src/string_map.cpp b/src/string_map.cpp
index 9f9374ece..facd00bb0 100644
--- a/src/string_map.cpp
+++ b/src/string_map.cpp
@@ -1,6 +1,13 @@
struct StringHashKey {
u32 hash;
String string;
+
+ operator String() const noexcept {
+ return this->string;
+ }
+ operator String const &() const noexcept {
+ return this->string;
+ }
};
gb_internal gb_inline StringHashKey string_hash_string(String const &s) {
@@ -35,7 +42,7 @@ struct StringMap {
};
-template <typename T> gb_internal void string_map_init (StringMap<T> *h, gbAllocator a, isize capacity = 16);
+template <typename T> gb_internal void string_map_init (StringMap<T> *h, isize capacity = 16);
template <typename T> gb_internal void string_map_destroy (StringMap<T> *h);
template <typename T> gb_internal T * string_map_get (StringMap<T> *h, char const *key);
@@ -56,11 +63,15 @@ template <typename T> gb_internal void string_map_grow (StringMap<T>
template <typename T> gb_internal void string_map_rehash (StringMap<T> *h, isize new_count);
template <typename T> gb_internal void string_map_reserve (StringMap<T> *h, isize cap);
+gb_internal gbAllocator string_map_allocator(void) {
+ return heap_allocator();
+}
+
template <typename T>
-gb_internal gb_inline void string_map_init(StringMap<T> *h, gbAllocator a, isize capacity) {
+gb_internal gb_inline void string_map_init(StringMap<T> *h, isize capacity) {
capacity = next_pow2_isize(capacity);
- slice_init(&h->hashes, a, capacity);
- array_init(&h->entries, a, 0, capacity);
+ slice_init(&h->hashes, string_map_allocator(), capacity);
+ array_init(&h->entries, string_map_allocator(), 0, capacity);
for (isize i = 0; i < capacity; i++) {
h->hashes.data[i] = MAP_SENTINEL;
}
@@ -68,6 +79,9 @@ gb_internal gb_inline void string_map_init(StringMap<T> *h, gbAllocator a, isize
template <typename T>
gb_internal gb_inline void string_map_destroy(StringMap<T> *h) {
+ if (h->entries.allocator.proc == nullptr) {
+ h->entries.allocator = string_map_allocator();
+ }
slice_free(&h->hashes, h->entries.allocator);
array_free(&h->entries);
}
@@ -147,6 +161,9 @@ gb_internal void string_map_reset_entries(StringMap<T> *h) {
template <typename T>
gb_internal void string_map_reserve(StringMap<T> *h, isize cap) {
+ if (h->entries.allocator.proc == nullptr) {
+ h->entries.allocator = string_map_allocator();
+ }
array_reserve(&h->entries, cap);
if (h->entries.count*2 < h->hashes.count) {
return;
@@ -163,9 +180,18 @@ gb_internal void string_map_rehash(StringMap<T> *h, isize new_count) {
template <typename T>
gb_internal T *string_map_get(StringMap<T> *h, StringHashKey const &key) {
- isize index = string_map__find(h, key).entry_index;
- if (index != MAP_SENTINEL) {
- return &h->entries.data[index].value;
+ MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
+ if (h->hashes.count != 0) {
+ fr.hash_index = cast(MapIndex)(key.hash & (h->hashes.count-1));
+ fr.entry_index = h->hashes.data[fr.hash_index];
+ while (fr.entry_index != MAP_SENTINEL) {
+ auto *entry = &h->entries.data[fr.entry_index];
+ if (string_hash_key_equal(entry->key, key)) {
+ return &entry->value;
+ }
+ fr.entry_prev = fr.entry_index;
+ fr.entry_index = entry->next;
+ }
}
return nullptr;
}
@@ -273,11 +299,11 @@ gb_internal gb_inline void string_map_clear(StringMap<T> *h) {
template <typename T>
-gb_internal StringMapEntry<T> *begin(StringMap<T> &m) {
+gb_internal StringMapEntry<T> *begin(StringMap<T> &m) noexcept {
return m.entries.data;
}
template <typename T>
-gb_internal StringMapEntry<T> const *begin(StringMap<T> const &m) {
+gb_internal StringMapEntry<T> const *begin(StringMap<T> const &m) noexcept {
return m.entries.data;
}
@@ -288,6 +314,6 @@ gb_internal StringMapEntry<T> *end(StringMap<T> &m) {
}
template <typename T>
-gb_internal StringMapEntry<T> const *end(StringMap<T> const &m) {
+gb_internal StringMapEntry<T> const *end(StringMap<T> const &m) noexcept {
return m.entries.data + m.entries.count;
} \ No newline at end of file
diff --git a/src/string_set.cpp b/src/string_set.cpp
index 1c97d253e..fb4640c20 100644
--- a/src/string_set.cpp
+++ b/src/string_set.cpp
@@ -2,6 +2,13 @@ struct StringSetEntry {
u32 hash;
MapIndex next;
String value;
+
+ operator String const() const noexcept {
+ return this->value;
+ }
+ operator String const &() const noexcept {
+ return this->value;
+ }
};
struct StringSet {
@@ -10,7 +17,7 @@ struct StringSet {
};
-gb_internal void string_set_init (StringSet *s, gbAllocator a, isize capacity = 16);
+gb_internal void string_set_init (StringSet *s, isize capacity = 16);
gb_internal void string_set_destroy(StringSet *s);
gb_internal void string_set_add (StringSet *s, String const &str);
gb_internal bool string_set_update (StringSet *s, String const &str); // returns true if it previously existed
@@ -20,18 +27,24 @@ gb_internal void string_set_clear (StringSet *s);
gb_internal void string_set_grow (StringSet *s);
gb_internal void string_set_rehash (StringSet *s, isize new_count);
+gb_internal gbAllocator string_set_allocator(void) {
+ return heap_allocator();
+}
-gb_internal gb_inline void string_set_init(StringSet *s, gbAllocator a, isize capacity) {
+gb_internal gb_inline void string_set_init(StringSet *s, isize capacity) {
capacity = next_pow2_isize(gb_max(16, capacity));
- slice_init(&s->hashes, a, capacity);
- array_init(&s->entries, a, 0, capacity);
+ slice_init(&s->hashes, string_set_allocator(), capacity);
+ array_init(&s->entries, string_set_allocator(), 0, capacity);
for (isize i = 0; i < capacity; i++) {
s->hashes.data[i] = MAP_SENTINEL;
}
}
gb_internal gb_inline void string_set_destroy(StringSet *s) {
+ if (s->entries.allocator.proc == nullptr) {
+ s->entries.allocator = string_set_allocator();
+ }
slice_free(&s->hashes, s->entries.allocator);
array_free(&s->entries);
}
@@ -106,6 +119,9 @@ gb_internal void string_set_reset_entries(StringSet *s) {
}
gb_internal void string_set_reserve(StringSet *s, isize cap) {
+ if (s->entries.allocator.proc == nullptr) {
+ s->entries.allocator = string_set_allocator();
+ }
array_reserve(&s->entries, cap);
if (s->entries.count*2 < s->hashes.count) {
return;
@@ -217,19 +233,18 @@ gb_internal gb_inline void string_set_clear(StringSet *s) {
}
-
-gb_internal StringSetEntry *begin(StringSet &m) {
+gb_internal StringSetEntry *begin(StringSet &m) noexcept {
return m.entries.data;
}
-gb_internal StringSetEntry const *begin(StringSet const &m) {
+gb_internal StringSetEntry const *begin(StringSet const &m) noexcept {
return m.entries.data;
}
-gb_internal StringSetEntry *end(StringSet &m) {
+gb_internal StringSetEntry *end(StringSet &m) noexcept {
return m.entries.data + m.entries.count;
}
-gb_internal StringSetEntry const *end(StringSet const &m) {
+gb_internal StringSetEntry const *end(StringSet const &m) noexcept {
return m.entries.data + m.entries.count;
} \ No newline at end of file
diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp
index 3565ef25a..2c369eaad 100644
--- a/src/thread_pool.cpp
+++ b/src/thread_pool.cpp
@@ -5,7 +5,7 @@ struct ThreadPool;
gb_thread_local Thread *current_thread;
-gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name);
+gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name);
gb_internal void thread_pool_destroy(ThreadPool *pool);
gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
gb_internal void thread_pool_wait(ThreadPool *pool);
@@ -16,18 +16,21 @@ struct ThreadPool {
Slice<Thread> threads;
std::atomic<bool> running;
- BlockingMutex task_lock;
- Condition tasks_available;
+ Futex tasks_available;
Futex tasks_left;
};
-gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name) {
- mutex_init(&pool->task_lock);
- condition_init(&pool->tasks_available);
+gb_internal isize current_thread_index(void) {
+ return current_thread ? current_thread->idx : 0;
+}
+gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name) {
pool->allocator = a;
- slice_init(&pool->threads, a, thread_count + 1);
+ slice_init(&pool->threads, a, worker_count + 1);
+
+ // NOTE: this needs to be initialized before any thread starts
+ pool->running.store(true, std::memory_order_seq_cst);
// setup the main thread
thread_init(pool, &pool->threads[0], 0);
@@ -37,62 +40,55 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize
Thread *t = &pool->threads[i];
thread_init_and_start(pool, t, i);
}
-
- pool->running = true;
}
gb_internal void thread_pool_destroy(ThreadPool *pool) {
- pool->running = false;
+ pool->running.store(false, std::memory_order_seq_cst);
for_array_off(i, 1, pool->threads) {
Thread *t = &pool->threads[i];
- condition_broadcast(&pool->tasks_available);
+ pool->tasks_available.fetch_add(1, std::memory_order_relaxed);
+ futex_broadcast(&pool->tasks_available);
thread_join_and_destroy(t);
}
- for_array(i, pool->threads) {
- free(pool->threads[i].queue);
- }
gb_free(pool->allocator, pool->threads.data);
- mutex_destroy(&pool->task_lock);
- condition_destroy(&pool->tasks_available);
}
void thread_pool_queue_push(Thread *thread, WorkerTask task) {
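+	// head and tail indices are packed into one atomic u64 (head in the upper 32 bits) so both can be published with a single compare-exchange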
- uint64_t capture;
- uint64_t new_capture;
+ u64 capture;
+ u64 new_capture;
do {
capture = thread->head_and_tail.load();
- uint64_t mask = thread->capacity - 1;
- uint64_t head = (capture >> 32) & mask;
- uint64_t tail = ((uint32_t)capture) & mask;
+ u64 mask = thread->capacity - 1;
+ u64 head = (capture >> 32) & mask;
+ u64 tail = ((u32)capture) & mask;
- uint64_t new_head = (head + 1) & mask;
- if (new_head == tail) {
- GB_PANIC("Thread Queue Full!\n");
- }
+ u64 new_head = (head + 1) & mask;
+ GB_ASSERT_MSG(new_head != tail, "Thread Queue Full!");
// This *must* be done in here, to avoid a potential race condition where we no longer own the slot by the time we're assigning
thread->queue[head] = task;
new_capture = (new_head << 32) | tail;
} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture));
- thread->pool->tasks_left.fetch_add(1);
- condition_broadcast(&thread->pool->tasks_available);
+ thread->pool->tasks_left.fetch_add(1, std::memory_order_release);
+ thread->pool->tasks_available.fetch_add(1, std::memory_order_relaxed);
+ futex_broadcast(&thread->pool->tasks_available);
}
bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) {
- uint64_t capture;
- uint64_t new_capture;
+ u64 capture;
+ u64 new_capture;
do {
- capture = thread->head_and_tail.load();
+ capture = thread->head_and_tail.load(std::memory_order_acquire);
- uint64_t mask = thread->capacity - 1;
- uint64_t head = (capture >> 32) & mask;
- uint64_t tail = ((uint32_t)capture) & mask;
+ u64 mask = thread->capacity - 1;
+ u64 head = (capture >> 32) & mask;
+ u64 tail = ((u32)capture) & mask;
- uint64_t new_tail = (tail + 1) & mask;
+ u64 new_tail = (tail + 1) & mask;
if (tail == head) {
return false;
}
@@ -101,7 +97,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) {
*task = thread->queue[tail];
new_capture = (head << 32) | new_tail;
- } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture));
+ } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture, std::memory_order_release));
return true;
}
@@ -118,12 +114,11 @@ gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, vo
gb_internal void thread_pool_wait(ThreadPool *pool) {
WorkerTask task;
- while (pool->tasks_left) {
-
+ while (pool->tasks_left.load(std::memory_order_acquire)) {
// if we've got tasks on our queue, run them
while (thread_pool_queue_pop(current_thread, &task)) {
task.do_work(task.data);
- pool->tasks_left.fetch_sub(1);
+ pool->tasks_left.fetch_sub(1, std::memory_order_release);
}
@@ -131,12 +126,12 @@ gb_internal void thread_pool_wait(ThreadPool *pool) {
// This *must* be executed in this order, so the futex wakes immediately
// if rem_tasks has changed since we checked last, otherwise the program
// will permanently sleep
- Footex rem_tasks = pool->tasks_left.load();
- if (!rem_tasks) {
- break;
+ Footex rem_tasks = pool->tasks_left.load(std::memory_order_acquire);
+ if (rem_tasks == 0) {
+ return;
}
- tpool_wait_on_addr(&pool->tasks_left, rem_tasks);
+ futex_wait(&pool->tasks_left, rem_tasks);
}
}
@@ -144,56 +139,53 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) {
WorkerTask task;
current_thread = thread;
ThreadPool *pool = current_thread->pool;
+ // debugf("worker id: %td\n", current_thread->idx);
- for (;;) {
-work_start:
- if (!pool->running) {
- break;
- }
-
+ while (pool->running.load(std::memory_order_seq_cst)) {
// If we've got tasks to process, work through them
- size_t finished_tasks = 0;
+ usize finished_tasks = 0;
+ i32 state;
+
while (thread_pool_queue_pop(current_thread, &task)) {
task.do_work(task.data);
- pool->tasks_left.fetch_sub(1);
+ pool->tasks_left.fetch_sub(1, std::memory_order_release);
finished_tasks += 1;
}
- if (finished_tasks > 0 && !pool->tasks_left) {
- tpool_wake_addr(&pool->tasks_left);
+ if (finished_tasks > 0 && pool->tasks_left.load(std::memory_order_acquire) == 0) {
+ futex_signal(&pool->tasks_left);
}
// If there's still work somewhere and we don't have it, steal it
- if (pool->tasks_left) {
- isize idx = current_thread->idx;
+ if (pool->tasks_left.load(std::memory_order_acquire)) {
+ usize idx = cast(usize)current_thread->idx;
for_array(i, pool->threads) {
- if (!pool->tasks_left) {
+ if (pool->tasks_left.load(std::memory_order_acquire) == 0) {
break;
}
- idx = (idx + 1) % pool->threads.count;
- Thread *thread = &pool->threads[idx];
+ idx = (idx + 1) % cast(usize)pool->threads.count;
+ Thread *thread = &pool->threads.data[idx];
WorkerTask task;
- if (!thread_pool_queue_pop(thread, &task)) {
- continue;
- }
+ if (thread_pool_queue_pop(thread, &task)) {
+ task.do_work(task.data);
+ pool->tasks_left.fetch_sub(1, std::memory_order_release);
- task.do_work(task.data);
- pool->tasks_left.fetch_sub(1);
+ if (pool->tasks_left.load(std::memory_order_acquire) == 0) {
+ futex_signal(&pool->tasks_left);
+ }
- if (!pool->tasks_left) {
- tpool_wake_addr(&pool->tasks_left);
+ goto main_loop_continue;
}
-
- goto work_start;
}
}
// if we've done all our work, and there's nothing to steal, go to sleep
- mutex_lock(&pool->task_lock);
- condition_wait(&pool->tasks_available, &pool->task_lock);
- mutex_unlock(&pool->task_lock);
+ state = pool->tasks_available.load(std::memory_order_acquire);
+ futex_wait(&pool->tasks_available, state);
+
+ main_loop_continue:;
}
return 0;
diff --git a/src/threading.cpp b/src/threading.cpp
index 7a7d1a299..52e6b722a 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -8,10 +8,12 @@
struct BlockingMutex;
struct RecursiveMutex;
+struct RwMutex;
struct Semaphore;
struct Condition;
struct Thread;
struct ThreadPool;
+struct Parker;
#define THREAD_PROC(name) isize name(struct Thread *thread)
gb_internal THREAD_PROC(thread_pool_thread_proc);
@@ -41,31 +43,40 @@ struct Thread {
struct ThreadPool *pool;
};
+typedef std::atomic<i32> Futex;
+typedef volatile i32 Footex;
+
+gb_internal void futex_wait(Futex *addr, Footex val);
+gb_internal void futex_signal(Futex *addr);
+gb_internal void futex_broadcast(Futex *addr);
-gb_internal void mutex_init (BlockingMutex *m);
-gb_internal void mutex_destroy (BlockingMutex *m);
gb_internal void mutex_lock (BlockingMutex *m);
gb_internal bool mutex_try_lock(BlockingMutex *m);
gb_internal void mutex_unlock (BlockingMutex *m);
-gb_internal void mutex_init (RecursiveMutex *m);
-gb_internal void mutex_destroy (RecursiveMutex *m);
+
gb_internal void mutex_lock (RecursiveMutex *m);
gb_internal bool mutex_try_lock(RecursiveMutex *m);
gb_internal void mutex_unlock (RecursiveMutex *m);
-gb_internal void semaphore_init (Semaphore *s);
-gb_internal void semaphore_destroy(Semaphore *s);
+gb_internal void rw_mutex_lock (RwMutex *m);
+gb_internal bool rw_mutex_try_lock (RwMutex *m);
+gb_internal void rw_mutex_unlock (RwMutex *m);
+gb_internal void rw_mutex_shared_lock (RwMutex *m);
+gb_internal bool rw_mutex_try_shared_lock(RwMutex *m);
+gb_internal void rw_mutex_shared_unlock (RwMutex *m);
+
gb_internal void semaphore_post (Semaphore *s, i32 count);
gb_internal void semaphore_wait (Semaphore *s);
gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); }
-gb_internal void condition_init(Condition *c);
-gb_internal void condition_destroy(Condition *c);
gb_internal void condition_broadcast(Condition *c);
gb_internal void condition_signal(Condition *c);
gb_internal void condition_wait(Condition *c, BlockingMutex *m);
-gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms);
+
+gb_internal void park(Parker *p);
+gb_internal void unpark_one(Parker *p);
+gb_internal void unpark_all(Parker *p);
gb_internal u32 thread_current_id(void);
@@ -79,22 +90,23 @@ gb_internal void yield_process(void);
struct MutexGuard {
- MutexGuard() = delete;
+ MutexGuard() = delete;
MutexGuard(MutexGuard const &) = delete;
+ MutexGuard(MutexGuard &&) = delete;
- MutexGuard(BlockingMutex *bm) : bm{bm} {
+ explicit MutexGuard(BlockingMutex *bm) noexcept : bm{bm} {
mutex_lock(this->bm);
}
- MutexGuard(RecursiveMutex *rm) : rm{rm} {
+ explicit MutexGuard(RecursiveMutex *rm) noexcept : rm{rm} {
mutex_lock(this->rm);
}
- MutexGuard(BlockingMutex &bm) : bm{&bm} {
+ explicit MutexGuard(BlockingMutex &bm) noexcept : bm{&bm} {
mutex_lock(this->bm);
}
- MutexGuard(RecursiveMutex &rm) : rm{&rm} {
+ explicit MutexGuard(RecursiveMutex &rm) noexcept : rm{&rm} {
mutex_lock(this->rm);
}
- ~MutexGuard() {
+ ~MutexGuard() noexcept {
if (this->bm) {
mutex_unlock(this->bm);
} else if (this->rm) {
@@ -102,24 +114,87 @@ struct MutexGuard {
}
}
- operator bool() const { return true; }
+ operator bool() const noexcept { return true; }
BlockingMutex *bm;
RecursiveMutex *rm;
};
#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m})
-#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_){m}
+#define MUTEX_GUARD(m) mutex_lock(m); defer (mutex_unlock(m))
+
+
+struct RecursiveMutex {
+ Futex owner;
+ i32 recursion;
+};
+
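+// Futex-based recursive mutex: owner holds the owning thread id (0 when free) and recursion counts nested locks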
+gb_internal void mutex_lock(RecursiveMutex *m) {
+ Futex tid;
+ tid.store(cast(i32)thread_current_id());
+ for (;;) {
+ i32 prev_owner = 0;
+ m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire);
+ if (prev_owner == 0 || prev_owner == tid) {
+ m->recursion++;
+ // inside the lock
+ return;
+ }
+ futex_wait(&m->owner, prev_owner);
+ }
+}
+gb_internal bool mutex_try_lock(RecursiveMutex *m) {
+ Futex tid;
+ tid.store(cast(i32)thread_current_id());
+ i32 prev_owner = 0;
+ m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire);
+ if (prev_owner == 0 || prev_owner == tid) {
+ m->recursion++;
+ // inside the lock
+ return true;
+ }
+ return false;
+}
+gb_internal void mutex_unlock(RecursiveMutex *m) {
+ m->recursion--;
+ if (m->recursion != 0) {
+ return;
+ }
+ m->owner.exchange(0, std::memory_order_release);
+ futex_signal(&m->owner);
+ // outside the lock
+}
+
+struct Semaphore {
+ Futex count;
+};
+
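+// Futex-backed counting semaphore: post raises the count and wakes sleepers; wait sleeps while the count is zero, then claims a unit with a CAS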
+gb_internal void semaphore_post(Semaphore *s, i32 count) {
+ s->count.fetch_add(count, std::memory_order_release);
+ if (s->count == 1) {
+ futex_signal(&s->count);
+ } else {
+ futex_broadcast(&s->count);
+ }
+}
+gb_internal void semaphore_wait(Semaphore *s) {
+ for (;;) {
+ i32 original_count = s->count.load(std::memory_order_relaxed);
+ while (original_count == 0) {
+ futex_wait(&s->count, original_count);
+ original_count = s->count;
+ }
+		if (s->count.compare_exchange_strong(original_count, original_count-1, std::memory_order_acquire, std::memory_order_acquire)) {
+ return;
+ }
+ }
+}
#if defined(GB_SYSTEM_WINDOWS)
struct BlockingMutex {
SRWLOCK srwlock;
};
- gb_internal void mutex_init(BlockingMutex *m) {
- }
- gb_internal void mutex_destroy(BlockingMutex *m) {
- }
gb_internal void mutex_lock(BlockingMutex *m) {
AcquireSRWLockExclusive(&m->srwlock);
}
@@ -130,50 +205,10 @@ struct MutexGuard {
ReleaseSRWLockExclusive(&m->srwlock);
}
- struct RecursiveMutex {
- CRITICAL_SECTION win32_critical_section;
- };
- gb_internal void mutex_init(RecursiveMutex *m) {
- InitializeCriticalSection(&m->win32_critical_section);
- }
- gb_internal void mutex_destroy(RecursiveMutex *m) {
- DeleteCriticalSection(&m->win32_critical_section);
- }
- gb_internal void mutex_lock(RecursiveMutex *m) {
- EnterCriticalSection(&m->win32_critical_section);
- }
- gb_internal bool mutex_try_lock(RecursiveMutex *m) {
- return TryEnterCriticalSection(&m->win32_critical_section) != 0;
- }
- gb_internal void mutex_unlock(RecursiveMutex *m) {
- LeaveCriticalSection(&m->win32_critical_section);
- }
-
- struct Semaphore {
- void *win32_handle;
- };
-
- gb_internal void semaphore_init(Semaphore *s) {
- s->win32_handle = CreateSemaphoreA(NULL, 0, I32_MAX, NULL);
- }
- gb_internal void semaphore_destroy(Semaphore *s) {
- CloseHandle(s->win32_handle);
- }
- gb_internal void semaphore_post(Semaphore *s, i32 count) {
- ReleaseSemaphore(s->win32_handle, count, NULL);
- }
- gb_internal void semaphore_wait(Semaphore *s) {
- WaitForSingleObjectEx(s->win32_handle, INFINITE, FALSE);
- }
-
struct Condition {
CONDITION_VARIABLE cond;
};
- gb_internal void condition_init(Condition *c) {
- }
- gb_internal void condition_destroy(Condition *c) {
- }
gb_internal void condition_broadcast(Condition *c) {
WakeAllConditionVariable(&c->cond);
}
@@ -183,103 +218,192 @@ struct MutexGuard {
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0);
}
- gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) {
- SleepConditionVariableSRW(&c->cond, &m->srwlock, timeout_in_ms, 0);
+
+ struct RwMutex {
+ SRWLOCK srwlock;
+ };
+
+ gb_internal void rw_mutex_lock(RwMutex *m) {
+ AcquireSRWLockExclusive(&m->srwlock);
+ }
+ gb_internal bool rw_mutex_try_lock(RwMutex *m) {
+ return !!TryAcquireSRWLockExclusive(&m->srwlock);
+ }
+ gb_internal void rw_mutex_unlock(RwMutex *m) {
+ ReleaseSRWLockExclusive(&m->srwlock);
}
+ gb_internal void rw_mutex_shared_lock(RwMutex *m) {
+ AcquireSRWLockShared(&m->srwlock);
+ }
+ gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) {
+ return !!TryAcquireSRWLockShared(&m->srwlock);
+ }
+ gb_internal void rw_mutex_shared_unlock(RwMutex *m) {
+ ReleaseSRWLockShared(&m->srwlock);
+ }
#else
+ enum Internal_Mutex_State : i32 {
+ Internal_Mutex_State_Unlocked = 0,
+ Internal_Mutex_State_Locked = 1,
+ Internal_Mutex_State_Waiting = 2,
+ };
+
struct BlockingMutex {
- pthread_mutex_t pthread_mutex;
+ i32 state_;
+
+ Futex &state() {
+ return *(Futex *)&this->state_;
+ }
+ Futex const &state() const {
+ return *(Futex const *)&this->state_;
+ }
};
- gb_internal void mutex_init(BlockingMutex *m) {
- pthread_mutex_init(&m->pthread_mutex, nullptr);
- }
- gb_internal void mutex_destroy(BlockingMutex *m) {
- pthread_mutex_destroy(&m->pthread_mutex);
+
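+	// Three-state futex mutex: spin for a while in the hope the holder releases soon, then advertise Waiting and sleep on the futex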
+ gb_no_inline gb_internal void mutex_lock_slow(BlockingMutex *m, i32 curr_state) {
+ i32 new_state = curr_state;
+ for (i32 spin = 0; spin < 100; spin++) {
+ i32 state = Internal_Mutex_State_Unlocked;
+ bool ok = m->state().compare_exchange_weak(state, new_state, std::memory_order_acquire, std::memory_order_consume);
+ if (ok) {
+ return;
+ }
+ if (state == Internal_Mutex_State_Waiting) {
+ break;
+ }
+ for (i32 i = gb_min(spin+1, 32); i > 0; i--) {
+ yield_thread();
+ }
+ }
+
+ // Set just in case 100 iterations did not do it
+ new_state = Internal_Mutex_State_Waiting;
+
+ for (;;) {
+ if (m->state().exchange(Internal_Mutex_State_Waiting, std::memory_order_acquire) == Internal_Mutex_State_Unlocked) {
+ return;
+ }
+ futex_wait(&m->state(), new_state);
+ yield_thread();
+ }
}
+
gb_internal void mutex_lock(BlockingMutex *m) {
- pthread_mutex_lock(&m->pthread_mutex);
+ i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
+ if (v != Internal_Mutex_State_Unlocked) {
+ mutex_lock_slow(m, v);
+ }
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
- return pthread_mutex_trylock(&m->pthread_mutex) == 0;
+ i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
+ return v == Internal_Mutex_State_Unlocked;
}
- gb_internal void mutex_unlock(BlockingMutex *m) {
- pthread_mutex_unlock(&m->pthread_mutex);
+
+ gb_no_inline gb_internal void mutex_unlock_slow(BlockingMutex *m) {
+ futex_signal(&m->state());
}
- struct RecursiveMutex {
- pthread_mutex_t pthread_mutex;
- pthread_mutexattr_t pthread_mutexattr;
- };
- gb_internal void mutex_init(RecursiveMutex *m) {
- pthread_mutexattr_init(&m->pthread_mutexattr);
- pthread_mutexattr_settype(&m->pthread_mutexattr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&m->pthread_mutex, &m->pthread_mutexattr);
- }
- gb_internal void mutex_destroy(RecursiveMutex *m) {
- pthread_mutex_destroy(&m->pthread_mutex);
- }
- gb_internal void mutex_lock(RecursiveMutex *m) {
- pthread_mutex_lock(&m->pthread_mutex);
- }
- gb_internal bool mutex_try_lock(RecursiveMutex *m) {
- return pthread_mutex_trylock(&m->pthread_mutex) == 0;
- }
- gb_internal void mutex_unlock(RecursiveMutex *m) {
- pthread_mutex_unlock(&m->pthread_mutex);
- }
-
- #if defined(GB_SYSTEM_OSX)
- struct Semaphore {
- semaphore_t osx_handle;
- };
-
- gb_internal void semaphore_init (Semaphore *s) { semaphore_create(mach_task_self(), &s->osx_handle, SYNC_POLICY_FIFO, 0); }
- gb_internal void semaphore_destroy(Semaphore *s) { semaphore_destroy(mach_task_self(), s->osx_handle); }
- gb_internal void semaphore_post (Semaphore *s, i32 count) { while (count --> 0) semaphore_signal(s->osx_handle); }
- gb_internal void semaphore_wait (Semaphore *s) { semaphore_wait(s->osx_handle); }
- #elif defined(GB_SYSTEM_UNIX)
- struct Semaphore {
- sem_t unix_handle;
- };
-
- gb_internal void semaphore_init (Semaphore *s) { sem_init(&s->unix_handle, 0, 0); }
- gb_internal void semaphore_destroy(Semaphore *s) { sem_destroy(&s->unix_handle); }
- gb_internal void semaphore_post (Semaphore *s, i32 count) { while (count --> 0) sem_post(&s->unix_handle); }
- void semaphore_wait (Semaphore *s) { int i; do { i = sem_wait(&s->unix_handle); } while (i == -1 && errno == EINTR); }
- #else
- #error Implement Semaphore for this platform
- #endif
-
+ gb_internal void mutex_unlock(BlockingMutex *m) {
+ i32 v = m->state().exchange(Internal_Mutex_State_Unlocked, std::memory_order_release);
+ switch (v) {
+ case Internal_Mutex_State_Unlocked:
+ GB_PANIC("Unreachable");
+ break;
+ case Internal_Mutex_State_Locked:
+ // Okay
+ break;
+ case Internal_Mutex_State_Waiting:
+ mutex_unlock_slow(m);
+ break;
+ }
+ }
struct Condition {
- pthread_cond_t pthread_cond;
+ i32 state_;
+
+ Futex &state() {
+ return *(Futex *)&this->state_;
+ }
+ Futex const &state() const {
+ return *(Futex const *)&this->state_;
+ }
};
-
- gb_internal void condition_init(Condition *c) {
- pthread_cond_init(&c->pthread_cond, NULL);
- }
- gb_internal void condition_destroy(Condition *c) {
- pthread_cond_destroy(&c->pthread_cond);
- }
+
gb_internal void condition_broadcast(Condition *c) {
- pthread_cond_broadcast(&c->pthread_cond);
+ c->state().fetch_add(1, std::memory_order_release);
+ futex_broadcast(&c->state());
}
gb_internal void condition_signal(Condition *c) {
- pthread_cond_signal(&c->pthread_cond);
+ c->state().fetch_add(1, std::memory_order_release);
+ futex_signal(&c->state());
}
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
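+		// snapshot the generation counter before releasing the mutex; if a signal bumps it in the gap, futex_wait returns immediately rather than sleeping through the wakeup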
- pthread_cond_wait(&c->pthread_cond, &m->pthread_mutex);
+ i32 state = c->state().load(std::memory_order_relaxed);
+ mutex_unlock(m);
+ futex_wait(&c->state(), state);
+ mutex_lock(m);
}
- gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) {
- struct timespec abstime = {};
- abstime.tv_sec = timeout_in_ms/1000;
- abstime.tv_nsec = cast(long)(timeout_in_ms%1000)*1e6;
- pthread_cond_timedwait(&c->pthread_cond, &m->pthread_mutex, &abstime);
-
+
+ struct RwMutex {
+ // TODO(bill): make this a proper RW mutex
+ BlockingMutex mutex;
+ };
+
+ gb_internal void rw_mutex_lock(RwMutex *m) {
+ mutex_lock(&m->mutex);
+ }
+ gb_internal bool rw_mutex_try_lock(RwMutex *m) {
+ return mutex_try_lock(&m->mutex);
+ }
+ gb_internal void rw_mutex_unlock(RwMutex *m) {
+ mutex_unlock(&m->mutex);
+ }
+
+ gb_internal void rw_mutex_shared_lock(RwMutex *m) {
+ mutex_lock(&m->mutex);
+ }
+ gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) {
+ return mutex_try_lock(&m->mutex);
+ }
+ gb_internal void rw_mutex_shared_unlock(RwMutex *m) {
+ mutex_unlock(&m->mutex);
}
#endif
+struct Parker {
+ Futex state;
+};
+enum ParkerState : u32 {
+ ParkerState_Empty = 0,
+ ParkerState_Notified = 1,
+ ParkerState_Parked = UINT32_MAX,
+};
+
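+// Parker protocol: state is Empty (0), Notified (1), or Parked (UINT32_MAX, i.e. -1 as i32). park() decrements the state; a pending notification makes it return immediately, otherwise it sleeps until an unpark stores Notified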
+gb_internal void park(Parker *p) {
+ if (p->state.fetch_sub(1, std::memory_order_acquire) == ParkerState_Notified) {
+ return;
+ }
+ for (;;) {
+ futex_wait(&p->state, ParkerState_Parked);
+		i32 notified = ParkerState_Notified;
+ if (p->state.compare_exchange_strong(notified, ParkerState_Empty, std::memory_order_acquire, std::memory_order_acquire)) {
+ return;
+ }
+ }
+}
+
+gb_internal void unpark_one(Parker *p) {
+ if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) {
+ futex_signal(&p->state);
+ }
+}
+
+gb_internal void unpark_all(Parker *p) {
+ if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) {
+ futex_broadcast(&p->state);
+ }
+}
+
gb_internal u32 thread_current_id(void) {
u32 thread_id;
@@ -364,12 +488,13 @@ gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) {
#endif
t->capacity = 1 << 14; // must be a power of 2
- t->queue = (WorkerTask *)calloc(sizeof(WorkerTask), t->capacity);
+ t->queue = gb_alloc_array(heap_allocator(), WorkerTask, t->capacity);
t->head_and_tail = 0;
t->pool = pool;
t->idx = idx;
}
+
gb_internal void thread_init_and_start(ThreadPool *pool, Thread *t, isize idx) {
thread_init(pool, t, idx);
isize stack_size = 0;
@@ -400,6 +525,8 @@ gb_internal void thread_join_and_destroy(Thread *t) {
pthread_join(t->posix_handle, NULL);
t->posix_handle = 0;
#endif
+
+ gb_free(heap_allocator(), t->queue);
}
gb_internal void thread_set_name(Thread *t, char const *name) {
@@ -441,24 +568,25 @@ gb_internal void thread_set_name(Thread *t, char const *name) {
#include <linux/futex.h>
#include <sys/syscall.h>
-typedef std::atomic<int32_t> Futex;
-typedef volatile int32_t Footex;
+gb_internal void futex_signal(Futex *addr) {
+ int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL, 0);
+ if (ret == -1) {
+ perror("Futex wake");
+ GB_PANIC("Failed in futex wake!\n");
+ }
+}
-gb_internal void tpool_wake_addr(Futex *addr) {
- for (;;) {
- int ret = syscall(SYS_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0);
- if (ret == -1) {
- perror("Futex wake");
- GB_PANIC("Failed in futex wake!\n");
- } else if (ret > 0) {
- return;
- }
+gb_internal void futex_broadcast(Futex *addr) {
+ int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL, 0);
+ if (ret == -1) {
+ perror("Futex wake");
+ GB_PANIC("Failed in futex wake!\n");
}
}
-gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
+gb_internal void futex_wait(Futex *addr, Footex val) {
for (;;) {
- int ret = syscall(SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
+ int ret = syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL, 0);
if (ret == -1) {
if (errno != EAGAIN) {
perror("Futex wait");
@@ -479,14 +607,15 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
#include <sys/types.h>
#include <sys/umtx.h>
-typedef std::atomic<int32_t> Futex;
-typedef volatile int32_t Footex;
-
-gb_internal void tpool_wake_addr(Futex *addr) {
+gb_internal void futex_signal(Futex *addr) {
_umtx_op(addr, UMTX_OP_WAKE, 1, 0, 0);
}
-gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
+gb_internal void futex_broadcast(Futex *addr) {
+ _umtx_op(addr, UMTX_OP_WAKE, INT32_MAX, 0, 0);
+}
+
+gb_internal void futex_wait(Futex *addr, Footex val) {
for (;;) {
int ret = _umtx_op(addr, UMTX_OP_WAIT_UINT, val, 0, NULL);
if (ret == 0) {
@@ -508,12 +637,26 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
#include <sys/futex.h>
-typedef std::atomic<int32_t> Futex;
-typedef volatile int32_t Footex;
+gb_internal void futex_signal(Futex *f) {
+ for (;;) {
+ int ret = futex((volatile uint32_t *)f, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL);
+ if (ret == -1) {
+ if (errno == ETIMEDOUT || errno == EINTR) {
+ continue;
+ }
+
+ perror("Futex wake");
+ GB_PANIC("futex wake fail");
+ } else if (ret == 1) {
+ return;
+ }
+ }
+}
+
-gb_internal void tpool_wake_addr(Futex *addr) {
+gb_internal void futex_broadcast(Futex *f) {
for (;;) {
- int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL);
+ int ret = futex((volatile uint32_t *)f, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL);
if (ret == -1) {
if (errno == ETIMEDOUT || errno == EINTR) {
continue;
@@ -527,11 +670,11 @@ gb_internal void tpool_wake_addr(Futex *addr) {
}
}
-gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
+gb_internal void futex_wait(Futex *f, Footex val) {
for (;;) {
- int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL);
+ int ret = futex((volatile uint32_t *)f, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL);
if (ret == -1) {
- if (*addr != val) {
+ if (*f != val) {
return;
}
@@ -547,46 +690,58 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
#elif defined(GB_SYSTEM_OSX)
-typedef std::atomic<int64_t> Futex;
-typedef volatile int64_t Footex;
-
#define UL_COMPARE_AND_WAIT 0x00000001
#define ULF_NO_ERRNO 0x01000000
extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value, uint32_t timeout); /* timeout is specified in microseconds */
extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);
-gb_internal void tpool_wake_addr(Futex *addr) {
+gb_internal void futex_signal(Futex *f) {
for (;;) {
- int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0);
+ int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, 0);
if (ret >= 0) {
return;
}
- ret = -ret;
- if (ret == EINTR || ret == EFAULT) {
+ if (ret == -EINTR || ret == -EFAULT) {
+ continue;
+ }
+ if (ret == -ENOENT) {
+ return;
+ }
+ GB_PANIC("Failed in futex wake!\n");
+ }
+}
+
+gb_internal void futex_broadcast(Futex *f) {
+ for (;;) {
+ enum { ULF_WAKE_ALL = 0x00000100 };
+ int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, f, 0);
+ if (ret == 0) {
+ return;
+ }
+ if (ret == -EINTR || ret == -EFAULT) {
continue;
}
- if (ret == ENOENT) {
+ if (ret == -ENOENT) {
return;
}
GB_PANIC("Failed in futex wake!\n");
}
}
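Because the __ulock_* calls are made with ULF_NO_ERRNO, failures come back as a negative errno value in the return code rather than through errno itself, which is why the rewritten checks compare against negated constants directly instead of flipping the sign of ret first:

    // ret == -EINTR or -EFAULT : transient failure, retry the wake
    // ret == -ENOENT           : no thread is waiting on the address, done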
-gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
+gb_internal void futex_wait(Futex *f, Footex val) {
for (;;) {
- int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, val, 0);
+ int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, val, 0);
if (ret >= 0) {
- if (*addr != val) {
+ if (*f != val) {
return;
}
continue;
}
- ret = -ret;
- if (ret == EINTR || ret == EFAULT) {
- continue;
+ if (ret == -EINTR || ret == -EFAULT) {
+ continue;
}
- if (ret == ENOENT) {
+ if (ret == -ENOENT) {
return;
}
@@ -594,18 +749,19 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
}
}
#elif defined(GB_SYSTEM_WINDOWS)
-typedef std::atomic<int64_t> Futex;
-typedef volatile int64_t Footex;
-gb_internal void tpool_wake_addr(Futex *addr) {
- WakeByAddressSingle((void *)addr);
+gb_internal void futex_signal(Futex *f) {
+ WakeByAddressSingle(f);
}
-gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
- for (;;) {
- WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE);
- if (*addr != val) break;
- }
+gb_internal void futex_broadcast(Futex *f) {
+ WakeByAddressAll(f);
+}
+
+gb_internal void futex_wait(Futex *f, Footex val) {
+ do {
+ WaitOnAddress(f, (void *)&val, sizeof(val), INFINITE);
+ } while (f->load() == val);
}
#endif
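All five back-ends above now expose the same three-call interface; for reference, the primitive each platform maps onto (summarized from the hunks above):

    platform   futex_signal            futex_broadcast               futex_wait
    Linux      FUTEX_WAKE, 1           FUTEX_WAKE, INT32_MAX         FUTEX_WAIT
    FreeBSD    UMTX_OP_WAKE, 1         UMTX_OP_WAKE, INT32_MAX       UMTX_OP_WAIT_UINT
    OpenBSD    futex FUTEX_WAKE, 1     futex FUTEX_WAKE, INT32_MAX   futex FUTEX_WAIT
    macOS      __ulock_wake            __ulock_wake | ULF_WAKE_ALL   __ulock_wait
    Windows    WakeByAddressSingle     WakeByAddressAll              WaitOnAddress

On Windows, WaitOnAddress can return spuriously, hence the do/while loop that re-checks the value after every wake.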
diff --git a/src/types.cpp b/src/types.cpp
index 5bddfc79e..69c1ebe68 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -748,6 +748,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path);
// IMPORTANT TODO(bill): Should this TypePath code be removed since type cycle checking is handled much earlier on?
struct TypePath {
+ RecursiveMutex mutex;
Array<Entity *> path; // Entity_TypeName;
bool failure;
};
@@ -758,7 +759,9 @@ gb_internal void type_path_init(TypePath *tp) {
}
gb_internal void type_path_free(TypePath *tp) {
+ mutex_lock(&tp->mutex);
array_free(&tp->path);
+ mutex_unlock(&tp->mutex);
}
gb_internal void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
@@ -787,6 +790,8 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) {
}
Entity *e = t->Named.type_name;
+ mutex_lock(&tp->mutex);
+
for (isize i = 0; i < tp->path.count; i++) {
Entity *p = tp->path[i];
if (p == e) {
@@ -795,12 +800,19 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) {
}
array_add(&tp->path, e);
+
+ mutex_unlock(&tp->mutex);
+
return true;
}
gb_internal void type_path_pop(TypePath *tp) {
- if (tp != nullptr && tp->path.count > 0) {
- array_pop(&tp->path);
+ if (tp != nullptr) {
+ mutex_lock(&tp->mutex);
+ if (tp->path.count > 0) {
+ array_pop(&tp->path);
+ }
+ mutex_unlock(&tp->mutex);
}
}
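With the RecursiveMutex in place, the push/pop pair can be called from multiple checker threads. It is used bracket-style around the recursive size/alignment queries; condensed from type_align_of_internal later in this diff:

    bool pop = type_path_push(path, field_type); // records the entity, detects cycles
    if (path->failure) {
        return FAILURE_ALIGNMENT; // an illegal cycle was already reported
    }
    i64 align = type_align_of_internal(field_type, path);
    if (pop) type_path_pop(path);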
@@ -808,10 +820,6 @@ gb_internal void type_path_pop(TypePath *tp) {
#define FAILURE_SIZE 0
#define FAILURE_ALIGNMENT 0
-gb_internal void init_type_mutex(void) {
- mutex_init(&g_type_mutex);
-}
-
gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
if (ptr_set_exists(s, t)) {
return true;
@@ -827,8 +835,7 @@ gb_internal bool type_ptr_set_exists(PtrSet<Type *> *s, Type *t) {
// TODO(bill, 2019-10-05): This is very slow and it's probably a lot
// faster to cache types correctly
- for (auto const &entry : *s) {
- Type *f = entry.ptr;
+ for (Type *f : *s) {
if (are_types_identical(t, f)) {
ptr_set_add(s, t);
return true;
@@ -2521,9 +2528,58 @@ gb_internal bool lookup_subtype_polymorphic_selection(Type *dst, Type *src, Sele
gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names);
gb_internal bool are_types_identical(Type *x, Type *y) {
+ if (x == y) {
+ return true;
+ }
+
+ if ((x == nullptr && y != nullptr) ||
+ (x != nullptr && y == nullptr)) {
+ return false;
+ }
+
+ if (x->kind == Type_Named) {
+ Entity *e = x->Named.type_name;
+ if (e->TypeName.is_type_alias) {
+ x = x->Named.base;
+ }
+ }
+ if (y->kind == Type_Named) {
+ Entity *e = y->Named.type_name;
+ if (e->TypeName.is_type_alias) {
+ y = y->Named.base;
+ }
+ }
+ if (x->kind != y->kind) {
+ return false;
+ }
+
return are_types_identical_internal(x, y, false);
}
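Hoisting the alias unwrapping into are_types_identical lets the common x == y and kind-mismatch cases exit before the internal comparison. Unwrapping only aliases, and not all named types, preserves Odin's distinct-type semantics; in Odin terms (illustrative):

    // A :: int          // alias: compared as its base type, int
    // B :: distinct int // stays Type_Named, so B and int are not identical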
gb_internal bool are_types_identical_unique_tuples(Type *x, Type *y) {
+ if (x == y) {
+ return true;
+ }
+
+ if (!x || !y) {
+ return false;
+ }
+
+ if (x->kind == Type_Named) {
+ Entity *e = x->Named.type_name;
+ if (e->TypeName.is_type_alias) {
+ x = x->Named.base;
+ }
+ }
+ if (y->kind == Type_Named) {
+ Entity *e = y->Named.type_name;
+ if (e->TypeName.is_type_alias) {
+ y = y->Named.base;
+ }
+ }
+ if (x->kind != y->kind) {
+ return false;
+ }
+
return are_types_identical_internal(x, y, true);
}
@@ -2533,26 +2589,27 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
return true;
}
- if ((x == nullptr && y != nullptr) ||
- (x != nullptr && y == nullptr)) {
+ if (!x || !y) {
return false;
}
+ #if 0
if (x->kind == Type_Named) {
Entity *e = x->Named.type_name;
- if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
+ if (e->TypeName.is_type_alias) {
x = x->Named.base;
}
}
if (y->kind == Type_Named) {
Entity *e = y->Named.type_name;
- if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
+ if (e->TypeName.is_type_alias) {
y = y->Named.base;
}
}
if (x->kind != y->kind) {
return false;
}
+ #endif
switch (x->kind) {
case Type_Generic:
@@ -3350,35 +3407,55 @@ gb_internal i64 type_size_of(Type *t) {
if (t == nullptr) {
return 0;
}
- // NOTE(bill): Always calculate the size when it is a Type_Basic
- if (t->kind == Type_Named && t->cached_size >= 0) {
- } else if (t->kind != Type_Basic && t->cached_size >= 0) {
- return t->cached_size;
+ i64 size = -1;
+ if (t->kind == Type_Basic) {
+ GB_ASSERT_MSG(is_type_typed(t), "%s", type_to_string(t));
+ switch (t->Basic.kind) {
+ case Basic_string: size = 2*build_context.word_size; break;
+ case Basic_cstring: size = build_context.word_size; break;
+ case Basic_any: size = 2*build_context.word_size; break;
+ case Basic_typeid: size = build_context.word_size; break;
+ case Basic_int: case Basic_uint: case Basic_uintptr: case Basic_rawptr:
+ size = build_context.word_size;
+ break;
+ default:
+ size = t->Basic.size;
+ break;
+ }
+ t->cached_size.store(size);
+ return size;
+ } else if (t->kind != Type_Named && t->cached_size >= 0) {
+ return t->cached_size.load();
+ } else {
+ TypePath path{};
+ type_path_init(&path);
+ {
+ MUTEX_GUARD(&g_type_mutex);
+ size = type_size_of_internal(t, &path);
+ t->cached_size.store(size);
+ }
+ type_path_free(&path);
+ return size;
}
- TypePath path = {0};
- type_path_init(&path);
- t->cached_size = type_size_of_internal(t, &path);
- type_path_free(&path);
- return t->cached_size;
}
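The rewritten type_size_of is a double-checked cache: the hot path is a lock-free load of the now-atomic cached_size, and only a miss takes g_type_mutex to compute and publish the result. A condensed sketch of the pattern, assuming MUTEX_GUARD is a scope-exit lock/unlock guard and -1 means "not yet computed":

    i64 cached = t->cached_size.load();
    if (cached >= 0) {
        return cached;                        // fast path, no lock taken
    }
    MUTEX_GUARD(&g_type_mutex);               // slow path, computed once
    i64 size = type_size_of_internal(t, &path);
    t->cached_size.store(size);               // later callers hit the fast path
    return size;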
gb_internal i64 type_align_of(Type *t) {
if (t == nullptr) {
return 1;
}
- // NOTE(bill): Always calculate the size when it is a Type_Basic
- if (t->kind == Type_Named && t->cached_align >= 0) {
-
- } if (t->kind != Type_Basic && t->cached_align > 0) {
- return t->cached_align;
+ if (t->kind != Type_Named && t->cached_align > 0) {
+ return t->cached_align.load();
}
- TypePath path = {0};
+ TypePath path{};
type_path_init(&path);
- t->cached_align = type_align_of_internal(t, &path);
+ {
+ MUTEX_GUARD(&g_type_mutex);
+ t->cached_align.store(type_align_of_internal(t, &path));
+ }
type_path_free(&path);
- return t->cached_align;
+ return t->cached_align.load();
}
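Note the asymmetric cache tests between the two functions:

    // cached_size  >= 0 : 0 is a legal size (e.g. an empty struct); negative marks "unset"
    // cached_align >  0 : every valid alignment is at least 1, so 0 doubles as
    //                     "unset" and FAILURE_ALIGNMENT (see the defines above)

so a failed alignment computation is never served from the cache.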
@@ -3387,8 +3464,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
if (t->failure) {
return FAILURE_ALIGNMENT;
}
- mutex_lock(&g_type_mutex);
- defer (mutex_unlock(&g_type_mutex));
t = base_type(t);
@@ -3485,39 +3560,25 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
if (t->Struct.custom_align > 0) {
return gb_max(t->Struct.custom_align, 1);
}
- if (t->Struct.is_raw_union) {
- i64 max = 1;
- for_array(i, t->Struct.fields) {
- Type *field_type = t->Struct.fields[i]->type;
- bool pop = type_path_push(path, field_type);
- if (path->failure) {
- return FAILURE_ALIGNMENT;
- }
- i64 align = type_align_of_internal(field_type, path);
- if (pop) type_path_pop(path);
- if (max < align) {
- max = align;
- }
- }
- return max;
- } else if (t->Struct.fields.count > 0) {
- i64 max = 1;
- // NOTE(bill): Check the fields to check for cyclic definitions
- for_array(i, t->Struct.fields) {
- Type *field_type = t->Struct.fields[i]->type;
- bool pop = type_path_push(path, field_type);
- if (path->failure) return FAILURE_ALIGNMENT;
- i64 align = type_align_of_internal(field_type, path);
- if (pop) type_path_pop(path);
- if (max < align) {
- max = align;
- }
+
+ if (t->Struct.is_packed) {
+ return 1;
+ }
+
+ i64 max = 1;
+ for_array(i, t->Struct.fields) {
+ Type *field_type = t->Struct.fields[i]->type;
+ bool pop = type_path_push(path, field_type);
+ if (path->failure) {
+ return FAILURE_ALIGNMENT;
}
- if (t->Struct.is_packed) {
- return 1;
+ i64 align = type_align_of_internal(field_type, path);
+ if (pop) type_path_pop(path);
+ if (max < align) {
+ max = align;
}
- return max;
}
+ return max;
} break;
case Type_BitSet: {
@@ -3583,8 +3644,7 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
}
gb_internal bool type_set_offsets(Type *t) {
- mutex_lock(&g_type_mutex);
- defer (mutex_unlock(&g_type_mutex));
+ MUTEX_GUARD(&g_type_mutex); // TODO(bill): only per struct
t = base_type(t);
if (t->kind == Type_Struct) {
@@ -3613,9 +3673,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
if (t->failure) {
return FAILURE_SIZE;
}
- mutex_lock(&g_type_mutex);
- defer (mutex_unlock(&g_type_mutex));
-
switch (t->kind) {
case Type_Named: {