author    Jesse Meyer <jesse.r.meyer@me.com>   2026-02-03 20:52:52 -0500
committer GitHub <noreply@github.com>          2026-02-03 20:52:52 -0500
commit    b8276065f9296754d1e76e25d6661b7b5567e3e1 (patch)
tree      7b8783d43193c16e4ef393a175fede50a8fe52dd /src
parent    bd6148dd6b77920cf64fea8804b205e8257e8a66 (diff)
parent    270df36468df8f89e1ac944205272469142c7a65 (diff)
Merge branch 'master' into lto-support
Diffstat (limited to 'src')
-rw-r--r--  src/build_settings.cpp         29
-rw-r--r--  src/check_builtin.cpp          27
-rw-r--r--  src/check_decl.cpp              8
-rw-r--r--  src/checker.cpp                40
-rw-r--r--  src/checker.hpp                14
-rw-r--r--  src/checker_builtin_procs.hpp   4
-rw-r--r--  src/entity.cpp                  8
-rw-r--r--  src/exact_value.cpp             5
-rw-r--r--  src/linker.cpp                  6
-rw-r--r--  src/llvm_backend.hpp            1
-rw-r--r--  src/llvm_backend_const.cpp      3
-rw-r--r--  src/llvm_backend_debug.cpp     21
-rw-r--r--  src/llvm_backend_expr.cpp       2
-rw-r--r--  src/llvm_backend_proc.cpp      18
-rw-r--r--  src/llvm_backend_stmt.cpp      26
-rw-r--r--  src/llvm_backend_utility.cpp   14
-rw-r--r--  src/name_canonicalization.cpp   4
-rw-r--r--  src/parser.hpp                 22
-rw-r--r--  src/thread_pool.cpp            14
-rw-r--r--  src/threading.cpp               4
-rw-r--r--  src/types.cpp                  34
21 files changed, 222 insertions, 82 deletions
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index e0017baea..a7928721a 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -1728,6 +1728,29 @@ gb_internal char *token_pos_to_string(TokenPos const &pos) {
return s;
}
+gb_internal String normalize_minimum_os_version_string(String version) {
+ GB_ASSERT(version.len > 0);
+
+ gbString normalized = gb_string_make(permanent_allocator(), "");
+
+ int granularity = 0;
+ String_Iterator it = {version, 0};
+ for (;; granularity++) {
+ String str = string_split_iterator(&it, '.');
+ if (str == "") break;
+ if (granularity > 0) {
+ normalized = gb_string_appendc(normalized, ".");
+ }
+ normalized = gb_string_append_length(normalized, str.text, str.len);
+ }
+
+ for (; granularity < 3; granularity++) {
+ normalized = gb_string_appendc(normalized, ".0");
+ }
+
+ return make_string_c(normalized);
+}
+
gb_internal void init_build_context(TargetMetrics *cross_target, Subtarget subtarget) {
BuildContext *bc = &build_context;
@@ -1861,7 +1884,7 @@ gb_internal void init_build_context(TargetMetrics *cross_target, Subtarget subta
if (bc->disable_red_zone) {
if (is_arch_wasm() && bc->metrics.os == TargetOs_freestanding) {
- gb_printf_err("-disable-red-zone is not support for this target");
+ gb_printf_err("-disable-red-zone is not supported on this target");
gb_exit(1);
}
}
@@ -1997,10 +2020,12 @@ gb_internal void init_build_context(TargetMetrics *cross_target, Subtarget subta
} else if (subtarget == Subtarget_iPhone || subtarget == Subtarget_iPhoneSimulator) {
// NOTE(harold): We default to 17.4 on iOS because that's when os_sync_wait_on_address was added and
// we'd like to avoid any potential App Store issues by using the private ulock_* there.
- bc->minimum_os_version_string = str_lit("17.4");
+ bc->minimum_os_version_string = str_lit("17.4.0");
}
}
+ bc->minimum_os_version_string = normalize_minimum_os_version_string(bc->minimum_os_version_string);
+
if (subtarget == Subtarget_iPhoneSimulator) {
// For the iPhoneSimulator subtarget, the version must be between 'ios' and '-simulator'.
String suffix = str_lit("-simulator");
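
Roughly, normalize_minimum_os_version_string pads a Darwin minimum-OS version to three dot-separated components, so the value passed along later is always of the form major.minor.patch. A minimal standalone sketch of the same padding, written against std::string rather than the compiler's String/gbString helpers (the names below are illustrative only, not part of the patch):

#include <cstdio>
#include <string>

// Illustrative re-implementation: count the dot-separated components that are
// already present, then append ".0" until there are three of them.
static std::string normalize_min_os_version(const std::string &version) {
    int granularity = version.empty() ? 0 : 1;
    for (char c : version) {
        if (c == '.') granularity += 1;
    }
    std::string normalized = version;
    for (; granularity < 3; granularity++) {
        normalized += ".0";
    }
    return normalized;
}

int main() {
    std::printf("%s\n", normalize_min_os_version("17.4").c_str());   // 17.4.0
    std::printf("%s\n", normalize_min_os_version("11").c_str());     // 11.0.0
    std::printf("%s\n", normalize_min_os_version("13.0.1").c_str()); // 13.0.1
}
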
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index e732d8ec3..1a094c1f0 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -534,12 +534,12 @@ gb_internal bool check_builtin_objc_procedure(CheckerContext *c, Operand *operan
return false;
}
- if (ident.entity->kind != Entity_Procedure) {
+ if (ident.entity.load()->kind != Entity_Procedure) {
gbString e = expr_to_string(handler_node);
ERROR_BLOCK();
error(handler.expr, "'%.*s' expected a direct reference to a procedure", LIT(builtin_name));
- if(ident.entity->kind == Entity_Variable) {
+ if(ident.entity.load()->kind == Entity_Variable) {
error_line("\tSuggestion: Variables referencing a procedure are not allowed, they are not a direct procedure reference.");
} else {
error_line("\tSuggestion: Ensure '%s' is not a runtime-evaluated expression.", e); // NOTE(harold): Is this case possible to hit?
@@ -5204,6 +5204,8 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
case BuiltinProc_count_zeros:
case BuiltinProc_count_trailing_zeros:
case BuiltinProc_count_leading_zeros:
+ case BuiltinProc_count_trailing_ones:
+ case BuiltinProc_count_leading_ones:
case BuiltinProc_reverse_bits:
{
Operand x = {};
@@ -5301,6 +5303,27 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
v += 1;
}
break;
+
+ case BuiltinProc_count_trailing_ones:
+ for (u64 i = 0; i < bit_size; i++) {
+ u8 b = cast(u8)(i & 7);
+ u8 j = cast(u8)(i >> 3);
+ if ((rop[j] & (1 << b)) == 0) {
+ break;
+ }
+ v += 1;
+ }
+ break;
+ case BuiltinProc_count_leading_ones:
+ for (u64 i = bit_size-1; i < bit_size; i--) {
+ u8 b = cast(u8)(i & 7);
+ u8 j = cast(u8)(i >> 3);
+ if ((rop[j] & (1 << b)) == 0) {
+ break;
+ }
+ v += 1;
+ }
+ break;
}
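
The two new constant-folding cases walk the value's little-endian byte buffer (rop) one bit at a time and stop at the first zero bit: upward from bit 0 for trailing ones, downward from the top bit for leading ones (the downward loop terminates via unsigned wrap-around of i). A small self-contained sketch of the trailing-ones scan over a little-endian byte buffer, for illustration only:

#include <cstdint>
#include <cstdio>

// Count consecutive one-bits starting from bit 0 of a little-endian buffer.
static uint64_t count_trailing_ones(const uint8_t *bytes, uint64_t bit_size) {
    uint64_t v = 0;
    for (uint64_t i = 0; i < bit_size; i++) {
        uint8_t b = (uint8_t)(i & 7);  // bit position within the byte
        uint8_t j = (uint8_t)(i >> 3); // byte index
        if ((bytes[j] & (1u << b)) == 0) {
            break;
        }
        v += 1;
    }
    return v;
}

int main() {
    uint8_t le[2] = {0x0F, 0x0F}; // the 16-bit value 0x0F0F, stored little-endian
    std::printf("%llu\n", (unsigned long long)count_trailing_ones(le, 16)); // 4
}
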
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 8019d00c3..22a74f370 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -2155,7 +2155,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
rw_mutex_unlock(&ctx->scope->mutex);
- bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated);
+ bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated.load(std::memory_order_relaxed));
if (!where_clause_ok) {
// NOTE(bill, 2019-08-31): Don't check the body as the where clauses failed
return false;
@@ -2173,15 +2173,15 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
}
GB_ASSERT(decl->proc_checked_state != ProcCheckedState_Checked);
- if (decl->defer_use_checked) {
+ if (decl->defer_use_checked.load(std::memory_order_relaxed)) {
GB_ASSERT(is_type_polymorphic(type, true));
error(token, "Defer Use Checked: %.*s", LIT(decl->entity.load()->token.string));
- GB_ASSERT(decl->defer_use_checked == false);
+ GB_ASSERT(decl->defer_use_checked.load(std::memory_order_relaxed) == false);
}
check_stmt_list(ctx, bs->stmts, Stmt_CheckScopeDecls);
- decl->defer_use_checked = true;
+ decl->defer_use_checked.store(true, std::memory_order_relaxed);
for (Ast *stmt : bs->stmts) {
if (stmt->kind == Ast_ValueDecl) {
diff --git a/src/checker.cpp b/src/checker.cpp
index 453f3e241..71ccfebc4 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -495,15 +495,18 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity
goto end;
}
if (s->parent != nullptr && (s->parent->flags & ScopeFlag_Proc) != 0) {
+ rw_mutex_shared_lock(&s->parent->mutex);
found = string_map_get(&s->parent->elements, key);
if (found) {
if ((*found)->flags & EntityFlag_Result) {
if (entity != *found) {
result = *found;
}
+ rw_mutex_shared_unlock(&s->parent->mutex);
goto end;
}
}
+ rw_mutex_shared_unlock(&s->parent->mutex);
}
string_map_set(&s->elements, key, entity);
@@ -1792,7 +1795,22 @@ gb_internal void add_untyped(CheckerContext *c, Ast *expr, AddressingMode mode,
check_set_expr_info(c, expr, mode, type, value);
}
-gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMode mode, Type *type, ExactValue const &value, bool use_mutex) {
+struct alignas(GB_CACHE_LINE_SIZE) TypeAndValueMutexStripes {
+ BlockingMutex mutex;
+ u8 padding[GB_CACHE_LINE_SIZE - gb_size_of(BlockingMutex)];
+};
+
+enum { TypeAndValueMutexStripes_COUNT = 128 };
+gb_global TypeAndValueMutexStripes tav_mutex_stripes[TypeAndValueMutexStripes_COUNT];
+
+gb_internal BlockingMutex *tav_mutex_for_node(Ast *node) {
+ GB_ASSERT(node != nullptr);
+ uintptr h = cast(uintptr)node;
+ h ^= h >> 6;
+ return &tav_mutex_stripes[h % TypeAndValueMutexStripes_COUNT].mutex;
+}
+
+gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMode mode, Type *type, ExactValue const &value) {
if (expr == nullptr) {
return;
}
@@ -1803,14 +1821,18 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo
return;
}
- BlockingMutex *mutex = &ctx->info->type_and_value_mutex;
- if (ctx->decl) {
- mutex = &ctx->decl->type_and_value_mutex;
- } else if (ctx->pkg) {
- mutex = &ctx->pkg->type_and_value_mutex;
- }
+ BlockingMutex *mutex = tav_mutex_for_node(expr);
+
+ /* Previous logic:
+ BlockingMutex *mutex = &ctx->info->type_and_value_mutex;
+ if (ctx->decl) {
+ mutex = &ctx->decl->type_and_value_mutex;
+ } else if (ctx->pkg) {
+ mutex = &ctx->pkg->type_and_value_mutex;
+ }
+ */
- if (use_mutex) mutex_lock(mutex);
+ mutex_lock(mutex);
Ast *prev_expr = nullptr;
while (prev_expr != expr) {
prev_expr = expr;
@@ -1835,7 +1857,7 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo
break;
};
}
- if (use_mutex) mutex_unlock(mutex);
+ mutex_unlock(mutex);
}
gb_internal void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) {
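
add_type_and_value now serializes writers through a fixed pool of 128 cache-line-aligned mutexes selected by hashing the AST node pointer, instead of a single mutex per declaration, package, or checker info. A rough sketch of the same pointer-hashed lock striping using std:: primitives (kStripeCount, mutex_for_node and friends are invented names, not the compiler's):

#include <cstdint>
#include <mutex>

constexpr std::size_t kCacheLine   = 64;
constexpr std::size_t kStripeCount = 128;

// alignas pads each stripe to its own cache line so neighbouring stripes do
// not false-share.
struct alignas(kCacheLine) MutexStripe {
    std::mutex mutex;
};

static MutexStripe g_stripes[kStripeCount];

static std::mutex &mutex_for_node(const void *node) {
    std::uintptr_t h = reinterpret_cast<std::uintptr_t>(node);
    h ^= h >> 6; // mix out allocation-granularity low bits before taking the modulus
    return g_stripes[h % kStripeCount].mutex;
}

// Usage: unrelated nodes rarely hash to the same stripe, so writers can mostly
// proceed in parallel.
// std::lock_guard<std::mutex> guard(mutex_for_node(expr));
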
diff --git a/src/checker.hpp b/src/checker.hpp
index f9c279a51..374aaf10d 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -221,14 +221,14 @@ struct DeclInfo {
Entity * para_poly_original;
- bool is_using;
- bool where_clauses_evaluated;
- bool foreign_require_results;
+ bool is_using;
+ bool foreign_require_results;
+ std::atomic<bool> where_clauses_evaluated;
std::atomic<ProcCheckedState> proc_checked_state;
- BlockingMutex proc_checked_mutex;
- isize defer_used;
- bool defer_use_checked;
+ BlockingMutex proc_checked_mutex;
+ isize defer_used;
+ std::atomic<bool> defer_use_checked;
CommentGroup *comment;
CommentGroup *docs;
@@ -631,7 +631,7 @@ gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **s
gb_internal Entity *scope_insert (Scope *s, Entity *entity);
-gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue const &value, bool use_mutex=true);
+gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue const &value);
gb_internal ExprInfo *check_get_expr_info (CheckerContext *c, Ast *expr);
gb_internal void add_untyped (CheckerContext *c, Ast *expression, AddressingMode mode, Type *basic_type, ExactValue const &value);
gb_internal void add_entity_use (CheckerContext *c, Ast *identifier, Entity *entity);
diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp
index 5b446cc1c..a13ffc3cd 100644
--- a/src/checker_builtin_procs.hpp
+++ b/src/checker_builtin_procs.hpp
@@ -74,6 +74,8 @@ enum BuiltinProcId {
BuiltinProc_count_zeros,
BuiltinProc_count_trailing_zeros,
BuiltinProc_count_leading_zeros,
+ BuiltinProc_count_trailing_ones,
+ BuiltinProc_count_leading_ones,
BuiltinProc_reverse_bits,
BuiltinProc_byte_swap,
@@ -453,6 +455,8 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("count_zeros"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("count_trailing_zeros"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("count_leading_zeros"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+ {STR_LIT("count_trailing_ones"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+ {STR_LIT("count_leading_ones"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("reverse_bits"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("byte_swap"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
diff --git a/src/entity.cpp b/src/entity.cpp
index 55aca8069..070b05462 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -170,7 +170,7 @@ struct Entity {
Type * type;
std::atomic<Ast *> identifier; // Can be nullptr
DeclInfo * decl_info;
- DeclInfo * parent_proc_decl; // nullptr if in file/global scope
+ std::atomic<DeclInfo *> parent_proc_decl; // nullptr if in file/global scope
AstFile * file;
AstPackage *pkg;
@@ -181,11 +181,11 @@ struct Entity {
Entity * aliased_of;
union {
- struct lbModule *code_gen_module;
+ std::atomic<struct lbModule *> code_gen_module;
struct cgModule *cg_module;
};
union {
- struct lbProcedure *code_gen_procedure;
+ std::atomic<struct lbProcedure *> code_gen_procedure;
struct cgProcedure *cg_procedure;
};
@@ -370,7 +370,7 @@ gb_internal Entity *alloc_entity_using_variable(Entity *parent, Token token, Typ
token.pos = parent->token.pos;
Entity *entity = alloc_entity(Entity_Variable, parent->scope, token, type);
entity->using_parent = parent;
- entity->parent_proc_decl = parent->parent_proc_decl;
+ entity->parent_proc_decl.store(parent->parent_proc_decl, std::memory_order_relaxed);
entity->using_expr = using_expr;
entity->flags |= EntityFlag_Using;
entity->flags |= EntityFlag_Used;
diff --git a/src/exact_value.cpp b/src/exact_value.cpp
index 0f425e043..fa99ed3fe 100644
--- a/src/exact_value.cpp
+++ b/src/exact_value.cpp
@@ -1,8 +1,6 @@
#include <math.h>
#include <stdlib.h>
-gb_global BlockingMutex hash_exact_value_mutex;
-
struct Ast;
struct HashKey;
struct Type;
@@ -54,9 +52,6 @@ struct ExactValue {
gb_global ExactValue const empty_exact_value = {};
gb_internal uintptr hash_exact_value(ExactValue v) {
- mutex_lock(&hash_exact_value_mutex);
- defer (mutex_unlock(&hash_exact_value_mutex));
-
uintptr res = 0;
switch (v.kind) {
diff --git a/src/linker.cpp b/src/linker.cpp
index 7969d776e..e48486d9a 100644
--- a/src/linker.cpp
+++ b/src/linker.cpp
@@ -181,7 +181,7 @@ try_cross_linking:;
case Linker_radlink: section_name = str_lit("rad-link"); break;
#endif
default:
- gb_printf_err("'%.*s' linker is not support for this platform\n", LIT(linker_choices[build_context.linker_choice]));
+ gb_printf_err("'%.*s' linker is not supported on this platform\n", LIT(linker_choices[build_context.linker_choice]));
return 1;
}
@@ -978,6 +978,10 @@ try_cross_linking:;
if (build_context.lto_kind != LTO_None) {
link_command_line = gb_string_appendc(link_command_line, " -flto=thin");
link_command_line = gb_string_append_fmt(link_command_line, " -flto-jobs=%d ", build_context.thread_count);
+
+ if (is_osx && !build_context.minimum_os_version_string_given) {
+ link_command_line = gb_string_appendc(link_command_line, " -Wno-override-module ");
+ }
}
link_command_line = gb_string_appendc(link_command_line, object_files);
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index 3491c0d39..a8076d75e 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -290,6 +290,7 @@ struct lbDefer {
isize scope_index;
isize context_stack_count;
lbBlock * block;
+ TokenPos pos;
union {
Ast *stmt;
struct {
diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp
index 8ce2137ab..57c07f5c9 100644
--- a/src/llvm_backend_const.cpp
+++ b/src/llvm_backend_const.cpp
@@ -736,13 +736,14 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, lb
}
LLVMValueRef tag = LLVMConstInt(LLVMStructGetTypeAtIndex(llvm_type, 1), tag_value, false);
LLVMValueRef padding = nullptr;
- LLVMValueRef values[3] = {cv.value, tag, padding};
isize value_count = 2;
if (LLVMCountStructElementTypes(llvm_type) > 2) {
value_count = 3;
padding = LLVMConstNull(LLVMStructGetTypeAtIndex(llvm_type, 2));
}
+
+ LLVMValueRef values[3] = {cv.value, tag, padding};
res.value = llvm_const_named_struct_internal(m, llvm_type, values, value_count);
res.type = original_type;
return res;
diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp
index 187aebf7c..e9b0f72cb 100644
--- a/src/llvm_backend_debug.cpp
+++ b/src/llvm_backend_debug.cpp
@@ -729,22 +729,22 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
case Basic_i128be: return lb_debug_type_basic_type(m, str_lit("i128be"), 128, LLVMDWARFTypeEncoding_Signed, LLVMDIFlagBigEndian);
case Basic_u128be: return lb_debug_type_basic_type(m, str_lit("u128be"), 128, LLVMDWARFTypeEncoding_Unsigned, LLVMDIFlagBigEndian);
- case Basic_f16be: return lb_debug_type_basic_type(m, str_lit("f16be"), 16, LLVMDWARFTypeEncoding_Float, LLVMDIFlagLittleEndian);
- case Basic_f32be: return lb_debug_type_basic_type(m, str_lit("f32be"), 32, LLVMDWARFTypeEncoding_Float, LLVMDIFlagLittleEndian);
- case Basic_f64be: return lb_debug_type_basic_type(m, str_lit("f64be"), 64, LLVMDWARFTypeEncoding_Float, LLVMDIFlagLittleEndian);
+ case Basic_f16be: return lb_debug_type_basic_type(m, str_lit("f16be"), 16, LLVMDWARFTypeEncoding_Float, LLVMDIFlagBigEndian);
+ case Basic_f32be: return lb_debug_type_basic_type(m, str_lit("f32be"), 32, LLVMDWARFTypeEncoding_Float, LLVMDIFlagBigEndian);
+ case Basic_f64be: return lb_debug_type_basic_type(m, str_lit("f64be"), 64, LLVMDWARFTypeEncoding_Float, LLVMDIFlagBigEndian);
case Basic_complex32:
{
LLVMMetadataRef elements[2] = {};
elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f16, 0*16);
elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f16, 1*16);
- return lb_debug_basic_struct(m, str_lit("complex32"), 64, 32, elements, gb_count_of(elements));
+ return lb_debug_basic_struct(m, str_lit("complex32"), 32, 16, elements, gb_count_of(elements));
}
case Basic_complex64:
{
LLVMMetadataRef elements[2] = {};
elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f32, 0*32);
- elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f32, 2*32);
+ elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f32, 1*32);
return lb_debug_basic_struct(m, str_lit("complex64"), 64, 32, elements, gb_count_of(elements));
}
case Basic_complex128:
@@ -762,7 +762,7 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f16, 1*16);
elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f16, 2*16);
elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f16, 3*16);
- return lb_debug_basic_struct(m, str_lit("quaternion64"), 128, 32, elements, gb_count_of(elements));
+ return lb_debug_basic_struct(m, str_lit("quaternion64"), 64, 16, elements, gb_count_of(elements));
}
case Basic_quaternion128:
{
@@ -780,7 +780,7 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f64, 1*64);
elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f64, 2*64);
elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f64, 3*64);
- return lb_debug_basic_struct(m, str_lit("quaternion256"), 256, 32, elements, gb_count_of(elements));
+ return lb_debug_basic_struct(m, str_lit("quaternion256"), 256, 64, elements, gb_count_of(elements));
}
@@ -792,6 +792,8 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
}
case Basic_string:
{
+ // NOTE(bill): size_of(^u8) <= size_of(int)
+
LLVMMetadataRef elements[2] = {};
elements[0] = lb_debug_struct_field(m, str_lit("data"), t_u8_ptr, 0);
elements[1] = lb_debug_struct_field(m, str_lit("len"), t_int, int_bits);
@@ -805,6 +807,8 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
case Basic_string16:
{
+ // NOTE(bill): size_of(^u16) <= size_of(int)
+
LLVMMetadataRef elements[2] = {};
elements[0] = lb_debug_struct_field(m, str_lit("data"), t_u16_ptr, 0);
elements[1] = lb_debug_struct_field(m, str_lit("len"), t_int, int_bits);
@@ -820,7 +824,7 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
{
LLVMMetadataRef elements[2] = {};
elements[0] = lb_debug_struct_field(m, str_lit("data"), t_rawptr, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("id"), t_typeid, 64);
+ elements[1] = lb_debug_struct_field(m, str_lit("id"), t_typeid, 64); // typeid is always 64 bits in size and 64 bits in alignment
return lb_debug_basic_struct(m, str_lit("any"), 128, 64, elements, gb_count_of(elements));
}
@@ -843,6 +847,7 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
GB_PANIC("Type_Named should be handled in lb_debug_type separately");
case Type_SoaPointer:
+ // TODO(bill): This is technically incorrect and needs fixing
return LLVMDIBuilderCreatePointerType(m->debug_builder, lb_debug_type(m, type->SoaPointer.elem), int_bits, int_bits, 0, nullptr, 0);
case Type_Pointer:
return LLVMDIBuilderCreatePointerType(m->debug_builder, lb_debug_type(m, type->Pointer.elem), ptr_bits, ptr_bits, 0, nullptr, 0);
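
For context on the bit sizes and alignments in this hunk: Odin's complex32 is two f16 components (2 × 16 = 32 bits, 16-bit aligned), complex64 is two f32s with imag at bit offset 1 × 32, quaternion64 is four f16s (4 × 16 = 64 bits, 16-bit aligned), and quaternion256 is four f64s (4 × 64 = 256 bits, 64-bit aligned) — that component arithmetic is what the lb_debug_basic_struct arguments above encode.
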
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index aba196af8..d4acfa196 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -1377,6 +1377,8 @@ gb_internal LLVMValueRef lb_integer_modulo(lbProcedure *p, LLVMValueRef lhs, LLV
if (LLVMIsConstant(rhs)) {
if (LLVMIsNull(rhs)) {
switch (behaviour) {
+ case IntegerDivisionByZero_Trap:
+ break;
case IntegerDivisionByZero_Self:
return zero;
case IntegerDivisionByZero_Zero:
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index cb8ffcf91..7897e17cd 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -689,7 +689,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) {
lbAddr res = {};
if (p->entity && p->entity->decl_info &&
- p->entity->decl_info->defer_use_checked &&
+ p->entity->decl_info->defer_use_checked.load(std::memory_order_relaxed) &&
p->entity->decl_info->defer_used == 0) {
// NOTE(bill): this is a bodge to get around the issue of the problem BELOW
@@ -996,7 +996,9 @@ gb_internal lbValue lb_emit_call_internal(lbProcedure *p, lbValue value, lbValue
break;
case ProcTailing_must_tail:
LLVMSetTailCall(ret, true);
+ #if LLVM_VERSION_MAJOR > 14
LLVMSetTailCallKind(ret, LLVMTailCallKindMustTail);
+ #endif
break;
}
@@ -1301,7 +1303,7 @@ gb_internal lbValue lb_emit_call(lbProcedure *p, lbValue value, Array<lbValue> c
}
}
- lb_add_defer_proc(p, p->scope_index, deferred, result_as_args);
+ lb_add_defer_proc(p, p->scope_index, deferred, result_as_args, e->token.pos);
}
}
@@ -2228,8 +2230,10 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
Entity *e = entity_of_node(ident);
GB_ASSERT(e != nullptr);
- if (e->parent_proc_decl != nullptr && e->parent_proc_decl->entity != nullptr) {
- procedure = e->parent_proc_decl->entity.load()->token.string;
+ DeclInfo *parent_proc_decl = e->parent_proc_decl.load(std::memory_order_relaxed);
+ if (parent_proc_decl != nullptr &&
+ parent_proc_decl->entity != nullptr) {
+ procedure = parent_proc_decl->entity.load()->token.string;
} else {
procedure = str_lit("");
}
@@ -2859,6 +2863,12 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
case BuiltinProc_count_leading_zeros:
return lb_emit_count_leading_zeros(p, lb_build_expr(p, ce->args[0]), tv.type);
+ case BuiltinProc_count_trailing_ones:
+ return lb_emit_count_trailing_ones(p, lb_build_expr(p, ce->args[0]), tv.type);
+ case BuiltinProc_count_leading_ones:
+ return lb_emit_count_leading_ones(p, lb_build_expr(p, ce->args[0]), tv.type);
+
+
case BuiltinProc_count_ones:
return lb_emit_count_ones(p, lb_build_expr(p, ce->args[0]), tv.type);
case BuiltinProc_count_zeros:
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 81755af2d..05ec10cda 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -847,6 +847,10 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
lb_close_scope(p, lbDeferExit_Default, nullptr, node->left);
lb_pop_target_list(p);
+ if (p->debug_info != nullptr) {
+ LLVMSetCurrentDebugLocation2(p->builder, lb_debug_end_location_from_ast(p, rs->body));
+ }
+
if (check != nullptr) {
lb_emit_jump(p, check);
lb_start_block(p, check);
@@ -979,6 +983,9 @@ gb_internal void lb_build_range_tuple(lbProcedure *p, AstRangeStmt *rs, Scope *s
lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
lb_pop_target_list(p);
+ if (p->debug_info != nullptr) {
+ LLVMSetCurrentDebugLocation2(p->builder, lb_debug_end_location_from_ast(p, rs->body));
+ }
lb_emit_jump(p, loop);
lb_start_block(p, done);
}
@@ -1108,6 +1115,9 @@ gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs
lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
lb_pop_target_list(p);
+ if (p->debug_info != nullptr) {
+ LLVMSetCurrentDebugLocation2(p->builder, lb_debug_end_location_from_ast(p, rs->body));
+ }
lb_emit_jump(p, loop);
lb_start_block(p, done);
@@ -1330,6 +1340,9 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
lb_pop_target_list(p);
+ if (p->debug_info != nullptr) {
+ LLVMSetCurrentDebugLocation2(p->builder, lb_debug_end_location_from_ast(p, rs->body));
+ }
lb_emit_jump(p, loop);
lb_start_block(p, done);
}
@@ -1807,6 +1820,10 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
}
lbBlock *next_cond = nullptr;
+ if (p->debug_info != nullptr) {
+ LLVMSetCurrentDebugLocation2(p->builder, lb_debug_end_location_from_ast(p, clause));
+ }
+
for (Ast *expr : cc->list) {
expr = unparen_expr(expr);
@@ -3164,6 +3181,9 @@ gb_internal void lb_build_defer_stmt(lbProcedure *p, lbDefer const &d) {
if (d.kind == lbDefer_Node) {
lb_build_stmt(p, d.stmt);
} else if (d.kind == lbDefer_Proc) {
+ if (p->debug_info != nullptr && d.pos.line > 0) {
+ LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_token_pos(p, d.pos));
+ }
lb_emit_call(p, d.proc.deferred, d.proc.result_as_args);
}
}
@@ -3240,10 +3260,11 @@ gb_internal void lb_add_defer_node(lbProcedure *p, isize scope_index, Ast *stmt)
d->scope_index = scope_index;
d->context_stack_count = p->context_stack.count;
d->block = p->curr_block;
- d->stmt = stmt;
+ d->pos = ast_token(stmt).pos;
+ d->stmt = stmt;
}
-gb_internal void lb_add_defer_proc(lbProcedure *p, isize scope_index, lbValue deferred, Array<lbValue> const &result_as_args) {
+gb_internal void lb_add_defer_proc(lbProcedure *p, isize scope_index, lbValue deferred, Array<lbValue> const &result_as_args, TokenPos pos) {
Type *pt = base_type(p->type);
GB_ASSERT(pt->kind == Type_Proc);
if (pt->Proc.calling_convention == ProcCC_Odin) {
@@ -3254,6 +3275,7 @@ gb_internal void lb_add_defer_proc(lbProcedure *p, isize scope_index, lbValue de
d->kind = lbDefer_Proc;
d->scope_index = p->scope_index;
d->block = p->curr_block;
+ d->pos = pos;
d->context_stack_count = p->context_stack.count;
d->proc.deferred = deferred;
d->proc.result_as_args = result_as_args;
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 929239486..8a7bced59 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -648,6 +648,18 @@ gb_internal lbValue lb_emit_count_leading_zeros(lbProcedure *p, lbValue x, Type
return res;
}
+gb_internal lbValue lb_emit_unary_arith(lbProcedure *p, TokenKind op, lbValue x, Type *type);
+
+gb_internal lbValue lb_emit_count_trailing_ones(lbProcedure *p, lbValue x, Type *type) {
+ lbValue z = lb_emit_unary_arith(p, Token_Xor, x, type);
+ return lb_emit_count_trailing_zeros(p, z, type);
+}
+
+gb_internal lbValue lb_emit_count_leading_ones(lbProcedure *p, lbValue x, Type *type) {
+ lbValue z = lb_emit_unary_arith(p, Token_Xor, x, type);
+ return lb_emit_count_leading_zeros(p, z, type);
+}
+
gb_internal lbValue lb_emit_reverse_bits(lbProcedure *p, lbValue x, Type *type) {
@@ -2419,7 +2431,7 @@ gb_internal lbValue lb_handle_objc_block(lbProcedure *p, Ast *expr) {
Ast *proc_lit = unparen_expr(ce->args[capture_arg_count]);
if (proc_lit->kind == Ast_Ident) {
- proc_lit = proc_lit->Ident.entity->decl_info->proc_lit;
+ proc_lit = proc_lit->Ident.entity.load()->decl_info->proc_lit;
}
GB_ASSERT(proc_lit->kind == Ast_ProcLit);
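
lb_emit_count_trailing_ones/lb_emit_count_leading_ones lean on the identity that counting ones in x equals counting zeros in its bitwise complement (the unary Token_Xor emit). A tiny check of that identity with the C++20 <bit> helpers, purely for illustration:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    uint32_t x = 0xFF00F00Fu;
    // count_leading_ones(x) == count_leading_zeros(~x)
    std::printf("%d %d\n", std::countl_one(x), std::countl_zero(~x)); // 8 8
    // count_trailing_ones(x) == count_trailing_zeros(~x)
    std::printf("%d %d\n", std::countr_one(x), std::countr_zero(~x)); // 4 4
}
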
diff --git a/src/name_canonicalization.cpp b/src/name_canonicalization.cpp
index f1dccb182..d3faefed7 100644
--- a/src/name_canonicalization.cpp
+++ b/src/name_canonicalization.cpp
@@ -559,8 +559,8 @@ gb_internal void write_canonical_parent_prefix(TypeWriter *w, Entity *e) {
// no prefix
return;
}
- if (e->parent_proc_decl) {
- Entity *p = e->parent_proc_decl->entity;
+ if (e->parent_proc_decl.load(std::memory_order_relaxed)) {
+ Entity *p = e->parent_proc_decl.load(std::memory_order_relaxed)->entity;
write_canonical_parent_prefix(w, p);
type_writer_append(w, p->token.string.text, p->token.string.len);
if (is_type_polymorphic(p->type)) {
diff --git a/src/parser.hpp b/src/parser.hpp
index 39f56ffae..1026433d0 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -425,7 +425,7 @@ struct AstSplitArgs {
#define AST_KINDS \
AST_KIND(Ident, "identifier", struct { \
Token token; \
- Entity *entity; \
+ std::atomic<Entity *> entity; \
u32 hash; \
}) \
AST_KIND(Implicit, "implicit", Token) \
@@ -856,19 +856,19 @@ gb_global isize const ast_variant_sizes[] = {
};
struct AstCommonStuff {
- AstKind kind; // u16
- u8 state_flags;
- u8 viral_state_flags;
- i32 file_id;
- TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
+ AstKind kind; // u16
+ u8 state_flags;
+ std::atomic<u8> viral_state_flags;
+ i32 file_id;
+ TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
};
struct Ast {
- AstKind kind; // u16
- u8 state_flags;
- u8 viral_state_flags;
- i32 file_id;
- TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
+ AstKind kind; // u16
+ u8 state_flags;
+ std::atomic<u8> viral_state_flags;
+ i32 file_id;
+ TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
// IMPORTANT NOTE(bill): This must be at the end since the AST is allocated to be size of the variant
union {
diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp
index ca6483fd9..a0afbd269 100644
--- a/src/thread_pool.cpp
+++ b/src/thread_pool.cpp
@@ -1,5 +1,15 @@
// thread_pool.cpp
+// TODO(bill): make work on MSVC
+// #if defined(__SANITIZE_THREAD__) || (defined(__has_feature) && __has_feature(thread_sanitizer))
+// #include <sanitizer/tsan_interface.h>
+// #define TSAN_RELEASE(addr) __tsan_release(addr)
+// #define TSAN_ACQUIRE(addr) __tsan_acquire(addr)
+// #else
+#define TSAN_RELEASE(addr)
+#define TSAN_ACQUIRE(addr)
+// #endif
+
struct WorkerTask;
struct ThreadPool;
@@ -88,6 +98,7 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) {
}
cur_ring->buffer[bot % cur_ring->size] = task;
+ TSAN_RELEASE(cur_ring->buffer[bot % cur_ring->size]);
std::atomic_thread_fence(std::memory_order_release);
thread->queue.bottom.store(bot + 1, std::memory_order_relaxed);
@@ -108,6 +119,7 @@ GrabState thread_pool_queue_take(Thread *thread, WorkerTask *task) {
if (top <= bot) {
// Queue is not empty
+ TSAN_ACQUIRE(cur_ring->buffer[bot % cur_ring->size]);
*task = cur_ring->buffer[bot % cur_ring->size];
if (top == bot) {
// Only one entry left in queue
@@ -139,6 +151,8 @@ GrabState thread_pool_queue_steal(Thread *thread, WorkerTask *task) {
if (top < bot) {
// Queue is not empty
TaskRingBuffer *cur_ring = thread->queue.ring.load(std::memory_order_consume);
+
+ TSAN_ACQUIRE(&cur_ring->buffer[top % cur_ring->size]);
*task = cur_ring->buffer[top % cur_ring->size];
if (!thread->queue.top.compare_exchange_strong(top, top + 1, std::memory_order_seq_cst, std::memory_order_relaxed)) {
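
The TSAN_RELEASE/TSAN_ACQUIRE macros are placeholders for ThreadSanitizer happens-before annotations on the deque slot being published or consumed; the guard stays commented out, presumably because the one-line feature test is not portable (the MSVC TODO). One portable shape it could take, sketched here as an assumption (ODIN_HAS_TSAN is an invented name):

// Nested #if so __has_feature(...) is only evaluated where it actually exists.
#if defined(__has_feature)
    #if __has_feature(thread_sanitizer)
        #define ODIN_HAS_TSAN 1
    #endif
#elif defined(__SANITIZE_THREAD__) // GCC's -fsanitize=thread spelling
    #define ODIN_HAS_TSAN 1
#endif

#if defined(ODIN_HAS_TSAN)
    #include <sanitizer/tsan_interface.h>
    #define TSAN_RELEASE(addr) __tsan_release(addr) // addr is a pointer to the published slot
    #define TSAN_ACQUIRE(addr) __tsan_acquire(addr)
#else
    #define TSAN_RELEASE(addr)
    #define TSAN_ACQUIRE(addr)
#endif
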
diff --git a/src/threading.cpp b/src/threading.cpp
index 02e6de14b..d8ae321f5 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -375,8 +375,8 @@ gb_internal void semaphore_wait(Semaphore *s) {
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
ANNOTATE_LOCK_PRE(m, 1);
- i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
- if (v == Internal_Mutex_State_Unlocked) {
+ i32 expected = Internal_Mutex_State_Unlocked;
+ if (m->state().compare_exchange_strong(expected, Internal_Mutex_State_Locked, std::memory_order_acquire)) {
ANNOTATE_LOCK_POST(m);
return true;
}
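
The try_lock change swaps a blind exchange for a compare-exchange from Unlocked: the exchange form rewrites whatever the state word currently holds (such as a possible locked-with-waiters value) with plain Locked even when the attempt fails, whereas the CAS leaves the state untouched unless the lock is actually acquired. A stripped-down sketch of the pattern on a std::atomic<int> state word (not the compiler's BlockingMutex):

#include <atomic>

enum : int { Internal_Unlocked = 0, Internal_Locked = 1 };

struct TinyMutex {
    std::atomic<int> state{Internal_Unlocked};

    bool try_lock() {
        int expected = Internal_Unlocked;
        // Only transitions Unlocked -> Locked; a failed attempt does not
        // disturb whatever state another thread has set.
        return state.compare_exchange_strong(expected, Internal_Locked,
                                             std::memory_order_acquire);
    }
};
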
diff --git a/src/types.cpp b/src/types.cpp
index a7f2bfda2..0ecce1adc 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -170,21 +170,21 @@ struct TypeStruct {
};
struct TypeUnion {
- Slice<Type *> variants;
+ Slice<Type *> variants;
- Ast * node;
- Scope * scope;
+ Ast * node;
+ Scope * scope;
- i64 variant_block_size;
- i64 custom_align;
- Type * polymorphic_params; // Type_Tuple
- Type * polymorphic_parent;
- Wait_Signal polymorphic_wait_signal;
+ std::atomic<i64> variant_block_size;
+ i64 custom_align;
+ Type * polymorphic_params; // Type_Tuple
+ Type * polymorphic_parent;
+ Wait_Signal polymorphic_wait_signal;
- i16 tag_size;
- bool is_polymorphic;
- bool is_poly_specialized;
- UnionTypeKind kind;
+ std::atomic<i16> tag_size;
+ bool is_polymorphic;
+ bool is_poly_specialized;
+ UnionTypeKind kind;
};
struct TypeProc {
@@ -4308,17 +4308,17 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
gb_internal bool type_set_offsets(Type *t) {
t = base_type(t);
if (t->kind == Type_Struct) {
- if (t->Struct.are_offsets_being_processed.load()) {
- return true;
- }
+ // if (t->Struct.are_offsets_being_processed.load()) {
+ // return true;
+ // }
MUTEX_GUARD(&t->Struct.offset_mutex);
if (!t->Struct.are_offsets_set) {
t->Struct.are_offsets_being_processed.store(true);
t->Struct.offsets = type_set_offsets_of(t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union, t->Struct.custom_min_field_align, t->Struct.custom_max_field_align);
t->Struct.are_offsets_being_processed.store(false);
t->Struct.are_offsets_set = true;
- return true;
}
+ return true;
} else if (is_type_tuple(t)) {
MUTEX_GUARD(&t->Tuple.mutex);
if (!t->Tuple.are_offsets_set) {
@@ -4326,8 +4326,8 @@ gb_internal bool type_set_offsets(Type *t) {
t->Tuple.offsets = type_set_offsets_of(t->Tuple.variables, t->Tuple.is_packed, false, 1, 0);
t->Tuple.are_offsets_being_processed.store(false);
t->Tuple.are_offsets_set = true;
- return true;
}
+ return true;
} else {
GB_PANIC("Invalid type for setting offsets");
}
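
type_set_offsets now reports success regardless of which thread did the work: once the MUTEX_GUARD is held, the offsets are either computed here or were already set by an earlier caller, so the return true moves outside the inner if. A compressed sketch of that compute-once-under-mutex shape, with invented names:

#include <mutex>
#include <vector>

struct LayoutCache {
    std::mutex             offset_mutex;
    bool                   offsets_set = false;
    std::vector<long long> offsets;
};

static bool set_offsets(LayoutCache &c, const std::vector<long long> &sizes) {
    std::lock_guard<std::mutex> guard(c.offset_mutex);
    if (!c.offsets_set) {
        long long at = 0;
        for (long long s : sizes) { // naive packed layout, for illustration only
            c.offsets.push_back(at);
            at += s;
        }
        c.offsets_set = true;
    }
    return true; // success whether this call computed the offsets or another already had
}
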