From f85db012b80f0ddbd33833c0aa617c8d5a6892cb Mon Sep 17 00:00:00 2001 From: Laytan Laats Date: Fri, 21 Mar 2025 22:53:00 +0100 Subject: fix off by one temp cstring and put objc names on permanent allocator to be safe Fixes #4922 --- src/llvm_backend_general.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 71d368ec9..b7f70893f 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2812,15 +2812,11 @@ gb_internal lbAddr lb_add_global_generated_with_name(lbModule *m, Type *type, lb GB_ASSERT(type != nullptr); type = default_type(type); - u8 *str = cast(u8 *)gb_alloc_array(temporary_allocator(), u8, name.len); - memcpy(str, name.text, name.len); - str[name.len] = 0; - Scope *scope = nullptr; Entity *e = alloc_entity_variable(scope, make_token_ident(name), type); lbValue g = {}; g.type = alloc_type_pointer(type); - g.value = LLVMAddGlobal(m->mod, lb_type(m, type), cast(char const *)str); + g.value = LLVMAddGlobal(m->mod, lb_type(m, type), alloc_cstring(temporary_allocator(), name)); if (value.value != nullptr) { GB_ASSERT_MSG(LLVMIsConstant(value.value), LLVMPrintValueToString(value.value)); LLVMSetInitializer(g.value, value.value); -- cgit v1.2.3 From cd5bef4f610ec3ee32957cbca354ccbfef310921 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 3 Apr 2025 09:33:14 +0100 Subject: Rewrite objc SEL/Class register handling code --- src/llvm_backend.cpp | 45 ++++++++++++----- src/llvm_backend.hpp | 21 ++++---- src/llvm_backend_general.cpp | 2 + src/llvm_backend_utility.cpp | 118 ++++++++++++++++++------------------------- 4 files changed, 96 insertions(+), 90 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 6f3abc607..396b94f98 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1126,30 +1126,51 @@ gb_internal lbProcedure *lb_create_objc_names(lbModule *main_module) { return p; } -gb_internal void lb_finalize_objc_names(lbProcedure *p) { +gb_internal void lb_finalize_objc_names(lbGenerator *gen, lbProcedure *p) { if (p == nullptr) { return; } lbModule *m = p->module; + GB_ASSERT(m == &p->module->gen->default_module); TEMPORARY_ALLOCATOR_GUARD(); + StringSet handled = {}; + string_set_init(&handled); + defer (string_set_destroy(&handled)); + auto args = array_make(temporary_allocator(), 1); LLVMSetLinkage(p->value, LLVMInternalLinkage); lb_begin_procedure_body(p); - for (auto const &entry : m->objc_classes) { - String name = entry.key; - args[0] = lb_const_value(m, t_cstring, exact_value_string(name)); - lbValue ptr = lb_emit_runtime_call(p, "objc_lookUpClass", args); - lb_addr_store(p, entry.value.local_module_addr, ptr); + + auto register_thing = [&handled, &m, &args](lbProcedure *p, lbObjCGlobal const &g, char const *call) { + if (!string_set_update(&handled, g.name)) { + lbAddr addr = {}; + lbValue *found = string_map_get(&m->members, g.global_name); + if (found) { + addr = lb_addr(*found); + } else { + lbValue v = {}; + LLVMTypeRef t = lb_type(m, g.type); + v.value = LLVMAddGlobal(m->mod, t, g.global_name); + v.type = alloc_type_pointer(g.type); + addr = lb_addr(v); + LLVMSetInitializer(v.value, LLVMConstNull(t)); + } + + args[0] = lb_const_value(m, t_cstring, exact_value_string(g.name)); + lbValue ptr = lb_emit_runtime_call(p, call, args); + lb_addr_store(p, addr, ptr); + } + }; + + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) 
{ + register_thing(p, g, "objc_lookUpClass"); } - for (auto const &entry : m->objc_selectors) { - String name = entry.key; - args[0] = lb_const_value(m, t_cstring, exact_value_string(name)); - lbValue ptr = lb_emit_runtime_call(p, "sel_registerName", args); - lb_addr_store(p, entry.value.local_module_addr, ptr); + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_selectors, &g); /**/) { + register_thing(p, g, "sel_registerName"); } lb_end_procedure_body(p); @@ -2637,7 +2658,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { if (gen->objc_names) { TIME_SECTION("Finalize objc names"); - lb_finalize_objc_names(gen->objc_names); + lb_finalize_objc_names(gen, gen->objc_names); } if (build_context.ODIN_DEBUG) { diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index f9f96a906..3e01ada5f 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -143,11 +143,6 @@ struct lbPadType { LLVMTypeRef type; }; -struct lbObjcRef { - Entity * entity; - lbAddr local_module_addr; -}; - struct lbModule { LLVMModuleRef mod; LLVMContextRef ctx; @@ -199,11 +194,8 @@ struct lbModule { PtrMap debug_values; - RecursiveMutex objc_classes_mutex; - RecursiveMutex objc_selectors_mutex; - - StringMap objc_classes; - StringMap objc_selectors; + StringMap objc_classes; + StringMap objc_selectors; PtrMap map_cell_info_map; // address of runtime.Map_Info PtrMap map_info_map; // address of runtime.Map_Cell_Info @@ -222,6 +214,13 @@ struct lbEntityCorrection { char const *cname; }; +struct lbObjCGlobal { + lbModule *module; + gbString global_name; + String name; + Type * type; +}; + struct lbGenerator : LinkerData { CheckerInfo *info; @@ -239,6 +238,8 @@ struct lbGenerator : LinkerData { lbProcedure *objc_names; MPSCQueue entities_to_correct_linkage; + MPSCQueue objc_selectors; + MPSCQueue objc_classes; }; diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index b7f70893f..ce2c70661 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -171,6 +171,8 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { } mpsc_init(&gen->entities_to_correct_linkage, heap_allocator()); + mpsc_init(&gen->objc_selectors, heap_allocator()); + mpsc_init(&gen->objc_classes, heap_allocator()); return true; } diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp index efe196e58..0decbcdb8 100644 --- a/src/llvm_backend_utility.cpp +++ b/src/llvm_backend_utility.cpp @@ -2094,48 +2094,71 @@ gb_internal void lb_set_wasm_export_attributes(LLVMValueRef value, String export } - gb_internal lbAddr lb_handle_objc_find_or_register_selector(lbProcedure *p, String const &name) { - mutex_lock(&p->module->objc_selectors_mutex); - defer (mutex_unlock(&p->module->objc_selectors_mutex)); - - lbObjcRef *found = string_map_get(&p->module->objc_selectors, name); - + lbModule *m = p->module; + lbAddr *found = string_map_get(&m->objc_selectors, name); if (found) { - return found->local_module_addr; + return *found; } lbModule *default_module = &p->module->gen->default_module; - Entity *entity = {}; - if (default_module != p->module) { - found = string_map_get(&default_module->objc_selectors, name); - if (found) { - entity = found->entity; - } + + gbString global_name = gb_string_make(permanent_allocator(), "__$objc_SEL::"); + global_name = gb_string_append_length(global_name, name.text, name.len); + + LLVMTypeRef t = lb_type(m, t_objc_SEL); + lbValue g = {}; + g.value = LLVMAddGlobal(m->mod, t, global_name); + g.type = alloc_type_pointer(t_objc_SEL); + + if 
(default_module == m) { + LLVMSetInitializer(g.value, LLVMConstNull(t)); + lb_add_member(m, make_string_c(global_name), g); + } else { + LLVMSetLinkage(g.value, LLVMExternalLinkage); } - if (!entity) { - gbString global_name = gb_string_make(permanent_allocator(), "__$objc_SEL::"); - global_name = gb_string_append_length(global_name, name.text, name.len); + mpsc_enqueue(&m->gen->objc_selectors, lbObjCGlobal{m, global_name, name, t_objc_SEL}); + + lbAddr addr = lb_addr(g); - lbAddr default_addr = lb_add_global_generated_with_name(default_module, t_objc_SEL, {}, - make_string(cast(u8 const *)global_name, gb_string_length(global_name)), - &entity); + string_map_set(&m->objc_selectors, name, addr); - mutex_lock(&default_module->objc_selectors_mutex); - string_map_set(&default_module->objc_selectors, name, lbObjcRef{entity, default_addr}); - mutex_unlock(&default_module->objc_selectors_mutex); + return addr; +} + +gb_internal lbAddr lb_handle_objc_find_or_register_class(lbProcedure *p, String const &name) { + lbModule *m = p->module; + lbAddr *found = string_map_get(&m->objc_classes, name); + if (found) { + return *found; } - lbValue ptr = lb_find_value_from_entity(p->module, entity); - lbAddr local_addr = lb_addr(ptr); + lbModule *default_module = &p->module->gen->default_module; + + + gbString global_name = gb_string_make(permanent_allocator(), "__$objc_Class::"); + global_name = gb_string_append_length(global_name, name.text, name.len); - if (default_module != p->module) { - string_map_set(&p->module->objc_selectors, name, lbObjcRef{entity, local_addr}); + LLVMTypeRef t = lb_type(m, t_objc_Class); + lbValue g = {}; + g.value = LLVMAddGlobal(m->mod, t, global_name); + g.type = alloc_type_pointer(t_objc_Class); + + if (default_module == m) { + LLVMSetInitializer(g.value, LLVMConstNull(t)); + lb_add_member(m, make_string_c(global_name), g); + } else { + LLVMSetLinkage(g.value, LLVMExternalLinkage); } + mpsc_enqueue(&m->gen->objc_classes, lbObjCGlobal{m, global_name, name, t_objc_Class}); - return local_addr; + lbAddr addr = lb_addr(g); + + string_map_set(&m->objc_classes, name, addr); + + return addr; } gb_internal lbValue lb_handle_objc_find_selector(lbProcedure *p, Ast *expr) { @@ -2164,47 +2187,6 @@ gb_internal lbValue lb_handle_objc_register_selector(lbProcedure *p, Ast *expr) return lb_addr_load(p, dst); } -gb_internal lbAddr lb_handle_objc_find_or_register_class(lbProcedure *p, String const &name) { - mutex_lock(&p->module->objc_classes_mutex); - defer (mutex_unlock(&p->module->objc_classes_mutex)); - - lbObjcRef *found = string_map_get(&p->module->objc_classes, name); - if (found) { - return found->local_module_addr; - } - - lbModule *default_module = &p->module->gen->default_module; - Entity *entity = {}; - - if (default_module != p->module) { - found = string_map_get(&default_module->objc_classes, name); - if (found) { - entity = found->entity; - } - } - - if (!entity) { - gbString global_name = gb_string_make(permanent_allocator(), "__$objc_Class::"); - global_name = gb_string_append_length(global_name, name.text, name.len); - - lbAddr default_addr = lb_add_global_generated_with_name(default_module, t_objc_Class, {}, - make_string(cast(u8 const *)global_name, gb_string_length(global_name)), - &entity); - - mutex_lock(&default_module->objc_classes_mutex); - string_map_set(&default_module->objc_classes, name, lbObjcRef{entity, default_addr}); - mutex_unlock(&default_module->objc_classes_mutex); - } - - lbValue ptr = lb_find_value_from_entity(p->module, entity); - lbAddr local_addr = 
lb_addr(ptr); - - if (default_module != p->module) { - string_map_set(&p->module->objc_classes, name, lbObjcRef{entity, local_addr}); - } - - return local_addr; -} gb_internal lbValue lb_handle_objc_find_class(lbProcedure *p, Ast *expr) { ast_node(ce, CallExpr, expr); -- cgit v1.2.3 From fe040d1bbd22c78081ffc1d45b3462f40f8eb17a Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 8 Apr 2025 11:36:53 +0100 Subject: Propagate `@(link_section=)` to nested declarations --- src/llvm_backend.cpp | 19 ++++++--- src/llvm_backend.hpp | 14 ++++++- src/llvm_backend_const.cpp | 92 +++++++++++++++++++++++++------------------- src/llvm_backend_expr.cpp | 7 ++-- src/llvm_backend_general.cpp | 23 +++++++---- src/llvm_backend_opt.cpp | 4 +- src/llvm_backend_stmt.cpp | 3 +- 7 files changed, 99 insertions(+), 63 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 396b94f98..ee0ea7567 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -2056,8 +2056,8 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star if (testing_proc->pkg != nullptr) { pkg_name = testing_proc->pkg->name; } - lbValue v_pkg = lb_find_or_add_entity_string(m, pkg_name); - lbValue v_name = lb_find_or_add_entity_string(m, name); + lbValue v_pkg = lb_find_or_add_entity_string(m, pkg_name, false); + lbValue v_name = lb_find_or_add_entity_string(m, name, false); lbValue v_proc = lb_find_procedure_value_from_entity(m, testing_proc); indices[1] = LLVMConstInt(lb_type(m, t_int), testing_proc_index++, false); @@ -2565,12 +2565,16 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { if (!is_type_any(e->type) && !is_type_union(e->type)) { if (tav.mode != Addressing_Invalid) { if (tav.value.kind != ExactValue_Invalid) { - bool is_rodata = e->kind == Entity_Variable && e->Variable.is_rodata; + auto cc = LB_CONST_CONTEXT_DEFAULT; + cc.is_rodata = e->kind == Entity_Variable && e->Variable.is_rodata; + cc.allow_local = false; + cc.link_section = e->Variable.link_section; + ExactValue v = tav.value; - lbValue init = lb_const_value(m, tav.type, v, false, is_rodata); + lbValue init = lb_const_value(m, tav.type, v, cc); LLVMSetInitializer(g.value, init.value); var.is_initialized = true; - if (is_rodata) { + if (cc.is_rodata) { LLVMSetGlobalConstant(g.value, true); } } @@ -2585,6 +2589,11 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } else if (e->kind == Entity_Variable && e->Variable.is_rodata) { LLVMSetGlobalConstant(g.value, true); } + + if (e->flags & EntityFlag_Require) { + lb_append_to_compiler_used(m, g.value); + } + array_add(&global_variables, var); lb_add_entity(m, e, g); diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 3e01ada5f..6177fcf6e 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -415,9 +415,19 @@ gb_internal LLVMTypeRef llvm_get_element_type(LLVMTypeRef type); gb_internal lbBlock *lb_create_block(lbProcedure *p, char const *name, bool append=false); +struct lbConstContext { + bool allow_local; + bool is_rodata; + String link_section; +}; + +static lbConstContext const LB_CONST_CONTEXT_DEFAULT = {true, false, {}}; +static lbConstContext const LB_CONST_CONTEXT_DEFAULT_ALLOW_LOCAL = {true, false, {}}; +static lbConstContext const LB_CONST_CONTEXT_DEFAULT_NO_LOCAL = {false, false, {}}; + gb_internal lbValue lb_const_nil(lbModule *m, Type *type); gb_internal lbValue lb_const_undef(lbModule *m, Type *type); -gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool 
allow_local=true, bool is_rodata=false); +gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, lbConstContext cc = LB_CONST_CONTEXT_DEFAULT); gb_internal lbValue lb_const_bool(lbModule *m, Type *type, bool value); gb_internal lbValue lb_const_int(lbModule *m, Type *type, u64 value); @@ -514,7 +524,7 @@ gb_internal void lb_fill_slice(lbProcedure *p, lbAddr const &slice, lbValue base gb_internal lbValue lb_type_info(lbProcedure *p, Type *type); -gb_internal lbValue lb_find_or_add_entity_string(lbModule *m, String const &str); +gb_internal lbValue lb_find_or_add_entity_string(lbModule *m, String const &str, bool custom_link_section); gb_internal lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, Ast *expr, lbProcedure *parent = nullptr); gb_internal bool lb_is_const(lbValue value); diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp index 9401e4d55..dada2cff5 100644 --- a/src/llvm_backend_const.cpp +++ b/src/llvm_backend_const.cpp @@ -301,10 +301,10 @@ gb_internal lbValue lb_const_source_code_location_const(lbModule *m, String cons } LLVMValueRef fields[4] = {}; - fields[0]/*file*/ = lb_find_or_add_entity_string(m, file).value; + fields[0]/*file*/ = lb_find_or_add_entity_string(m, file, false).value; fields[1]/*line*/ = lb_const_int(m, t_i32, line).value; fields[2]/*column*/ = lb_const_int(m, t_i32, column).value; - fields[3]/*procedure*/ = lb_find_or_add_entity_string(m, procedure).value; + fields[3]/*procedure*/ = lb_find_or_add_entity_string(m, procedure, false).value; lbValue res = {}; res.value = llvm_const_named_struct(m, t_source_code_location, fields, gb_count_of(fields)); @@ -391,12 +391,12 @@ gb_internal lbValue lb_emit_source_code_location_as_global(lbProcedure *p, Ast * -gb_internal LLVMValueRef lb_build_constant_array_values(lbModule *m, Type *type, Type *elem_type, isize count, LLVMValueRef *values, bool allow_local, bool is_rodata) { - if (allow_local) { - is_rodata = false; +gb_internal LLVMValueRef lb_build_constant_array_values(lbModule *m, Type *type, Type *elem_type, isize count, LLVMValueRef *values, lbConstContext cc) { + if (cc.allow_local) { + cc.is_rodata = false; } - bool is_local = allow_local && m->curr_procedure != nullptr; + bool is_local = cc.allow_local && m->curr_procedure != nullptr; bool is_const = true; if (is_local) { for (isize i = 0; i < count; i++) { @@ -500,9 +500,9 @@ gb_internal bool lb_is_nested_possibly_constant(Type *ft, Selection const &sel, return lb_is_elem_const(elem, ft); } -gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_local, bool is_rodata) { - if (allow_local) { - is_rodata = false; +gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, lbConstContext cc) { + if (cc.allow_local) { + cc.is_rodata = false; } LLVMContextRef ctx = m->ctx; @@ -543,7 +543,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo return res; } - bool is_local = allow_local && m->curr_procedure != nullptr; + bool is_local = cc.allow_local && m->curr_procedure != nullptr; // GB_ASSERT_MSG(is_type_typed(type), "%s", type_to_string(type)); @@ -562,7 +562,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo count = gb_max(cast(isize)cl->max_count, count); Type *elem = base_type(type)->Slice.elem; Type *t = alloc_type_array(elem, count); - lbValue backing_array = lb_const_value(m, t, value, allow_local, is_rodata); + lbValue backing_array = lb_const_value(m, t, value, cc); 
LLVMValueRef array_data = nullptr; @@ -599,7 +599,10 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo array_data = LLVMAddGlobal(m->mod, lb_type(m, t), str); LLVMSetInitializer(array_data, backing_array.value); - if (is_rodata) { + if (cc.link_section.len > 0) { + LLVMSetSection(array_data, alloc_cstring(permanent_allocator(), cc.link_section)); + } + if (cc.is_rodata) { LLVMSetGlobalConstant(array_data, true); } @@ -650,7 +653,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } // NOTE(bill, 2021-10-07): Allow for array programming value constants Type *core_elem = core_array_type(type); - return lb_const_value(m, core_elem, value, allow_local, is_rodata); + return lb_const_value(m, core_elem, value, cc); } else if (is_type_u8_array(type) && value.kind == ExactValue_String) { GB_ASSERT(type->Array.count == value.value_string.len); LLVMValueRef data = LLVMConstStringInContext(ctx, @@ -668,7 +671,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo Type *elem = type->Array.elem; - lbValue single_elem = lb_const_value(m, elem, value, allow_local, is_rodata); + lbValue single_elem = lb_const_value(m, elem, value, cc); LLVMValueRef *elems = gb_alloc_array(permanent_allocator(), LLVMValueRef, cast(isize)count); for (i64 i = 0; i < count; i++) { @@ -686,7 +689,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo Type *elem = type->Matrix.elem; - lbValue single_elem = lb_const_value(m, elem, value, allow_local, is_rodata); + lbValue single_elem = lb_const_value(m, elem, value, cc); single_elem.value = llvm_const_cast(single_elem.value, lb_type(m, elem)); i64 total_elem_count = matrix_type_total_internal_elems(type); @@ -708,7 +711,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i64 count = type->SimdVector.count; Type *elem = type->SimdVector.elem; - lbValue single_elem = lb_const_value(m, elem, value, allow_local, is_rodata); + lbValue single_elem = lb_const_value(m, elem, value, cc); single_elem.value = llvm_const_cast(single_elem.value, lb_type(m, elem)); LLVMValueRef *elems = gb_alloc_array(permanent_allocator(), LLVMValueRef, count); @@ -729,9 +732,16 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo return res; case ExactValue_String: { - LLVMValueRef ptr = lb_find_or_add_entity_string_ptr(m, value.value_string); + bool custom_link_section = cc.link_section.len > 0; + + LLVMValueRef ptr = lb_find_or_add_entity_string_ptr(m, value.value_string, custom_link_section); lbValue res = {}; res.type = default_type(original_type); + + if (custom_link_section) { + LLVMSetSection(ptr, alloc_cstring(permanent_allocator(), cc.link_section)); + } + if (is_type_cstring(res.type)) { res.value = ptr; } else { @@ -837,7 +847,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo case ExactValue_Compound: if (is_type_slice(type)) { - return lb_const_value(m, type, value, allow_local, is_rodata); + return lb_const_value(m, type, value, cc); } else if (is_type_array(type)) { ast_node(cl, CompoundLit, value.value_compound); Type *elem_type = type->Array.elem; @@ -871,7 +881,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } if (lo == i) { TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; for 
(i64 k = lo; k < hi; k++) { values[value_index++] = val; } @@ -886,7 +896,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i64 index = exact_value_to_i64(index_tav.value); if (index == i) { TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; values[value_index++] = val; found = true; break; @@ -899,7 +909,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } } - res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->Array.count, values, allow_local, is_rodata); + res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->Array.count, values, cc); return res; } else { GB_ASSERT_MSG(elem_count == type->Array.count, "%td != %td", elem_count, type->Array.count); @@ -909,13 +919,13 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo for (isize i = 0; i < elem_count; i++) { TypeAndValue tav = cl->elems[i]->tav; GB_ASSERT(tav.mode != Addressing_Invalid); - values[i] = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + values[i] = lb_const_value(m, elem_type, tav.value, cc).value; } for (isize i = elem_count; i < type->Array.count; i++) { values[i] = LLVMConstNull(lb_type(m, elem_type)); } - res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->Array.count, values, allow_local, is_rodata); + res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->Array.count, values, cc); return res; } } else if (is_type_enumerated_array(type)) { @@ -955,7 +965,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } if (lo == i) { TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; for (i64 k = lo; k < hi; k++) { values[value_index++] = val; } @@ -970,7 +980,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i64 index = exact_value_to_i64(index_tav.value); if (index == i) { TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; values[value_index++] = val; found = true; break; @@ -983,7 +993,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } } - res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->EnumeratedArray.count, values, allow_local, is_rodata); + res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->EnumeratedArray.count, values, cc); return res; } else { GB_ASSERT_MSG(elem_count == type->EnumeratedArray.count, "%td != %td", elem_count, type->EnumeratedArray.count); @@ -993,13 +1003,13 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo for (isize i = 0; i < elem_count; i++) { TypeAndValue tav = cl->elems[i]->tav; GB_ASSERT(tav.mode != Addressing_Invalid); - values[i] = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + values[i] = lb_const_value(m, elem_type, tav.value, cc).value; } for (isize i = elem_count; i < type->EnumeratedArray.count; i++) { values[i] = LLVMConstNull(lb_type(m, elem_type)); } - res.value = lb_build_constant_array_values(m, type, 
elem_type, cast(isize)type->EnumeratedArray.count, values, allow_local, is_rodata); + res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)type->EnumeratedArray.count, values, cc); return res; } } else if (is_type_simd_vector(type)) { @@ -1038,7 +1048,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } if (lo == i) { TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; for (i64 k = lo; k < hi; k++) { values[value_index++] = val; } @@ -1053,7 +1063,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i64 index = exact_value_to_i64(index_tav.value); if (index == i) { TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; values[value_index++] = val; found = true; break; @@ -1072,7 +1082,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo for (isize i = 0; i < elem_count; i++) { TypeAndValue tav = cl->elems[i]->tav; GB_ASSERT(tav.mode != Addressing_Invalid); - values[i] = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + values[i] = lb_const_value(m, elem_type, tav.value, cc).value; } LLVMTypeRef et = lb_type(m, elem_type); @@ -1121,11 +1131,13 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i32 index = field_remapping[f->Variable.field_index]; if (elem_type_can_be_constant(f->type)) { if (sel.index.count == 1) { - values[index] = lb_const_value(m, f->type, tav.value, allow_local, is_rodata).value; + values[index] = lb_const_value(m, f->type, tav.value, cc).value; visited[index] = true; } else { if (!visited[index]) { - values[index] = lb_const_value(m, f->type, {}, /*allow_local*/false, is_rodata).value; + auto new_cc = cc; + new_cc.allow_local = false; + values[index] = lb_const_value(m, f->type, {}, new_cc).value; visited[index] = true; } @@ -1165,7 +1177,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } } if (is_constant) { - LLVMValueRef elem_value = lb_const_value(m, tav.type, tav.value, allow_local, is_rodata).value; + LLVMValueRef elem_value = lb_const_value(m, tav.type, tav.value, cc).value; if (LLVMIsConstant(elem_value) && LLVMIsConstant(values[index])) { values[index] = llvm_const_insert_value(m, values[index], elem_value, idx_list, idx_list_len); } else if (is_local) { @@ -1219,7 +1231,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i32 index = field_remapping[f->Variable.field_index]; if (elem_type_can_be_constant(f->type)) { - values[index] = lb_const_value(m, f->type, val, allow_local, is_rodata).value; + values[index] = lb_const_value(m, f->type, val, cc).value; visited[index] = true; } } @@ -1353,7 +1365,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; for (i64 k = lo; k < hi; k++) { i64 offset = matrix_row_major_index_to_offset(type, k); GB_ASSERT(values[offset] == nullptr); @@ -1365,7 +1377,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo i64 index = 
exact_value_to_i64(index_tav.value); GB_ASSERT(index < max_count); TypeAndValue tav = fv->value->tav; - LLVMValueRef val = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + LLVMValueRef val = lb_const_value(m, elem_type, tav.value, cc).value; i64 offset = matrix_row_major_index_to_offset(type, index); GB_ASSERT(values[offset] == nullptr); values[offset] = val; @@ -1378,7 +1390,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } } - res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)total_count, values, allow_local, is_rodata); + res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)total_count, values, cc); return res; } else { GB_ASSERT_MSG(elem_count == max_count, "%td != %td", elem_count, max_count); @@ -1389,7 +1401,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo GB_ASSERT(tav.mode != Addressing_Invalid); i64 offset = 0; offset = matrix_row_major_index_to_offset(type, i); - values[offset] = lb_const_value(m, elem_type, tav.value, allow_local, is_rodata).value; + values[offset] = lb_const_value(m, elem_type, tav.value, cc).value; } for (isize i = 0; i < total_count; i++) { if (values[i] == nullptr) { @@ -1397,7 +1409,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo } } - res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)total_count, values, allow_local, is_rodata); + res.value = lb_build_constant_array_values(m, type, elem_type, cast(isize)total_count, values, cc); return res; } } else { diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp index 0c82180ec..20b8d3cf8 100644 --- a/src/llvm_backend_expr.cpp +++ b/src/llvm_backend_expr.cpp @@ -2352,7 +2352,7 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) { Type *elem = base_array_type(dst); lbValue e = lb_emit_conv(p, value, elem); lbAddr v = lb_add_local_generated(p, t, false); - lbValue zero = lb_const_value(p->module, elem, exact_value_i64(0), true); + lbValue zero = lb_const_value(p->module, elem, exact_value_i64(0), LB_CONST_CONTEXT_DEFAULT_ALLOW_LOCAL); for (i64 j = 0; j < dst->Matrix.column_count; j++) { for (i64 i = 0; i < dst->Matrix.row_count; i++) { lbValue ptr = lb_emit_matrix_epi(p, v.addr, i, j); @@ -2389,7 +2389,7 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) { lb_emit_store(p, d, s); } else if (i == j) { lbValue d = lb_emit_matrix_epi(p, v.addr, i, j); - lbValue s = lb_const_value(p->module, dst->Matrix.elem, exact_value_i64(1), true); + lbValue s = lb_const_value(p->module, dst->Matrix.elem, exact_value_i64(1), LB_CONST_CONTEXT_DEFAULT_ALLOW_LOCAL); lb_emit_store(p, d, s); } } @@ -3493,8 +3493,7 @@ gb_internal lbValue lb_build_expr_internal(lbProcedure *p, Ast *expr) { if (tv.value.kind != ExactValue_Invalid) { // NOTE(bill): Short on constant values - bool allow_local = true; - return lb_const_value(p->module, type, tv.value, allow_local); + return lb_const_value(p->module, type, tv.value, LB_CONST_CONTEXT_DEFAULT_ALLOW_LOCAL); } else if (tv.mode == Addressing_Type) { // NOTE(bill, 2023-01-16): is this correct? 
I hope so at least return lb_typeid(m, tv.type); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index ce2c70661..421720c4c 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -356,7 +356,7 @@ gb_internal LLVMValueRef llvm_const_insert_value(lbModule *m, LLVMValueRef agg, gb_internal LLVMValueRef llvm_cstring(lbModule *m, String const &str) { - lbValue v = lb_find_or_add_entity_string(m, str); + lbValue v = lb_find_or_add_entity_string(m, str, false); unsigned indices[1] = {0}; return llvm_const_extract_value(m, v.value, indices, gb_count_of(indices)); } @@ -568,7 +568,7 @@ gb_internal void lb_set_file_line_col(lbProcedure *p, Array arr, TokenP col = obfuscate_i32(col); } - arr[0] = lb_find_or_add_entity_string(p->module, file); + arr[0] = lb_find_or_add_entity_string(p->module, file, false); arr[1] = lb_const_int(p->module, t_i32, line); arr[2] = lb_const_int(p->module, t_i32, col); } @@ -2543,9 +2543,14 @@ general_end:; -gb_internal LLVMValueRef lb_find_or_add_entity_string_ptr(lbModule *m, String const &str) { - StringHashKey key = string_hash_string(str); - LLVMValueRef *found = string_map_get(&m->const_strings, key); +gb_internal LLVMValueRef lb_find_or_add_entity_string_ptr(lbModule *m, String const &str, bool custom_link_section) { + StringHashKey key = {}; + LLVMValueRef *found = nullptr; + + if (!custom_link_section) { + key = string_hash_string(str); + found = string_map_get(&m->const_strings, key); + } if (found != nullptr) { return *found; } else { @@ -2568,15 +2573,17 @@ gb_internal LLVMValueRef lb_find_or_add_entity_string_ptr(lbModule *m, String co LLVMSetAlignment(global_data, 1); LLVMValueRef ptr = LLVMConstInBoundsGEP2(type, global_data, indices, 2); - string_map_set(&m->const_strings, key, ptr); + if (!custom_link_section) { + string_map_set(&m->const_strings, key, ptr); + } return ptr; } } -gb_internal lbValue lb_find_or_add_entity_string(lbModule *m, String const &str) { +gb_internal lbValue lb_find_or_add_entity_string(lbModule *m, String const &str, bool custom_link_section) { LLVMValueRef ptr = nullptr; if (str.len != 0) { - ptr = lb_find_or_add_entity_string_ptr(m, str); + ptr = lb_find_or_add_entity_string_ptr(m, str, custom_link_section); } else { ptr = LLVMConstNull(lb_type(m, t_u8_ptr)); } diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp index 7fe1359b4..8d5cfcb70 100644 --- a/src/llvm_backend_opt.cpp +++ b/src/llvm_backend_opt.cpp @@ -516,7 +516,7 @@ gb_internal void llvm_delete_function(LLVMValueRef func) { LLVMDeleteFunction(func); } -gb_internal void lb_append_to_compiler_used(lbModule *m, LLVMValueRef func) { +gb_internal void lb_append_to_compiler_used(lbModule *m, LLVMValueRef value) { LLVMValueRef global = LLVMGetNamedGlobal(m->mod, "llvm.compiler.used"); LLVMValueRef *constants; @@ -544,7 +544,7 @@ gb_internal void lb_append_to_compiler_used(lbModule *m, LLVMValueRef func) { LLVMTypeRef Int8PtrTy = LLVMPointerType(LLVMInt8TypeInContext(m->ctx), 0); LLVMTypeRef ATy = llvm_array_type(Int8PtrTy, operands); - constants[operands - 1] = LLVMConstBitCast(func, Int8PtrTy); + constants[operands - 1] = LLVMConstBitCast(value, Int8PtrTy); LLVMValueRef initializer = LLVMConstArray(Int8PtrTy, constants, operands); global = LLVMAddGlobal(m->mod, ATy, "llvm.compiler.used"); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 1f783b1be..a0b7e8340 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1963,8 +1963,7 @@ gb_internal void 
lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { GB_ASSERT(ast_value->tav.mode == Addressing_Constant || ast_value->tav.mode == Addressing_Invalid); - bool allow_local = false; - value = lb_const_value(p->module, ast_value->tav.type, ast_value->tav.value, allow_local); + value = lb_const_value(p->module, ast_value->tav.type, ast_value->tav.value, LB_CONST_CONTEXT_DEFAULT_NO_LOCAL); } Ast *ident = vd->names[i]; -- cgit v1.2.3 From a3de9c8de4e539905a85f3cc060f95529b402f18 Mon Sep 17 00:00:00 2001 From: Harold Brenes Date: Sat, 19 Apr 2025 08:04:23 -0400 Subject: Add initial support for Objective-C class implementation --- base/intrinsics/intrinsics.odin | 5 +- base/runtime/procs_darwin.odin | 25 +- src/check_builtin.cpp | 77 +++++- src/check_decl.cpp | 73 +++++ src/checker.cpp | 77 +++++- src/checker.hpp | 17 +- src/checker_builtin_procs.hpp | 2 + src/entity.cpp | 3 + src/llvm_backend.cpp | 591 +++++++++++++++++++++++++++++++++++++--- src/llvm_backend.hpp | 3 + src/llvm_backend_general.cpp | 2 + src/llvm_backend_proc.cpp | 1 + src/llvm_backend_utility.cpp | 74 ++++- src/types.cpp | 2 + 14 files changed, 900 insertions(+), 52 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/base/intrinsics/intrinsics.odin b/base/intrinsics/intrinsics.odin index bec452007..515e8d48a 100644 --- a/base/intrinsics/intrinsics.odin +++ b/base/intrinsics/intrinsics.odin @@ -353,15 +353,18 @@ x86_xgetbv :: proc(cx: u32) -> (eax, edx: u32) --- objc_object :: struct{} objc_selector :: struct{} objc_class :: struct{} +objc_ivar :: struct{} + objc_id :: ^objc_object objc_SEL :: ^objc_selector objc_Class :: ^objc_class +objc_Ivar :: ^objc_ivar objc_find_selector :: proc($name: string) -> objc_SEL --- objc_register_selector :: proc($name: string) -> objc_SEL --- objc_find_class :: proc($name: string) -> objc_Class --- objc_register_class :: proc($name: string) -> objc_Class --- - +ivar_get :: proc(self: ^$T, $U: typeid) -> ^U --- valgrind_client_request :: proc(default: uintptr, request: uintptr, a0, a1, a2, a3, a4: uintptr) -> uintptr --- diff --git a/base/runtime/procs_darwin.odin b/base/runtime/procs_darwin.odin index c3fc46af1..0aec57e80 100644 --- a/base/runtime/procs_darwin.odin +++ b/base/runtime/procs_darwin.odin @@ -2,21 +2,34 @@ package runtime @(priority_index=-1e6) -foreign import "system:Foundation.framework" +foreign import ObjC "system:objc" import "base:intrinsics" -objc_id :: ^intrinsics.objc_object +objc_id :: ^intrinsics.objc_object objc_Class :: ^intrinsics.objc_class -objc_SEL :: ^intrinsics.objc_selector +objc_SEL :: ^intrinsics.objc_selector +objc_Ivar :: ^intrinsics.objc_ivar +objc_BOOL :: bool -foreign Foundation { - objc_lookUpClass :: proc "c" (name: cstring) -> objc_Class --- + +objc_IMP :: proc "c" (object: objc_id, sel: objc_SEL, #c_vararg args: ..any) -> objc_id + +foreign ObjC { sel_registerName :: proc "c" (name: cstring) -> objc_SEL --- - objc_allocateClassPair :: proc "c" (superclass: objc_Class, name: cstring, extraBytes: uint) -> objc_Class --- objc_msgSend :: proc "c" (self: objc_id, op: objc_SEL, #c_vararg args: ..any) --- objc_msgSend_fpret :: proc "c" (self: objc_id, op: objc_SEL, #c_vararg args: ..any) -> f64 --- objc_msgSend_fp2ret :: proc "c" (self: objc_id, op: objc_SEL, #c_vararg args: ..any) -> complex128 --- objc_msgSend_stret :: proc "c" (self: objc_id, op: objc_SEL, #c_vararg args: ..any) --- + + objc_lookUpClass :: proc "c" (name: cstring) -> objc_Class --- + objc_allocateClassPair :: proc "c" (superclass: objc_Class, name: cstring, 
extraBytes: uint) -> objc_Class --- + objc_registerClassPair :: proc "c" (cls : objc_Class) --- + class_addMethod :: proc "c" (cls: objc_Class, name: objc_SEL, imp: objc_IMP, types: cstring) -> objc_BOOL --- + class_addIvar :: proc "c" (cls: objc_Class, name: cstring, size: uint, alignment: u8, types: cstring) -> objc_BOOL --- + class_getInstanceVariable :: proc "c" (cls : objc_Class, name: cstring) -> objc_Ivar --- + class_getInstanceSize :: proc "c" (cls : objc_Class) -> uint --- + ivar_getOffset :: proc "c" (v: objc_Ivar) -> uintptr --- } + diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index f66a8605c..c44d1c123 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -387,6 +387,80 @@ gb_internal bool check_builtin_objc_procedure(CheckerContext *c, Operand *operan try_to_add_package_dependency(c, "runtime", "objc_allocateClassPair"); return true; } break; + + case BuiltinProc_objc_ivar_get: + { + Type *self_type = nullptr; + Type *ivar_type = nullptr; + + Operand self {}; + check_expr_or_type(c, &self, ce->args[0]); + + if (!is_operand_value(self) || !check_is_assignable_to(c, &self, t_objc_id)) { + gbString e = expr_to_string(self.expr); + gbString t = type_to_string(self.type); + error(self.expr, "'%.*s' expected a type or value derived from intrinsics.objc_object, got '%s' of type %s", LIT(builtin_name), e, t); + gb_string_free(t); + gb_string_free(e); + return false; + } + else if (!is_type_pointer(self.type)) { + gbString e = expr_to_string(self.expr); + gbString t = type_to_string(self.type); + error(self.expr, "'%.*s' expected a pointer of a value derived from intrinsics.objc_object, got '%s' of type %s", LIT(builtin_name), e, t); + gb_string_free(t); + gb_string_free(e); + return false; + } + + self_type = type_deref(self.type); + + if (!(self_type->kind == Type_Named && + self_type->Named.type_name != nullptr && + self_type->Named.type_name->TypeName.objc_class_name != "")) { + gbString t = type_to_string(self_type); + error(self.expr, "'%.*s' expected a named type with the attribute @(obj_class=) , got type %s", LIT(builtin_name), t); + gb_string_free(t); + return false; + } + + if (self_type->Named.type_name->TypeName.objc_ivar == nullptr) { + gbString t = type_to_string(self_type); + error(self.expr, "'%.*s' requires that type %s have the attribute @(obj_ivar=).", LIT(builtin_name), t); + gb_string_free(t); + return false; + } + + Operand ivar {}; + check_expr_or_type(c, &ivar, ce->args[1]); + if (ivar.mode == Addressing_Type) { + ivar_type = ivar.type; + } else { + return false; + } + + if (self_type->Named.type_name->TypeName.objc_ivar != ivar_type) { + gbString name_self = type_to_string(self_type); + gbString name_expected = type_to_string(self_type->Named.type_name->TypeName.objc_ivar); + gbString name_given = type_to_string(ivar_type); + error(self.expr, "'%.*s' ivar type %s does not match @obj_ivar type %s on Objective-C class %s.", + LIT(builtin_name), name_given, name_expected, name_self); + gb_string_free(name_self); + gb_string_free(name_expected); + gb_string_free(name_given); + return false; + } + + if (type_hint != nullptr && type_hint->kind == Type_Pointer && type_hint->Pointer.elem == ivar_type) { + operand->type = type_hint; + } else { + operand->type = alloc_type_pointer(ivar_type); + } + + operand->mode = Addressing_Value; + + return true; + } break; } } @@ -2132,7 +2206,8 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As case BuiltinProc_objc_find_selector: case BuiltinProc_objc_find_class: case 
BuiltinProc_objc_register_selector: - case BuiltinProc_objc_register_class: + case BuiltinProc_objc_register_class: + case BuiltinProc_objc_ivar_get: return check_builtin_objc_procedure(c, operand, call, id, type_hint); case BuiltinProc___entry_point: diff --git a/src/check_decl.cpp b/src/check_decl.cpp index ba6445ea4..dffe0b48e 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -526,6 +526,54 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, check_decl_attributes(ctx, decl->attributes, type_decl_attribute, &ac); if (e->kind == Entity_TypeName && ac.objc_class != "") { e->TypeName.objc_class_name = ac.objc_class; + e->TypeName.objc_superclass = ac.objc_superclass; + e->TypeName.objc_ivar = ac.objc_ivar; + + if (ac.objc_is_implementation) { + e->TypeName.objc_is_implementation = true; + mpsc_enqueue(&ctx->info->objc_class_implementations, e); // TODO(harold): Don't need this for anything. Remove. + + GB_ASSERT(e->TypeName.objc_ivar == nullptr || e->TypeName.objc_ivar->kind == Type_Named); + + // Ensure superclass hierarchy are all Objective-C classes and does not cycle + Type *super = ac.objc_superclass; + if (super != nullptr) { + TypeSet super_set{}; + type_set_init(&super_set, 8); + defer (type_set_destroy(&super_set)); + + type_set_update(&super_set, e->type); + + for (;;) { + if (type_set_update(&super_set, super)) { + error(e->token, "@(objc_superclass) Superclass hierarchy cycle encountered"); + break; + } + + if (super->kind != Type_Named) { + error(e->token, "@(objc_superclass) References type must be a named struct."); + break; + } + + Type* named_type = base_type(super->Named.type_name->type); + if (!is_type_objc_object(named_type)) { + error(e->token, "@(objc_superclass) Superclass must be an Objective-C class."); + break; + } + + super = super->Named.type_name->TypeName.objc_superclass; + if (super == nullptr) { + break; + } + + // TODO(harold): Is this the right way to do this??? 
The referenced entity must be already resolved + // so that we can access its objc_superclass attribute + check_single_global_entity(ctx->checker, super->Named.type_name, super->Named.type_name->decl_info); + } + } + } else if (e->TypeName.objc_superclass != nullptr) { + error(e->token, "@(objc_superclass) can only be applied when the obj_implement attribute is also applied"); + } if (type_size_of(e->type) > 0) { error(e->token, "@(objc_class) marked type must be of zero size"); @@ -942,6 +990,31 @@ gb_internal void check_objc_methods(CheckerContext *ctx, Entity *e, AttributeCon if (tn->scope != e->scope) { error(e->token, "@(objc_name) attribute may only be applied to procedures and types within the same scope"); } else { + + if (ac.objc_is_implementation) { + GB_ASSERT(e->kind == Entity_Procedure); + + CheckerInfo *info = ctx->info; + mutex_lock(&info->objc_method_mutex); + defer (mutex_unlock(&info->objc_method_mutex)); + + auto method = ObjcMethodData{ ac, e }; + + if (ac.objc_selector == "") { + method.ac.objc_selector = ac.objc_name; + } + + Array* method_list = map_get(&info->objc_method_implementations, t); + if (method_list) { + array_add(method_list, method); + } else { + auto list = array_make(permanent_allocator(), 1, 8); + list[0] = method; + + map_set(&info->objc_method_implementations, t, list); + } + } + mutex_lock(&global_type_name_objc_metadata_mutex); defer (mutex_unlock(&global_type_name_objc_metadata_mutex)); diff --git a/src/checker.cpp b/src/checker.cpp index 5a5ec9706..29ef7d2b3 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1351,10 +1351,12 @@ gb_internal void init_universal(void) { t_objc_object = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_object"), alloc_type_struct_complete()); t_objc_selector = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_selector"), alloc_type_struct_complete()); t_objc_class = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_class"), alloc_type_struct_complete()); + t_objc_ivar = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_ivar"), alloc_type_struct_complete()); t_objc_id = alloc_type_pointer(t_objc_object); t_objc_SEL = alloc_type_pointer(t_objc_selector); t_objc_Class = alloc_type_pointer(t_objc_class); + t_objc_Ivar = alloc_type_pointer(t_objc_ivar); } } @@ -1387,6 +1389,9 @@ gb_internal void init_checker_info(CheckerInfo *i) { array_init(&i->defineables, a); map_init(&i->objc_msgSend_types); + mpsc_init(&i->objc_class_implementations, a); + map_init(&i->objc_method_implementations); + string_map_init(&i->load_file_cache); array_init(&i->all_procedures, heap_allocator()); @@ -3345,6 +3350,11 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) { ac->test = true; return true; } else if (name == "export") { + if (ac->objc_is_implementation) { + error(value, "Setting @(export) explicitly is not allowed when @(objc_implement) is set. It is exported implicitly."); + return false; + } + ExactValue ev = check_decl_attribute_value(c, value); if (ev.kind == ExactValue_Invalid) { ac->is_export = true; @@ -3356,6 +3366,12 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) { } return true; } else if (name == "linkage") { + + if (ac->objc_is_implementation) { + error(value, "Explicit linkage not allowed when @(objc_implement) is set. 
It is set implicitly"); + return false; + } + ExactValue ev = check_decl_attribute_value(c, value); if (ev.kind != ExactValue_String) { error(value, "Expected either a string 'linkage'"); @@ -3662,6 +3678,35 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) { } } return true; + } else if (name == "objc_implement") { + ExactValue ev = check_decl_attribute_value(c, value); + if (ev.kind == ExactValue_Bool) { + ac->objc_is_implementation = ev.value_bool; + } else if (ev.kind == ExactValue_Invalid) { + ac->objc_is_implementation = true; + } else { + error(elem, "Expected a boolean value, or no value, for '%.*s'", LIT(name)); + } + + // This implies exported, strongly linked + if (ac->objc_is_implementation) { + ac->is_export = true; + ac->linkage = str_lit("strong"); + } + + return true; + } else if (name == "objc_selector") { + ExactValue ev = check_decl_attribute_value(c, value); + if (ev.kind == ExactValue_String) { + if (string_is_valid_identifier(ev.value_string)) { + ac->objc_selector = ev.value_string; + } else { + error(elem, "Invalid identifier for '%.*s', got '%.*s'", LIT(name), LIT(ev.value_string)); + } + } else { + error(elem, "Expected a string value for '%.*s'", LIT(name)); + } + return true; } else if (name == "require_target_feature") { ExactValue ev = check_decl_attribute_value(c, value); if (ev.kind == ExactValue_String) { @@ -3901,8 +3946,36 @@ gb_internal DECL_ATTRIBUTE_PROC(type_decl_attribute) { ac->objc_class = ev.value_string; } return true; - } - return false; + } else if (name == "objc_implement") { + ExactValue ev = check_decl_attribute_value(c, value); + if (ev.kind == ExactValue_Bool) { + ac->objc_is_implementation = ev.value_bool; + } else if (ev.kind == ExactValue_Invalid) { + ac->objc_is_implementation = true; + } else { + error(elem, "Expected a boolean value, or no value, for '%.*s'", LIT(name)); + } + return true; + } else if (name == "objc_superclass") { + Type *objc_superclass = check_type(c, value); + + if (objc_superclass != nullptr) { + ac->objc_superclass = objc_superclass; + } else { + error(value, "'%.*s' expected a named type", LIT(name)); + } + return true; + } else if (name == "objc_ivar") { + Type *objc_ivar = check_type(c, value); + + if (objc_ivar != nullptr) { + ac->objc_ivar = objc_ivar; + } else { + error(value, "'%.*s' expected a named type", LIT(name)); + } + return true; + } + return false; } diff --git a/src/checker.hpp b/src/checker.hpp index d3b2d7d89..9910ed17b 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -148,8 +148,12 @@ struct AttributeContext { String objc_class; String objc_name; - bool objc_is_class_method; + String objc_selector; Type * objc_type; + Type * objc_superclass; + Type * objc_ivar; + bool objc_is_class_method : 1; + bool objc_is_implementation : 1; // This struct or proc provides a class/method implementation, not a binding to an existing type. 
String require_target_feature; // required by the target micro-architecture String enable_target_feature; // will be enabled for the procedure only @@ -365,6 +369,11 @@ struct ObjcMsgData { Type *proc_type; }; +struct ObjcMethodData { + AttributeContext ac; + Entity *proc_entity; +}; + enum LoadFileTier { LoadFileTier_Invalid, LoadFileTier_Exists, @@ -479,6 +488,12 @@ struct CheckerInfo { BlockingMutex objc_types_mutex; PtrMap objc_msgSend_types; + MPSCQueue objc_class_implementations; + + BlockingMutex objc_method_mutex; + PtrMap> objc_method_implementations; + + BlockingMutex load_file_mutex; StringMap load_file_cache; diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp index 40dde8240..cb2ce3915 100644 --- a/src/checker_builtin_procs.hpp +++ b/src/checker_builtin_procs.hpp @@ -331,6 +331,7 @@ BuiltinProc__type_end, BuiltinProc_objc_find_class, BuiltinProc_objc_register_selector, BuiltinProc_objc_register_class, + BuiltinProc_objc_ivar_get, BuiltinProc_constant_utf16_cstring, @@ -673,6 +674,7 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = { {STR_LIT("objc_find_class"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics}, {STR_LIT("objc_register_selector"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, {STR_LIT("objc_register_class"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, + {STR_LIT("ivar_get"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, {STR_LIT("constant_utf16_cstring"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics}, diff --git a/src/entity.cpp b/src/entity.cpp index b2148aa7b..9a5996e3d 100644 --- a/src/entity.cpp +++ b/src/entity.cpp @@ -235,6 +235,9 @@ struct Entity { Type * type_parameter_specialization; String ir_mangled_name; bool is_type_alias; + bool objc_is_implementation; + Type* objc_superclass; + Type* objc_ivar; String objc_class_name; TypeNameObjCMetadata *objc_metadata; } TypeName; diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 083a1d90e..23ad81847 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1173,6 +1173,332 @@ gb_internal lbProcedure *lb_create_objc_names(lbModule *main_module) { return p; } +// TODO(harold): Move this out of here and into a more suitable place. +// TODO(harold): Should not take an allocator, but always use temp, as we return string literals as well. +String lb_get_objc_type_encoding(Type *t, gbAllocator allocator, isize pointer_depth = 0) { + // NOTE(harold): See https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html#//apple_ref/doc/uid/TP40008048-CH100 + + // NOTE(harold): Darwin targets are always 64-bit. Should we drop this and assume "q" always? + #define INT_SIZE_ENCODING (build_context.metrics.ptr_size == 4 ? 
"i" : "q") + switch (t->kind) { + case Type_Basic: { + switch (t->Basic.kind) { + case Basic_Invalid: + return str_lit("?"); + + case Basic_llvm_bool: + case Basic_bool: + case Basic_b8: + return str_lit("B"); + + case Basic_b16: + return str_lit("C"); + case Basic_b32: + return str_lit("I"); + case Basic_b64: + return str_lit("q"); + case Basic_i8: + return str_lit("c"); + case Basic_u8: + return str_lit("C"); + case Basic_i16: + case Basic_i16le: + case Basic_i16be: + return str_lit("s"); + case Basic_u16: + case Basic_u16le: + case Basic_u16be: + return str_lit("S"); + case Basic_i32: + case Basic_i32le: + case Basic_i32be: + return str_lit("i"); + case Basic_u32le: + case Basic_u32: + case Basic_u32be: + return str_lit("I"); + case Basic_i64: + case Basic_i64le: + case Basic_i64be: + return str_lit("q"); + case Basic_u64: + case Basic_u64le: + case Basic_u64be: + return str_lit("Q"); + case Basic_i128: + case Basic_i128le: + case Basic_i128be: + return str_lit("t"); + case Basic_u128: + case Basic_u128le: + case Basic_u128be: + return str_lit("T"); + case Basic_rune: + return str_lit("I"); + case Basic_f16: + case Basic_f16le: + case Basic_f16be: + return str_lit("s"); // @harold: Closest we've got? + case Basic_f32: + case Basic_f32le: + case Basic_f32be: + return str_lit("f"); + case Basic_f64: + case Basic_f64le: + case Basic_f64be: + return str_lit("d"); + + // TODO(harold) These: + case Basic_complex32: + case Basic_complex64: + case Basic_complex128: + case Basic_quaternion64: + case Basic_quaternion128: + case Basic_quaternion256: + return str_lit("?"); + + case Basic_int: + return str_lit(INT_SIZE_ENCODING); + case Basic_uint: + return build_context.metrics.ptr_size == 4 ? str_lit("I") : str_lit("Q"); + case Basic_uintptr: + case Basic_rawptr: + return str_lit("^v"); + + case Basic_string: + return build_context.metrics.ptr_size == 4 ? str_lit("{string=*i}") : str_lit("{string=*q}"); + + case Basic_cstring: return str_lit("*"); + case Basic_any: return str_lit("{any=^v^v"); // rawptr + ^Type_Info + + case Basic_typeid: + GB_ASSERT(t->Basic.size == 8); + return str_lit("q"); + + // Untyped types + case Basic_UntypedBool: + case Basic_UntypedInteger: + case Basic_UntypedFloat: + case Basic_UntypedComplex: + case Basic_UntypedQuaternion: + case Basic_UntypedString: + case Basic_UntypedRune: + case Basic_UntypedNil: + case Basic_UntypedUninit: + GB_PANIC("Untyped types cannot be @encoded()"); + return str_lit("?"); + } + break; + } + + case Type_Named: + case Type_Struct: + case Type_Union: { + Type* base = t; + if (base->kind == Type_Named) { + base = base_type(base); + if(base->kind != Type_Struct && base->kind != Type_Union) { + return lb_get_objc_type_encoding(base, allocator, pointer_depth); + } + } + + const bool is_union = base->kind == Type_Union; + if (!is_union) { + // Check for objc_SEL + if (internal_check_is_assignable_to(base, t_objc_SEL)) { + return str_lit(":"); + } + + // Check for objc_Class + if (internal_check_is_assignable_to(base, t_objc_SEL)) { + return str_lit("#"); + } + + // Treat struct as an Objective-C Class? + if (has_type_got_objc_class_attribute(base) && pointer_depth == 0) { + return str_lit("#"); + } + } + + if (is_type_objc_object(base)) { + return str_lit("@"); + } + + + gbString s = gb_string_make_reserve(allocator, 16); + s = gb_string_append_length(s, is_union ? 
"(" :"{", 1); + if (t->kind == Type_Named) { + s = gb_string_append_length(s, t->Named.name.text, t->Named.name.len); + } + + // Write fields + if (pointer_depth < 2) { + s = gb_string_append_length(s, "=", 1); + + if (!is_union) { + for( auto& f : t->Struct.fields ) { + String field_type = lb_get_objc_type_encoding(f->type, allocator, pointer_depth); + s = gb_string_append_length(s, field_type.text, field_type.len); + } + } else { + // #TODO(harold): Encode fields + } + } + + s = gb_string_append_length(s, is_union ? ")" :"}", 1); + + return make_string_c(s); + } + + case Type_Generic: + GB_PANIC("Generic types cannot be @encoded()"); + return str_lit("?"); + + case Type_Pointer: { + String pointee = lb_get_objc_type_encoding(t->Pointer.elem, allocator, pointer_depth +1); + // Special case for Objective-C Objects + if (pointer_depth == 0 && pointee == "@") { + return pointee; + } + + return concatenate_strings(allocator, str_lit("^"), pointee); + } + + case Type_MultiPointer: + return concatenate_strings(allocator, str_lit("^"), lb_get_objc_type_encoding(t->Pointer.elem, allocator, pointer_depth +1)); + + case Type_Array: { + String type_str = lb_get_objc_type_encoding(t->Array.elem, allocator, pointer_depth); + + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "[%lld%s]", t->Array.count, type_str.text); + return make_string_c(s); + } + + case Type_EnumeratedArray: { + String type_str = lb_get_objc_type_encoding(t->EnumeratedArray.elem, allocator, pointer_depth); + + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "[%lld%s]", t->EnumeratedArray.count, type_str.text); + return make_string_c(s); + } + + case Type_Slice: { + String type_str = lb_get_objc_type_encoding(t->Slice.elem, allocator, pointer_depth); + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "{slice=^%s%s}", type_str, INT_SIZE_ENCODING); + return make_string_c(s); + } + + case Type_DynamicArray: { + String type_str = lb_get_objc_type_encoding(t->DynamicArray.elem, allocator, pointer_depth); + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "{dynamic=^%s%s%sAllocator={?^v}}", type_str, INT_SIZE_ENCODING, INT_SIZE_ENCODING); + return make_string_c(s); + } + + case Type_Map: + return str_lit("{^v^v{Allocator=?^v}}"); + case Type_Enum: + return lb_get_objc_type_encoding(t->Enum.base_type, allocator, pointer_depth); + case Type_Tuple: + // NOTE(harold): Is this allowed here? 
+ return str_lit("?"); + case Type_Proc: + return str_lit("?"); + case Type_BitSet: + return lb_get_objc_type_encoding(t->BitSet.underlying, allocator, pointer_depth); + case Type_SimdVector: + break; + case Type_Matrix: + break; + case Type_BitField: + return lb_get_objc_type_encoding(t->BitField.backing_type, allocator, pointer_depth); + case Type_SoaPointer: { + gbString s = gb_string_make_reserve(allocator, 8); + s = gb_string_append_fmt(s, "{=^v%s}", INT_SIZE_ENCODING); + return make_string_c(s); + } + + } // End switch t->kind + #undef INT_SIZE_ENCODING + + GB_PANIC("Unreachable"); +} + +struct lbObjCGlobalClass { + lbObjCGlobal g; + lbValue class_value; // Local registered class value +}; + +gb_internal void lb_register_objc_thing( + StringSet &handled, + lbModule *m, + Array &args, + Array &class_impls, + StringMap &class_map, + lbProcedure *p, + lbObjCGlobal const &g, + char const *call +) { + if (string_set_update(&handled, g.name)) { + return; + } + + lbAddr addr = {}; + lbValue *found = string_map_get(&m->members, g.global_name); + if (found) { + addr = lb_addr(*found); + } else { + lbValue v = {}; + LLVMTypeRef t = lb_type(m, g.type); + v.value = LLVMAddGlobal(m->mod, t, g.global_name); + v.type = alloc_type_pointer(g.type); + addr = lb_addr(v); + LLVMSetInitializer(v.value, LLVMConstNull(t)); + } + + lbValue class_ptr{}; + lbValue class_name = lb_const_value(m, t_cstring, exact_value_string(g.name)); + + // If this class requires an implementation, save it for registration below. + if (g.class_impl_type != nullptr) { + + // Make sure the superclass has been initialized before us + lbValue superclass_value{}; + + auto& tn = g.class_impl_type->Named.type_name->TypeName; + Type *superclass = tn.objc_superclass; + if (superclass != nullptr) { + auto& superclass_global = string_map_must_get(&class_map, superclass->Named.type_name->TypeName.objc_class_name); + lb_register_objc_thing(handled, m, args, class_impls, class_map, p, superclass_global.g, call); + GB_ASSERT(superclass_global.class_value.value); + + superclass_value = superclass_global.class_value; + } + + args.count = 3; + args[0] = superclass == nullptr ? 
lb_const_nil(m, t_objc_Class) : superclass_value; + args[1] = class_name; + args[2] = lb_const_int(m, t_uint, 0); + class_ptr = lb_emit_runtime_call(p, "objc_allocateClassPair", args); + + array_add(&class_impls, lbObjCGlobalClass{g, class_ptr}); + } + else { + args.count = 1; + args[0] = class_name; + class_ptr = lb_emit_runtime_call(p, call, args); + } + + lb_addr_store(p, addr, class_ptr); + + lbObjCGlobalClass* class_global = string_map_get(&class_map, g.name); + if (class_global != nullptr) { + class_global->class_value = class_ptr; + } +} + gb_internal void lb_finalize_objc_names(lbGenerator *gen, lbProcedure *p) { if (p == nullptr) { return; @@ -1186,39 +1512,238 @@ gb_internal void lb_finalize_objc_names(lbGenerator *gen, lbProcedure *p) { string_set_init(&handled); defer (string_set_destroy(&handled)); - auto args = array_make(temporary_allocator(), 1); - - LLVMSetLinkage(p->value, LLVMInternalLinkage); - lb_begin_procedure_body(p); - - auto register_thing = [&handled, &m, &args](lbProcedure *p, lbObjCGlobal const &g, char const *call) { - if (!string_set_update(&handled, g.name)) { - lbAddr addr = {}; - lbValue *found = string_map_get(&m->members, g.global_name); - if (found) { - addr = lb_addr(*found); - } else { - lbValue v = {}; - LLVMTypeRef t = lb_type(m, g.type); - v.value = LLVMAddGlobal(m->mod, t, g.global_name); - v.type = alloc_type_pointer(g.type); - addr = lb_addr(v); - LLVMSetInitializer(v.value, LLVMConstNull(t)); - } - - args[0] = lb_const_value(m, t_cstring, exact_value_string(g.name)); - lbValue ptr = lb_emit_runtime_call(p, call, args); - lb_addr_store(p, addr, ptr); - } - }; - - for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { - register_thing(p, g, "objc_lookUpClass"); - } - - for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_selectors, &g); /**/) { - register_thing(p, g, "sel_registerName"); - } + auto args = array_make(temporary_allocator(), 3, 8); + auto class_impls = array_make(temporary_allocator(), 0, 16); + + // Ensure classes that have been implicitly referenced through + // the objc_superclass attribute have a global variable available for them. + TypeSet class_set{}; + type_set_init(&class_set, gen->objc_classes.count+16); + defer (type_set_destroy(&class_set)); + + auto referenced_classes = array_make(temporary_allocator()); + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { + array_add( &referenced_classes, g); + + Type *cls = g.class_impl_type; + while (cls) { + if (type_set_update(&class_set, cls)) { + break; + } + GB_ASSERT(cls->kind == Type_Named); + + cls = cls->Named.type_name->TypeName.objc_superclass; + } + } + + for (auto pair : class_set) { + auto& tn = pair.type->Named.type_name->TypeName; + Type *class_impl = !tn.objc_is_implementation ? nullptr : pair.type; + lb_handle_objc_find_or_register_class(p, tn.objc_class_name, class_impl); + } + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { + array_add( &referenced_classes, g ); + } + + // Add all class globals to a map so that we can look them up dynamically + // in order to resolve out-of-order because classes that are being implemented + // need their superclasses to have been registered before them. 
+ StringMap global_class_map{}; + string_map_init(&global_class_map, (usize)gen->objc_classes.count); + defer (string_map_destroy(&global_class_map)); + + for (lbObjCGlobal g :referenced_classes) { + string_map_set(&global_class_map, g.name, lbObjCGlobalClass{g}); + } + + LLVMSetLinkage(p->value, LLVMInternalLinkage); + lb_begin_procedure_body(p); + + // Register class globals, gathering classes that must be implemented + for (auto& kv : global_class_map) { + lb_register_objc_thing(handled, m, args, class_impls, global_class_map, p, kv.value.g, "objc_lookUpClass"); + } + + // Prefetch selectors for implemented methods so that they can also be registered. + for (const auto& cd : class_impls) { + auto& g = cd.g; + Type *class_type = g.class_impl_type; + + Array* methods = map_get(&m->info->objc_method_implementations, class_type); + if (!methods) { + continue; + } + + for (const ObjcMethodData& md : *methods) { + lb_handle_objc_find_or_register_selector(p, md.ac.objc_selector); + } + } + + // Now we can register all referenced selectors + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_selectors, &g); /**/) { + lb_register_objc_thing(handled, m, args, class_impls, global_class_map, p, g, "sel_registerName"); + } + + + // Emit method wrapper implementations and registration + auto wrapper_args = array_make(temporary_allocator(), 2, 8); + + for (const auto& cd : class_impls) { + auto& g = cd.g; + Type *class_type = g.class_impl_type; + + Array* methods = map_get(&m->info->objc_method_implementations, class_type); + if (!methods) { + continue; + } + + Type *class_ptr_type = alloc_type_pointer(class_type); + lbValue class_value = cd.class_value; + + for (const ObjcMethodData& md : *methods) { + GB_ASSERT( md.proc_entity->kind == Entity_Procedure); + Type *method_type = md.proc_entity->type; + + String proc_name = make_string_c("__$objc_method::"); + proc_name = concatenate_strings(temporary_allocator(), proc_name, g.name); + proc_name = concatenate_strings(temporary_allocator(), proc_name, str_lit("::")); + proc_name = concatenate_strings( permanent_allocator(), proc_name, md.ac.objc_name); + + wrapper_args.count = 2; + wrapper_args[0] = md.ac.objc_is_class_method ? t_objc_Class : class_ptr_type; + wrapper_args[1] = t_objc_SEL; + + auto method_param_count = (isize)method_type->Proc.param_count; + i32 method_param_offset = 0; + + // TODO(harold): Need to make sure (at checker stage) that the non-class method has the self parameter already. + // (Maybe this is already accounted for?.) 
+ if (!md.ac.objc_is_class_method) { + GB_ASSERT(method_param_count >= 1); + method_param_count -= 1; + method_param_offset = 1; + } + + for (i32 i = 0; i < method_param_count; i++) { + array_add(&wrapper_args, method_type->Proc.params->Tuple.variables[method_param_offset+i]->type); + } + + Type *wrapper_args_tuple = alloc_type_tuple_from_field_types(wrapper_args.data, wrapper_args.count, false, true); + Type *wrapper_proc_type = alloc_type_proc(nullptr, wrapper_args_tuple, (isize)wrapper_args_tuple->Tuple.variables.count, nullptr, 0, false, ProcCC_CDecl); + + lbProcedure *wrapper_proc = lb_create_dummy_procedure(m, proc_name, wrapper_proc_type); + lb_add_attribute_to_proc(wrapper_proc->module, wrapper_proc->value, "nounwind"); + + // Emit the wrapper + LLVMSetLinkage(wrapper_proc->value, LLVMExternalLinkage); + lb_begin_procedure_body(wrapper_proc); + { + auto method_call_args = array_make(temporary_allocator(), method_param_count + (isize)method_param_offset); + + if (!md.ac.objc_is_class_method) { + method_call_args[0] = lbValue { + wrapper_proc->raw_input_parameters[0], + class_ptr_type, + }; + } + + for (isize i = 0; i < method_param_count; i++) { + method_call_args[i+method_param_offset] = lbValue { + wrapper_proc->raw_input_parameters[i+2], + method_type->Proc.params->Tuple.variables[i+method_param_offset]->type, + }; + } + lbValue method_proc_value = lb_find_procedure_value_from_entity(m, md.proc_entity); + + // Call real procedure for method from here, passing the parameters expected, if any. + lb_emit_call(wrapper_proc, method_proc_value, method_call_args); + } + lb_end_procedure_body(wrapper_proc); + + + // Add the method to the class + String method_encoding = str_lit("v"); + // TODO (harold): Checker must ensure that objc_methods have a single return value or none! + GB_ASSERT(method_type->Proc.result_count <= 1); + if (method_type->Proc.result_count != 0) { + method_encoding = lb_get_objc_type_encoding(method_type->Proc.results->Tuple.variables[0]->type, temporary_allocator()); + } + + if (!md.ac.objc_is_class_method) { + method_encoding = concatenate_strings(temporary_allocator(), method_encoding, str_lit("@:")); + } else { + method_encoding = concatenate_strings(temporary_allocator(), method_encoding, str_lit("#:")); + } + + for (i32 i = method_param_offset; i < method_param_count; i++) { + Type *param_type = method_type->Proc.params->Tuple.variables[i]->type; + String param_encoding = lb_get_objc_type_encoding(param_type, temporary_allocator()); + + method_encoding = concatenate_strings(temporary_allocator(), method_encoding, param_encoding); + } + + // Emit method registration + lbAddr* sel_address = string_map_get(&m->objc_selectors, md.ac.objc_selector); + GB_ASSERT(sel_address); + lbValue selector_value = lb_addr_load(p, *sel_address); + + args.count = 4; + args[0] = class_value; // Class + args[1] = selector_value; // SEL + args[2] = lbValue { wrapper_proc->value, wrapper_proc->type }; + args[3] = lb_const_value(m, t_cstring, exact_value_string(method_encoding)); + + // TODO(harold): Emit check BOOL result and panic if false. + lb_emit_runtime_call(p, "class_addMethod", args); + + } // End methods + + // Add ivar if we have one + Type *ivar_type = class_type->Named.type_name->TypeName.objc_ivar; + if (ivar_type != nullptr) { + // Register a single ivar for this class + Type *ivar_base = ivar_type->Named.base; + // TODO(harold): No idea if I can use this, but I assume so? 
+ const i64 size = ivar_base->cached_size; + const i64 alignment = ivar_base->cached_align; + // TODO(harold): Checker: Alignment must be compatible with ivar rules. Or we should increase the alignment if needed. + + String ivar_name = str_lit("__$ivar"); + String ivar_types = str_lit("{= }"); + args.count = 5; + args[0] = class_value; + args[1] = lb_const_value(m, t_cstring, exact_value_string(ivar_name)); + args[2] = lb_const_value(m, t_uint, exact_value_u64((u64)size)); + args[3] = lb_const_value(m, t_u8, exact_value_u64((u64)alignment)); + args[4] = lb_const_value(m, t_cstring, exact_value_string(ivar_types)); + lb_emit_runtime_call(p, "class_addIvar", args); + } + + // Complete the class registration + args.count = 1; + args[0] = class_value; + lb_emit_runtime_call(p, "objc_registerClassPair", args); + + // If we have an ivar, store its offset globally for an intrinsic + // TODO(harold): Only do this for types that had ivar_get calls registered! + if (ivar_type != nullptr) { + args.count = 2; + args[0] = class_value; + args[1] = lb_const_value(m, t_cstring, exact_value_string(str_lit("__$ivar"))); + lbValue ivar = lb_emit_runtime_call(p, "class_getInstanceVariable", args); + + args.count = 1; + args[0] = ivar; + lbValue ivar_offset = lb_emit_runtime_call(p, "ivar_getOffset", args); + lbValue ivar_offset_u32 = lb_emit_conv(p, ivar_offset, t_u32); + + String class_name = class_type->Named.type_name->TypeName.objc_class_name; + // TODO(harold): Oops! This is wrong, that map is there to prevent re-entry. + // Simply emit from referred ivars. For now use a single module only. + lbAddr ivar_addr = string_map_must_get(&m->objc_ivars, class_name); + lb_addr_store(p, ivar_addr, ivar_offset_u32); + } + } lb_end_procedure_body(p); } diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 6177fcf6e..7694c65c3 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -196,6 +196,7 @@ struct lbModule { StringMap objc_classes; StringMap objc_selectors; + StringMap objc_ivars; PtrMap map_cell_info_map; // address of runtime.Map_Info PtrMap map_info_map; // address of runtime.Map_Cell_Info @@ -219,6 +220,7 @@ struct lbObjCGlobal { gbString global_name; String name; Type * type; + Type * class_impl_type; // This is set when the class has the objc_implement attribute set to true. 
}; struct lbGenerator : LinkerData { @@ -240,6 +242,7 @@ struct lbGenerator : LinkerData { MPSCQueue entities_to_correct_linkage; MPSCQueue objc_selectors; MPSCQueue objc_classes; + MPSCQueue objc_ivars; }; diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 421720c4c..7f012e006 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -101,6 +101,7 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) { string_map_init(&m->objc_classes); string_map_init(&m->objc_selectors); + string_map_init(&m->objc_ivars); map_init(&m->map_info_map, 0); map_init(&m->map_cell_info_map, 0); @@ -173,6 +174,7 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { mpsc_init(&gen->entities_to_correct_linkage, heap_allocator()); mpsc_init(&gen->objc_selectors, heap_allocator()); mpsc_init(&gen->objc_classes, heap_allocator()); + mpsc_init(&gen->objc_ivars, heap_allocator()); return true; } diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index 3212abd9a..bf4ebf377 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -3290,6 +3290,7 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu case BuiltinProc_objc_find_class: return lb_handle_objc_find_class(p, expr); case BuiltinProc_objc_register_selector: return lb_handle_objc_register_selector(p, expr); case BuiltinProc_objc_register_class: return lb_handle_objc_register_class(p, expr); + case BuiltinProc_objc_ivar_get: return lb_handle_objc_ivar_get(p, expr); case BuiltinProc_constant_utf16_cstring: diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp index bfeebfcbe..897b71b5b 100644 --- a/src/llvm_backend_utility.cpp +++ b/src/llvm_backend_utility.cpp @@ -2125,7 +2125,7 @@ gb_internal lbAddr lb_handle_objc_find_or_register_selector(lbProcedure *p, Stri return addr; } -gb_internal lbAddr lb_handle_objc_find_or_register_class(lbProcedure *p, String const &name) { +gb_internal lbAddr lb_handle_objc_find_or_register_class(lbProcedure *p, String const &name, Type *class_impl_type) { lbModule *m = p->module; lbAddr *found = string_map_get(&m->objc_classes, name); if (found) { @@ -2148,13 +2148,72 @@ gb_internal lbAddr lb_handle_objc_find_or_register_class(lbProcedure *p, String } else { LLVMSetLinkage(g.value, LLVMExternalLinkage); } - mpsc_enqueue(&m->gen->objc_classes, lbObjCGlobal{m, global_name, name, t_objc_Class}); + mpsc_enqueue(&m->gen->objc_classes, lbObjCGlobal{m, global_name, name, t_objc_Class, class_impl_type}); lbAddr addr = lb_addr(g); string_map_set(&m->objc_classes, name, addr); return addr; } +gb_internal lbAddr lb_handle_objc_find_or_register_ivar(lbModule *m, Type *self_type) { + + String name = self_type->Named.type_name->TypeName.objc_class_name; + GB_ASSERT(name != ""); + + lbAddr *found = string_map_get(&m->objc_ivars, name); + if (found) { + return *found; + } + + + lbModule *default_module = &m->gen->default_module; + + gbString global_name = gb_string_make(permanent_allocator(), "__$objc_ivar::"); + global_name = gb_string_append_length(global_name, name.text, name.len); + + // Create a global variable to store offset of the ivar in an instance of an object + Type *p_ivar_offset = alloc_type_pointer(t_u32); + + LLVMTypeRef t = lb_type(m, p_ivar_offset); + lbValue g = {}; + g.value = LLVMAddGlobal(m->mod, t, global_name); + g.type = p_ivar_offset; + + if (default_module == m) { + LLVMSetInitializer(g.value, LLVMConstNull(t)); + lb_add_member(m, make_string_c(global_name), g); + } else 
{ + LLVMSetLinkage(g.value, LLVMExternalLinkage); + } + + mpsc_enqueue(&m->gen->objc_ivars, lbObjCGlobal{m, global_name, name, self_type}); + + lbAddr addr = lb_addr(g); + string_map_set(&m->objc_ivars, name, addr); + return addr; +} + +gb_internal lbValue lb_handle_objc_ivar_get(lbProcedure *p, Ast *expr) { + ast_node(ce, CallExpr, expr); + lbModule *m = p->module; + + GB_ASSERT(ce->args[0]->tav.type->kind == Type_Pointer); + Type *self_type = ce->args[0]->tav.type->Pointer.elem; + Type *ivar_type = self_type->Named.type_name->TypeName.objc_ivar; + + Type* p_ivar = alloc_type_pointer(ivar_type); + + lbValue ivar_offset = lb_addr_load(p, lb_handle_objc_find_or_register_ivar(m, self_type)); + lbValue ivar_offset_uptr = lb_emit_conv(p, ivar_offset, t_uintptr); + + lbValue self = lb_build_expr(p, ce->args[0]); + lbValue self_uptr = lb_emit_conv(p, self, t_uintptr); + + lbValue ivar_uptr = lb_emit_arith(p, Token_Add, self_uptr, ivar_offset_uptr, t_uintptr); + + return lb_emit_conv(p, ivar_uptr, p_ivar); +} + gb_internal lbValue lb_handle_objc_find_selector(lbProcedure *p, Ast *expr) { ast_node(ce, CallExpr, expr); @@ -2188,7 +2247,7 @@ gb_internal lbValue lb_handle_objc_find_class(lbProcedure *p, Ast *expr) { auto tav = ce->args[0]->tav; GB_ASSERT(tav.value.kind == ExactValue_String); String name = tav.value.value_string; - return lb_addr_load(p, lb_handle_objc_find_or_register_class(p, name)); + return lb_addr_load(p, lb_handle_objc_find_or_register_class(p, name, nullptr)); } gb_internal lbValue lb_handle_objc_register_class(lbProcedure *p, Ast *expr) { @@ -2198,7 +2257,7 @@ gb_internal lbValue lb_handle_objc_register_class(lbProcedure *p, Ast *expr) { auto tav = ce->args[0]->tav; GB_ASSERT(tav.value.kind == ExactValue_String); String name = tav.value.value_string; - lbAddr dst = lb_handle_objc_find_or_register_class(p, name); + lbAddr dst = lb_handle_objc_find_or_register_class(p, name, nullptr); auto args = array_make(permanent_allocator(), 3); args[0] = lb_const_nil(m, t_objc_Class); @@ -2220,7 +2279,9 @@ gb_internal lbValue lb_handle_objc_id(lbProcedure *p, Ast *expr) { GB_ASSERT(e->kind == Entity_TypeName); String name = e->TypeName.objc_class_name; - return lb_addr_load(p, lb_handle_objc_find_or_register_class(p, name)); + Type *class_impl_type = e->TypeName.objc_is_implementation ? 
type : nullptr; + + return lb_addr_load(p, lb_handle_objc_find_or_register_class(p, name, class_impl_type)); } return lb_build_expr(p, expr); @@ -2266,9 +2327,6 @@ gb_internal lbValue lb_handle_objc_send(lbProcedure *p, Ast *expr) { return lb_emit_call(p, the_proc, args); } - - - gb_internal LLVMAtomicOrdering llvm_atomic_ordering_from_odin(ExactValue const &value) { GB_ASSERT(value.kind == ExactValue_Integer); i64 v = exact_value_to_i64(value); diff --git a/src/types.cpp b/src/types.cpp index 9c9472a28..1b2545279 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -729,10 +729,12 @@ gb_global Type *t_map_set_proc = nullptr; gb_global Type *t_objc_object = nullptr; gb_global Type *t_objc_selector = nullptr; gb_global Type *t_objc_class = nullptr; +gb_global Type *t_objc_ivar = nullptr; gb_global Type *t_objc_id = nullptr; gb_global Type *t_objc_SEL = nullptr; gb_global Type *t_objc_Class = nullptr; +gb_global Type *t_objc_Ivar = nullptr; enum OdinAtomicMemoryOrder : i32 { OdinAtomicMemoryOrder_relaxed = 0, // unordered -- cgit v1.2.3 From f3923ed66640ea9fd342ca851fdd2bd794405e0c Mon Sep 17 00:00:00 2001 From: Harold Brenes Date: Sun, 27 Apr 2025 22:48:16 -0400 Subject: Fix indentations Fix Objective-C wrapper procs not forwarding return value --- src/check_builtin.cpp | 144 +++--- src/check_decl.cpp | 174 +++---- src/checker.cpp | 150 +++--- src/checker.hpp | 18 +- src/checker_builtin_procs.hpp | 4 +- src/entity.cpp | 6 +- src/llvm_backend.cpp | 1108 +++++++++++++++++++++-------------------- src/llvm_backend.hpp | 6 +- src/llvm_backend_general.cpp | 4 +- src/llvm_backend_proc.cpp | 2 +- src/llvm_backend_utility.cpp | 14 +- 11 files changed, 822 insertions(+), 808 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index 92942b4db..099f99045 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -389,77 +389,77 @@ gb_internal bool check_builtin_objc_procedure(CheckerContext *c, Operand *operan } break; case BuiltinProc_objc_ivar_get: - { - Type *self_type = nullptr; - Type *ivar_type = nullptr; - - Operand self = {}; - check_expr_or_type(c, &self, ce->args[0]); - - if (!is_operand_value(self) || !check_is_assignable_to(c, &self, t_objc_id)) { - gbString e = expr_to_string(self.expr); - gbString t = type_to_string(self.type); - error(self.expr, "'%.*s' expected a type or value derived from intrinsics.objc_object, got '%s' of type %s", LIT(builtin_name), e, t); - gb_string_free(t); - gb_string_free(e); - return false; - } else if (!is_type_pointer(self.type)) { - gbString e = expr_to_string(self.expr); - gbString t = type_to_string(self.type); - error(self.expr, "'%.*s' expected a pointer of a value derived from intrinsics.objc_object, got '%s' of type %s", LIT(builtin_name), e, t); - gb_string_free(t); - gb_string_free(e); - return false; - } - - self_type = type_deref(self.type); - - if (!(self_type->kind == Type_Named && - self_type->Named.type_name != nullptr && - self_type->Named.type_name->TypeName.objc_class_name != "")) { - gbString t = type_to_string(self_type); - error(self.expr, "'%.*s' expected a named type with the attribute @(obj_class=) , got type %s", LIT(builtin_name), t); - gb_string_free(t); - return false; - } - - if (self_type->Named.type_name->TypeName.objc_ivar == nullptr) { - gbString t = type_to_string(self_type); - error(self.expr, "'%.*s' requires that type %s have the attribute @(obj_ivar=).", LIT(builtin_name), t); - gb_string_free(t); - return false; - } - - Operand ivar = {}; - 
check_expr_or_type(c, &ivar, ce->args[1]); - if (ivar.mode == Addressing_Type) { - ivar_type = ivar.type; - } else { - return false; - } - - if (self_type->Named.type_name->TypeName.objc_ivar != ivar_type) { - gbString name_self = type_to_string(self_type); - gbString name_expected = type_to_string(self_type->Named.type_name->TypeName.objc_ivar); - gbString name_given = type_to_string(ivar_type); - error(self.expr, "'%.*s' ivar type %s does not match @obj_ivar type %s on Objective-C class %s.", - LIT(builtin_name), name_given, name_expected, name_self); - gb_string_free(name_self); - gb_string_free(name_expected); - gb_string_free(name_given); - return false; - } - - if (type_hint != nullptr && type_hint->kind == Type_Pointer && type_hint->Pointer.elem == ivar_type) { - operand->type = type_hint; - } else { - operand->type = alloc_type_pointer(ivar_type); - } - - operand->mode = Addressing_Value; - - return true; - } break; + { + Type *self_type = nullptr; + Type *ivar_type = nullptr; + + Operand self = {}; + check_expr_or_type(c, &self, ce->args[0]); + + if (!is_operand_value(self) || !check_is_assignable_to(c, &self, t_objc_id)) { + gbString e = expr_to_string(self.expr); + gbString t = type_to_string(self.type); + error(self.expr, "'%.*s' expected a type or value derived from intrinsics.objc_object, got '%s' of type %s", LIT(builtin_name), e, t); + gb_string_free(t); + gb_string_free(e); + return false; + } else if (!is_type_pointer(self.type)) { + gbString e = expr_to_string(self.expr); + gbString t = type_to_string(self.type); + error(self.expr, "'%.*s' expected a pointer of a value derived from intrinsics.objc_object, got '%s' of type %s", LIT(builtin_name), e, t); + gb_string_free(t); + gb_string_free(e); + return false; + } + + self_type = type_deref(self.type); + + if (!(self_type->kind == Type_Named && + self_type->Named.type_name != nullptr && + self_type->Named.type_name->TypeName.objc_class_name != "")) { + gbString t = type_to_string(self_type); + error(self.expr, "'%.*s' expected a named type with the attribute @(obj_class=) , got type %s", LIT(builtin_name), t); + gb_string_free(t); + return false; + } + + if (self_type->Named.type_name->TypeName.objc_ivar == nullptr) { + gbString t = type_to_string(self_type); + error(self.expr, "'%.*s' requires that type %s have the attribute @(obj_ivar=).", LIT(builtin_name), t); + gb_string_free(t); + return false; + } + + Operand ivar = {}; + check_expr_or_type(c, &ivar, ce->args[1]); + if (ivar.mode == Addressing_Type) { + ivar_type = ivar.type; + } else { + return false; + } + + if (self_type->Named.type_name->TypeName.objc_ivar != ivar_type) { + gbString name_self = type_to_string(self_type); + gbString name_expected = type_to_string(self_type->Named.type_name->TypeName.objc_ivar); + gbString name_given = type_to_string(ivar_type); + error(self.expr, "'%.*s' ivar type %s does not match @obj_ivar type %s on Objective-C class %s.", + LIT(builtin_name), name_given, name_expected, name_self); + gb_string_free(name_self); + gb_string_free(name_expected); + gb_string_free(name_given); + return false; + } + + if (type_hint != nullptr && type_hint->kind == Type_Pointer && type_hint->Pointer.elem == ivar_type) { + operand->type = type_hint; + } else { + operand->type = alloc_type_pointer(ivar_type); + } + + operand->mode = Addressing_Value; + + return true; + } break; } } @@ -2206,7 +2206,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As case BuiltinProc_objc_find_class: case 
BuiltinProc_objc_register_selector: case BuiltinProc_objc_register_class: - case BuiltinProc_objc_ivar_get: + case BuiltinProc_objc_ivar_get: return check_builtin_objc_procedure(c, operand, call, id, type_hint); case BuiltinProc___entry_point: diff --git a/src/check_decl.cpp b/src/check_decl.cpp index e67241b31..48e5172d6 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -526,68 +526,68 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, check_decl_attributes(ctx, decl->attributes, type_decl_attribute, &ac); if (e->kind == Entity_TypeName && ac.objc_class != "") { e->TypeName.objc_class_name = ac.objc_class; - e->TypeName.objc_superclass = ac.objc_superclass; - e->TypeName.objc_ivar = ac.objc_ivar; + e->TypeName.objc_superclass = ac.objc_superclass; + e->TypeName.objc_ivar = ac.objc_ivar; e->TypeName.objc_context_provider = ac.objc_context_provider; - if (ac.objc_is_implementation) { - e->TypeName.objc_is_implementation = true; - mpsc_enqueue(&ctx->info->objc_class_implementations, e); // TODO(harold): Don't need this for anything? See if needed when using explicit @export - - GB_ASSERT(e->TypeName.objc_ivar == nullptr || e->TypeName.objc_ivar->kind == Type_Named); - - // Enqueue the proc to be checked when resolved - if (e->TypeName.objc_context_provider != nullptr) { - mpsc_enqueue(&ctx->checker->procs_with_objc_context_provider_to_check, e); - } - - // @TODO(harold): I think there's a Check elsewhere in the checker for checking cycles. - // See about moving this to the right location. - // Ensure superclass hierarchy are all Objective-C classes and does not cycle - Type *super = ac.objc_superclass; - if (super != nullptr) { - TypeSet super_set{}; - type_set_init(&super_set, 8); - defer (type_set_destroy(&super_set)); - - type_set_update(&super_set, e->type); - - for (;;) { - if (type_set_update(&super_set, super)) { - error(e->token, "@(objc_superclass) Superclass hierarchy cycle encountered"); - break; - } - - if (super->kind != Type_Named) { - error(e->token, "@(objc_superclass) References type must be a named struct."); - break; - } - - Type* named_type = base_type(super->Named.type_name->type); - if (!is_type_objc_object(named_type)) { - error(e->token, "@(objc_superclass) Superclass must be an Objective-C class."); - break; - } - - super = super->Named.type_name->TypeName.objc_superclass; - if (super == nullptr) { - break; - } - - // TODO(harold): Is this the right way to do this??? The referenced entity must be already resolved - // so that we can access its objc_superclass attribute - check_single_global_entity(ctx->checker, super->Named.type_name, super->Named.type_name->decl_info); - } - } - } else { - if (e->TypeName.objc_superclass != nullptr) { - error(e->token, "@(objc_superclass) can only be applied when the @(obj_implement) attribute is also applied"); - } else if (e->TypeName.objc_ivar != nullptr) { - error(e->token, "@(objc_ivar) can only be applied when the @(obj_implement) attribute is also applied"); - } else if (e->TypeName.objc_context_provider != nullptr) { - error(e->token, "@(objc_context_provider) can only be applied when the @(obj_implement) attribute is also applied"); - } - } + if (ac.objc_is_implementation) { + e->TypeName.objc_is_implementation = true; + mpsc_enqueue(&ctx->info->objc_class_implementations, e); // TODO(harold): Don't need this for anything? 
See if needed when using explicit @export + + GB_ASSERT(e->TypeName.objc_ivar == nullptr || e->TypeName.objc_ivar->kind == Type_Named); + + // Enqueue the proc to be checked when resolved + if (e->TypeName.objc_context_provider != nullptr) { + mpsc_enqueue(&ctx->checker->procs_with_objc_context_provider_to_check, e); + } + + // @TODO(harold): I think there's a Check elsewhere in the checker for checking cycles. + // See about moving this to the right location. + // Ensure superclass hierarchy are all Objective-C classes and does not cycle + Type *super = ac.objc_superclass; + if (super != nullptr) { + TypeSet super_set{}; + type_set_init(&super_set, 8); + defer (type_set_destroy(&super_set)); + + type_set_update(&super_set, e->type); + + for (;;) { + if (type_set_update(&super_set, super)) { + error(e->token, "@(objc_superclass) Superclass hierarchy cycle encountered"); + break; + } + + if (super->kind != Type_Named) { + error(e->token, "@(objc_superclass) References type must be a named struct."); + break; + } + + Type* named_type = base_type(super->Named.type_name->type); + if (!is_type_objc_object(named_type)) { + error(e->token, "@(objc_superclass) Superclass must be an Objective-C class."); + break; + } + + super = super->Named.type_name->TypeName.objc_superclass; + if (super == nullptr) { + break; + } + + // TODO(harold): Is this the right way to do this??? The referenced entity must be already resolved + // so that we can access its objc_superclass attribute + check_single_global_entity(ctx->checker, super->Named.type_name, super->Named.type_name->decl_info); + } + } + } else { + if (e->TypeName.objc_superclass != nullptr) { + error(e->token, "@(objc_superclass) can only be applied when the @(obj_implement) attribute is also applied"); + } else if (e->TypeName.objc_ivar != nullptr) { + error(e->token, "@(objc_ivar) can only be applied when the @(obj_implement) attribute is also applied"); + } else if (e->TypeName.objc_context_provider != nullptr) { + error(e->token, "@(objc_context_provider) can only be applied when the @(obj_implement) attribute is also applied"); + } + } if (type_size_of(e->type) > 0) { error(e->token, "@(objc_class) marked type must be of zero size"); @@ -1005,37 +1005,37 @@ gb_internal void check_objc_methods(CheckerContext *ctx, Entity *e, AttributeCon error(e->token, "@(objc_name) attribute may only be applied to procedures and types within the same scope"); } else { - if (ac.objc_is_implementation) { - GB_ASSERT(e->kind == Entity_Procedure); + if (ac.objc_is_implementation) { + GB_ASSERT(e->kind == Entity_Procedure); - Type *proc_type = e->type; + Type *proc_type = e->type; - if (!tn->TypeName.objc_is_implementation) { - error(e->token, "@(objc_is_implement) attribute may only be applied to procedures whose class also have @(objc_is_implement) applied"); - } else if (proc_type->Proc.calling_convention == ProcCC_Odin && !tn->TypeName.objc_context_provider) { - error(e->token, "Objective-C methods with Odin calling convention can only be used with classes that have @(objc_context_provider) set"); - } else if (ac.objc_is_class_method && proc_type->Proc.calling_convention != ProcCC_CDecl) { - error(e->token, "Objective-C class methods (objc_is_class_method=true) that have @objc_is_implementation can only use \"c\" calling convention"); - } else { + if (!tn->TypeName.objc_is_implementation) { + error(e->token, "@(objc_is_implement) attribute may only be applied to procedures whose class also have @(objc_is_implement) applied"); + } else if 
(proc_type->Proc.calling_convention == ProcCC_Odin && !tn->TypeName.objc_context_provider) { + error(e->token, "Objective-C methods with Odin calling convention can only be used with classes that have @(objc_context_provider) set"); + } else if (ac.objc_is_class_method && proc_type->Proc.calling_convention != ProcCC_CDecl) { + error(e->token, "Objective-C class methods (objc_is_class_method=true) that have @objc_is_implementation can only use \"c\" calling convention"); + } else { auto method = ObjcMethodData{ ac, e }; - method.ac.objc_selector = ac.objc_selector != "" ? ac.objc_selector : ac.objc_name; - - CheckerInfo *info = ctx->info; - mutex_lock(&info->objc_method_mutex); - defer (mutex_unlock(&info->objc_method_mutex)); - - Array* method_list = map_get(&info->objc_method_implementations, t); - if (method_list) { - array_add(method_list, method); - } else { - auto list = array_make(permanent_allocator(), 1, 8); - list[0] = method; - - map_set(&info->objc_method_implementations, t, list); - } - } - } + method.ac.objc_selector = ac.objc_selector != "" ? ac.objc_selector : ac.objc_name; + + CheckerInfo *info = ctx->info; + mutex_lock(&info->objc_method_mutex); + defer (mutex_unlock(&info->objc_method_mutex)); + + Array* method_list = map_get(&info->objc_method_implementations, t); + if (method_list) { + array_add(method_list, method); + } else { + auto list = array_make(permanent_allocator(), 1, 8); + list[0] = method; + + map_set(&info->objc_method_implementations, t, list); + } + } + } mutex_lock(&global_type_name_objc_metadata_mutex); defer (mutex_unlock(&global_type_name_objc_metadata_mutex)); diff --git a/src/checker.cpp b/src/checker.cpp index 79c773a3c..6563b1c58 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1351,12 +1351,12 @@ gb_internal void init_universal(void) { t_objc_object = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_object"), alloc_type_struct_complete()); t_objc_selector = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_selector"), alloc_type_struct_complete()); t_objc_class = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_class"), alloc_type_struct_complete()); - t_objc_ivar = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_ivar"), alloc_type_struct_complete()); + t_objc_ivar = add_global_type_name(intrinsics_pkg->scope, str_lit("objc_ivar"), alloc_type_struct_complete()); t_objc_id = alloc_type_pointer(t_objc_object); t_objc_SEL = alloc_type_pointer(t_objc_selector); t_objc_Class = alloc_type_pointer(t_objc_class); - t_objc_Ivar = alloc_type_pointer(t_objc_ivar); + t_objc_Ivar = alloc_type_pointer(t_objc_ivar); } } @@ -1389,8 +1389,8 @@ gb_internal void init_checker_info(CheckerInfo *i) { array_init(&i->defineables, a); map_init(&i->objc_msgSend_types); - mpsc_init(&i->objc_class_implementations, a); - map_init(&i->objc_method_implementations); + mpsc_init(&i->objc_class_implementations, a); + map_init(&i->objc_method_implementations); string_map_init(&i->load_file_cache); array_init(&i->all_procedures, heap_allocator()); @@ -3352,10 +3352,10 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) { ac->test = true; return true; } else if (name == "export") { - if (ac->objc_is_implementation) { - error(value, "Setting @(export) explicitly is not allowed when @(objc_implement) is set. It is exported implicitly."); - return false; - } + if (ac->objc_is_implementation) { + error(value, "Setting @(export) explicitly is not allowed when @(objc_implement) is set. 
It is exported implicitly."); + return false; + } ExactValue ev = check_decl_attribute_value(c, value); if (ev.kind == ExactValue_Invalid) { @@ -3369,10 +3369,10 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) { return true; } else if (name == "linkage") { - if (ac->objc_is_implementation) { - error(value, "Explicit linkage not allowed when @(objc_implement) is set. It is set implicitly"); - return false; - } + if (ac->objc_is_implementation) { + error(value, "Explicit linkage not allowed when @(objc_implement) is set. It is set implicitly"); + return false; + } ExactValue ev = check_decl_attribute_value(c, value); if (ev.kind != ExactValue_String) { @@ -3681,23 +3681,23 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) { } return true; } else if (name == "objc_implement") { - ExactValue ev = check_decl_attribute_value(c, value); - if (ev.kind == ExactValue_Bool) { - ac->objc_is_implementation = ev.value_bool; - } else if (ev.kind == ExactValue_Invalid) { - ac->objc_is_implementation = true; - } else { - error(elem, "Expected a boolean value, or no value, for '%.*s'", LIT(name)); - } - - // This implies exported, strongly linked - if (ac->objc_is_implementation) { - ac->is_export = true; - ac->linkage = str_lit("strong"); - } - - return true; - } else if (name == "objc_selector") { + ExactValue ev = check_decl_attribute_value(c, value); + if (ev.kind == ExactValue_Bool) { + ac->objc_is_implementation = ev.value_bool; + } else if (ev.kind == ExactValue_Invalid) { + ac->objc_is_implementation = true; + } else { + error(elem, "Expected a boolean value, or no value, for '%.*s'", LIT(name)); + } + + // This implies exported, strongly linked + if (ac->objc_is_implementation) { + ac->is_export = true; + ac->linkage = str_lit("strong"); + } + + return true; + } else if (name == "objc_selector") { ExactValue ev = check_decl_attribute_value(c, value); if (ev.kind == ExactValue_String) { if (string_is_valid_identifier(ev.value_string)) { @@ -3949,52 +3949,52 @@ gb_internal DECL_ATTRIBUTE_PROC(type_decl_attribute) { } return true; } else if (name == "objc_implement") { - ExactValue ev = check_decl_attribute_value(c, value); - if (ev.kind == ExactValue_Bool) { - ac->objc_is_implementation = ev.value_bool; - } else if (ev.kind == ExactValue_Invalid) { - ac->objc_is_implementation = true; - } else { - error(elem, "Expected a boolean value, or no value, for '%.*s'", LIT(name)); - } - return true; - } else if (name == "objc_superclass") { - Type *objc_superclass = check_type(c, value); - - if (objc_superclass != nullptr) { - ac->objc_superclass = objc_superclass; - } else { - error(value, "'%.*s' expected a named type", LIT(name)); - } - return true; - } else if (name == "objc_ivar") { - Type *objc_ivar = check_type(c, value); - - if (objc_ivar != nullptr) { - ac->objc_ivar = objc_ivar; - } else { - error(value, "'%.*s' expected a named type", LIT(name)); - } - return true; - } else if (name == "objc_context_provider") { - Operand o = {}; - check_expr(c, &o, value); - Entity *e = entity_of_node(o.expr); - - if (e != nullptr) { - if (ac->objc_context_provider != nullptr) { - error(elem, "Previous usage of a 'objc_context_provider' attribute"); - } - if (e->kind != Entity_Procedure) { - error(elem, "'objc_context_provider' must refer to a procedure"); - } else { - ac->objc_context_provider = e; - } - - return true; - } - } - return false; + ExactValue ev = check_decl_attribute_value(c, value); + if (ev.kind == ExactValue_Bool) { + ac->objc_is_implementation = ev.value_bool; + } else if (ev.kind 
== ExactValue_Invalid) { + ac->objc_is_implementation = true; + } else { + error(elem, "Expected a boolean value, or no value, for '%.*s'", LIT(name)); + } + return true; + } else if (name == "objc_superclass") { + Type *objc_superclass = check_type(c, value); + + if (objc_superclass != nullptr) { + ac->objc_superclass = objc_superclass; + } else { + error(value, "'%.*s' expected a named type", LIT(name)); + } + return true; + } else if (name == "objc_ivar") { + Type *objc_ivar = check_type(c, value); + + if (objc_ivar != nullptr) { + ac->objc_ivar = objc_ivar; + } else { + error(value, "'%.*s' expected a named type", LIT(name)); + } + return true; + } else if (name == "objc_context_provider") { + Operand o = {}; + check_expr(c, &o, value); + Entity *e = entity_of_node(o.expr); + + if (e != nullptr) { + if (ac->objc_context_provider != nullptr) { + error(elem, "Previous usage of a 'objc_context_provider' attribute"); + } + if (e->kind != Entity_Procedure) { + error(elem, "'objc_context_provider' must refer to a procedure"); + } else { + ac->objc_context_provider = e; + } + + return true; + } + } + return false; } diff --git a/src/checker.hpp b/src/checker.hpp index 574c71c7f..336f09a7e 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -148,13 +148,13 @@ struct AttributeContext { String objc_class; String objc_name; - String objc_selector; + String objc_selector; Type * objc_type; - Type * objc_superclass; - Type * objc_ivar; + Type * objc_superclass; + Type * objc_ivar; Entity *objc_context_provider; bool objc_is_class_method : 1; - bool objc_is_implementation : 1; // This struct or proc provides a class/method implementation, not a binding to an existing type. + bool objc_is_implementation : 1; // This struct or proc provides a class/method implementation, not a binding to an existing type. 
String require_target_feature; // required by the target micro-architecture String enable_target_feature; // will be enabled for the procedure only @@ -371,8 +371,8 @@ struct ObjcMsgData { }; struct ObjcMethodData { - AttributeContext ac; - Entity *proc_entity; + AttributeContext ac; + Entity *proc_entity; }; enum LoadFileTier { @@ -489,10 +489,10 @@ struct CheckerInfo { BlockingMutex objc_types_mutex; PtrMap objc_msgSend_types; - MPSCQueue objc_class_implementations; + MPSCQueue objc_class_implementations; - BlockingMutex objc_method_mutex; - PtrMap> objc_method_implementations; + BlockingMutex objc_method_mutex; + PtrMap> objc_method_implementations; BlockingMutex load_file_mutex; diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp index cb2ce3915..ce7d8349b 100644 --- a/src/checker_builtin_procs.hpp +++ b/src/checker_builtin_procs.hpp @@ -331,7 +331,7 @@ BuiltinProc__type_end, BuiltinProc_objc_find_class, BuiltinProc_objc_register_selector, BuiltinProc_objc_register_class, - BuiltinProc_objc_ivar_get, + BuiltinProc_objc_ivar_get, BuiltinProc_constant_utf16_cstring, @@ -674,7 +674,7 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = { {STR_LIT("objc_find_class"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics}, {STR_LIT("objc_register_selector"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, {STR_LIT("objc_register_class"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, - {STR_LIT("ivar_get"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, + {STR_LIT("ivar_get"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics, false, true}, {STR_LIT("constant_utf16_cstring"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics}, diff --git a/src/entity.cpp b/src/entity.cpp index a5443cf27..cc41b5e59 100644 --- a/src/entity.cpp +++ b/src/entity.cpp @@ -235,9 +235,9 @@ struct Entity { Type * type_parameter_specialization; String ir_mangled_name; bool is_type_alias; - bool objc_is_implementation; - Type* objc_superclass; - Type* objc_ivar; + bool objc_is_implementation; + Type* objc_superclass; + Type* objc_ivar; Entity*objc_context_provider; String objc_class_name; TypeNameObjCMetadata *objc_metadata; diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index fad542d4a..7ffd4ea30 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1176,327 +1176,327 @@ gb_internal lbProcedure *lb_create_objc_names(lbModule *main_module) { // TODO(harold): Move this out of here and into a more suitable place. // TODO(harold): Should not take an allocator, but always use temp, as we return string literals as well. String lb_get_objc_type_encoding(Type *t, gbAllocator allocator, isize pointer_depth = 0) { - // NOTE(harold): See https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html#//apple_ref/doc/uid/TP40008048-CH100 - - // NOTE(harold): Darwin targets are always 64-bit. Should we drop this and assume "q" always? - #define INT_SIZE_ENCODING (build_context.metrics.ptr_size == 4 ? 
"i" : "q") - switch (t->kind) { - case Type_Basic: { - switch (t->Basic.kind) { - case Basic_Invalid: - return str_lit("?"); - - case Basic_llvm_bool: - case Basic_bool: - case Basic_b8: - return str_lit("B"); - - case Basic_b16: - return str_lit("C"); - case Basic_b32: - return str_lit("I"); - case Basic_b64: - return str_lit("q"); - case Basic_i8: - return str_lit("c"); - case Basic_u8: - return str_lit("C"); - case Basic_i16: - case Basic_i16le: - case Basic_i16be: - return str_lit("s"); - case Basic_u16: - case Basic_u16le: - case Basic_u16be: - return str_lit("S"); - case Basic_i32: - case Basic_i32le: - case Basic_i32be: - return str_lit("i"); - case Basic_u32le: - case Basic_u32: - case Basic_u32be: - return str_lit("I"); - case Basic_i64: - case Basic_i64le: - case Basic_i64be: - return str_lit("q"); - case Basic_u64: - case Basic_u64le: - case Basic_u64be: - return str_lit("Q"); - case Basic_i128: - case Basic_i128le: - case Basic_i128be: - return str_lit("t"); - case Basic_u128: - case Basic_u128le: - case Basic_u128be: - return str_lit("T"); - case Basic_rune: - return str_lit("I"); - case Basic_f16: - case Basic_f16le: - case Basic_f16be: - return str_lit("s"); // @harold: Closest we've got? - case Basic_f32: - case Basic_f32le: - case Basic_f32be: - return str_lit("f"); - case Basic_f64: - case Basic_f64le: - case Basic_f64be: - return str_lit("d"); - - // TODO(harold) These: - case Basic_complex32: - case Basic_complex64: - case Basic_complex128: - case Basic_quaternion64: - case Basic_quaternion128: - case Basic_quaternion256: - return str_lit("?"); - - case Basic_int: - return str_lit(INT_SIZE_ENCODING); - case Basic_uint: - return build_context.metrics.ptr_size == 4 ? str_lit("I") : str_lit("Q"); - case Basic_uintptr: - case Basic_rawptr: - return str_lit("^v"); - - case Basic_string: - return build_context.metrics.ptr_size == 4 ? str_lit("{string=*i}") : str_lit("{string=*q}"); - - case Basic_cstring: return str_lit("*"); - case Basic_any: return str_lit("{any=^v^v"); // rawptr + ^Type_Info - - case Basic_typeid: - GB_ASSERT(t->Basic.size == 8); - return str_lit("q"); - - // Untyped types - case Basic_UntypedBool: - case Basic_UntypedInteger: - case Basic_UntypedFloat: - case Basic_UntypedComplex: - case Basic_UntypedQuaternion: - case Basic_UntypedString: - case Basic_UntypedRune: - case Basic_UntypedNil: - case Basic_UntypedUninit: - GB_PANIC("Untyped types cannot be @encoded()"); - return str_lit("?"); - } - break; - } - - case Type_Named: - case Type_Struct: - case Type_Union: { - Type* base = t; - if (base->kind == Type_Named) { - base = base_type(base); - if(base->kind != Type_Struct && base->kind != Type_Union) { - return lb_get_objc_type_encoding(base, allocator, pointer_depth); - } - } - - const bool is_union = base->kind == Type_Union; - if (!is_union) { - // Check for objc_SEL - if (internal_check_is_assignable_to(base, t_objc_SEL)) { - return str_lit(":"); - } - - // Check for objc_Class - if (internal_check_is_assignable_to(base, t_objc_SEL)) { - return str_lit("#"); - } - - // Treat struct as an Objective-C Class? - if (has_type_got_objc_class_attribute(base) && pointer_depth == 0) { - return str_lit("#"); - } - } - - if (is_type_objc_object(base)) { - return str_lit("@"); - } - - - gbString s = gb_string_make_reserve(allocator, 16); - s = gb_string_append_length(s, is_union ? 
"(" :"{", 1); - if (t->kind == Type_Named) { - s = gb_string_append_length(s, t->Named.name.text, t->Named.name.len); - } - - // Write fields - if (pointer_depth < 2) { - s = gb_string_append_length(s, "=", 1); - - if (!is_union) { - for( auto& f : base->Struct.fields ) { - String field_type = lb_get_objc_type_encoding(f->type, allocator, pointer_depth); - s = gb_string_append_length(s, field_type.text, field_type.len); - } - } else { - // #TODO(harold): Encode fields - } - } - - s = gb_string_append_length(s, is_union ? ")" :"}", 1); - - return make_string_c(s); - } - - case Type_Generic: - GB_PANIC("Generic types cannot be @encoded()"); - return str_lit("?"); - - case Type_Pointer: { - String pointee = lb_get_objc_type_encoding(t->Pointer.elem, allocator, pointer_depth +1); - // Special case for Objective-C Objects - if (pointer_depth == 0 && pointee == "@") { - return pointee; - } - - return concatenate_strings(allocator, str_lit("^"), pointee); - } - - case Type_MultiPointer: - return concatenate_strings(allocator, str_lit("^"), lb_get_objc_type_encoding(t->Pointer.elem, allocator, pointer_depth +1)); - - case Type_Array: { - String type_str = lb_get_objc_type_encoding(t->Array.elem, allocator, pointer_depth); - - gbString s = gb_string_make_reserve(allocator, type_str.len + 8); - s = gb_string_append_fmt(s, "[%lld%s]", t->Array.count, type_str.text); - return make_string_c(s); - } - - case Type_EnumeratedArray: { - String type_str = lb_get_objc_type_encoding(t->EnumeratedArray.elem, allocator, pointer_depth); - - gbString s = gb_string_make_reserve(allocator, type_str.len + 8); - s = gb_string_append_fmt(s, "[%lld%s]", t->EnumeratedArray.count, type_str.text); - return make_string_c(s); - } - - case Type_Slice: { - String type_str = lb_get_objc_type_encoding(t->Slice.elem, allocator, pointer_depth); - gbString s = gb_string_make_reserve(allocator, type_str.len + 8); - s = gb_string_append_fmt(s, "{slice=^%s%s}", type_str, INT_SIZE_ENCODING); - return make_string_c(s); - } - - case Type_DynamicArray: { - String type_str = lb_get_objc_type_encoding(t->DynamicArray.elem, allocator, pointer_depth); - gbString s = gb_string_make_reserve(allocator, type_str.len + 8); - s = gb_string_append_fmt(s, "{dynamic=^%s%s%sAllocator={?^v}}", type_str, INT_SIZE_ENCODING, INT_SIZE_ENCODING); - return make_string_c(s); - } - - case Type_Map: - return str_lit("{^v^v{Allocator=?^v}}"); - case Type_Enum: - return lb_get_objc_type_encoding(t->Enum.base_type, allocator, pointer_depth); - case Type_Tuple: - // NOTE(harold): Is this allowed here? - return str_lit("?"); - case Type_Proc: - return str_lit("?"); - case Type_BitSet: - return lb_get_objc_type_encoding(t->BitSet.underlying, allocator, pointer_depth); - case Type_SimdVector: - break; - case Type_Matrix: - break; - case Type_BitField: - return lb_get_objc_type_encoding(t->BitField.backing_type, allocator, pointer_depth); - case Type_SoaPointer: { - gbString s = gb_string_make_reserve(allocator, 8); - s = gb_string_append_fmt(s, "{=^v%s}", INT_SIZE_ENCODING); - return make_string_c(s); - } - - } // End switch t->kind - #undef INT_SIZE_ENCODING - - GB_PANIC("Unreachable"); + // NOTE(harold): See https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html#//apple_ref/doc/uid/TP40008048-CH100 + + // NOTE(harold): Darwin targets are always 64-bit. Should we drop this and assume "q" always? + #define INT_SIZE_ENCODING (build_context.metrics.ptr_size == 4 ? 
"i" : "q") + switch (t->kind) { + case Type_Basic: { + switch (t->Basic.kind) { + case Basic_Invalid: + return str_lit("?"); + + case Basic_llvm_bool: + case Basic_bool: + case Basic_b8: + return str_lit("B"); + + case Basic_b16: + return str_lit("C"); + case Basic_b32: + return str_lit("I"); + case Basic_b64: + return str_lit("q"); + case Basic_i8: + return str_lit("c"); + case Basic_u8: + return str_lit("C"); + case Basic_i16: + case Basic_i16le: + case Basic_i16be: + return str_lit("s"); + case Basic_u16: + case Basic_u16le: + case Basic_u16be: + return str_lit("S"); + case Basic_i32: + case Basic_i32le: + case Basic_i32be: + return str_lit("i"); + case Basic_u32le: + case Basic_u32: + case Basic_u32be: + return str_lit("I"); + case Basic_i64: + case Basic_i64le: + case Basic_i64be: + return str_lit("q"); + case Basic_u64: + case Basic_u64le: + case Basic_u64be: + return str_lit("Q"); + case Basic_i128: + case Basic_i128le: + case Basic_i128be: + return str_lit("t"); + case Basic_u128: + case Basic_u128le: + case Basic_u128be: + return str_lit("T"); + case Basic_rune: + return str_lit("I"); + case Basic_f16: + case Basic_f16le: + case Basic_f16be: + return str_lit("s"); // @harold: Closest we've got? + case Basic_f32: + case Basic_f32le: + case Basic_f32be: + return str_lit("f"); + case Basic_f64: + case Basic_f64le: + case Basic_f64be: + return str_lit("d"); + + // TODO(harold) These: + case Basic_complex32: + case Basic_complex64: + case Basic_complex128: + case Basic_quaternion64: + case Basic_quaternion128: + case Basic_quaternion256: + return str_lit("?"); + + case Basic_int: + return str_lit(INT_SIZE_ENCODING); + case Basic_uint: + return build_context.metrics.ptr_size == 4 ? str_lit("I") : str_lit("Q"); + case Basic_uintptr: + case Basic_rawptr: + return str_lit("^v"); + + case Basic_string: + return build_context.metrics.ptr_size == 4 ? str_lit("{string=*i}") : str_lit("{string=*q}"); + + case Basic_cstring: return str_lit("*"); + case Basic_any: return str_lit("{any=^v^v"); // rawptr + ^Type_Info + + case Basic_typeid: + GB_ASSERT(t->Basic.size == 8); + return str_lit("q"); + + // Untyped types + case Basic_UntypedBool: + case Basic_UntypedInteger: + case Basic_UntypedFloat: + case Basic_UntypedComplex: + case Basic_UntypedQuaternion: + case Basic_UntypedString: + case Basic_UntypedRune: + case Basic_UntypedNil: + case Basic_UntypedUninit: + GB_PANIC("Untyped types cannot be @encoded()"); + return str_lit("?"); + } + break; + } + + case Type_Named: + case Type_Struct: + case Type_Union: { + Type* base = t; + if (base->kind == Type_Named) { + base = base_type(base); + if(base->kind != Type_Struct && base->kind != Type_Union) { + return lb_get_objc_type_encoding(base, allocator, pointer_depth); + } + } + + const bool is_union = base->kind == Type_Union; + if (!is_union) { + // Check for objc_SEL + if (internal_check_is_assignable_to(base, t_objc_SEL)) { + return str_lit(":"); + } + + // Check for objc_Class + if (internal_check_is_assignable_to(base, t_objc_SEL)) { + return str_lit("#"); + } + + // Treat struct as an Objective-C Class? + if (has_type_got_objc_class_attribute(base) && pointer_depth == 0) { + return str_lit("#"); + } + } + + if (is_type_objc_object(base)) { + return str_lit("@"); + } + + + gbString s = gb_string_make_reserve(allocator, 16); + s = gb_string_append_length(s, is_union ? 
"(" :"{", 1); + if (t->kind == Type_Named) { + s = gb_string_append_length(s, t->Named.name.text, t->Named.name.len); + } + + // Write fields + if (pointer_depth < 2) { + s = gb_string_append_length(s, "=", 1); + + if (!is_union) { + for( auto& f : base->Struct.fields ) { + String field_type = lb_get_objc_type_encoding(f->type, allocator, pointer_depth); + s = gb_string_append_length(s, field_type.text, field_type.len); + } + } else { + // #TODO(harold): Encode fields + } + } + + s = gb_string_append_length(s, is_union ? ")" :"}", 1); + + return make_string_c(s); + } + + case Type_Generic: + GB_PANIC("Generic types cannot be @encoded()"); + return str_lit("?"); + + case Type_Pointer: { + String pointee = lb_get_objc_type_encoding(t->Pointer.elem, allocator, pointer_depth +1); + // Special case for Objective-C Objects + if (pointer_depth == 0 && pointee == "@") { + return pointee; + } + + return concatenate_strings(allocator, str_lit("^"), pointee); + } + + case Type_MultiPointer: + return concatenate_strings(allocator, str_lit("^"), lb_get_objc_type_encoding(t->Pointer.elem, allocator, pointer_depth +1)); + + case Type_Array: { + String type_str = lb_get_objc_type_encoding(t->Array.elem, allocator, pointer_depth); + + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "[%lld%s]", t->Array.count, type_str.text); + return make_string_c(s); + } + + case Type_EnumeratedArray: { + String type_str = lb_get_objc_type_encoding(t->EnumeratedArray.elem, allocator, pointer_depth); + + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "[%lld%s]", t->EnumeratedArray.count, type_str.text); + return make_string_c(s); + } + + case Type_Slice: { + String type_str = lb_get_objc_type_encoding(t->Slice.elem, allocator, pointer_depth); + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "{slice=^%s%s}", type_str, INT_SIZE_ENCODING); + return make_string_c(s); + } + + case Type_DynamicArray: { + String type_str = lb_get_objc_type_encoding(t->DynamicArray.elem, allocator, pointer_depth); + gbString s = gb_string_make_reserve(allocator, type_str.len + 8); + s = gb_string_append_fmt(s, "{dynamic=^%s%s%sAllocator={?^v}}", type_str, INT_SIZE_ENCODING, INT_SIZE_ENCODING); + return make_string_c(s); + } + + case Type_Map: + return str_lit("{^v^v{Allocator=?^v}}"); + case Type_Enum: + return lb_get_objc_type_encoding(t->Enum.base_type, allocator, pointer_depth); + case Type_Tuple: + // NOTE(harold): Is this allowed here? 
+ return str_lit("?"); + case Type_Proc: + return str_lit("?"); + case Type_BitSet: + return lb_get_objc_type_encoding(t->BitSet.underlying, allocator, pointer_depth); + case Type_SimdVector: + break; + case Type_Matrix: + break; + case Type_BitField: + return lb_get_objc_type_encoding(t->BitField.backing_type, allocator, pointer_depth); + case Type_SoaPointer: { + gbString s = gb_string_make_reserve(allocator, 8); + s = gb_string_append_fmt(s, "{=^v%s}", INT_SIZE_ENCODING); + return make_string_c(s); + } + + } // End switch t->kind + #undef INT_SIZE_ENCODING + + GB_PANIC("Unreachable"); } struct lbObjCGlobalClass { - lbObjCGlobal g; - lbValue class_value; // Local registered class value + lbObjCGlobal g; + lbValue class_value; // Local registered class value }; gb_internal void lb_register_objc_thing( - StringSet &handled, - lbModule *m, - Array &args, - Array &class_impls, - StringMap &class_map, - lbProcedure *p, - lbObjCGlobal const &g, - char const *call + StringSet &handled, + lbModule *m, + Array &args, + Array &class_impls, + StringMap &class_map, + lbProcedure *p, + lbObjCGlobal const &g, + char const *call ) { - if (string_set_update(&handled, g.name)) { - return; - } - - lbAddr addr = {}; - lbValue *found = string_map_get(&m->members, g.global_name); - if (found) { - addr = lb_addr(*found); - } else { - lbValue v = {}; - LLVMTypeRef t = lb_type(m, g.type); - v.value = LLVMAddGlobal(m->mod, t, g.global_name); - v.type = alloc_type_pointer(g.type); - addr = lb_addr(v); - LLVMSetInitializer(v.value, LLVMConstNull(t)); - } - - lbValue class_ptr{}; - lbValue class_name = lb_const_value(m, t_cstring, exact_value_string(g.name)); - - // If this class requires an implementation, save it for registration below. - if (g.class_impl_type != nullptr) { - - // Make sure the superclass has been initialized before us - lbValue superclass_value{}; - - auto& tn = g.class_impl_type->Named.type_name->TypeName; - Type *superclass = tn.objc_superclass; - if (superclass != nullptr) { - auto& superclass_global = string_map_must_get(&class_map, superclass->Named.type_name->TypeName.objc_class_name); - lb_register_objc_thing(handled, m, args, class_impls, class_map, p, superclass_global.g, call); - GB_ASSERT(superclass_global.class_value.value); - - superclass_value = superclass_global.class_value; - } - - args.count = 3; - args[0] = superclass == nullptr ? lb_const_nil(m, t_objc_Class) : superclass_value; - args[1] = class_name; - args[2] = lb_const_int(m, t_uint, 0); - class_ptr = lb_emit_runtime_call(p, "objc_allocateClassPair", args); - - array_add(&class_impls, lbObjCGlobalClass{g, class_ptr}); - } - else { - args.count = 1; - args[0] = class_name; - class_ptr = lb_emit_runtime_call(p, call, args); - } - - lb_addr_store(p, addr, class_ptr); - - lbObjCGlobalClass* class_global = string_map_get(&class_map, g.name); - if (class_global != nullptr) { - class_global->class_value = class_ptr; - } + if (string_set_update(&handled, g.name)) { + return; + } + + lbAddr addr = {}; + lbValue *found = string_map_get(&m->members, g.global_name); + if (found) { + addr = lb_addr(*found); + } else { + lbValue v = {}; + LLVMTypeRef t = lb_type(m, g.type); + v.value = LLVMAddGlobal(m->mod, t, g.global_name); + v.type = alloc_type_pointer(g.type); + addr = lb_addr(v); + LLVMSetInitializer(v.value, LLVMConstNull(t)); + } + + lbValue class_ptr{}; + lbValue class_name = lb_const_value(m, t_cstring, exact_value_string(g.name)); + + // If this class requires an implementation, save it for registration below. 
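	// NOTE (editorial sketch, not part of the original patch): the branch below relies on a
	// specific registration order, inferred from the calls that follow —
	//   1. the superclass is registered first (via the recursive call), so its Class value exists,
	//   2. classes we implement are created with objc_allocateClassPair(superclass, name, 0),
	//   3. their methods and ivars are attached later in lb_finalize_objc_names,
	//   4. objc_registerClassPair completes the registration.
	// Classes that are merely referenced instead go through the lookup routine passed in via
	// `call` (e.g. "objc_lookUpClass").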
+ if (g.class_impl_type != nullptr) { + + // Make sure the superclass has been initialized before us + lbValue superclass_value{}; + + auto& tn = g.class_impl_type->Named.type_name->TypeName; + Type *superclass = tn.objc_superclass; + if (superclass != nullptr) { + auto& superclass_global = string_map_must_get(&class_map, superclass->Named.type_name->TypeName.objc_class_name); + lb_register_objc_thing(handled, m, args, class_impls, class_map, p, superclass_global.g, call); + GB_ASSERT(superclass_global.class_value.value); + + superclass_value = superclass_global.class_value; + } + + args.count = 3; + args[0] = superclass == nullptr ? lb_const_nil(m, t_objc_Class) : superclass_value; + args[1] = class_name; + args[2] = lb_const_int(m, t_uint, 0); + class_ptr = lb_emit_runtime_call(p, "objc_allocateClassPair", args); + + array_add(&class_impls, lbObjCGlobalClass{g, class_ptr}); + } + else { + args.count = 1; + args[0] = class_name; + class_ptr = lb_emit_runtime_call(p, call, args); + } + + lb_addr_store(p, addr, class_ptr); + + lbObjCGlobalClass* class_global = string_map_get(&class_map, g.name); + if (class_global != nullptr) { + class_global->class_value = class_ptr; + } } gb_internal void lb_finalize_objc_names(lbGenerator *gen, lbProcedure *p) { @@ -1513,80 +1513,80 @@ gb_internal void lb_finalize_objc_names(lbGenerator *gen, lbProcedure *p) { defer (string_set_destroy(&handled)); auto args = array_make(temporary_allocator(), 3, 8); - auto class_impls = array_make(temporary_allocator(), 0, 16); - - // Ensure classes that have been implicitly referenced through - // the objc_superclass attribute have a global variable available for them. - TypeSet class_set{}; - type_set_init(&class_set, gen->objc_classes.count+16); - defer (type_set_destroy(&class_set)); - - auto referenced_classes = array_make(temporary_allocator()); - for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { - array_add( &referenced_classes, g); - - Type *cls = g.class_impl_type; - while (cls) { - if (type_set_update(&class_set, cls)) { - break; - } - GB_ASSERT(cls->kind == Type_Named); - - cls = cls->Named.type_name->TypeName.objc_superclass; - } - } - - for (auto pair : class_set) { - auto& tn = pair.type->Named.type_name->TypeName; - Type *class_impl = !tn.objc_is_implementation ? nullptr : pair.type; - lb_handle_objc_find_or_register_class(p, tn.objc_class_name, class_impl); - } - for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { - array_add( &referenced_classes, g ); - } - - // Add all class globals to a map so that we can look them up dynamically - // in order to resolve out-of-order because classes that are being implemented - // need their superclasses to have been registered before them. - StringMap global_class_map{}; - string_map_init(&global_class_map, (usize)gen->objc_classes.count); - defer (string_map_destroy(&global_class_map)); - - for (lbObjCGlobal g :referenced_classes) { - string_map_set(&global_class_map, g.name, lbObjCGlobalClass{g}); - } - - LLVMSetLinkage(p->value, LLVMInternalLinkage); - lb_begin_procedure_body(p); - - // Register class globals, gathering classes that must be implemented - for (auto& kv : global_class_map) { - lb_register_objc_thing(handled, m, args, class_impls, global_class_map, p, kv.value.g, "objc_lookUpClass"); - } - - // Prefetch selectors for implemented methods so that they can also be registered. 
- for (const auto& cd : class_impls) { - auto& g = cd.g; - Type *class_type = g.class_impl_type; - - Array* methods = map_get(&m->info->objc_method_implementations, class_type); - if (!methods) { - continue; - } - - for (const ObjcMethodData& md : *methods) { - lb_handle_objc_find_or_register_selector(p, md.ac.objc_selector); - } - } - - // Now we can register all referenced selectors - for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_selectors, &g); /**/) { - lb_register_objc_thing(handled, m, args, class_impls, global_class_map, p, g, "sel_registerName"); - } - - - // Emit method wrapper implementations and registration - auto wrapper_args = array_make(temporary_allocator(), 2, 8); + auto class_impls = array_make(temporary_allocator(), 0, 16); + + // Ensure classes that have been implicitly referenced through + // the objc_superclass attribute have a global variable available for them. + TypeSet class_set{}; + type_set_init(&class_set, gen->objc_classes.count+16); + defer (type_set_destroy(&class_set)); + + auto referenced_classes = array_make(temporary_allocator()); + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { + array_add( &referenced_classes, g); + + Type *cls = g.class_impl_type; + while (cls) { + if (type_set_update(&class_set, cls)) { + break; + } + GB_ASSERT(cls->kind == Type_Named); + + cls = cls->Named.type_name->TypeName.objc_superclass; + } + } + + for (auto pair : class_set) { + auto& tn = pair.type->Named.type_name->TypeName; + Type *class_impl = !tn.objc_is_implementation ? nullptr : pair.type; + lb_handle_objc_find_or_register_class(p, tn.objc_class_name, class_impl); + } + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_classes, &g); /**/) { + array_add( &referenced_classes, g ); + } + + // Add all class globals to a map so that we can look them up dynamically + // in order to resolve out-of-order because classes that are being implemented + // need their superclasses to have been registered before them. + StringMap global_class_map{}; + string_map_init(&global_class_map, (usize)gen->objc_classes.count); + defer (string_map_destroy(&global_class_map)); + + for (lbObjCGlobal g :referenced_classes) { + string_map_set(&global_class_map, g.name, lbObjCGlobalClass{g}); + } + + LLVMSetLinkage(p->value, LLVMInternalLinkage); + lb_begin_procedure_body(p); + + // Register class globals, gathering classes that must be implemented + for (auto& kv : global_class_map) { + lb_register_objc_thing(handled, m, args, class_impls, global_class_map, p, kv.value.g, "objc_lookUpClass"); + } + + // Prefetch selectors for implemented methods so that they can also be registered. 
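	// NOTE (editorial): the prefetch below matters because the class_addMethod calls emitted
	// further down load each SEL from m->objc_selectors (see the GB_ASSERT(sel_address) later),
	// so every selector used by an implemented method must already have been registered via
	// sel_registerName before method registration runs.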
+ for (const auto& cd : class_impls) { + auto& g = cd.g; + Type *class_type = g.class_impl_type; + + Array* methods = map_get(&m->info->objc_method_implementations, class_type); + if (!methods) { + continue; + } + + for (const ObjcMethodData& md : *methods) { + lb_handle_objc_find_or_register_selector(p, md.ac.objc_selector); + } + } + + // Now we can register all referenced selectors + for (lbObjCGlobal g = {}; mpsc_dequeue(&gen->objc_selectors, &g); /**/) { + lb_register_objc_thing(handled, m, args, class_impls, global_class_map, p, g, "sel_registerName"); + } + + + // Emit method wrapper implementations and registration + auto wrapper_args = array_make(temporary_allocator(), 2, 8); auto get_context_args = array_make(temporary_allocator(), 1); @@ -1597,186 +1597,200 @@ gb_internal void lb_finalize_objc_names(lbGenerator *gen, lbProcedure *p) { map_set(&ivar_map, g.class_impl_type, g); } - for (const auto& cd : class_impls) { - auto& g = cd.g; - Type *class_type = g.class_impl_type; - Type *class_ptr_type = alloc_type_pointer(class_type); - lbValue class_value = cd.class_value; + for (const auto& cd : class_impls) { + auto& g = cd.g; + Type *class_type = g.class_impl_type; + Type *class_ptr_type = alloc_type_pointer(class_type); + lbValue class_value = cd.class_value; - Type *ivar_type = class_type->Named.type_name->TypeName.objc_ivar; + Type *ivar_type = class_type->Named.type_name->TypeName.objc_ivar; - Entity *context_provider = class_type->Named.type_name->TypeName.objc_context_provider; - Type *contex_provider_self_ptr_type = nullptr; - Type *contex_provider_self_named_type = nullptr; - bool is_context_provider_ivar = false; - lbValue context_provider_proc_value{}; + Entity *context_provider = class_type->Named.type_name->TypeName.objc_context_provider; + Type *contex_provider_self_ptr_type = nullptr; + Type *contex_provider_self_named_type = nullptr; + bool is_context_provider_ivar = false; + lbValue context_provider_proc_value{}; - if (context_provider) { - context_provider_proc_value = lb_find_procedure_value_from_entity(m, context_provider); + if (context_provider) { + context_provider_proc_value = lb_find_procedure_value_from_entity(m, context_provider); - contex_provider_self_ptr_type = base_type(context_provider->type->Proc.params->Tuple.variables[0]->type); - GB_ASSERT(contex_provider_self_ptr_type->kind == Type_Pointer); - contex_provider_self_named_type = base_named_type(type_deref(contex_provider_self_ptr_type)); + contex_provider_self_ptr_type = base_type(context_provider->type->Proc.params->Tuple.variables[0]->type); + GB_ASSERT(contex_provider_self_ptr_type->kind == Type_Pointer); + contex_provider_self_named_type = base_named_type(type_deref(contex_provider_self_ptr_type)); - is_context_provider_ivar = ivar_type != nullptr && internal_check_is_assignable_to(contex_provider_self_named_type, ivar_type); - } + is_context_provider_ivar = ivar_type != nullptr && internal_check_is_assignable_to(contex_provider_self_named_type, ivar_type); + } - Array* methods = map_get(&m->info->objc_method_implementations, class_type); - if (!methods) { - continue; - } + Array* methods = map_get(&m->info->objc_method_implementations, class_type); + if (!methods) { + continue; + } - for (const ObjcMethodData& md : *methods) { - GB_ASSERT( md.proc_entity->kind == Entity_Procedure); - Type *method_type = md.proc_entity->type; + for (const ObjcMethodData& md : *methods) { + GB_ASSERT( md.proc_entity->kind == Entity_Procedure); + Type *method_type = md.proc_entity->type; - String proc_name = 
make_string_c("__$objc_method::"); - proc_name = concatenate_strings(temporary_allocator(), proc_name, g.name); - proc_name = concatenate_strings(temporary_allocator(), proc_name, str_lit("::")); - proc_name = concatenate_strings( permanent_allocator(), proc_name, md.ac.objc_name); + String proc_name = make_string_c("__$objc_method::"); + proc_name = concatenate_strings(temporary_allocator(), proc_name, g.name); + proc_name = concatenate_strings(temporary_allocator(), proc_name, str_lit("::")); + proc_name = concatenate_strings( permanent_allocator(), proc_name, md.ac.objc_name); - wrapper_args.count = 2; - wrapper_args[0] = md.ac.objc_is_class_method ? t_objc_Class : class_ptr_type; - wrapper_args[1] = t_objc_SEL; + wrapper_args.count = 2; + wrapper_args[0] = md.ac.objc_is_class_method ? t_objc_Class : class_ptr_type; + wrapper_args[1] = t_objc_SEL; - auto method_param_count = (isize)method_type->Proc.param_count; - i32 method_param_offset = 0; + auto method_param_count = (isize)method_type->Proc.param_count; + i32 method_param_offset = 0; - // TODO(harold): Need to make sure (at checker stage) that the non-class method has the self parameter already. - // (Maybe this is already accounted for?.) - if (!md.ac.objc_is_class_method) { - GB_ASSERT(method_param_count >= 1); - method_param_count -= 1; - method_param_offset = 1; - } + // TODO(harold): Need to make sure (at checker stage) that the non-class method has the self parameter already. + // (Maybe this is already accounted for?.) + if (!md.ac.objc_is_class_method) { + GB_ASSERT(method_param_count >= 1); + method_param_count -= 1; + method_param_offset = 1; + } - for (i32 i = 0; i < method_param_count; i++) { - array_add(&wrapper_args, method_type->Proc.params->Tuple.variables[method_param_offset+i]->type); - } + for (i32 i = 0; i < method_param_count; i++) { + array_add(&wrapper_args, method_type->Proc.params->Tuple.variables[method_param_offset+i]->type); + } - Type *wrapper_args_tuple = alloc_type_tuple_from_field_types(wrapper_args.data, wrapper_args.count, false, true); - Type *wrapper_proc_type = alloc_type_proc(nullptr, wrapper_args_tuple, (isize)wrapper_args_tuple->Tuple.variables.count, nullptr, 0, false, ProcCC_CDecl); + Type *wrapper_args_tuple = alloc_type_tuple_from_field_types(wrapper_args.data, wrapper_args.count, false, true); + Type *wrapper_results_tuple = nullptr; - lbProcedure *wrapper_proc = lb_create_dummy_procedure(m, proc_name, wrapper_proc_type); - lb_add_attribute_to_proc(wrapper_proc->module, wrapper_proc->value, "nounwind"); + if (method_type->Proc.result_count > 0) { + GB_ASSERT(method_type->Proc.result_count == 1); + wrapper_results_tuple = alloc_type_tuple_from_field_types(&method_type->Proc.results->Tuple.variables[0]->type, 1, false, true); + } + + Type *wrapper_proc_type = alloc_type_proc(nullptr, wrapper_args_tuple, wrapper_args_tuple->Tuple.variables.count, + wrapper_results_tuple, method_type->Proc.result_count, false, ProcCC_CDecl); - // Emit the wrapper - LLVMSetLinkage(wrapper_proc->value, LLVMExternalLinkage); - lb_begin_procedure_body(wrapper_proc); - { - if (method_type->Proc.calling_convention == ProcCC_Odin) { - GB_ASSERT(context_provider); + lbProcedure *wrapper_proc = lb_create_dummy_procedure(m, proc_name, wrapper_proc_type); + lb_add_attribute_to_proc(wrapper_proc->module, wrapper_proc->value, "nounwind"); - // Emit the get odin context call + // Emit the wrapper + LLVMSetLinkage(wrapper_proc->value, LLVMExternalLinkage); + lb_begin_procedure_body(wrapper_proc); + { + if 
(method_type->Proc.calling_convention == ProcCC_Odin) { + GB_ASSERT(context_provider); - get_context_args[0] = lbValue { - wrapper_proc->raw_input_parameters[0], + // Emit the get odin context call + + get_context_args[0] = lbValue { + wrapper_proc->raw_input_parameters[0], contex_provider_self_ptr_type, }; - if (is_context_provider_ivar) { - // The context provider takes the ivar's type. - // Emit an obj_ivar_get call and use that pointer for 'self' instead. - lbValue real_self { - wrapper_proc->raw_input_parameters[0], - class_ptr_type - }; - get_context_args[0] = lb_handle_objc_ivar_for_objc_object_pointer(wrapper_proc, real_self); - } - - lbValue context = lb_emit_call(wrapper_proc, context_provider_proc_value, get_context_args); - lbAddr context_addr = lb_addr(lb_address_from_load_or_generate_local(wrapper_proc, context)); - lb_push_context_onto_stack(wrapper_proc, context_addr); - } - - - auto method_call_args = array_make(temporary_allocator(), method_param_count + (isize)method_param_offset); - - if (!md.ac.objc_is_class_method) { - method_call_args[0] = lbValue { - wrapper_proc->raw_input_parameters[0], - class_ptr_type, - }; - } - - for (isize i = 0; i < method_param_count; i++) { - method_call_args[i+method_param_offset] = lbValue { - wrapper_proc->raw_input_parameters[i+2], - method_type->Proc.params->Tuple.variables[i+method_param_offset]->type, - }; - } - lbValue method_proc_value = lb_find_procedure_value_from_entity(m, md.proc_entity); - - // Call real procedure for method from here, passing the parameters expected, if any. - lb_emit_call(wrapper_proc, method_proc_value, method_call_args); - } - lb_end_procedure_body(wrapper_proc); - - - // Add the method to the class - String method_encoding = str_lit("v"); - // TODO (harold): Checker must ensure that objc_methods have a single return value or none! - GB_ASSERT(method_type->Proc.result_count <= 1); - if (method_type->Proc.result_count != 0) { - method_encoding = lb_get_objc_type_encoding(method_type->Proc.results->Tuple.variables[0]->type, temporary_allocator()); - } - - if (!md.ac.objc_is_class_method) { - method_encoding = concatenate_strings(temporary_allocator(), method_encoding, str_lit("@:")); - } else { - method_encoding = concatenate_strings(temporary_allocator(), method_encoding, str_lit("#:")); - } - - for (i32 i = method_param_offset; i < method_param_count; i++) { - Type *param_type = method_type->Proc.params->Tuple.variables[i]->type; - String param_encoding = lb_get_objc_type_encoding(param_type, temporary_allocator()); - - method_encoding = concatenate_strings(temporary_allocator(), method_encoding, param_encoding); - } - - // Emit method registration - lbAddr* sel_address = string_map_get(&m->objc_selectors, md.ac.objc_selector); - GB_ASSERT(sel_address); - lbValue selector_value = lb_addr_load(p, *sel_address); - - args.count = 4; - args[0] = class_value; // Class - args[1] = selector_value; // SEL - args[2] = lbValue { wrapper_proc->value, wrapper_proc->type }; - args[3] = lb_const_value(m, t_cstring, exact_value_string(method_encoding)); - - // TODO(harold): Emit check BOOL result and panic if false. - lb_emit_runtime_call(p, "class_addMethod", args); - - } // End methods - - // Add ivar if we have one - if (ivar_type != nullptr) { - // Register a single ivar for this class - Type *ivar_base = ivar_type->Named.base; - - const i64 size = type_size_of(ivar_base); - const i64 alignment = type_align_of(ivar_base); - // TODO(harold): Checker: Alignment must be compatible with ivar rules. 
Or we should increase the alignment if needed. - - // TODO(harold): Should we pass the actual type encoding? Might not be ideal for obfuscation. - String ivar_name = str_lit("__$ivar"); - String ivar_types = str_lit("{= }"); //lb_get_objc_type_encoding(ivar_type, temporary_allocator());// str_lit("{= }"); - args.count = 5; - args[0] = class_value; - args[1] = lb_const_value(m, t_cstring, exact_value_string(ivar_name)); - args[2] = lb_const_value(m, t_uint, exact_value_u64((u64)size)); - args[3] = lb_const_value(m, t_u8, exact_value_u64((u64)alignment)); - args[4] = lb_const_value(m, t_cstring, exact_value_string(ivar_types)); - lb_emit_runtime_call(p, "class_addIvar", args); - } - - // Complete the class registration - args.count = 1; - args[0] = class_value; - lb_emit_runtime_call(p, "objc_registerClassPair", args); - } + if (is_context_provider_ivar) { + // The context provider takes the ivar's type. + // Emit an obj_ivar_get call and use that pointer for 'self' instead. + lbValue real_self { + wrapper_proc->raw_input_parameters[0], + class_ptr_type + }; + get_context_args[0] = lb_handle_objc_ivar_for_objc_object_pointer(wrapper_proc, real_self); + } + + lbValue context = lb_emit_call(wrapper_proc, context_provider_proc_value, get_context_args); + lbAddr context_addr = lb_addr(lb_address_from_load_or_generate_local(wrapper_proc, context)); + lb_push_context_onto_stack(wrapper_proc, context_addr); + } + + + auto method_call_args = array_make(temporary_allocator(), method_param_count + (isize)method_param_offset); + + if (!md.ac.objc_is_class_method) { + method_call_args[0] = lbValue { + wrapper_proc->raw_input_parameters[0], + class_ptr_type, + }; + } + + for (isize i = 0; i < method_param_count; i++) { + method_call_args[i+method_param_offset] = lbValue { + wrapper_proc->raw_input_parameters[i+2], + method_type->Proc.params->Tuple.variables[i+method_param_offset]->type, + }; + } + lbValue method_proc_value = lb_find_procedure_value_from_entity(m, md.proc_entity); + + // Call real procedure for method from here, passing the parameters expected, if any. + lbValue return_value = lb_emit_call(wrapper_proc, method_proc_value, method_call_args); + + if (wrapper_results_tuple != nullptr) { + auto &result_var = method_type->Proc.results->Tuple.variables[0]; + return_value = lb_emit_conv(wrapper_proc, return_value, result_var->type); + lb_build_return_stmt_internal(wrapper_proc, return_value, result_var->token.pos); + } + } + lb_end_procedure_body(wrapper_proc); + + + // Add the method to the class + String method_encoding = str_lit("v"); + // TODO (harold): Checker must ensure that objc_methods have a single return value or none! 
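	// NOTE (editorial, hedged illustration): the string assembled below is meant to follow the
	// Objective-C type-encoding layout — <return encoding>, then "@:" (self + _cmd) for instance
	// methods or "#:" (Class + _cmd) for class methods, then one encoding per parameter; in that
	// scheme an instance method taking an i32 and returning an f32 would encode as "f@:i".
	// The parameter loop bounds below (i = method_param_offset; i < method_param_count) appear to
	// skip the last parameter for instance methods, which may be worth double-checking.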
+ GB_ASSERT(method_type->Proc.result_count <= 1); + if (method_type->Proc.result_count != 0) { + method_encoding = lb_get_objc_type_encoding(method_type->Proc.results->Tuple.variables[0]->type, temporary_allocator()); + } + + if (!md.ac.objc_is_class_method) { + method_encoding = concatenate_strings(temporary_allocator(), method_encoding, str_lit("@:")); + } else { + method_encoding = concatenate_strings(temporary_allocator(), method_encoding, str_lit("#:")); + } + + for (i32 i = method_param_offset; i < method_param_count; i++) { + Type *param_type = method_type->Proc.params->Tuple.variables[i]->type; + String param_encoding = lb_get_objc_type_encoding(param_type, temporary_allocator()); + + method_encoding = concatenate_strings(temporary_allocator(), method_encoding, param_encoding); + } + + // Emit method registration + lbAddr* sel_address = string_map_get(&m->objc_selectors, md.ac.objc_selector); + GB_ASSERT(sel_address); + lbValue selector_value = lb_addr_load(p, *sel_address); + + args.count = 4; + args[0] = class_value; // Class + args[1] = selector_value; // SEL + args[2] = lbValue { wrapper_proc->value, wrapper_proc->type }; + args[3] = lb_const_value(m, t_cstring, exact_value_string(method_encoding)); + + // TODO(harold): Emit check BOOL result and panic if false. + lb_emit_runtime_call(p, "class_addMethod", args); + + } // End methods + + // Add ivar if we have one + if (ivar_type != nullptr) { + // Register a single ivar for this class + Type *ivar_base = ivar_type->Named.base; + + const i64 size = type_size_of(ivar_base); + const i64 alignment = type_align_of(ivar_base); + // TODO(harold): Checker: Alignment must be compatible with ivar rules. Or we should increase the alignment if needed. + + // TODO(harold): Should we pass the actual type encoding? Might not be ideal for obfuscation. + String ivar_name = str_lit("__$ivar"); + String ivar_types = str_lit("{= }"); //lb_get_objc_type_encoding(ivar_type, temporary_allocator());// str_lit("{= }"); + args.count = 5; + args[0] = class_value; + args[1] = lb_const_value(m, t_cstring, exact_value_string(ivar_name)); + args[2] = lb_const_value(m, t_uint, exact_value_u64((u64)size)); + args[3] = lb_const_value(m, t_u8, exact_value_u64((u64)alignment)); + args[4] = lb_const_value(m, t_cstring, exact_value_string(ivar_types)); + lb_emit_runtime_call(p, "class_addIvar", args); + } + + // Complete the class registration + args.count = 1; + args[0] = class_value; + lb_emit_runtime_call(p, "objc_registerClassPair", args); + } // Register ivar offsets for any `objc_ivar_get` expressions emitted. Type *ptr_u32 = alloc_type_pointer(t_u32); diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 7694c65c3..99ee2b2ff 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -196,7 +196,7 @@ struct lbModule { StringMap objc_classes; StringMap objc_selectors; - StringMap objc_ivars; + StringMap objc_ivars; PtrMap map_cell_info_map; // address of runtime.Map_Info PtrMap map_info_map; // address of runtime.Map_Cell_Info @@ -220,7 +220,7 @@ struct lbObjCGlobal { gbString global_name; String name; Type * type; - Type * class_impl_type; // This is set when the class has the objc_implement attribute set to true. + Type * class_impl_type; // This is set when the class has the objc_implement attribute set to true. 
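	// NOTE (editorial): instances of this struct are queued on the generator's MPSC queues
	// (objc_classes / objc_selectors / objc_ivars) during codegen and drained by
	// lb_finalize_objc_names, which creates the backing global variable and emits the
	// objc_lookUpClass / sel_registerName / class and ivar registration calls.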
}; struct lbGenerator : LinkerData { @@ -242,7 +242,7 @@ struct lbGenerator : LinkerData { MPSCQueue entities_to_correct_linkage; MPSCQueue objc_selectors; MPSCQueue objc_classes; - MPSCQueue objc_ivars; + MPSCQueue objc_ivars; }; diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 7f012e006..bb683465b 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -101,7 +101,7 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) { string_map_init(&m->objc_classes); string_map_init(&m->objc_selectors); - string_map_init(&m->objc_ivars); + string_map_init(&m->objc_ivars); map_init(&m->map_info_map, 0); map_init(&m->map_cell_info_map, 0); @@ -174,7 +174,7 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { mpsc_init(&gen->entities_to_correct_linkage, heap_allocator()); mpsc_init(&gen->objc_selectors, heap_allocator()); mpsc_init(&gen->objc_classes, heap_allocator()); - mpsc_init(&gen->objc_ivars, heap_allocator()); + mpsc_init(&gen->objc_ivars, heap_allocator()); return true; } diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index bf4ebf377..ba375283e 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -3290,7 +3290,7 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu case BuiltinProc_objc_find_class: return lb_handle_objc_find_class(p, expr); case BuiltinProc_objc_register_selector: return lb_handle_objc_register_selector(p, expr); case BuiltinProc_objc_register_class: return lb_handle_objc_register_class(p, expr); - case BuiltinProc_objc_ivar_get: return lb_handle_objc_ivar_get(p, expr); + case BuiltinProc_objc_ivar_get: return lb_handle_objc_ivar_get(p, expr); case BuiltinProc_constant_utf16_cstring: diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp index 33211395a..264364162 100644 --- a/src/llvm_backend_utility.cpp +++ b/src/llvm_backend_utility.cpp @@ -2157,8 +2157,8 @@ gb_internal lbAddr lb_handle_objc_find_or_register_class(lbProcedure *p, String gb_internal lbAddr lb_handle_objc_find_or_register_ivar(lbModule *m, Type *self_type) { - String name = self_type->Named.type_name->TypeName.objc_class_name; - GB_ASSERT(name != ""); + String name = self_type->Named.type_name->TypeName.objc_class_name; + GB_ASSERT(name != ""); lbAddr *found = string_map_get(&m->objc_ivars, name); if (found) { @@ -2170,7 +2170,7 @@ gb_internal lbAddr lb_handle_objc_find_or_register_ivar(lbModule *m, Type *self_ gbString global_name = gb_string_make(permanent_allocator(), "__$objc_ivar::"); global_name = gb_string_append_length(global_name, name.text, name.len); - // Create a global variable to store offset of the ivar in an instance of an object + // Create a global variable to store offset of the ivar in an instance of an object LLVMTypeRef t = lb_type(m, t_u32); lbValue g = {}; @@ -2209,10 +2209,10 @@ gb_internal lbValue lb_handle_objc_ivar_for_objc_object_pointer(lbProcedure *p, } gb_internal lbValue lb_handle_objc_ivar_get(lbProcedure *p, Ast *expr) { - ast_node(ce, CallExpr, expr); + ast_node(ce, CallExpr, expr); - GB_ASSERT(ce->args[0]->tav.type->kind == Type_Pointer); - lbValue self = lb_build_expr(p, ce->args[0]); + GB_ASSERT(ce->args[0]->tav.type->kind == Type_Pointer); + lbValue self = lb_build_expr(p, ce->args[0]); return lb_handle_objc_ivar_for_objc_object_pointer(p, self); } @@ -2282,7 +2282,7 @@ gb_internal lbValue lb_handle_objc_id(lbProcedure *p, Ast *expr) { GB_ASSERT(e->kind == Entity_TypeName); String name = 
e->TypeName.objc_class_name; - Type *class_impl_type = e->TypeName.objc_is_implementation ? type : nullptr; + Type *class_impl_type = e->TypeName.objc_is_implementation ? type : nullptr; return lb_addr_load(p, lb_handle_objc_find_or_register_class(p, name, class_impl_type)); } -- cgit v1.2.3 From ee8aeea38163c18a9b3513717bd09d3765c0d6d8 Mon Sep 17 00:00:00 2001 From: bogwi Date: Mon, 5 May 2025 14:18:11 +0900 Subject: CHECK 1 done Fix panic in LLVM backend when using generic procedure with default arguments - Fixed panic in `llvm_backend_proc.cpp` when using unspecialized polymorphic procedures as defaults. - Ensured correct type inference when generic procedures are used as default parameters. --- src/llvm_backend_const.cpp | 5 ++++- src/llvm_backend_general.cpp | 12 ++++++++++-- src/llvm_backend_proc.cpp | 12 ++++++++++++ 3 files changed, 26 insertions(+), 3 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp index dada2cff5..51c8a4449 100644 --- a/src/llvm_backend_const.cpp +++ b/src/llvm_backend_const.cpp @@ -533,7 +533,10 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, lb Entity *e = entity_from_expr(expr); res = lb_find_procedure_value_from_entity(m, e); } - GB_ASSERT(res.value != nullptr); + if (res.value == nullptr) { + // This is an unspecialized polymorphic procedure, return nil or dummy value + return lb_const_nil(m, original_type); + } GB_ASSERT(LLVMGetValueKind(res.value) == LLVMFunctionValueKind); if (LLVMGetIntrinsicID(res.value) == 0) { diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 421720c4c..41a6fb34a 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -885,8 +885,8 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) { Type *t = base_type(type_deref(addr.addr.type)); GB_ASSERT(t->kind == Type_Struct && t->Struct.soa_kind != StructSoa_None); lbValue len = lb_soa_struct_len(p, addr.addr); - if (addr.soa.index_expr != nullptr) { - lb_emit_bounds_check(p, ast_token(addr.soa.index_expr), index, len); + if (addr.soa.index_expr != nullptr && (!lb_is_const(addr.soa.index) || t->Struct.soa_kind != StructSoa_Fixed)) { + lb_emit_bounds_check(p, ast_token(addr.soa.index_expr), addr.soa.index, len); } } @@ -2728,6 +2728,14 @@ gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e) ignore_body = other_module != m; lbProcedure *missing_proc = lb_create_procedure(m, e, ignore_body); + if (missing_proc == nullptr) { + // This is an unspecialized polymorphic procedure, which should not be codegen'd + lbValue dummy = {}; + dummy.value = nullptr; + dummy.type = nullptr; + return dummy; + } + if (ignore_body) { mutex_lock(&gen->anonymous_proc_lits_mutex); defer (mutex_unlock(&gen->anonymous_proc_lits_mutex)); diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index be51f529d..519ab3e9d 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -67,6 +67,14 @@ gb_internal void lb_mem_copy_non_overlapping(lbProcedure *p, lbValue dst, lbValu gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body) { GB_ASSERT(entity != nullptr); GB_ASSERT(entity->kind == Entity_Procedure); + // Skip codegen for unspecialized polymorphic procedures + if (is_type_polymorphic(entity->type) && !entity->Procedure.is_foreign) { + Type *bt = base_type(entity->type); + if (bt->kind == Type_Proc && bt->Proc.is_polymorphic && 
!bt->Proc.is_poly_specialized) { + // Do not generate code for unspecialized polymorphic procedures + return nullptr; + } + } if (!entity->Procedure.is_foreign) { if ((entity->flags & EntityFlag_ProcBodyChecked) == 0) { GB_PANIC("%.*s :: %s (was parapoly: %d %d)", LIT(entity->token.string), type_to_string(entity->type), is_type_polymorphic(entity->type, true), is_type_polymorphic(entity->type, false)); @@ -815,6 +823,10 @@ gb_internal void lb_build_nested_proc(lbProcedure *p, AstProcLit *pd, Entity *e) e->Procedure.link_name = name; lbProcedure *nested_proc = lb_create_procedure(p->module, e); + if (nested_proc == nullptr) { + // This is an unspecialized polymorphic procedure, skip codegen + return; + } e->code_gen_procedure = nested_proc; lbValue value = {}; -- cgit v1.2.3 From af0e067a12079cc16020e264c6157bb5581c9cf4 Mon Sep 17 00:00:00 2001 From: bogwi Date: Mon, 5 May 2025 15:14:06 +0900 Subject: CHECK 2 done Add support for handling generic types in LLVM backend - Updated `lb_type_internal` to return a pointer type for unspecialized generics. - Modified `write_type_to_canonical_string` to handle specialized generics without panicking. - Enhanced `default_type` to return the default type of specialized generics when applicable. --- src/llvm_backend_general.cpp | 8 ++++++++ src/name_canonicalization.cpp | 6 +++++- src/types.cpp | 4 ++++ 3 files changed, 17 insertions(+), 1 deletion(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 41a6fb34a..4b9b8d45f 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2212,6 +2212,14 @@ gb_internal LLVMTypeRef lb_type_internal(lbModule *m, Type *type) { case Type_BitField: return lb_type_internal(m, type->BitField.backing_type); + + case Type_Generic: + if (type->Generic.specialized) { + return lb_type_internal(m, type->Generic.specialized); + } else { + // For unspecialized generics, use a pointer type as a placeholder + return LLVMPointerType(LLVMInt8TypeInContext(m->ctx), 0); + } } GB_PANIC("Invalid type %s", type_to_string(type)); diff --git a/src/name_canonicalization.cpp b/src/name_canonicalization.cpp index 6aa933e86..0372f5039 100644 --- a/src/name_canonicalization.cpp +++ b/src/name_canonicalization.cpp @@ -756,8 +756,12 @@ gb_internal void write_type_to_canonical_string(TypeWriter *w, Type *type) { type_writer_appendc(w, "/"); write_type_to_canonical_string(w, type->Generic.specialized); } + } else if (type->Generic.specialized) { + // If we have a specialized type, use that instead of panicking + write_type_to_canonical_string(w, type->Generic.specialized); } else { - GB_PANIC("Type_Generic should never be hit"); + // For unspecialized generics, use a generic placeholder string + type_writer_appendc(w, "rawptr"); } return; diff --git a/src/types.cpp b/src/types.cpp index 9c9472a28..cd33f1a0f 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -2932,6 +2932,10 @@ gb_internal Type *default_type(Type *type) { case Basic_UntypedString: return t_string; case Basic_UntypedRune: return t_rune; } + } else if (type->kind == Type_Generic) { + if (type->Generic.specialized) { + return default_type(type->Generic.specialized); + } } return type; } -- cgit v1.2.3 From 83bc2d3c4a186d6a8c362eed901acd6bc6363a8d Mon Sep 17 00:00:00 2001 From: Lucas Perlind Date: Wed, 30 Apr 2025 19:21:00 +1000 Subject: Add asan support for various allocators --- base/runtime/default_temp_allocator_arena.odin | 8 +++ base/runtime/heap_allocator_windows.odin | 12 +++- 
base/runtime/internal.odin | 9 +++ base/sanitizer/address.odin | 84 +++++++++++++++++++++- base/sanitizer/doc.odin | 4 +- core/mem/rollback_stack_allocator.odin | 50 +++++++++----- core/mem/tlsf/tlsf.odin | 2 +- core/mem/tlsf/tlsf_internal.odin | 96 ++++++++++++++------------ core/mem/tracking_allocator.odin | 11 ++- core/mem/virtual/arena.odin | 43 +++++++++--- core/mem/virtual/virtual.odin | 25 ++++--- core/mem/virtual/virtual_platform.odin | 3 + core/mem/virtual/virtual_windows.odin | 12 +++- src/llvm_backend.hpp | 2 + src/llvm_backend_general.cpp | 7 ++ src/llvm_backend_proc.cpp | 24 ++++--- src/llvm_backend_stmt.cpp | 16 +++++ 17 files changed, 308 insertions(+), 100 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/base/runtime/default_temp_allocator_arena.odin b/base/runtime/default_temp_allocator_arena.odin index 5f25dac95..74994344a 100644 --- a/base/runtime/default_temp_allocator_arena.odin +++ b/base/runtime/default_temp_allocator_arena.odin @@ -1,6 +1,7 @@ package runtime import "base:intrinsics" +import "base:sanitizer" DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE :: uint(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE) @@ -43,6 +44,8 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint block.base = ([^]byte)(uintptr(block) + base_offset) block.capacity = uint(end - uintptr(block.base)) + sanitizer.address_poison(block.base, block.capacity) + // Should be zeroed assert(block.used == 0) assert(block.prev == nil) @@ -52,6 +55,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint memory_block_dealloc :: proc(block_to_free: ^Memory_Block, loc := #caller_location) { if block_to_free != nil { allocator := block_to_free.allocator + sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity) mem_free(block_to_free, allocator, loc) } } @@ -83,6 +87,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint) return } data = block.base[block.used+alignment_offset:][:min_size] + sanitizer.address_unpoison(block.base[block.used:block.used+size]) block.used += size return } @@ -162,6 +167,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { if arena.curr_block != nil { intrinsics.mem_zero(arena.curr_block.base, arena.curr_block.used) arena.curr_block.used = 0 + sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity) } arena.total_used = 0 } @@ -226,6 +232,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, // grow data in-place, adjusting next allocation block.used = uint(new_end) data = block.base[start:new_end] + sanitizer.address_unpoison(data) return } } @@ -299,6 +306,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) { assert(block.used >= temp.used, "out of order use of arena_temp_end", loc) amount_to_zero := block.used-temp.used intrinsics.mem_zero(block.base[temp.used:], amount_to_zero) + sanitizer.address_poison(block.base[temp.used:block.capacity]) block.used = temp.used arena.total_used -= amount_to_zero } diff --git a/base/runtime/heap_allocator_windows.odin b/base/runtime/heap_allocator_windows.odin index e07df7559..04a6f149b 100644 --- a/base/runtime/heap_allocator_windows.odin +++ b/base/runtime/heap_allocator_windows.odin @@ -1,5 +1,7 @@ package runtime +import "../sanitizer" + foreign import kernel32 "system:Kernel32.lib" @(private="file") @@ -16,7 +18,10 @@ foreign kernel32 { _heap_alloc :: proc "contextless" (size: int, zero_memory := true) -> rawptr { HEAP_ZERO_MEMORY :: 0x00000008 - 
return HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size)) + ptr := HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size)) + // NOTE(lucas): asan not guarunteed to unpoison win32 heap out of the box, do it ourselves + sanitizer.address_unpoison(ptr, size) + return ptr } _heap_resize :: proc "contextless" (ptr: rawptr, new_size: int) -> rawptr { if new_size == 0 { @@ -28,7 +33,10 @@ _heap_resize :: proc "contextless" (ptr: rawptr, new_size: int) -> rawptr { } HEAP_ZERO_MEMORY :: 0x00000008 - return HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size)) + new_ptr := HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size)) + // NOTE(lucas): asan not guarunteed to unpoison win32 heap out of the box, do it ourselves + sanitizer.address_unpoison(new_ptr, new_size) + return new_ptr } _heap_free :: proc "contextless" (ptr: rawptr) { if ptr == nil { diff --git a/base/runtime/internal.odin b/base/runtime/internal.odin index 59811a525..7c8a8294b 100644 --- a/base/runtime/internal.odin +++ b/base/runtime/internal.odin @@ -1106,3 +1106,12 @@ __read_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uin dst[j>>3] |= the_bit<<(j&7) } } + +@(no_sanitize_address) +__asan_unpoison_memory_region :: #force_inline proc "contextless" (address: rawptr, size: uint) { + foreign { + __asan_unpoison_memory_region :: proc(address: rawptr, size: uint) --- + } + __asan_unpoison_memory_region(address, size) +} + diff --git a/base/sanitizer/address.odin b/base/sanitizer/address.odin index 3924e02bf..edfdfc172 100644 --- a/base/sanitizer/address.odin +++ b/base/sanitizer/address.odin @@ -60,6 +60,7 @@ poison or unpoison memory in the same memory region region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_poison_slice :: proc "contextless" (region: $T/[]$E) { when ASAN_ENABLED { __asan_poison_memory_region(raw_data(region), size_of(E) * len(region)) @@ -75,6 +76,7 @@ can poison or unpoison memory in the same memory region region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_unpoison_slice :: proc "contextless" (region: $T/[]$E) { when ASAN_ENABLED { __asan_unpoison_memory_region(raw_data(region), size_of(E) * len(region)) @@ -90,6 +92,7 @@ two threads can poison or unpoison memory in the same memory region region simul When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_poison_ptr :: proc "contextless" (ptr: ^$T) { when ASAN_ENABLED { __asan_poison_memory_region(ptr, size_of(T)) @@ -106,6 +109,7 @@ region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_unpoison_ptr :: proc "contextless" (ptr: ^$T) { when ASAN_ENABLED { __asan_unpoison_memory_region(ptr, size_of(T)) @@ -121,6 +125,7 @@ poison or unpoison memory in the same memory region region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_poison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { when ASAN_ENABLED { assert_contextless(len >= 0) @@ -128,6 +133,22 @@ address_poison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { } } +/* +Marks the region covering `[ptr, ptr+len)` as unaddressable + +Code instrumented with `-sanitize:address` is forbidden from accessing any address +within the region. 
This procedure is not thread-safe because no two threads can +poison or unpoison memory in the same memory region region simultaneously. + +When asan is not enabled this procedure does nothing. +*/ +@(no_sanitize_address) +address_poison_rawptr_uint :: proc "contextless" (ptr: rawptr, len: uint) { + when ASAN_ENABLED { + __asan_poison_memory_region(ptr, len) + } +} + /* Marks the region covering `[ptr, ptr+len)` as addressable @@ -137,6 +158,7 @@ threads can poison or unpoison memory in the same memory region region simultane When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_unpoison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { when ASAN_ENABLED { assert_contextless(len >= 0) @@ -144,16 +166,34 @@ address_unpoison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { } } +/* +Marks the region covering `[ptr, ptr+len)` as addressable + +Code instrumented with `-sanitize:address` is allowed to access any address +within the region again. This procedure is not thread-safe because no two +threads can poison or unpoison memory in the same memory region region simultaneously. + +When asan is not enabled this procedure does nothing. +*/ +@(no_sanitize_address) +address_unpoison_rawptr_uint :: proc "contextless" (ptr: rawptr, len: uint) { + when ASAN_ENABLED { + __asan_unpoison_memory_region(ptr, len) + } +} + address_poison :: proc { address_poison_slice, address_poison_ptr, address_poison_rawptr, + address_poison_rawptr_uint, } address_unpoison :: proc { address_unpoison_slice, address_unpoison_ptr, address_unpoison_rawptr, + address_unpoison_rawptr_uint, } /* @@ -164,6 +204,7 @@ This can be used for logging and/or debugging purposes. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_set_death_callback :: proc "contextless" (callback: Address_Death_Callback) { when ASAN_ENABLED { __sanitizer_set_death_callback(callback) @@ -178,7 +219,8 @@ in an asan error. When asan is not enabled this procedure returns `nil`. */ -address_region_is_poisoned_slice :: proc "contextless" (region: []$T/$E) -> rawptr { +@(no_sanitize_address) +address_region_is_poisoned_slice :: proc "contextless" (region: $T/[]$E) -> rawptr { when ASAN_ENABLED { return __asan_region_is_poisoned(raw_data(region), size_of(E) * len(region)) } else { @@ -194,6 +236,7 @@ in an asan error. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_region_is_poisoned_ptr :: proc "contextless" (ptr: ^$T) -> rawptr { when ASAN_ENABLED { return __asan_region_is_poisoned(ptr, size_of(T)) @@ -210,6 +253,7 @@ in an asan error. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_region_is_poisoned_rawptr :: proc "contextless" (region: rawptr, len: int) -> rawptr { when ASAN_ENABLED { assert_contextless(len >= 0) @@ -219,10 +263,29 @@ address_region_is_poisoned_rawptr :: proc "contextless" (region: rawptr, len: in } } +/* +Checks if the memory region covered by `[ptr, ptr+len)` is poisoned. + +If it is poisoned this procedure returns the address which would result +in an asan error. + +When asan is not enabled this procedure returns `nil`. 
+*/ +@(no_sanitize_address) +address_region_is_poisoned_rawptr_uint :: proc "contextless" (region: rawptr, len: uint) -> rawptr { + when ASAN_ENABLED { + return __asan_region_is_poisoned(region, len) + } else { + return nil + } +} + + address_region_is_poisoned :: proc { address_region_is_poisoned_slice, address_region_is_poisoned_ptr, address_region_is_poisoned_rawptr, + address_region_is_poisoned_rawptr_uint, } /* @@ -233,6 +296,7 @@ If it is poisoned this procedure returns `true`, otherwise it returns When asan is not enabled this procedure returns `false`. */ +@(no_sanitize_address) address_is_poisoned :: proc "contextless" (address: rawptr) -> bool { when ASAN_ENABLED { return __asan_address_is_poisoned(address) != 0 @@ -248,6 +312,7 @@ This procedure prints the description out to `stdout`. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_describe_address :: proc "contextless" (address: rawptr) { when ASAN_ENABLED { __asan_describe_address(address) @@ -260,6 +325,7 @@ Returns `true` if an asan error has occured, otherwise it returns When asan is not enabled this procedure returns `false`. */ +@(no_sanitize_address) address_report_present :: proc "contextless" () -> bool { when ASAN_ENABLED { return __asan_report_present() != 0 @@ -275,6 +341,7 @@ If no asan error has occurd `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_pc :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_pc() @@ -290,6 +357,7 @@ If no asan error has occurd `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_bp :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_bp() @@ -305,6 +373,7 @@ If no asan error has occurd `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_sp :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_sp() @@ -320,6 +389,7 @@ If no asan error has occurd `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_address :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_address() @@ -335,6 +405,7 @@ If no asan error has occurd `.none` is returned. When asan is not enabled this procedure returns `.none`. */ +@(no_sanitize_address) address_get_report_access_type :: proc "contextless" () -> Address_Access_Type { when ASAN_ENABLED { if ! address_report_present() { @@ -353,6 +424,7 @@ If no asan error has occurd `0` is returned. When asan is not enabled this procedure returns `0`. */ +@(no_sanitize_address) address_get_report_access_size :: proc "contextless" () -> uint { when ASAN_ENABLED { return __asan_get_report_access_size() @@ -368,6 +440,7 @@ If no asan error has occurd an empty string is returned. When asan is not enabled this procedure returns an empty string. */ +@(no_sanitize_address) address_get_report_description :: proc "contextless" () -> string { when ASAN_ENABLED { return string(__asan_get_report_description()) @@ -386,6 +459,7 @@ The information provided include: When asan is not enabled this procedure returns zero initialised values. 
*/ +@(no_sanitize_address) address_locate_address :: proc "contextless" (addr: rawptr, data: []byte) -> Address_Located_Address { when ASAN_ENABLED { out_addr: rawptr @@ -404,6 +478,7 @@ The stack trace is filled into the `data` slice. When asan is not enabled this procedure returns a zero initialised value. */ +@(no_sanitize_address) address_get_alloc_stack_trace :: proc "contextless" (addr: rawptr, data: []rawptr) -> ([]rawptr, int) { when ASAN_ENABLED { out_thread: i32 @@ -421,6 +496,7 @@ The stack trace is filled into the `data` slice. When asan is not enabled this procedure returns zero initialised values. */ +@(no_sanitize_address) address_get_free_stack_trace :: proc "contextless" (addr: rawptr, data: []rawptr) -> ([]rawptr, int) { when ASAN_ENABLED { out_thread: i32 @@ -436,6 +512,7 @@ Returns the current asan shadow memory mapping. When asan is not enabled this procedure returns a zero initialised value. */ +@(no_sanitize_address) address_get_shadow_mapping :: proc "contextless" () -> Address_Shadow_Mapping { when ASAN_ENABLED { result: Address_Shadow_Mapping @@ -451,6 +528,7 @@ Prints asan statistics to `stderr` When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_print_accumulated_stats :: proc "contextless" () { when ASAN_ENABLED { __asan_print_accumulated_stats() @@ -464,6 +542,7 @@ This pointer can be then used for `address_is_in_fake_stack`. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_current_fake_stack :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_current_fake_stack() @@ -477,6 +556,7 @@ Returns if an address belongs to a given fake stack and if so the region of the When asan is not enabled this procedure returns zero initialised values. */ +@(no_sanitize_address) address_is_in_fake_stack :: proc "contextless" (fake_stack: rawptr, addr: rawptr) -> ([]byte, bool) { when ASAN_ENABLED { begin: rawptr @@ -496,6 +576,7 @@ i.e. a procedure such as `panic` and `os.exit`. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_handle_no_return :: proc "contextless" () { when ASAN_ENABLED { __asan_handle_no_return() @@ -509,6 +590,7 @@ Returns `true` if successful, otherwise it returns `false`. When asan is not enabled this procedure returns `false`. */ +@(no_sanitize_address) address_update_allocation_context :: proc "contextless" (addr: rawptr) -> bool { when ASAN_ENABLED { return __asan_update_allocation_context(addr) != 0 diff --git a/base/sanitizer/doc.odin b/base/sanitizer/doc.odin index e389842b1..707f41ce0 100644 --- a/base/sanitizer/doc.odin +++ b/base/sanitizer/doc.odin @@ -14,12 +14,14 @@ related bugs. Typically asan interacts with libc but Odin code can be marked up with the asan runtime to extend the memory error detection outside of libc using this package. For more information about asan see: https://clang.llvm.org/docs/AddressSanitizer.html +Procedures can be made exempt from asan when marked up with @(no_sanitize_address) + ## Memory Enabled with `-sanitize:memory` when building an odin project. The memory sanitizer is another runtime memory error detector with the sole purpose to catch the -use of uninitialized memory. This is not a very common bug in Odin as be default everything is +use of uninitialized memory. This is not a very common bug in Odin as by default everything is set to zero when initialised (ZII). 
For more information about the memory sanitizer see: https://clang.llvm.org/docs/MemorySanitizer.html diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 43ef10fe9..a00131b7f 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -1,6 +1,7 @@ package mem import "base:runtime" +import "base:sanitizer" /* Rollback stack default block size. @@ -47,14 +48,14 @@ Rollback_Stack :: struct { block_allocator: Allocator, } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { start := raw_data(block.buffer) end := start[block.offset:] return start < ptr && ptr <= end } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( parent: ^Rollback_Stack_Block, block: ^Rollback_Stack_Block, @@ -71,7 +72,7 @@ rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( return nil, nil, nil, .Invalid_Pointer } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header, @@ -86,9 +87,10 @@ rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( return nil, nil, false } -@(private="file") +@(private="file", no_sanitize_address) rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) { header := header + for block.offset > 0 && header.is_free { block.offset = header.prev_offset block.last_alloc = raw_data(block.buffer)[header.prev_ptr:] @@ -99,9 +101,10 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_ /* Free memory to a rollback stack allocator. */ -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { parent, block, header := rb_find_ptr(stack, ptr) or_return + if header.is_free { return .Invalid_Pointer } @@ -120,7 +123,7 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { /* Free all memory owned by the rollback stack allocator. */ -@(private="file") +@(private="file", no_sanitize_address) rb_free_all :: proc(stack: ^Rollback_Stack) { for block := stack.head.next_block; block != nil; /**/ { next_block := block.next_block @@ -131,12 +134,13 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { stack.head.next_block = nil stack.head.last_alloc = nil stack.head.offset = 0 + sanitizer.address_poison(stack.head.buffer) } /* Allocate memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc :: proc( stack: ^Rollback_Stack, size: int, @@ -153,7 +157,7 @@ rb_alloc :: proc( /* Allocate memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc_bytes :: proc( stack: ^Rollback_Stack, size: int, @@ -170,7 +174,7 @@ rb_alloc_bytes :: proc( /* Allocate non-initialized memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc_non_zeroed :: proc( stack: ^Rollback_Stack, size: int, @@ -184,7 +188,7 @@ rb_alloc_non_zeroed :: proc( /* Allocate non-initialized memory using the rollback stack allocator. 
*/ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc_bytes_non_zeroed :: proc( stack: ^Rollback_Stack, size: int, @@ -194,6 +198,7 @@ rb_alloc_bytes_non_zeroed :: proc( assert(size >= 0, "Size must be positive or zero.", loc) assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc) parent: ^Rollback_Stack_Block + for block := stack.head; /**/; block = block.next_block { when !ODIN_DISABLE_ASSERT { allocated_new_block: bool @@ -235,7 +240,9 @@ rb_alloc_bytes_non_zeroed :: proc( // Prevent any further allocations on it. block.offset = cast(uintptr)len(block.buffer) } - #no_bounds_check return ptr[:size], nil + res := ptr[:size] + sanitizer.address_unpoison(res) + return res, nil } return nil, .Out_Of_Memory } @@ -243,7 +250,7 @@ rb_alloc_bytes_non_zeroed :: proc( /* Resize an allocation owned by rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize :: proc( stack: ^Rollback_Stack, old_ptr: rawptr, @@ -266,7 +273,7 @@ rb_resize :: proc( /* Resize an allocation owned by rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize_bytes :: proc( stack: ^Rollback_Stack, old_memory: []byte, @@ -289,7 +296,7 @@ rb_resize_bytes :: proc( Resize an allocation owned by rollback stack allocator without explicit zero-initialization. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize_non_zeroed :: proc( stack: ^Rollback_Stack, old_ptr: rawptr, @@ -306,7 +313,7 @@ rb_resize_non_zeroed :: proc( Resize an allocation owned by rollback stack allocator without explicit zero-initialization. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize_bytes_non_zeroed :: proc( stack: ^Rollback_Stack, old_memory: []byte, @@ -330,7 +337,9 @@ rb_resize_bytes_non_zeroed :: proc( if len(block.buffer) <= stack.block_size { block.offset += cast(uintptr)size - cast(uintptr)old_size } - #no_bounds_check return (ptr)[:size], nil + res := (ptr)[:size] + sanitizer.address_unpoison(res) + #no_bounds_check return res, nil } } } @@ -340,7 +349,7 @@ rb_resize_bytes_non_zeroed :: proc( return } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) { buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return block = cast(^Rollback_Stack_Block)raw_data(buffer) @@ -351,6 +360,7 @@ rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stac /* Initialize the rollback stack allocator using a fixed backing buffer. */ +@(no_sanitize_address) rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) { MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr) assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location) @@ -365,6 +375,7 @@ rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, loc /* Initialize the rollback stack alocator using a backing block allocator. */ +@(no_sanitize_address) rollback_stack_init_dynamic :: proc( stack: ^Rollback_Stack, block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE, @@ -396,6 +407,7 @@ rollback_stack_init :: proc { /* Destroy a rollback stack. 
*/ +@(no_sanitize_address) rollback_stack_destroy :: proc(stack: ^Rollback_Stack) { if stack.block_allocator.procedure != nil { rb_free_all(stack) @@ -435,7 +447,7 @@ from the last allocation backwards. Each allocation has an overhead of 8 bytes and any extra bytes to satisfy the requested alignment. */ -@(require_results) +@(require_results, no_sanitize_address) rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { return Allocator { data = stack, @@ -443,7 +455,7 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { } } -@(require_results) +@(require_results, no_sanitize_address) rollback_stack_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin index 4ce6e54d9..0ae8c28e0 100644 --- a/core/mem/tlsf/tlsf.odin +++ b/core/mem/tlsf/tlsf.odin @@ -198,4 +198,4 @@ fls :: proc "contextless" (word: u32) -> (bit: i32) { fls_uint :: proc "contextless" (size: uint) -> (bit: i32) { N :: (size_of(uint) * 8) - 1 return i32(N - intrinsics.count_leading_zeros(size)) -} \ No newline at end of file +} diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin index f8a9bf60c..89b875679 100644 --- a/core/mem/tlsf/tlsf_internal.odin +++ b/core/mem/tlsf/tlsf_internal.odin @@ -10,6 +10,7 @@ package mem_tlsf import "base:intrinsics" +import "base:sanitizer" import "base:runtime" // log2 of number of linear subdivisions of block sizes. @@ -209,6 +210,8 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> return nil, .Out_Of_Memory } + sanitizer.address_poison(new_pool_buf) + // Allocate a new link in the `control.pool` tracking structure. new_pool := new_clone(Pool{ data = new_pool_buf, @@ -254,7 +257,7 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> return block_prepare_used(control, block, adjust) } -@(private, require_results) +@(private, require_results, no_sanitize_address) alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) { res, err = alloc_bytes_non_zeroed(control, size, align) if err == nil { @@ -273,6 +276,7 @@ free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) { block := block_from_ptr(ptr) assert(!block_is_free(block), "block already marked as free") // double free + sanitizer.address_poison(ptr, block.size) block_mark_as_free(block) block = block_merge_prev(control, block) block = block_merge_next(control, block) @@ -316,6 +320,7 @@ resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, align block_trim_used(control, block, adjust) res = ([^]byte)(ptr)[:new_size] + sanitizer.address_unpoison(res) if min_size < new_size { to_zero := ([^]byte)(ptr)[min_size:new_size] @@ -374,95 +379,96 @@ resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: NOTE: TLSF spec relies on ffs/fls returning a value in the range 0..31. 
*/ -@(private, require_results) +@(private, require_results, no_sanitize_address) block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) { return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE) } -@(private) +@(private, no_sanitize_address) block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) { old_size := block.size block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) { return block_size(block) == 0 } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) { return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE } -@(private) +@(private, no_sanitize_address) block_set_free :: proc "contextless" (block: ^Block_Header) { block.size |= BLOCK_HEADER_FREE } -@(private) +@(private, no_sanitize_address) block_set_used :: proc "contextless" (block: ^Block_Header) { block.size &~= BLOCK_HEADER_FREE } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) { return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE } -@(private) +@(private, no_sanitize_address) block_set_prev_free :: proc "contextless" (block: ^Block_Header) { block.size |= BLOCK_HEADER_PREV_FREE } -@(private) +@(private, no_sanitize_address) block_set_prev_used :: proc "contextless" (block: ^Block_Header) { block.size &~= BLOCK_HEADER_PREV_FREE } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) { return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET) } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) { return rawptr(uintptr(block) + BLOCK_START_OFFSET) } // Return location of next block after block of given size. -@(private, require_results) +@(private, require_results, no_sanitize_address) offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) { return (^Block_Header)(uintptr(ptr) + uintptr(size)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) { return (^Block_Header)(uintptr(ptr) - uintptr(size)) } // Return location of previous block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) { assert(block_is_prev_free(block), "previous block must be free") + return block.prev_phys_block } // Return location of next existing block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) { return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD) } // Link a new block with its physical neighbor, return the neighbor. 
-@(private, require_results) +@(private, require_results, no_sanitize_address) block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) { next = block_next(block) next.prev_phys_block = block return } -@(private) +@(private, no_sanitize_address) block_mark_as_free :: proc(block: ^Block_Header) { // Link the block to the next block, first. next := block_link_next(block) @@ -470,26 +476,26 @@ block_mark_as_free :: proc(block: ^Block_Header) { block_set_free(block) } -@(private) -block_mark_as_used :: proc(block: ^Block_Header) { +@(private, no_sanitize_address) +block_mark_as_used :: proc(block: ^Block_Header, ) { next := block_next(block) block_set_prev_used(next) block_set_used(block) } -@(private, require_results) +@(private, require_results, no_sanitize_address) align_up :: proc(x, align: uint) -> (aligned: uint) { assert(0 == (align & (align - 1)), "must align to a power of two") return (x + (align - 1)) &~ (align - 1) } -@(private, require_results) +@(private, require_results, no_sanitize_address) align_down :: proc(x, align: uint) -> (aligned: uint) { assert(0 == (align & (align - 1)), "must align to a power of two") return x - (x & (align - 1)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) { assert(0 == (align & (align - 1)), "must align to a power of two") align_mask := uintptr(align) - 1 @@ -499,7 +505,7 @@ align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) { } // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum. -@(private, require_results) +@(private, require_results, no_sanitize_address) adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) { if size == 0 { return 0 @@ -513,7 +519,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) { } // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum. -@(private, require_results) +@(private, require_results, no_sanitize_address) adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) { if size == 0 { return 0, nil @@ -531,7 +537,7 @@ adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: // TLSF utility functions. In most cases these are direct translations of // the documentation in the research paper. -@(optimization_mode="favor_size", private, require_results) +@(optimization_mode="favor_size", private, require_results, no_sanitize_address) mapping_insert :: proc(size: uint) -> (fl, sl: i32) { if size < SMALL_BLOCK_SIZE { // Store small blocks in first list. 
@@ -544,7 +550,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) { return } -@(optimization_mode="favor_size", private, require_results) +@(optimization_mode="favor_size", private, require_results, no_sanitize_address) mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) { rounded = size if size >= SMALL_BLOCK_SIZE { @@ -555,12 +561,12 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) { } // This version rounds up to the next block size (for allocations) -@(optimization_mode="favor_size", private, require_results) +@(optimization_mode="favor_size", private, require_results, no_sanitize_address) mapping_search :: proc(size: uint) -> (fl, sl: i32) { return mapping_insert(mapping_round(size)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) { // First, search for a block in the list associated with the given fl/sl index. fl := fli^; sl := sli^ @@ -587,7 +593,7 @@ search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^B } // Remove a free block from the free list. -@(private) +@(private, no_sanitize_address) remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) { prev := block.prev_free next := block.next_free @@ -613,7 +619,7 @@ remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl } // Insert a free block into the free block list. -@(private) +@(private, no_sanitize_address) insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) { current := control.blocks[fl][sl] assert(current != nil, "free lists cannot have a nil entry") @@ -631,26 +637,26 @@ insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl } // Remove a given block from the free list. -@(private) +@(private, no_sanitize_address) block_remove :: proc(control: ^Allocator, block: ^Block_Header) { fl, sl := mapping_insert(block_size(block)) remove_free_block(control, block, fl, sl) } // Insert a given block into the free list. -@(private) +@(private, no_sanitize_address) block_insert :: proc(control: ^Allocator, block: ^Block_Header) { fl, sl := mapping_insert(block_size(block)) insert_free_block(control, block, fl, sl) } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) { return block_size(block) >= size_of(Block_Header) + size } // Split a block into two, the second of which is free. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) { // Calculate the amount of space left in the remaining block. remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD) @@ -671,9 +677,10 @@ block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Head } // Absorb a free block's storage into an adjacent previous free block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) { assert(!block_is_last(prev), "previous block can't be last") + // Note: Leaves flags untouched. 
prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD _ = block_link_next(prev) @@ -681,7 +688,7 @@ block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^B } // Merge a just-freed block with an adjacent previous free block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) { merged = block if (block_is_prev_free(block)) { @@ -695,7 +702,7 @@ block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: } // Merge a just-freed block with an adjacent free block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) { merged = block next := block_next(block) @@ -710,7 +717,7 @@ block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: } // Trim any trailing block space off the end of a free block, return to pool. -@(private) +@(private, no_sanitize_address) block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { assert(block_is_free(block), "block must be free") if (block_can_split(block, size)) { @@ -722,7 +729,7 @@ block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { } // Trim any trailing block space off the end of a used block, return to pool. -@(private) +@(private, no_sanitize_address) block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { assert(!block_is_free(block), "Block must be used") if (block_can_split(block, size)) { @@ -736,7 +743,7 @@ block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { } // Trim leading block space, return to pool. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) { remaining = block if block_can_split(block, size) { @@ -750,7 +757,7 @@ block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: return remaining } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) { fl, sl: i32 if size != 0 { @@ -774,13 +781,14 @@ block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Hea return block } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) { if block != nil { assert(size != 0, "Size must be non-zero") block_trim_free(control, block, size) block_mark_as_used(block) res = ([^]byte)(block_to_ptr(block))[:size] + sanitizer.address_unpoison(res) } return -} \ No newline at end of file +} diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index 25c547471..01080075e 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -64,6 +64,7 @@ This procedure initializes the tracking allocator `t` with a backing allocator specified with `backing_allocator`. The `internals_allocator` will used to allocate the tracked data. 
*/ +@(no_sanitize_address) tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) { t.backing = backing_allocator t.allocation_map.allocator = internals_allocator @@ -77,6 +78,7 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc /* Destroy the tracking allocator. */ +@(no_sanitize_address) tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) { delete(t.allocation_map) delete(t.bad_free_array) @@ -90,6 +92,7 @@ This procedure clears the tracked data from a tracking allocator. **Note**: This procedure clears only the current allocation data while keeping the totals intact. */ +@(no_sanitize_address) tracking_allocator_clear :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) clear(&t.allocation_map) @@ -103,6 +106,7 @@ Reset the tracking allocator. Reset all of a Tracking Allocator's allocation data back to zero. */ +@(no_sanitize_address) tracking_allocator_reset :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) clear(&t.allocation_map) @@ -124,6 +128,7 @@ Override Tracking_Allocator.bad_free_callback to have something else happen. For example, you can use tracking_allocator_bad_free_callback_add_to_array to return the tracking allocator to the old behavior, where the bad_free_array was used. */ +@(no_sanitize_address) tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) { runtime.print_caller_location(location) runtime.print_string(" Tracking allocator error: Bad free of pointer ") @@ -136,6 +141,7 @@ tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memor Alternative behavior for a bad free: Store in `bad_free_array`. If you use this, then you must make sure to check Tracking_Allocator.bad_free_array at some point. */ +@(no_sanitize_address) tracking_allocator_bad_free_callback_add_to_array :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) { append(&t.bad_free_array, Tracking_Allocator_Bad_Free_Entry { memory = memory, @@ -175,7 +181,7 @@ Example: } } */ -@(require_results) +@(require_results, no_sanitize_address) tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { return Allocator{ data = data, @@ -183,6 +189,7 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { } } +@(no_sanitize_address) tracking_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -191,6 +198,7 @@ tracking_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> (result: []byte, err: Allocator_Error) { + @(no_sanitize_address) track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) { data.total_memory_allocated += i64(entry.size) data.total_allocation_count += 1 @@ -200,6 +208,7 @@ tracking_allocator_proc :: proc( } } + @(no_sanitize_address) track_free :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) { data.total_memory_freed += i64(entry.size) data.total_free_count += 1 diff --git a/core/mem/virtual/arena.odin b/core/mem/virtual/arena.odin index 4fc2e0e35..4e1cc2466 100644 --- a/core/mem/virtual/arena.odin +++ b/core/mem/virtual/arena.odin @@ -3,6 +3,8 @@ package mem_virtual import "core:mem" import "core:sync" +import "base:sanitizer" + Arena_Kind :: enum uint { Growing = 0, // Chained memory blocks (singly linked list). Static = 1, // Fixed reservation sized. 
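The arena hunks that follow wire the same poisoning discipline into `core:mem/virtual`: freshly committed block memory is poisoned at init, `arena_alloc` unpoisons only the returned slice, and shrinks and frees re-poison what was given back. The practical effect, sketched below with invented names (the package, `main`, and the sizes are made up; the `virtual` procedures shown are the ones touched by this patch), is that stale arena pointers now fault under `-sanitize:address` instead of silently touching reusable memory:

    package arena_asan_example

    import "core:mem/virtual"

    main :: proc() {
    	arena: virtual.Arena
    	if virtual.arena_init_growing(&arena) != nil {
    		return
    	}
    	defer virtual.arena_destroy(&arena)

    	data, _ := virtual.arena_alloc(&arena, 64, 16)
    	data[0] = 1 // fine: this slice was unpoisoned by arena_alloc

    	virtual.arena_free_all(&arena)
    	data[0] = 2 // deliberate use-after-free_all: the block was re-poisoned,
    	            // so asan reports this access when enabled
    }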
@@ -43,7 +45,7 @@ DEFAULT_ARENA_STATIC_RESERVE_SIZE :: mem.Gigabyte when size_of(uintptr) == 8 els // Initialization of an `Arena` to be a `.Growing` variant. // A growing arena is a linked list of `Memory_Block`s allocated with virtual memory. -@(require_results) +@(require_results, no_sanitize_address) arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (err: Allocator_Error) { arena.kind = .Growing arena.curr_block = memory_block_alloc(0, reserved, {}) or_return @@ -53,24 +55,26 @@ arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING if arena.minimum_block_size == 0 { arena.minimum_block_size = reserved } + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) return } // Initialization of an `Arena` to be a `.Static` variant. // A static arena contains a single `Memory_Block` allocated with virtual memory. -@(require_results) +@(require_results, no_sanitize_address) arena_init_static :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_STATIC_RESERVE_SIZE, commit_size: uint = DEFAULT_ARENA_STATIC_COMMIT_SIZE) -> (err: Allocator_Error) { arena.kind = .Static arena.curr_block = memory_block_alloc(commit_size, reserved, {}) or_return arena.total_used = 0 arena.total_reserved = arena.curr_block.reserved + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) return } // Initialization of an `Arena` to be a `.Buffer` variant. // A buffer arena contains single `Memory_Block` created from a user provided []byte. -@(require_results) +@(require_results, no_sanitize_address) arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Error) { if len(buffer) < size_of(Memory_Block) { return .Out_Of_Memory @@ -78,7 +82,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro arena.kind = .Buffer - mem.zero_slice(buffer) + sanitizer.address_poison(buffer[:]) block_base := raw_data(buffer) block := (^Memory_Block)(block_base) @@ -94,7 +98,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro } // Allocates memory from the provided arena. -@(require_results) +@(require_results, no_sanitize_address) arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_location) -> (data: []byte, err: Allocator_Error) { assert(alignment & (alignment-1) == 0, "non-power of two alignment", loc) @@ -158,10 +162,13 @@ arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_l data, err = alloc_from_memory_block(arena.curr_block, size, alignment, default_commit_size=0) arena.total_used = arena.curr_block.used } + + sanitizer.address_unpoison(data) return } // Resets the memory of a Static or Buffer arena to a specific `position` (offset) and zeroes the previously used memory. 
+@(no_sanitize_address) arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) -> bool { sync.mutex_guard(&arena.mutex) @@ -175,6 +182,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) mem.zero_slice(arena.curr_block.base[arena.curr_block.used:][:prev_pos-pos]) } arena.total_used = arena.curr_block.used + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) return true } else if pos == 0 { arena.total_used = 0 @@ -184,6 +192,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) } // Frees the last memory block of a Growing Arena +@(no_sanitize_address) arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_location) { if free_block := arena.curr_block; free_block != nil { assert(arena.kind == .Growing, "expected a .Growing arena", loc) @@ -191,11 +200,13 @@ arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_locat arena.total_reserved -= free_block.reserved arena.curr_block = free_block.prev + sanitizer.address_poison(free_block.base[:free_block.committed]) memory_block_dealloc(free_block) } } // Deallocates all but the first memory block of the arena and resets the allocator's usage to 0. +@(no_sanitize_address) arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { switch arena.kind { case .Growing: @@ -208,7 +219,9 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { if arena.curr_block != nil { curr_block_used := int(arena.curr_block.used) arena.curr_block.used = 0 + sanitizer.address_unpoison(arena.curr_block.base[:curr_block_used]) mem.zero(arena.curr_block.base, curr_block_used) + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) } arena.total_used = 0 case .Static, .Buffer: @@ -219,6 +232,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { // Frees all of the memory allocated by the arena and zeros all of the values of an arena. // A buffer based arena does not `delete` the provided `[]byte` bufffer. +@(no_sanitize_address) arena_destroy :: proc(arena: ^Arena, loc := #caller_location) { sync.mutex_guard(&arena.mutex) switch arena.kind { @@ -250,7 +264,7 @@ arena_static_bootstrap_new :: proc{ } // Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy. -@(require_results) +@(require_results, no_sanitize_address) arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) { bootstrap: Arena bootstrap.kind = .Growing @@ -266,13 +280,13 @@ arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintp } // Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy. -@(require_results) +@(require_results, no_sanitize_address) arena_growing_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) { return arena_growing_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), minimum_block_size) } // Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy. 
-@(require_results) +@(require_results, no_sanitize_address) arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, reserved: uint) -> (ptr: ^T, err: Allocator_Error) { bootstrap: Arena bootstrap.kind = .Static @@ -288,19 +302,20 @@ arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintpt } // Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy. -@(require_results) +@(require_results, no_sanitize_address) arena_static_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, reserved: uint) -> (ptr: ^T, err: Allocator_Error) { return arena_static_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), reserved) } // Create an `Allocator` from the provided `Arena` -@(require_results) +@(require_results, no_sanitize_address) arena_allocator :: proc(arena: ^Arena) -> mem.Allocator { return mem.Allocator{arena_allocator_proc, arena} } // The allocator procedure used by an `Allocator` produced by `arena_allocator` +@(no_sanitize_address) arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, size, alignment: int, old_memory: rawptr, old_size: int, @@ -334,6 +349,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, if size < old_size { // shrink data in-place data = old_data[:size] + sanitizer.address_poison(old_data[size:old_size]) return } @@ -347,6 +363,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, _ = alloc_from_memory_block(block, new_end - old_end, 1, default_commit_size=arena.default_commit_size) or_return arena.total_used += block.used - prev_used data = block.base[start:new_end] + sanitizer.address_unpoison(data) return } } @@ -357,6 +374,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, return } copy(new_memory, old_data[:old_size]) + sanitizer.address_poison(old_data[:old_size]) return new_memory, nil case .Query_Features: set := (^mem.Allocator_Mode_Set)(old_memory) @@ -382,7 +400,7 @@ Arena_Temp :: struct { } // Begins the section of temporary arena memory. -@(require_results) +@(require_results, no_sanitize_address) arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena_Temp) { assert(arena != nil, "nil arena", loc) sync.mutex_guard(&arena.mutex) @@ -397,6 +415,7 @@ arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena } // Ends the section of temporary arena memory by resetting the memory to the stored position. +@(no_sanitize_address) arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) { assert(temp.arena != nil, "nil arena", loc) arena := temp.arena @@ -432,6 +451,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) { } // Ignore the use of a `arena_temp_begin` entirely by __not__ resetting to the stored position. 
+@(no_sanitize_address) arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) { assert(temp.arena != nil, "nil arena", loc) arena := temp.arena @@ -442,6 +462,7 @@ arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) { } // Asserts that all uses of `Arena_Temp` has been used by an `Arena` +@(no_sanitize_address) arena_check_temp :: proc(arena: ^Arena, loc := #caller_location) { assert(arena.temp_count == 0, "Arena_Temp not been ended", loc) } diff --git a/core/mem/virtual/virtual.odin b/core/mem/virtual/virtual.odin index 4afc33813..031fb721a 100644 --- a/core/mem/virtual/virtual.odin +++ b/core/mem/virtual/virtual.odin @@ -2,6 +2,7 @@ package mem_virtual import "core:mem" import "base:intrinsics" +import "base:sanitizer" import "base:runtime" _ :: runtime @@ -14,27 +15,33 @@ platform_memory_init :: proc() { Allocator_Error :: mem.Allocator_Error -@(require_results) +@(require_results, no_sanitize_address) reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) { return _reserve(size) } +@(no_sanitize_address) commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error { + sanitizer.address_unpoison(data, size) return _commit(data, size) } -@(require_results) +@(require_results, no_sanitize_address) reserve_and_commit :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) { data = reserve(size) or_return commit(raw_data(data), size) or_return return } +@(no_sanitize_address) decommit :: proc "contextless" (data: rawptr, size: uint) { + sanitizer.address_poison(data, size) _decommit(data, size) } +@(no_sanitize_address) release :: proc "contextless" (data: rawptr, size: uint) { + sanitizer.address_unpoison(data, size) _release(data, size) } @@ -46,13 +53,11 @@ Protect_Flag :: enum u32 { Protect_Flags :: distinct bit_set[Protect_Flag; u32] Protect_No_Access :: Protect_Flags{} +@(no_sanitize_address) protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool { return _protect(data, size, flags) } - - - Memory_Block :: struct { prev: ^Memory_Block, base: [^]byte, @@ -66,13 +71,13 @@ Memory_Block_Flag :: enum u32 { Memory_Block_Flags :: distinct bit_set[Memory_Block_Flag; u32] -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) align_formula :: #force_inline proc "contextless" (size, align: uint) -> uint { result := size + align-1 return result - result%align } -@(require_results) +@(require_results, no_sanitize_address) memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags: Memory_Block_Flags = {}) -> (block: ^Memory_Block, err: Allocator_Error) { page_size := DEFAULT_PAGE_SIZE assert(mem.is_power_of_two(uintptr(page_size))) @@ -116,8 +121,9 @@ memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags return &pmblock.block, nil } -@(require_results) +@(require_results, no_sanitize_address) alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, default_commit_size: uint = 0) -> (data: []byte, err: Allocator_Error) { + @(no_sanitize_address) calc_alignment_offset :: proc "contextless" (block: ^Memory_Block, alignment: uintptr) -> uint { alignment_offset := uint(0) ptr := uintptr(block.base[block.used:]) @@ -128,6 +134,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, return alignment_offset } + @(no_sanitize_address) do_commit_if_necessary :: proc(block: ^Memory_Block, size: uint, default_commit_size: uint) -> (err: 
Allocator_Error) { if block.committed - block.used < size { pmblock := (^Platform_Memory_Block)(block) @@ -172,10 +179,12 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, data = block.base[block.used+alignment_offset:][:min_size] block.used += size + sanitizer.address_unpoison(data) return } +@(no_sanitize_address) memory_block_dealloc :: proc(block_to_free: ^Memory_Block) { if block := (^Platform_Memory_Block)(block_to_free); block != nil { platform_memory_free(block) diff --git a/core/mem/virtual/virtual_platform.odin b/core/mem/virtual/virtual_platform.odin index 31e9cfca8..c9dde4e9d 100644 --- a/core/mem/virtual/virtual_platform.odin +++ b/core/mem/virtual/virtual_platform.odin @@ -7,6 +7,7 @@ Platform_Memory_Block :: struct { reserved: uint, } +@(no_sanitize_address) platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (block: ^Platform_Memory_Block, err: Allocator_Error) { to_commit, to_reserve := to_commit, to_reserve to_reserve = max(to_commit, to_reserve) @@ -26,12 +27,14 @@ platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (bl } +@(no_sanitize_address) platform_memory_free :: proc "contextless" (block: ^Platform_Memory_Block) { if block != nil { release(block, block.reserved) } } +@(no_sanitize_address) platform_memory_commit :: proc "contextless" (block: ^Platform_Memory_Block, to_commit: uint) -> (err: Allocator_Error) { if to_commit < block.committed { return nil diff --git a/core/mem/virtual/virtual_windows.odin b/core/mem/virtual/virtual_windows.odin index acd30ae33..0da8498d5 100644 --- a/core/mem/virtual/virtual_windows.odin +++ b/core/mem/virtual/virtual_windows.odin @@ -83,6 +83,8 @@ foreign Kernel32 { dwNumberOfBytesToMap: uint, ) -> rawptr --- } + +@(no_sanitize_address) _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) { result := VirtualAlloc(nil, size, MEM_RESERVE, PAGE_READWRITE) if result == nil { @@ -93,6 +95,7 @@ _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Err return } +@(no_sanitize_address) _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error { result := VirtualAlloc(data, size, MEM_COMMIT, PAGE_READWRITE) if result == nil { @@ -107,12 +110,18 @@ _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error { } return nil } + +@(no_sanitize_address) _decommit :: proc "contextless" (data: rawptr, size: uint) { VirtualFree(data, size, MEM_DECOMMIT) } + +@(no_sanitize_address) _release :: proc "contextless" (data: rawptr, size: uint) { VirtualFree(data, 0, MEM_RELEASE) } + +@(no_sanitize_address) _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool { pflags: u32 pflags = PAGE_NOACCESS @@ -136,7 +145,7 @@ _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) } - +@(no_sanitize_address) _platform_memory_init :: proc() { sys_info: SYSTEM_INFO GetSystemInfo(&sys_info) @@ -147,6 +156,7 @@ _platform_memory_init :: proc() { } +@(no_sanitize_address) _map_file :: proc "contextless" (fd: uintptr, size: i64, flags: Map_File_Flags) -> (data: []byte, error: Map_File_Error) { page_flags: u32 if flags == {.Read} { diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 6177fcf6e..de6841ed8 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -383,6 +383,8 @@ struct lbProcedure { PtrMap selector_values; PtrMap selector_addr; PtrMap tuple_fix_map; + + Array asan_stack_locals; }; diff --git 
a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 421720c4c..dad5d4dd5 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -3070,6 +3070,13 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero if (e != nullptr) { lb_add_entity(p->module, e, val); lb_add_debug_local_variable(p, ptr, type, e->token); + + // NOTE(lucas): In LLVM 20 and below we do not have the option to have asan cleanup poisoned stack + // locals ourselves. So we need to manually track and unpoison these locals on proc return. + // LLVM 21 adds the 'use-after-scope' asan option which does this for us. + if (build_context.sanitizer_flags & SanitizerFlag_Address && !p->entity->Procedure.no_sanitize_address) { + array_add(&p->asan_stack_locals, val); + } } if (zero_init) { diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index 7bd8dea59..8f1619006 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -115,12 +115,13 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i p->is_entry_point = false; gbAllocator a = heap_allocator(); - p->children.allocator = a; - p->defer_stmts.allocator = a; - p->blocks.allocator = a; - p->branch_blocks.allocator = a; - p->context_stack.allocator = a; - p->scope_stack.allocator = a; + p->children.allocator = a; + p->defer_stmts.allocator = a; + p->blocks.allocator = a; + p->branch_blocks.allocator = a; + p->context_stack.allocator = a; + p->scope_stack.allocator = a; + p->asan_stack_locals.allocator = a; // map_init(&p->selector_values, 0); // map_init(&p->selector_addr, 0); // map_init(&p->tuple_fix_map, 0); @@ -385,11 +386,12 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name p->is_entry_point = false; gbAllocator a = permanent_allocator(); - p->children.allocator = a; - p->defer_stmts.allocator = a; - p->blocks.allocator = a; - p->branch_blocks.allocator = a; - p->context_stack.allocator = a; + p->children.allocator = a; + p->defer_stmts.allocator = a; + p->blocks.allocator = a; + p->branch_blocks.allocator = a; + p->context_stack.allocator = a; + p->asan_stack_locals.allocator = a; map_init(&p->tuple_fix_map, 0); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 96a5d0db1..d5e3e4c75 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -2917,6 +2917,22 @@ gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlo } defer (p->branch_location_pos = prev_token_pos); + // TODO(lucas): In LLVM 21 use the 'use-after-scope' asan option which does this for us. 
+ #if LLVM_VERSION_MAJOR < 21 + if (kind == lbDeferExit_Return) { + for_array(i, p->asan_stack_locals) { + lbValue local = p->asan_stack_locals[i]; + + auto args = array_make(temporary_allocator(), 2); + args[0] = lb_emit_conv(p, local, t_rawptr); + args[1] = lb_const_int(p->module, t_int, type_size_of(local.type->Pointer.elem)); + lb_emit_runtime_call(p, "__asan_unpoison_memory_region", args); + } + } + #else + #error "Need to implement LLVM 21 'use-after-scope' asan option" + #endif + isize count = p->defer_stmts.count; isize i = count; while (i --> 0) { -- cgit v1.2.3 From ea65a7b870736311747c517970df3921d227e024 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 7 May 2025 14:26:10 +0100 Subject: Move raddbg string stuff to a thread-safe queue --- src/llvm_backend.cpp | 172 ++++++++++++++++++++++--------------------- src/llvm_backend.hpp | 1 + src/llvm_backend_debug.cpp | 19 +++++ src/llvm_backend_general.cpp | 1 + 4 files changed, 108 insertions(+), 85 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index f0c1000c5..b69cbc3a5 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -2678,106 +2678,42 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } } - TIME_SECTION("LLVM Runtime Objective-C Names Creation"); - gen->objc_names = lb_create_objc_names(default_module); - - TIME_SECTION("LLVM Runtime Startup Creation (Global Variables & @(init))"); - gen->startup_runtime = lb_create_startup_runtime(default_module, gen->objc_names, global_variables); - - TIME_SECTION("LLVM Runtime Cleanup Creation & @(fini)"); - gen->cleanup_runtime = lb_create_cleanup_runtime(default_module); - - if (build_context.ODIN_DEBUG) { - for (auto const &entry : builtin_pkg->scope->elements) { - Entity *e = entry.value; - lb_add_debug_info_for_global_constant_from_entity(gen, e); - } - // Custom `.raddbg` section for its debugger if (build_context.metrics.os == TargetOs_windows) { - LLVMModuleRef m = default_module->mod; - LLVMContextRef c = default_module->ctx; + lbModule *m = default_module; + LLVMModuleRef mod = m->mod; + LLVMContextRef ctx = m->ctx; { - LLVMTypeRef type = LLVMArrayType(LLVMInt8TypeInContext(c), 1); - LLVMValueRef global = LLVMAddGlobal(m, type, "raddbg_is_attached_byte_marker"); + LLVMTypeRef type = LLVMArrayType(LLVMInt8TypeInContext(ctx), 1); + LLVMValueRef global = LLVMAddGlobal(mod, type, "raddbg_is_attached_byte_marker"); LLVMSetInitializer(global, LLVMConstNull(type)); LLVMSetSection(global, ".raddbg"); } - TEMPORARY_ALLOCATOR_GUARD(); - - u32 index = 0; - auto const add_string = [m, c, &index](String const &str) { - LLVMValueRef data = LLVMConstStringInContext(c, cast(char const *)str.text, cast(unsigned)str.len, false); - LLVMTypeRef type = LLVMTypeOf(data); - - gbString global_name = gb_string_make(temporary_allocator(), "raddbg_data__"); - global_name = gb_string_append_fmt(global_name, "%u", index); - index += 1; - - LLVMValueRef global = LLVMAddGlobal(m, type, global_name); - - LLVMSetInitializer(global, data); - LLVMSetAlignment(global, 1); - - LLVMSetSection(global, ".raddbg"); - }; - - auto const add_string1 = [add_string](char const *a) { - add_string(make_string_c(a)); - }; - auto const add_string3 = [add_string](char const *a, char const *b, char const *c) { - add_string(concatenate3_strings(temporary_allocator(), make_string_c(a), make_string_c(b), make_string_c(c))); - }; - - - if (gen->info->entry_point) { - String mangled_name = lb_get_entity_name(default_module, gen->info->entry_point); 
+ String mangled_name = lb_get_entity_name(m, gen->info->entry_point); char const *str = alloc_cstring(temporary_allocator(), mangled_name); - add_string3("entry_point: \"", str, "\""); + lb_add_raddbg_string(m, "entry_point: \"", str, "\""); } + } + } + + TIME_SECTION("LLVM Runtime Objective-C Names Creation"); + gen->objc_names = lb_create_objc_names(default_module); - add_string1("type_view: {type: \"[]?\", expr: \"array(data, len)\"}"); - add_string1("type_view: {type: \"string\", expr: \"array(data, len)\"}"); + TIME_SECTION("LLVM Runtime Startup Creation (Global Variables & @(init))"); + gen->startup_runtime = lb_create_startup_runtime(default_module, gen->objc_names, global_variables); - // column major matrices - add_string1("type_view: {type: \"matrix[1, ?]?\", expr: \"table($.data, $[0])\"}"); - add_string1("type_view: {type: \"matrix[2, ?]?\", expr: \"table($.data, $[0], $[1])\"}"); - add_string1("type_view: {type: \"matrix[3, ?]?\", expr: \"table($.data, $[0], $[1], $[2])\"}"); - add_string1("type_view: {type: \"matrix[4, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3])\"}"); - add_string1("type_view: {type: \"matrix[5, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4])\"}"); - add_string1("type_view: {type: \"matrix[6, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5])\"}"); - add_string1("type_view: {type: \"matrix[7, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6])\"}"); - add_string1("type_view: {type: \"matrix[8, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7])\"}"); - add_string1("type_view: {type: \"matrix[9, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8])\"}"); - add_string1("type_view: {type: \"matrix[10, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9])\"}"); - add_string1("type_view: {type: \"matrix[11, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10])\"}"); - add_string1("type_view: {type: \"matrix[12, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11])\"}"); - add_string1("type_view: {type: \"matrix[13, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12])\"}"); - add_string1("type_view: {type: \"matrix[14, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13])\"}"); - add_string1("type_view: {type: \"matrix[15, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14])\"}"); - add_string1("type_view: {type: \"matrix[16, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14], $[15])\"}"); + TIME_SECTION("LLVM Runtime Cleanup Creation & @(fini)"); + gen->cleanup_runtime = lb_create_cleanup_runtime(default_module); - // row major matrices - add_string1("type_view: {type: \"#row_major matrix[?, 1]?\", expr: \"table($.data, $[0])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 2]?\", expr: \"table($.data, $[0], $[1])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 3]?\", expr: \"table($.data, $[0], $[1], $[2])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 4]?\", expr: \"table($.data, $[0], $[1], $[2], $[3])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 5]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4])\"}"); - 
add_string1("type_view: {type: \"#row_major matrix[?, 6]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 7]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 8]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 9]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 10]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 11]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 12]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 13]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 14]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 15]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14])\"}"); - add_string1("type_view: {type: \"#row_major matrix[?, 16]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14], $[15])\"}"); + + if (build_context.ODIN_DEBUG) { + for (auto const &entry : builtin_pkg->scope->elements) { + Entity *e = entry.value; + lb_add_debug_info_for_global_constant_from_entity(gen, e); } } @@ -2807,6 +2743,72 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { if (build_context.ODIN_DEBUG) { TIME_SECTION("LLVM Debug Info Complete Types and Finalize"); lb_debug_info_complete_types_and_finalize(gen); + + // Custom `.raddbg` section for its debugger + if (build_context.metrics.os == TargetOs_windows) { + lbModule *m = default_module; + LLVMModuleRef mod = m->mod; + LLVMContextRef ctx = m->ctx; + + lb_add_raddbg_string(m, "type_view: {type: \"[]?\", expr: \"array(data, len)\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"string\", expr: \"array(data, len)\"}"); + + // column major matrices + lb_add_raddbg_string(m, "type_view: {type: \"matrix[1, ?]?\", expr: \"table($.data, $[0])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[2, ?]?\", expr: \"table($.data, $[0], $[1])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[3, ?]?\", expr: \"table($.data, $[0], $[1], $[2])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[4, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[5, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[6, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[7, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[8, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7])\"}"); + lb_add_raddbg_string(m, "type_view: {type: 
\"matrix[9, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[10, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[11, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[12, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[13, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[14, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[15, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"matrix[16, ?]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14], $[15])\"}"); + + // row major matrices + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 1]?\", expr: \"table($.data, $[0])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 2]?\", expr: \"table($.data, $[0], $[1])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 3]?\", expr: \"table($.data, $[0], $[1], $[2])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 4]?\", expr: \"table($.data, $[0], $[1], $[2], $[3])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 5]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 6]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 7]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 8]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 9]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 10]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 11]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 12]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 13]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 14]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 15]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], 
$[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14])\"}"); + lb_add_raddbg_string(m, "type_view: {type: \"#row_major matrix[?, 16]?\", expr: \"table($.data, $[0], $[1], $[2], $[3], $[4], $[5], $[6], $[7], $[8], $[9], $[10], $[11], $[12], $[13], $[14], $[15])\"}"); + + + TEMPORARY_ALLOCATOR_GUARD(); + + u32 global_name_index = 0; + for (String str = {}; mpsc_dequeue(&gen->raddebug_section_strings, &str); /**/) { + LLVMValueRef data = LLVMConstStringInContext(ctx, cast(char const *)str.text, cast(unsigned)str.len, false); + LLVMTypeRef type = LLVMTypeOf(data); + + gbString global_name = gb_string_make(temporary_allocator(), "raddbg_data__"); + global_name = gb_string_append_fmt(global_name, "%u", global_name_index); + global_name_index += 1; + + LLVMValueRef global = LLVMAddGlobal(mod, type, global_name); + + LLVMSetInitializer(global, data); + LLVMSetAlignment(global, 1); + + LLVMSetSection(global, ".raddbg"); + } + } } if (do_threading) { diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index de6841ed8..a0764494a 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -240,6 +240,7 @@ struct lbGenerator : LinkerData { MPSCQueue entities_to_correct_linkage; MPSCQueue objc_selectors; MPSCQueue objc_classes; + MPSCQueue raddebug_section_strings; }; diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp index 0358031d1..464d50cac 100644 --- a/src/llvm_backend_debug.cpp +++ b/src/llvm_backend_debug.cpp @@ -18,6 +18,25 @@ gb_internal void lb_set_llvm_metadata(lbModule *m, void *key, LLVMMetadataRef va } } +gb_internal void lb_add_raddbg_string(lbModule *m, String const &str) { + mpsc_enqueue(&m->gen->raddebug_section_strings, copy_string(permanent_allocator(), str)); +} + +gb_internal void lb_add_raddbg_string(lbModule *m, char const *cstr) { + mpsc_enqueue(&m->gen->raddebug_section_strings, copy_string(permanent_allocator(), make_string_c(cstr))); +} + +gb_internal void lb_add_raddbg_string(lbModule *m, char const *a, char const *b) { + String str = concatenate_strings(permanent_allocator(), make_string_c(a), make_string_c(b)); + mpsc_enqueue(&m->gen->raddebug_section_strings, str); +} + +gb_internal void lb_add_raddbg_string(lbModule *m, char const *a, char const *b, char const *c) { + String str = concatenate3_strings(permanent_allocator(), make_string_c(a), make_string_c(b), make_string_c(c)); + mpsc_enqueue(&m->gen->raddebug_section_strings, str); +} + + gb_internal LLVMMetadataRef lb_get_current_debug_scope(lbProcedure *p) { GB_ASSERT_MSG(p->debug_info != nullptr, "missing debug information for %.*s", LIT(p->name)); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index dad5d4dd5..2eaecd8a7 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -173,6 +173,7 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { mpsc_init(&gen->entities_to_correct_linkage, heap_allocator()); mpsc_init(&gen->objc_selectors, heap_allocator()); mpsc_init(&gen->objc_classes, heap_allocator()); + mpsc_init(&gen->raddebug_section_strings, heap_allocator()); return true; } -- cgit v1.2.3 From f9b9e9e7dcbb605bc64bc5af1331855375f58494 Mon Sep 17 00:00:00 2001 From: Laytan Laats Date: Fri, 9 May 2025 22:27:35 +0200 Subject: some ABI fixups and improvements Started with trying to enable asan in the CI for MacOS, noticed it wasn't enabled on the `tests/internal` folder, it came up with a couple of issues with the abi/OdinLLVMBuildTransmute that this also solves. 
- Looking at clang output for arm64, we should be promoting `{ i64, i32 }` to `{ i64, i64 }` - After doing the previous point, I noticed this is not handled well in OdinLLVMBuildTransmute, which was emitting loads and stores into the space of a value that is only there for alignment padding; asan does not allow this. Looking at clang output again, a memcpy is the appropriate way of handling this. - Having done this, we no longer need the hacky "return is packed" setting in the amd64 sysv ABI either --- src/llvm_abi.cpp | 42 +++++++++++--------------------- src/llvm_backend_general.cpp | 58 +++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 69 insertions(+), 31 deletions(-) (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp index c8e1ca764..baad3f873 100644 --- a/src/llvm_abi.cpp +++ b/src/llvm_abi.cpp @@ -977,7 +977,7 @@ namespace lbAbiAmd64SysV { return types[0]; } - return LLVMStructTypeInContext(c, types.data, cast(unsigned)types.count, sz == 0); + return LLVMStructTypeInContext(c, types.data, cast(unsigned)types.count, false); } gb_internal void classify_with(LLVMTypeRef t, Array *cls, i64 ix, i64 off) { @@ -1231,38 +1231,24 @@ namespace lbAbiArm64 { } } else { i64 size = lb_sizeof(return_type); - if (size <= 16) { - LLVMTypeRef cast_type = nullptr; - - if (size == 0) { - cast_type = LLVMStructTypeInContext(c, nullptr, 0, false); - } else if (size <= 8) { - cast_type = LLVMIntTypeInContext(c, cast(unsigned)(size*8)); - } else { - unsigned count = cast(unsigned)((size+7)/8); - - LLVMTypeRef llvm_i64 = LLVMIntTypeInContext(c, 64); - LLVMTypeRef *types = gb_alloc_array(temporary_allocator(), LLVMTypeRef, count); - - i64 size_copy = size; - for (unsigned i = 0; i < count; i++) { - if (size_copy >= 8) { - types[i] = llvm_i64; - } else { - types[i] = LLVMIntTypeInContext(c, 8*cast(unsigned)size_copy); - } - size_copy -= 8; - } - GB_ASSERT(size_copy <= 0); - cast_type = LLVMStructTypeInContext(c, types, count, true); - } - return lb_arg_type_direct(return_type, cast_type, nullptr, nullptr); - } else { + if (size > 16) { LB_ABI_MODIFY_RETURN_IF_TUPLE_MACRO(); LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type); return lb_arg_type_indirect(return_type, attr); } + + GB_ASSERT(size <= 16); + LLVMTypeRef cast_type = nullptr; + if (size == 0) { + cast_type = LLVMStructTypeInContext(c, nullptr, 0, false); + } else if (size <= 8) { + cast_type = LLVMIntTypeInContext(c, cast(unsigned)(size*8)); + } else { + LLVMTypeRef llvm_i64 = LLVMIntTypeInContext(c, 64); + cast_type = LLVMArrayType2(llvm_i64, 2); + } + return lb_arg_type_direct(return_type, cast_type, nullptr, nullptr); } } diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index c52551b36..504c8234e 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2525,10 +2525,13 @@ general_end:; } } - src_size = align_formula(src_size, src_align); - dst_size = align_formula(dst_size, dst_align); + // NOTE(laytan): even though this logic seems sound, the Address Sanitizer does not + // want you to load/store the space of a value that is there for alignment.
+#if 0 + i64 aligned_src_size = align_formula(src_size, src_align); + i64 aligned_dst_size = align_formula(dst_size, dst_align); - if (LLVMIsALoadInst(val) && (src_size >= dst_size && src_align >= dst_align)) { + if (LLVMIsALoadInst(val) && (aligned_src_size >= aligned_dst_size && src_align >= dst_align)) { LLVMValueRef val_ptr = LLVMGetOperand(val, 0); val_ptr = LLVMBuildPointerCast(p->builder, val_ptr, LLVMPointerType(dst_type, 0), ""); LLVMValueRef loaded_val = OdinLLVMBuildLoad(p, dst_type, val_ptr); @@ -2536,8 +2539,57 @@ general_end:; // LLVMSetAlignment(loaded_val, gb_min(src_align, dst_align)); return loaded_val; + } +#endif + + if (src_size > dst_size) { + GB_ASSERT(p->decl_block != p->curr_block); + // NOTE(laytan): src is bigger than dst, need to memcpy the part of src we want. + + LLVMValueRef val_ptr; + if (LLVMIsALoadInst(val)) { + val_ptr = LLVMGetOperand(val, 0); + } else if (LLVMIsAAllocaInst(val)) { + val_ptr = LLVMBuildPointerCast(p->builder, val, LLVMPointerType(src_type, 0), ""); + } else { + // NOTE(laytan): we need a pointer to memcpy from. + LLVMValueRef val_copy = llvm_alloca(p, src_type, src_align); + val_ptr = LLVMBuildPointerCast(p->builder, val_copy, LLVMPointerType(src_type, 0), ""); + LLVMBuildStore(p->builder, val, val_ptr); + } + + i64 max_align = gb_max(lb_alignof(src_type), lb_alignof(dst_type)); + max_align = gb_max(max_align, 16); + + LLVMValueRef ptr = llvm_alloca(p, dst_type, max_align); + LLVMValueRef nptr = LLVMBuildPointerCast(p->builder, ptr, LLVMPointerType(dst_type, 0), ""); + + LLVMTypeRef types[3] = { + lb_type(p->module, t_rawptr), + lb_type(p->module, t_rawptr), + lb_type(p->module, t_int) + }; + + LLVMValueRef args[4] = { + nptr, + val_ptr, + LLVMConstInt(LLVMIntTypeInContext(p->module->ctx, 8*cast(unsigned)build_context.int_size), dst_size, 0), + LLVMConstInt(LLVMInt1TypeInContext(p->module->ctx), 0, 0), + }; + + lb_call_intrinsic( + p, + "llvm.memcpy.inline", + args, + gb_count_of(args), + types, + gb_count_of(types) + ); + + return OdinLLVMBuildLoad(p, dst_type, ptr); } else { GB_ASSERT(p->decl_block != p->curr_block); + GB_ASSERT(dst_size >= src_size); i64 max_align = gb_max(lb_alignof(src_type), lb_alignof(dst_type)); max_align = gb_max(max_align, 16); -- cgit v1.2.3 From c35a45e823401a1d7a15f11c6fb07e4fe9e6007a Mon Sep 17 00:00:00 2001 From: Laytan Laats Date: Sat, 17 May 2025 16:28:34 +0200 Subject: fix global and static any Fixes #4627 --- src/llvm_backend.cpp | 25 ++++-------------- src/llvm_backend_general.cpp | 43 ++++++++++++++++-------------- src/llvm_backend_stmt.cpp | 52 ++++++++++++++++++++++--------------- tests/internal/test_global_any.odin | 40 ++++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 60 deletions(-) create mode 100644 tests/internal/test_global_any.odin (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 7de147058..361a0c46b 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1973,14 +1973,14 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc gbString var_name = gb_string_make(permanent_allocator(), "__$global_any::"); gbString e_str = string_canonical_entity_name(temporary_allocator(), e); var_name = gb_string_append_length(var_name, e_str, gb_strlen(e_str)); - lbAddr g = lb_add_global_generated_with_name(main_module, var_type, var.init, make_string_c(var_name)); + lbAddr g = lb_add_global_generated_with_name(main_module, var_type, {}, make_string_c(var_name)); lb_addr_store(p, g, var.init); 
lbValue gp = lb_addr_get_ptr(p, g); lbValue data = lb_emit_struct_ep(p, var.var, 0); lbValue ti = lb_emit_struct_ep(p, var.var, 1); lb_emit_store(p, data, lb_emit_conv(p, gp, t_rawptr)); - lb_emit_store(p, ti, lb_type_info(p, var_type)); + lb_emit_store(p, ti, lb_typeid(p->module, var_type)); } else { LLVMTypeRef vt = llvm_addr_type(p->module, var.var); lbValue src0 = lb_emit_conv(p, var.init, t); @@ -3194,24 +3194,9 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { lbValue g = {}; g.value = LLVMAddGlobal(m->mod, lb_type(m, e->type), alloc_cstring(permanent_allocator(), name)); g.type = alloc_type_pointer(e->type); - if (e->Variable.thread_local_model != "") { - LLVMSetThreadLocal(g.value, true); - - String m = e->Variable.thread_local_model; - LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; - if (m == "default") { - mode = LLVMGeneralDynamicTLSModel; - } else if (m == "localdynamic") { - mode = LLVMLocalDynamicTLSModel; - } else if (m == "initialexec") { - mode = LLVMInitialExecTLSModel; - } else if (m == "localexec") { - mode = LLVMLocalExecTLSModel; - } else { - GB_PANIC("Unhandled thread local mode %.*s", LIT(m)); - } - LLVMSetThreadLocalMode(g.value, mode); - } + + lb_apply_thread_local_model(g.value, e->Variable.thread_local_model); + if (is_foreign) { LLVMSetLinkage(g.value, LLVMExternalLinkage); LLVMSetDLLStorageClass(g.value, LLVMDLLImportStorageClass); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 504c8234e..85a165de4 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2387,6 +2387,29 @@ gb_internal void lb_add_attribute_to_proc_with_string(lbModule *m, LLVMValueRef } +gb_internal bool lb_apply_thread_local_model(LLVMValueRef value, String model) { + if (model != "") { + LLVMSetThreadLocal(value, true); + + LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; + if (model == "default") { + mode = LLVMGeneralDynamicTLSModel; + } else if (model == "localdynamic") { + mode = LLVMLocalDynamicTLSModel; + } else if (model == "initialexec") { + mode = LLVMInitialExecTLSModel; + } else if (model == "localexec") { + mode = LLVMLocalExecTLSModel; + } else { + GB_PANIC("Unhandled thread local mode %.*s", LIT(model)); + } + LLVMSetThreadLocalMode(value, mode); + return true; + } + + return false; +} + gb_internal void lb_add_edge(lbBlock *from, lbBlock *to) { LLVMValueRef instr = LLVMGetLastInstruction(from->block); @@ -2990,25 +3013,7 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) { lb_set_entity_from_other_modules_linkage_correctly(other_module, e, name); - if (e->Variable.thread_local_model != "") { - LLVMSetThreadLocal(g.value, true); - - String m = e->Variable.thread_local_model; - LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; - if (m == "default") { - mode = LLVMGeneralDynamicTLSModel; - } else if (m == "localdynamic") { - mode = LLVMLocalDynamicTLSModel; - } else if (m == "initialexec") { - mode = LLVMInitialExecTLSModel; - } else if (m == "localexec") { - mode = LLVMLocalExecTLSModel; - } else { - GB_PANIC("Unhandled thread local mode %.*s", LIT(m)); - } - LLVMSetThreadLocalMode(g.value, mode); - } - + lb_apply_thread_local_model(g.value, e->Variable.thread_local_model); return g; } diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 44a78b036..9b5b14626 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -2022,33 +2022,43 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { LLVMValueRef global = 
LLVMAddGlobal(p->module->mod, lb_type(p->module, e->type), c_name); LLVMSetAlignment(global, cast(u32)type_align_of(e->type)); LLVMSetInitializer(global, LLVMConstNull(lb_type(p->module, e->type))); - if (value.value != nullptr) { - LLVMSetInitializer(global, value.value); - } + if (e->Variable.is_rodata) { LLVMSetGlobalConstant(global, true); } - if (e->Variable.thread_local_model != "") { - LLVMSetThreadLocal(global, true); - - String m = e->Variable.thread_local_model; - LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; - if (m == "default") { - mode = LLVMGeneralDynamicTLSModel; - } else if (m == "localdynamic") { - mode = LLVMLocalDynamicTLSModel; - } else if (m == "initialexec") { - mode = LLVMInitialExecTLSModel; - } else if (m == "localexec") { - mode = LLVMLocalExecTLSModel; - } else { - GB_PANIC("Unhandled thread local mode %.*s", LIT(m)); - } - LLVMSetThreadLocalMode(global, mode); - } else { + + if (!lb_apply_thread_local_model(global, e->Variable.thread_local_model)) { LLVMSetLinkage(global, LLVMInternalLinkage); } + if (value.value != nullptr) { + if (is_type_any(e->type)) { + Type *var_type = default_type(value.type); + + gbString var_name = gb_string_make(temporary_allocator(), "__$static_any::"); + var_name = gb_string_append_length(var_name, mangled_name.text, mangled_name.len); + + lbAddr var_global = lb_add_global_generated_with_name(p->module, var_type, value, make_string_c(var_name), nullptr); + LLVMValueRef var_global_ref = var_global.addr.value; + + if (e->Variable.is_rodata) { + LLVMSetGlobalConstant(var_global_ref, true); + } + + if (!lb_apply_thread_local_model(var_global_ref, e->Variable.thread_local_model)) { + LLVMSetLinkage(var_global_ref, LLVMInternalLinkage); + } + + LLVMValueRef vals[2] = { + lb_emit_conv(p, var_global.addr, t_rawptr).value, + lb_typeid(p->module, var_type).value, + }; + LLVMValueRef init = llvm_const_named_struct(p->module, e->type, vals, gb_count_of(vals)); + LLVMSetInitializer(global, init); + } else { + LLVMSetInitializer(global, value.value); + } + } lbValue global_val = {global, alloc_type_pointer(e->type)}; lb_add_entity(p->module, e, global_val); diff --git a/tests/internal/test_global_any.odin b/tests/internal/test_global_any.odin new file mode 100644 index 000000000..73b70e0a4 --- /dev/null +++ b/tests/internal/test_global_any.odin @@ -0,0 +1,40 @@ +package test_internal + +@(private="file") +global_any_from_proc: any = from_proc() + +from_proc :: proc() -> f32 { + return 1.1 +} + +@(private="file") +global_any: any = 1 + +import "core:testing" + +@(test) +test_global_any :: proc(t: ^testing.T) { + as_f32, is_f32 := global_any_from_proc.(f32) + testing.expect(t, is_f32 == true) + testing.expect(t, as_f32 == 1.1) + + as_int, is_int := global_any.(int) + testing.expect(t, is_int == true) + testing.expect(t, as_int == 1) +} + +@(test) +test_static_any :: proc(t: ^testing.T) { + @(static) + var: any = 3 + + as_int, is_int := var.(int) + testing.expect(t, is_int == true) + testing.expect(t, as_int == 3) + + var = f32(1.1) + + as_f32, is_f32 := var.(f32) + testing.expect(t, is_f32 == true) + testing.expect(t, as_f32 == 1.1) +} -- cgit v1.2.3 From f94fc992d788696a4bc903a3f179307a28accbb9 Mon Sep 17 00:00:00 2001 From: Laytan Laats Date: Mon, 2 Jun 2025 16:53:18 +0200 Subject: fix swizzle in for in statement Fixes #1730 --- src/llvm_backend_general.cpp | 9 ++++++--- src/llvm_backend_stmt.cpp | 18 +++++++++++++++--- tests/issues/run.bat | 1 + tests/issues/run.sh | 1 + tests/issues/test_issue_1730.odin | 21 +++++++++++++++++++++ 5 
files changed, 44 insertions(+), 6 deletions(-) create mode 100644 tests/issues/test_issue_1730.odin (limited to 'src/llvm_backend_general.cpp') diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 3a099ec55..5aaa7f63a 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -546,8 +546,11 @@ gb_internal lbValue lb_addr_get_ptr(lbProcedure *p, lbAddr const &addr) { break; case lbAddr_Swizzle: + GB_PANIC("lbAddr_Swizzle should be handled elsewhere"); + break; + case lbAddr_SwizzleLarge: - // TOOD(bill): is this good enough logic? + GB_PANIC("lbAddr_SwizzleLarge should be handled elsewhere"); break; } @@ -922,7 +925,7 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) { GB_ASSERT(value.value != nullptr); value = lb_emit_conv(p, value, lb_addr_type(addr)); - lbValue dst = lb_addr_get_ptr(p, addr); + lbValue dst = addr.addr; lbValue src = lb_address_from_load_or_generate_local(p, value); { lbValue src_ptrs[4] = {}; @@ -948,7 +951,7 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) { GB_ASSERT(value.value != nullptr); value = lb_emit_conv(p, value, lb_addr_type(addr)); - lbValue dst = lb_addr_get_ptr(p, addr); + lbValue dst = addr.addr; lbValue src = lb_address_from_load_or_generate_local(p, value); for_array(i, addr.swizzle_large.indices) { lbValue src_ptr = lb_emit_array_epi(p, src, i); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 9b5b14626..027837f3f 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1072,10 +1072,22 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc break; } case Type_Array: { - lbValue array = lb_build_addr_ptr(p, expr); - if (is_type_pointer(type_deref(array.type))) { - array = lb_emit_load(p, array); + lbValue array; + lbAddr addr = lb_build_addr(p, expr); + switch (addr.kind) { + case lbAddr_Swizzle: + case lbAddr_SwizzleLarge: + // NOTE(laytan): apply the swizzle. 
+ array = lb_address_from_load(p, lb_addr_load(p, addr)); + break; + default: + array = lb_addr_get_ptr(p, addr); + if (is_type_pointer(type_deref(array.type))) { + array = lb_emit_load(p, array); + } + break; } + lbAddr count_ptr = lb_add_local_generated(p, t_int, false); lb_addr_store(p, count_ptr, lb_const_int(p->module, t_int, et->Array.count)); lb_build_range_indexed(p, array, val0_type, count_ptr.addr, &val, &key, &loop, &done, rs->reverse); diff --git a/tests/issues/run.bat b/tests/issues/run.bat index db941b55a..156b1754e 100644 --- a/tests/issues/run.bat +++ b/tests/issues/run.bat @@ -9,6 +9,7 @@ set COMMON=-define:ODIN_TEST_FANCY=false -file -vet -strict-style ..\..\..\odin test ..\test_issue_829.odin %COMMON% || exit /b ..\..\..\odin test ..\test_issue_1592.odin %COMMON% || exit /b +..\..\..\odin test ..\test_issue_1730.odin %COMMON% || exit /b ..\..\..\odin test ..\test_issue_2056.odin %COMMON% || exit /b ..\..\..\odin build ..\test_issue_2113.odin %COMMON% -debug || exit /b ..\..\..\odin test ..\test_issue_2466.odin %COMMON% || exit /b diff --git a/tests/issues/run.sh b/tests/issues/run.sh index db0864c3e..cd70f6401 100755 --- a/tests/issues/run.sh +++ b/tests/issues/run.sh @@ -10,6 +10,7 @@ set -x $ODIN test ../test_issue_829.odin $COMMON $ODIN test ../test_issue_1592.odin $COMMON +$ODIN test ../test_issue_1730.odin $COMMON $ODIN test ../test_issue_2056.odin $COMMON $ODIN build ../test_issue_2113.odin $COMMON -debug $ODIN test ../test_issue_2466.odin $COMMON diff --git a/tests/issues/test_issue_1730.odin b/tests/issues/test_issue_1730.odin new file mode 100644 index 000000000..c1c5c4cae --- /dev/null +++ b/tests/issues/test_issue_1730.odin @@ -0,0 +1,21 @@ +package test_issues + +import "core:testing" + +// Tests issue #1730 https://github.com/odin-lang/Odin/issues/1730 +@(test) +test_issue_1730 :: proc(t: ^testing.T) { + ll := [4]int{1, 2, 3, 4} + for l, i in ll.yz { + testing.expect(t, i <= 1) + if i == 0 { + testing.expect_value(t, l, 2) + } else if i == 1 { + testing.expect_value(t, l, 3) + } + } + + out: [4]int + out.yz = ll.yz + testing.expect_value(t, out, [4]int{0, 2, 3, 0}) +} -- cgit v1.2.3
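A minimal, hypothetical Odin sketch of the arm64 return-value point from the "some ABI fixups and improvements" commit above: a 12-byte value such as [3]i32, returned by value, falls in the 9..16 byte range and is now coerced to two 64-bit slots ([2 x i64]) instead of the old packed { i64, i32 }, per the clang output referenced in that commit message. The procedure names below are invented for illustration and do not appear in the patches.

package main

// Hypothetical illustration: [3]i32 is 12 bytes, so with the arm64 ABI change
// above its by-value return is coerced to [2 x i64] rather than a packed
// { i64, i32 }; the source-level behaviour is unchanged.
make_triple :: proc() -> [3]i32 {
	return [3]i32{1, 2, 3}
}

main :: proc() {
	t := make_triple()
	assert(t[0] == 1 && t[1] == 2 && t[2] == 3)
}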