From fe040d1bbd22c78081ffc1d45b3462f40f8eb17a Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 8 Apr 2025 11:36:53 +0100 Subject: Propagate `@(link_section=)` to nested declarations --- src/llvm_backend_stmt.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'src/llvm_backend_stmt.cpp') diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 1f783b1be..a0b7e8340 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1963,8 +1963,7 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { GB_ASSERT(ast_value->tav.mode == Addressing_Constant || ast_value->tav.mode == Addressing_Invalid); - bool allow_local = false; - value = lb_const_value(p->module, ast_value->tav.type, ast_value->tav.value, allow_local); + value = lb_const_value(p->module, ast_value->tav.type, ast_value->tav.value, LB_CONST_CONTEXT_DEFAULT_NO_LOCAL); } Ast *ident = vd->names[i]; -- cgit v1.2.3 From b41a776027b0b4796edc85217c5c607e5498035d Mon Sep 17 00:00:00 2001 From: Barinzaya Date: Thu, 24 Apr 2025 14:15:22 -0400 Subject: Correctly align global and static variables. This can be important if matrices or SIMD vectors are being used in global or static variables, as otherwise it may result in crashes due to aligned instructions accessing misaligned variables. --- src/llvm_backend.cpp | 1 + src/llvm_backend_stmt.cpp | 1 + 2 files changed, 2 insertions(+) (limited to 'src/llvm_backend_stmt.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 083a1d90e..1ada1c070 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -2598,6 +2598,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { LLVMSetLinkage(g.value, USE_SEPARATE_MODULES ? LLVMWeakAnyLinkage : LLVMInternalLinkage); } lb_set_linkage_from_entity_flags(m, g.value, e->flags); + LLVMSetAlignment(g.value, type_align_of(e->type)); if (e->Variable.link_section.len > 0) { LLVMSetSection(g.value, alloc_cstring(permanent_allocator(), e->Variable.link_section)); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index a0b7e8340..31e1ea595 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1984,6 +1984,7 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { char *c_name = alloc_cstring(permanent_allocator(), mangled_name); LLVMValueRef global = LLVMAddGlobal(p->module->mod, lb_type(p->module, e->type), c_name); + LLVMSetAlignment(global, type_align_of(e->type)); LLVMSetInitializer(global, LLVMConstNull(lb_type(p->module, e->type))); if (value.value != nullptr) { LLVMSetInitializer(global, value.value); -- cgit v1.2.3 From 9284ebb5e831c6c25e90131f5c607997760a7396 Mon Sep 17 00:00:00 2001 From: Barinzaya Date: Thu, 24 Apr 2025 14:27:39 -0400 Subject: Add missing cast to global/static var alignments. --- src/llvm_backend.cpp | 2 +- src/llvm_backend_stmt.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'src/llvm_backend_stmt.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 1ada1c070..2f861573a 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -2598,7 +2598,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { LLVMSetLinkage(g.value, USE_SEPARATE_MODULES ? 
LLVMWeakAnyLinkage : LLVMInternalLinkage); } lb_set_linkage_from_entity_flags(m, g.value, e->flags); - LLVMSetAlignment(g.value, type_align_of(e->type)); + LLVMSetAlignment(g.value, cast(u32)type_align_of(e->type)); if (e->Variable.link_section.len > 0) { LLVMSetSection(g.value, alloc_cstring(permanent_allocator(), e->Variable.link_section)); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 31e1ea595..96a5d0db1 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1984,7 +1984,7 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { char *c_name = alloc_cstring(permanent_allocator(), mangled_name); LLVMValueRef global = LLVMAddGlobal(p->module->mod, lb_type(p->module, e->type), c_name); - LLVMSetAlignment(global, type_align_of(e->type)); + LLVMSetAlignment(global, cast(u32)type_align_of(e->type)); LLVMSetInitializer(global, LLVMConstNull(lb_type(p->module, e->type))); if (value.value != nullptr) { LLVMSetInitializer(global, value.value); -- cgit v1.2.3 From 83bc2d3c4a186d6a8c362eed901acd6bc6363a8d Mon Sep 17 00:00:00 2001 From: Lucas Perlind Date: Wed, 30 Apr 2025 19:21:00 +1000 Subject: Add asan support for various allocators --- base/runtime/default_temp_allocator_arena.odin | 8 +++ base/runtime/heap_allocator_windows.odin | 12 +++- base/runtime/internal.odin | 9 +++ base/sanitizer/address.odin | 84 +++++++++++++++++++++- base/sanitizer/doc.odin | 4 +- core/mem/rollback_stack_allocator.odin | 50 +++++++++----- core/mem/tlsf/tlsf.odin | 2 +- core/mem/tlsf/tlsf_internal.odin | 96 ++++++++++++++------------ core/mem/tracking_allocator.odin | 11 ++- core/mem/virtual/arena.odin | 43 +++++++++--- core/mem/virtual/virtual.odin | 25 ++++--- core/mem/virtual/virtual_platform.odin | 3 + core/mem/virtual/virtual_windows.odin | 12 +++- src/llvm_backend.hpp | 2 + src/llvm_backend_general.cpp | 7 ++ src/llvm_backend_proc.cpp | 24 ++++--- src/llvm_backend_stmt.cpp | 16 +++++ 17 files changed, 308 insertions(+), 100 deletions(-) (limited to 'src/llvm_backend_stmt.cpp') diff --git a/base/runtime/default_temp_allocator_arena.odin b/base/runtime/default_temp_allocator_arena.odin index 5f25dac95..74994344a 100644 --- a/base/runtime/default_temp_allocator_arena.odin +++ b/base/runtime/default_temp_allocator_arena.odin @@ -1,6 +1,7 @@ package runtime import "base:intrinsics" +import "base:sanitizer" DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE :: uint(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE) @@ -43,6 +44,8 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint block.base = ([^]byte)(uintptr(block) + base_offset) block.capacity = uint(end - uintptr(block.base)) + sanitizer.address_poison(block.base, block.capacity) + // Should be zeroed assert(block.used == 0) assert(block.prev == nil) @@ -52,6 +55,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint memory_block_dealloc :: proc(block_to_free: ^Memory_Block, loc := #caller_location) { if block_to_free != nil { allocator := block_to_free.allocator + sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity) mem_free(block_to_free, allocator, loc) } } @@ -83,6 +87,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint) return } data = block.base[block.used+alignment_offset:][:min_size] + sanitizer.address_unpoison(block.base[block.used:block.used+size]) block.used += size return } @@ -162,6 +167,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { if arena.curr_block != nil { 
intrinsics.mem_zero(arena.curr_block.base, arena.curr_block.used) arena.curr_block.used = 0 + sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity) } arena.total_used = 0 } @@ -226,6 +232,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, // grow data in-place, adjusting next allocation block.used = uint(new_end) data = block.base[start:new_end] + sanitizer.address_unpoison(data) return } } @@ -299,6 +306,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) { assert(block.used >= temp.used, "out of order use of arena_temp_end", loc) amount_to_zero := block.used-temp.used intrinsics.mem_zero(block.base[temp.used:], amount_to_zero) + sanitizer.address_poison(block.base[temp.used:block.capacity]) block.used = temp.used arena.total_used -= amount_to_zero } diff --git a/base/runtime/heap_allocator_windows.odin b/base/runtime/heap_allocator_windows.odin index e07df7559..04a6f149b 100644 --- a/base/runtime/heap_allocator_windows.odin +++ b/base/runtime/heap_allocator_windows.odin @@ -1,5 +1,7 @@ package runtime +import "../sanitizer" + foreign import kernel32 "system:Kernel32.lib" @(private="file") @@ -16,7 +18,10 @@ foreign kernel32 { _heap_alloc :: proc "contextless" (size: int, zero_memory := true) -> rawptr { HEAP_ZERO_MEMORY :: 0x00000008 - return HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size)) + ptr := HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size)) + // NOTE(lucas): asan not guaranteed to unpoison win32 heap out of the box, do it ourselves + sanitizer.address_unpoison(ptr, size) + return ptr } _heap_resize :: proc "contextless" (ptr: rawptr, new_size: int) -> rawptr { if new_size == 0 { @@ -28,7 +33,10 @@ _heap_resize :: proc "contextless" (ptr: rawptr, new_size: int) -> rawptr { } HEAP_ZERO_MEMORY :: 0x00000008 - return HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size)) + new_ptr := HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size)) + // NOTE(lucas): asan not guaranteed to unpoison win32 heap out of the box, do it ourselves + sanitizer.address_unpoison(new_ptr, new_size) + return new_ptr } _heap_free :: proc "contextless" (ptr: rawptr) { if ptr == nil { diff --git a/base/runtime/internal.odin b/base/runtime/internal.odin index 59811a525..7c8a8294b 100644 --- a/base/runtime/internal.odin +++ b/base/runtime/internal.odin @@ -1106,3 +1106,12 @@ __read_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uin dst[j>>3] |= the_bit<<(j&7) } } + +@(no_sanitize_address) +__asan_unpoison_memory_region :: #force_inline proc "contextless" (address: rawptr, size: uint) { + foreign { + __asan_unpoison_memory_region :: proc(address: rawptr, size: uint) --- + } + __asan_unpoison_memory_region(address, size) +} + diff --git a/base/sanitizer/address.odin b/base/sanitizer/address.odin index 3924e02bf..edfdfc172 100644 --- a/base/sanitizer/address.odin +++ b/base/sanitizer/address.odin @@ -60,6 +60,7 @@ poison or unpoison memory in the same memory region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_poison_slice :: proc "contextless" (region: $T/[]$E) { when ASAN_ENABLED { __asan_poison_memory_region(raw_data(region), size_of(E) * len(region)) } } @@ -75,6 +76,7 @@ can poison or unpoison memory in the same memory region simultaneously. When asan is not enabled this procedure does nothing. 
*/ +@(no_sanitize_address) address_unpoison_slice :: proc "contextless" (region: $T/[]$E) { when ASAN_ENABLED { __asan_unpoison_memory_region(raw_data(region), size_of(E) * len(region)) } } @@ -90,6 +92,7 @@ two threads can poison or unpoison memory in the same memory region simul When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_poison_ptr :: proc "contextless" (ptr: ^$T) { when ASAN_ENABLED { __asan_poison_memory_region(ptr, size_of(T)) } } @@ -106,6 +109,7 @@ region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_unpoison_ptr :: proc "contextless" (ptr: ^$T) { when ASAN_ENABLED { __asan_unpoison_memory_region(ptr, size_of(T)) } } @@ -121,6 +125,7 @@ poison or unpoison memory in the same memory region simultaneously. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_poison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { when ASAN_ENABLED { assert_contextless(len >= 0) @@ -128,6 +133,22 @@ address_poison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { } } +/* +Marks the region covering `[ptr, ptr+len)` as unaddressable + +Code instrumented with `-sanitize:address` is forbidden from accessing any address +within the region. This procedure is not thread-safe because no two threads can +poison or unpoison memory in the same memory region simultaneously. + +When asan is not enabled this procedure does nothing. +*/ +@(no_sanitize_address) +address_poison_rawptr_uint :: proc "contextless" (ptr: rawptr, len: uint) { + when ASAN_ENABLED { + __asan_poison_memory_region(ptr, len) + } +} + /* Marks the region covering `[ptr, ptr+len)` as addressable @@ -137,6 +158,7 @@ threads can poison or unpoison memory in the same memory region simultane When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_unpoison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { when ASAN_ENABLED { assert_contextless(len >= 0) @@ -144,16 +166,34 @@ address_unpoison_rawptr :: proc "contextless" (ptr: rawptr, len: int) { } } +/* +Marks the region covering `[ptr, ptr+len)` as addressable + +Code instrumented with `-sanitize:address` is allowed to access any address +within the region again. This procedure is not thread-safe because no two +threads can poison or unpoison memory in the same memory region simultaneously. + +When asan is not enabled this procedure does nothing. +*/ +@(no_sanitize_address) +address_unpoison_rawptr_uint :: proc "contextless" (ptr: rawptr, len: uint) { + when ASAN_ENABLED { + __asan_unpoison_memory_region(ptr, len) + } +} + address_poison :: proc { address_poison_slice, address_poison_ptr, address_poison_rawptr, + address_poison_rawptr_uint, } address_unpoison :: proc { address_unpoison_slice, address_unpoison_ptr, address_unpoison_rawptr, + address_unpoison_rawptr_uint, } /* @@ -164,6 +204,7 @@ This can be used for logging and/or debugging purposes. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_set_death_callback :: proc "contextless" (callback: Address_Death_Callback) { when ASAN_ENABLED { __sanitizer_set_death_callback(callback) } } @@ -178,7 +219,8 @@ in an asan error. When asan is not enabled this procedure returns `nil`. 
*/ -address_region_is_poisoned_slice :: proc "contextless" (region: []$T/$E) -> rawptr { +@(no_sanitize_address) +address_region_is_poisoned_slice :: proc "contextless" (region: $T/[]$E) -> rawptr { when ASAN_ENABLED { return __asan_region_is_poisoned(raw_data(region), size_of(E) * len(region)) } else { @@ -194,6 +236,7 @@ in an asan error. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_region_is_poisoned_ptr :: proc "contextless" (ptr: ^$T) -> rawptr { when ASAN_ENABLED { return __asan_region_is_poisoned(ptr, size_of(T)) @@ -210,6 +253,7 @@ in an asan error. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_region_is_poisoned_rawptr :: proc "contextless" (region: rawptr, len: int) -> rawptr { when ASAN_ENABLED { assert_contextless(len >= 0) @@ -219,10 +263,29 @@ address_region_is_poisoned_rawptr :: proc "contextless" (region: rawptr, len: in } } +/* +Checks if the memory region covered by `[ptr, ptr+len)` is poisoned. + +If it is poisoned this procedure returns the address which would result +in an asan error. + +When asan is not enabled this procedure returns `nil`. +*/ +@(no_sanitize_address) +address_region_is_poisoned_rawptr_uint :: proc "contextless" (region: rawptr, len: uint) -> rawptr { + when ASAN_ENABLED { + return __asan_region_is_poisoned(region, len) + } else { + return nil + } +} + + address_region_is_poisoned :: proc { address_region_is_poisoned_slice, address_region_is_poisoned_ptr, address_region_is_poisoned_rawptr, + address_region_is_poisoned_rawptr_uint, } /* @@ -233,6 +296,7 @@ If it is poisoned this procedure returns `true`, otherwise it returns When asan is not enabled this procedure returns `false`. */ +@(no_sanitize_address) address_is_poisoned :: proc "contextless" (address: rawptr) -> bool { when ASAN_ENABLED { return __asan_address_is_poisoned(address) != 0 @@ -248,6 +312,7 @@ This procedure prints the description out to `stdout`. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_describe_address :: proc "contextless" (address: rawptr) { when ASAN_ENABLED { __asan_describe_address(address) @@ -260,6 +325,7 @@ Returns `true` if an asan error has occurred, otherwise it returns When asan is not enabled this procedure returns `false`. */ +@(no_sanitize_address) address_report_present :: proc "contextless" () -> bool { when ASAN_ENABLED { return __asan_report_present() != 0 @@ -275,6 +341,7 @@ If no asan error has occurred `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_pc :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_pc() @@ -290,6 +357,7 @@ If no asan error has occurred `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_bp :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_bp() @@ -305,6 +373,7 @@ If no asan error has occurred `nil` is returned. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_report_sp :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_sp() @@ -320,6 +389,7 @@ If no asan error has occurred `nil` is returned. When asan is not enabled this procedure returns `nil`. 
*/ +@(no_sanitize_address) address_get_report_address :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_report_address() @@ -335,6 +405,7 @@ If no asan error has occurred `.none` is returned. When asan is not enabled this procedure returns `.none`. */ +@(no_sanitize_address) address_get_report_access_type :: proc "contextless" () -> Address_Access_Type { when ASAN_ENABLED { if ! address_report_present() { @@ -353,6 +424,7 @@ If no asan error has occurred `0` is returned. When asan is not enabled this procedure returns `0`. */ +@(no_sanitize_address) address_get_report_access_size :: proc "contextless" () -> uint { when ASAN_ENABLED { return __asan_get_report_access_size() @@ -368,6 +440,7 @@ If no asan error has occurred an empty string is returned. When asan is not enabled this procedure returns an empty string. */ +@(no_sanitize_address) address_get_report_description :: proc "contextless" () -> string { when ASAN_ENABLED { return string(__asan_get_report_description()) @@ -386,6 +459,7 @@ The information provided includes: When asan is not enabled this procedure returns zero initialised values. */ +@(no_sanitize_address) address_locate_address :: proc "contextless" (addr: rawptr, data: []byte) -> Address_Located_Address { when ASAN_ENABLED { out_addr: rawptr @@ -404,6 +478,7 @@ The stack trace is filled into the `data` slice. When asan is not enabled this procedure returns a zero initialised value. */ +@(no_sanitize_address) address_get_alloc_stack_trace :: proc "contextless" (addr: rawptr, data: []rawptr) -> ([]rawptr, int) { when ASAN_ENABLED { out_thread: i32 @@ -421,6 +496,7 @@ The stack trace is filled into the `data` slice. When asan is not enabled this procedure returns zero initialised values. */ +@(no_sanitize_address) address_get_free_stack_trace :: proc "contextless" (addr: rawptr, data: []rawptr) -> ([]rawptr, int) { when ASAN_ENABLED { out_thread: i32 @@ -436,6 +512,7 @@ Returns the current asan shadow memory mapping. When asan is not enabled this procedure returns a zero initialised value. */ +@(no_sanitize_address) address_get_shadow_mapping :: proc "contextless" () -> Address_Shadow_Mapping { when ASAN_ENABLED { result: Address_Shadow_Mapping @@ -451,6 +528,7 @@ Prints asan statistics to `stderr` When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_print_accumulated_stats :: proc "contextless" () { when ASAN_ENABLED { __asan_print_accumulated_stats() @@ -464,6 +542,7 @@ This pointer can then be used for `address_is_in_fake_stack`. When asan is not enabled this procedure returns `nil`. */ +@(no_sanitize_address) address_get_current_fake_stack :: proc "contextless" () -> rawptr { when ASAN_ENABLED { return __asan_get_current_fake_stack() @@ -477,6 +556,7 @@ Returns if an address belongs to a given fake stack and if so the region of the When asan is not enabled this procedure returns zero initialised values. */ +@(no_sanitize_address) address_is_in_fake_stack :: proc "contextless" (fake_stack: rawptr, addr: rawptr) -> ([]byte, bool) { when ASAN_ENABLED { begin: rawptr @@ -496,6 +576,7 @@ i.e. a procedure such as `panic` and `os.exit`. When asan is not enabled this procedure does nothing. */ +@(no_sanitize_address) address_handle_no_return :: proc "contextless" () { when ASAN_ENABLED { __asan_handle_no_return() @@ -509,6 +590,7 @@ Returns `true` if successful, otherwise it returns `false`. When asan is not enabled this procedure returns `false`. 
*/ +@(no_sanitize_address) address_update_allocation_context :: proc "contextless" (addr: rawptr) -> bool { when ASAN_ENABLED { return __asan_update_allocation_context(addr) != 0 diff --git a/base/sanitizer/doc.odin b/base/sanitizer/doc.odin index e389842b1..707f41ce0 100644 --- a/base/sanitizer/doc.odin +++ b/base/sanitizer/doc.odin @@ -14,12 +14,14 @@ related bugs. Typically asan interacts with libc but Odin code can be marked up with the asan runtime to extend the memory error detection outside of libc using this package. For more information about asan see: https://clang.llvm.org/docs/AddressSanitizer.html +Procedures can be made exempt from asan when marked up with @(no_sanitize_address) + ## Memory Enabled with `-sanitize:memory` when building an odin project. The memory sanitizer is another runtime memory error detector with the sole purpose to catch the -use of uninitialized memory. This is not a very common bug in Odin as be default everything is +use of uninitialized memory. This is not a very common bug in Odin as by default everything is set to zero when initialised (ZII). For more information about the memory sanitizer see: https://clang.llvm.org/docs/MemorySanitizer.html diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 43ef10fe9..a00131b7f 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -1,6 +1,7 @@ package mem import "base:runtime" +import "base:sanitizer" /* Rollback stack default block size. @@ -47,14 +48,14 @@ Rollback_Stack :: struct { block_allocator: Allocator, } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { start := raw_data(block.buffer) end := start[block.offset:] return start < ptr && ptr <= end } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( parent: ^Rollback_Stack_Block, block: ^Rollback_Stack_Block, @@ -71,7 +72,7 @@ rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( return nil, nil, nil, .Invalid_Pointer } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header, @@ -86,9 +87,10 @@ rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( return nil, nil, false } -@(private="file") +@(private="file", no_sanitize_address) rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) { header := header + for block.offset > 0 && header.is_free { block.offset = header.prev_offset block.last_alloc = raw_data(block.buffer)[header.prev_ptr:] @@ -99,9 +101,10 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_ /* Free memory to a rollback stack allocator. */ -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { parent, block, header := rb_find_ptr(stack, ptr) or_return + if header.is_free { return .Invalid_Pointer } @@ -120,7 +123,7 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { /* Free all memory owned by the rollback stack allocator. 
*/ -@(private="file") +@(private="file", no_sanitize_address) rb_free_all :: proc(stack: ^Rollback_Stack) { for block := stack.head.next_block; block != nil; /**/ { next_block := block.next_block @@ -131,12 +134,13 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { stack.head.next_block = nil stack.head.last_alloc = nil stack.head.offset = 0 + sanitizer.address_poison(stack.head.buffer) } /* Allocate memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc :: proc( stack: ^Rollback_Stack, size: int, @@ -153,7 +157,7 @@ rb_alloc :: proc( /* Allocate memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc_bytes :: proc( stack: ^Rollback_Stack, size: int, @@ -170,7 +174,7 @@ rb_alloc_bytes :: proc( /* Allocate non-initialized memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc_non_zeroed :: proc( stack: ^Rollback_Stack, size: int, @@ -184,7 +188,7 @@ rb_alloc_non_zeroed :: proc( /* Allocate non-initialized memory using the rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_alloc_bytes_non_zeroed :: proc( stack: ^Rollback_Stack, size: int, @@ -194,6 +198,7 @@ rb_alloc_bytes_non_zeroed :: proc( assert(size >= 0, "Size must be positive or zero.", loc) assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc) parent: ^Rollback_Stack_Block + for block := stack.head; /**/; block = block.next_block { when !ODIN_DISABLE_ASSERT { allocated_new_block: bool @@ -235,7 +240,9 @@ rb_alloc_bytes_non_zeroed :: proc( // Prevent any further allocations on it. block.offset = cast(uintptr)len(block.buffer) } - #no_bounds_check return ptr[:size], nil + res := ptr[:size] + sanitizer.address_unpoison(res) + return res, nil } return nil, .Out_Of_Memory } @@ -243,7 +250,7 @@ rb_alloc_bytes_non_zeroed :: proc( /* Resize an allocation owned by rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize :: proc( stack: ^Rollback_Stack, old_ptr: rawptr, @@ -266,7 +273,7 @@ rb_resize :: proc( /* Resize an allocation owned by rollback stack allocator. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize_bytes :: proc( stack: ^Rollback_Stack, old_memory: []byte, @@ -289,7 +296,7 @@ rb_resize_bytes :: proc( Resize an allocation owned by rollback stack allocator without explicit zero-initialization. */ -@(require_results) +@(require_results, no_sanitize_address) rb_resize_non_zeroed :: proc( stack: ^Rollback_Stack, old_ptr: rawptr, @@ -306,7 +313,7 @@ rb_resize_non_zeroed :: proc( Resize an allocation owned by rollback stack allocator without explicit zero-initialization. 
*/ -@(require_results) +@(require_results, no_sanitize_address) rb_resize_bytes_non_zeroed :: proc( stack: ^Rollback_Stack, old_memory: []byte, @@ -330,7 +337,9 @@ rb_resize_bytes_non_zeroed :: proc( if len(block.buffer) <= stack.block_size { block.offset += cast(uintptr)size - cast(uintptr)old_size } - #no_bounds_check return (ptr)[:size], nil + res := (ptr)[:size] + sanitizer.address_unpoison(res) + #no_bounds_check return res, nil } } } @@ -340,7 +349,7 @@ rb_resize_bytes_non_zeroed :: proc( return } -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) { buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return block = cast(^Rollback_Stack_Block)raw_data(buffer) @@ -351,6 +360,7 @@ rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stac /* Initialize the rollback stack allocator using a fixed backing buffer. */ +@(no_sanitize_address) rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) { MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr) assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location) @@ -365,6 +375,7 @@ rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, loc /* Initialize the rollback stack allocator using a backing block allocator. */ +@(no_sanitize_address) rollback_stack_init_dynamic :: proc( stack: ^Rollback_Stack, block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE, @@ -396,6 +407,7 @@ rollback_stack_init :: proc { /* Destroy a rollback stack. */ +@(no_sanitize_address) rollback_stack_destroy :: proc(stack: ^Rollback_Stack) { if stack.block_allocator.procedure != nil { rb_free_all(stack) @@ -435,7 +447,7 @@ from the last allocation backwards. Each allocation has an overhead of 8 bytes and any extra bytes to satisfy the requested alignment. */ -@(require_results) +@(require_results, no_sanitize_address) rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { return Allocator { data = stack, @@ -443,7 +455,7 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { } } -@(require_results) +@(require_results, no_sanitize_address) rollback_stack_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin index 4ce6e54d9..0ae8c28e0 100644 --- a/core/mem/tlsf/tlsf.odin +++ b/core/mem/tlsf/tlsf.odin @@ -198,4 +198,4 @@ fls :: proc "contextless" (word: u32) -> (bit: i32) { fls_uint :: proc "contextless" (size: uint) -> (bit: i32) { N :: (size_of(uint) * 8) - 1 return i32(N - intrinsics.count_leading_zeros(size)) -} \ No newline at end of file +} diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin index f8a9bf60c..89b875679 100644 --- a/core/mem/tlsf/tlsf_internal.odin +++ b/core/mem/tlsf/tlsf_internal.odin @@ -10,6 +10,7 @@ package mem_tlsf import "base:intrinsics" +import "base:sanitizer" import "base:runtime" // log2 of number of linear subdivisions of block sizes. @@ -209,6 +210,8 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> return nil, .Out_Of_Memory } + sanitizer.address_poison(new_pool_buf) + // Allocate a new link in the `control.pool` tracking structure. 
new_pool := new_clone(Pool{ data = new_pool_buf, @@ -254,7 +257,7 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> return block_prepare_used(control, block, adjust) } -@(private, require_results) +@(private, require_results, no_sanitize_address) alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) { res, err = alloc_bytes_non_zeroed(control, size, align) if err == nil { @@ -273,6 +276,7 @@ free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) { block := block_from_ptr(ptr) assert(!block_is_free(block), "block already marked as free") // double free + sanitizer.address_poison(ptr, block.size) block_mark_as_free(block) block = block_merge_prev(control, block) block = block_merge_next(control, block) @@ -316,6 +320,7 @@ resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, align block_trim_used(control, block, adjust) res = ([^]byte)(ptr)[:new_size] + sanitizer.address_unpoison(res) if min_size < new_size { to_zero := ([^]byte)(ptr)[min_size:new_size] @@ -374,95 +379,96 @@ resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: NOTE: TLSF spec relies on ffs/fls returning a value in the range 0..31. */ -@(private, require_results) +@(private, require_results, no_sanitize_address) block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) { return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE) } -@(private) +@(private, no_sanitize_address) block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) { old_size := block.size block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) { return block_size(block) == 0 } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) { return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE } -@(private) +@(private, no_sanitize_address) block_set_free :: proc "contextless" (block: ^Block_Header) { block.size |= BLOCK_HEADER_FREE } -@(private) +@(private, no_sanitize_address) block_set_used :: proc "contextless" (block: ^Block_Header) { block.size &~= BLOCK_HEADER_FREE } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) { return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE } -@(private) +@(private, no_sanitize_address) block_set_prev_free :: proc "contextless" (block: ^Block_Header) { block.size |= BLOCK_HEADER_PREV_FREE } -@(private) +@(private, no_sanitize_address) block_set_prev_used :: proc "contextless" (block: ^Block_Header) { block.size &~= BLOCK_HEADER_PREV_FREE } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) { return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET) } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) { return rawptr(uintptr(block) + BLOCK_START_OFFSET) } // Return location of next block after block of given size. 
-@(private, require_results) +@(private, require_results, no_sanitize_address) offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) { return (^Block_Header)(uintptr(ptr) + uintptr(size)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) { return (^Block_Header)(uintptr(ptr) - uintptr(size)) } // Return location of previous block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) { assert(block_is_prev_free(block), "previous block must be free") + return block.prev_phys_block } // Return location of next existing block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) { return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD) } // Link a new block with its physical neighbor, return the neighbor. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) { next = block_next(block) next.prev_phys_block = block return } -@(private) +@(private, no_sanitize_address) block_mark_as_free :: proc(block: ^Block_Header) { // Link the block to the next block, first. next := block_link_next(block) @@ -470,26 +476,26 @@ block_mark_as_free :: proc(block: ^Block_Header) { block_set_free(block) } -@(private) -block_mark_as_used :: proc(block: ^Block_Header) { +@(private, no_sanitize_address) +block_mark_as_used :: proc(block: ^Block_Header) { next := block_next(block) block_set_prev_used(next) block_set_used(block) } -@(private, require_results) +@(private, require_results, no_sanitize_address) align_up :: proc(x, align: uint) -> (aligned: uint) { assert(0 == (align & (align - 1)), "must align to a power of two") return (x + (align - 1)) &~ (align - 1) } -@(private, require_results) +@(private, require_results, no_sanitize_address) align_down :: proc(x, align: uint) -> (aligned: uint) { assert(0 == (align & (align - 1)), "must align to a power of two") return x - (x & (align - 1)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) { assert(0 == (align & (align - 1)), "must align to a power of two") align_mask := uintptr(align) - 1 @@ -499,7 +505,7 @@ align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) { } // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum. -@(private, require_results) +@(private, require_results, no_sanitize_address) adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) { if size == 0 { return 0 @@ -513,7 +519,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) { } // Adjust an allocation size to be aligned to word size, and no smaller than internal minimum. -@(private, require_results) +@(private, require_results, no_sanitize_address) adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) { if size == 0 { return 0, nil @@ -531,7 +537,7 @@ adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: // TLSF utility functions. In most cases these are direct translations of // the documentation in the research paper. 
-@(optimization_mode="favor_size", private, require_results) +@(optimization_mode="favor_size", private, require_results, no_sanitize_address) mapping_insert :: proc(size: uint) -> (fl, sl: i32) { if size < SMALL_BLOCK_SIZE { // Store small blocks in first list. @@ -544,7 +550,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) { return } -@(optimization_mode="favor_size", private, require_results) +@(optimization_mode="favor_size", private, require_results, no_sanitize_address) mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) { rounded = size if size >= SMALL_BLOCK_SIZE { @@ -555,12 +561,12 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) { } // This version rounds up to the next block size (for allocations) -@(optimization_mode="favor_size", private, require_results) +@(optimization_mode="favor_size", private, require_results, no_sanitize_address) mapping_search :: proc(size: uint) -> (fl, sl: i32) { return mapping_insert(mapping_round(size)) } -@(private, require_results) +@(private, require_results, no_sanitize_address) search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) { // First, search for a block in the list associated with the given fl/sl index. fl := fli^; sl := sli^ @@ -587,7 +593,7 @@ search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^B } // Remove a free block from the free list. -@(private) +@(private, no_sanitize_address) remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) { prev := block.prev_free next := block.next_free @@ -613,7 +619,7 @@ remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl } // Insert a free block into the free block list. -@(private) +@(private, no_sanitize_address) insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) { current := control.blocks[fl][sl] assert(current != nil, "free lists cannot have a nil entry") @@ -631,26 +637,26 @@ insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl } // Remove a given block from the free list. -@(private) +@(private, no_sanitize_address) block_remove :: proc(control: ^Allocator, block: ^Block_Header) { fl, sl := mapping_insert(block_size(block)) remove_free_block(control, block, fl, sl) } // Insert a given block into the free list. -@(private) +@(private, no_sanitize_address) block_insert :: proc(control: ^Allocator, block: ^Block_Header) { fl, sl := mapping_insert(block_size(block)) insert_free_block(control, block, fl, sl) } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) { return block_size(block) >= size_of(Block_Header) + size } // Split a block into two, the second of which is free. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) { // Calculate the amount of space left in the remaining block. remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD) @@ -671,9 +677,10 @@ block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Head } // Absorb a free block's storage into an adjacent previous free block. 
-@(private, require_results) +@(private, require_results, no_sanitize_address) block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) { assert(!block_is_last(prev), "previous block can't be last") + // Note: Leaves flags untouched. prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD _ = block_link_next(prev) @@ -681,7 +688,7 @@ block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^B } // Merge a just-freed block with an adjacent previous free block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) { merged = block if (block_is_prev_free(block)) { @@ -695,7 +702,7 @@ block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: } // Merge a just-freed block with an adjacent free block. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) { merged = block next := block_next(block) @@ -710,7 +717,7 @@ block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: } // Trim any trailing block space off the end of a free block, return to pool. -@(private) +@(private, no_sanitize_address) block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { assert(block_is_free(block), "block must be free") if (block_can_split(block, size)) { @@ -722,7 +729,7 @@ block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { } // Trim any trailing block space off the end of a used block, return to pool. -@(private) +@(private, no_sanitize_address) block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { assert(!block_is_free(block), "Block must be used") if (block_can_split(block, size)) { @@ -736,7 +743,7 @@ block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) { } // Trim leading block space, return to pool. -@(private, require_results) +@(private, require_results, no_sanitize_address) block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) { remaining = block if block_can_split(block, size) { @@ -750,7 +757,7 @@ block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: return remaining } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) { fl, sl: i32 if size != 0 { @@ -774,13 +781,14 @@ block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Hea return block } -@(private, require_results) +@(private, require_results, no_sanitize_address) block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) { if block != nil { assert(size != 0, "Size must be non-zero") block_trim_free(control, block, size) block_mark_as_used(block) res = ([^]byte)(block_to_ptr(block))[:size] + sanitizer.address_unpoison(res) } return -} \ No newline at end of file +} diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index 25c547471..01080075e 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -64,6 +64,7 @@ This procedure initializes the tracking allocator `t` with a backing allocator specified with `backing_allocator`. 
The `internals_allocator` will be used to allocate the tracked data. */ +@(no_sanitize_address) tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) { t.backing = backing_allocator t.allocation_map.allocator = internals_allocator @@ -77,6 +78,7 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc /* Destroy the tracking allocator. */ +@(no_sanitize_address) tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) { delete(t.allocation_map) delete(t.bad_free_array) @@ -90,6 +92,7 @@ This procedure clears the tracked data from a tracking allocator. **Note**: This procedure clears only the current allocation data while keeping the totals intact. */ +@(no_sanitize_address) tracking_allocator_clear :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) clear(&t.allocation_map) @@ -103,6 +106,7 @@ Reset the tracking allocator. Reset all of a Tracking Allocator's allocation data back to zero. */ +@(no_sanitize_address) tracking_allocator_reset :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) clear(&t.allocation_map) @@ -124,6 +128,7 @@ Override Tracking_Allocator.bad_free_callback to have something else happen. For example, you can use tracking_allocator_bad_free_callback_add_to_array to return the tracking allocator to the old behavior, where the bad_free_array was used. */ +@(no_sanitize_address) tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) { runtime.print_caller_location(location) runtime.print_string(" Tracking allocator error: Bad free of pointer ") @@ -136,6 +141,7 @@ tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memor Alternative behavior for a bad free: Store in `bad_free_array`. If you use this, then you must make sure to check Tracking_Allocator.bad_free_array at some point. */ +@(no_sanitize_address) tracking_allocator_bad_free_callback_add_to_array :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) { append(&t.bad_free_array, Tracking_Allocator_Bad_Free_Entry { memory = memory, @@ -175,7 +181,7 @@ Example: } } */ -@(require_results) +@(require_results, no_sanitize_address) tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { return Allocator{ data = data, @@ -183,6 +189,7 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { } } +@(no_sanitize_address) tracking_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, size, alignment: int, old_memory: rawptr, old_size: int, loc := #caller_location, ) -> (result: []byte, err: Allocator_Error) { + @(no_sanitize_address) track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) { data.total_memory_allocated += i64(entry.size) data.total_allocation_count += 1 @@ -200,6 +208,7 @@ tracking_allocator_proc :: proc( } + @(no_sanitize_address) track_free :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) { data.total_memory_freed += i64(entry.size) data.total_free_count += 1 diff --git a/core/mem/virtual/arena.odin b/core/mem/virtual/arena.odin index 4fc2e0e35..4e1cc2466 100644 --- a/core/mem/virtual/arena.odin +++ b/core/mem/virtual/arena.odin @@ -3,6 +3,8 @@ package mem_virtual import "core:mem" import "core:sync" +import "base:sanitizer" + Arena_Kind :: enum uint { Growing = 0, // Chained memory blocks (singly linked list). Static = 1, // Fixed reservation sized. 
@@ -43,7 +45,7 @@ DEFAULT_ARENA_STATIC_RESERVE_SIZE :: mem.Gigabyte when size_of(uintptr) == 8 els // Initialization of an `Arena` to be a `.Growing` variant. // A growing arena is a linked list of `Memory_Block`s allocated with virtual memory. -@(require_results) +@(require_results, no_sanitize_address) arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (err: Allocator_Error) { arena.kind = .Growing arena.curr_block = memory_block_alloc(0, reserved, {}) or_return @@ -53,24 +55,26 @@ arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING if arena.minimum_block_size == 0 { arena.minimum_block_size = reserved } + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) return } // Initialization of an `Arena` to be a `.Static` variant. // A static arena contains a single `Memory_Block` allocated with virtual memory. -@(require_results) +@(require_results, no_sanitize_address) arena_init_static :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_STATIC_RESERVE_SIZE, commit_size: uint = DEFAULT_ARENA_STATIC_COMMIT_SIZE) -> (err: Allocator_Error) { arena.kind = .Static arena.curr_block = memory_block_alloc(commit_size, reserved, {}) or_return arena.total_used = 0 arena.total_reserved = arena.curr_block.reserved + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) return } // Initialization of an `Arena` to be a `.Buffer` variant. // A buffer arena contains a single `Memory_Block` created from a user-provided []byte. -@(require_results) +@(require_results, no_sanitize_address) arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Error) { if len(buffer) < size_of(Memory_Block) { return .Out_Of_Memory } arena.kind = .Buffer - mem.zero_slice(buffer) + sanitizer.address_poison(buffer[:]) block_base := raw_data(buffer) block := (^Memory_Block)(block_base) @@ -94,7 +98,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro } // Allocates memory from the provided arena. -@(require_results) +@(require_results, no_sanitize_address) arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_location) -> (data: []byte, err: Allocator_Error) { assert(alignment & (alignment-1) == 0, "non-power of two alignment", loc) @@ -158,10 +162,13 @@ arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_l data, err = alloc_from_memory_block(arena.curr_block, size, alignment, default_commit_size=0) arena.total_used = arena.curr_block.used } + + sanitizer.address_unpoison(data) return } // Resets the memory of a Static or Buffer arena to a specific `position` (offset) and zeroes the previously used memory. 
+@(no_sanitize_address) arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) -> bool { sync.mutex_guard(&arena.mutex) @@ -175,6 +182,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) mem.zero_slice(arena.curr_block.base[arena.curr_block.used:][:prev_pos-pos]) } arena.total_used = arena.curr_block.used + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) return true } else if pos == 0 { arena.total_used = 0 @@ -184,6 +192,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) } // Frees the last memory block of a Growing Arena +@(no_sanitize_address) arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_location) { if free_block := arena.curr_block; free_block != nil { assert(arena.kind == .Growing, "expected a .Growing arena", loc) @@ -191,11 +200,13 @@ arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_locat arena.total_reserved -= free_block.reserved arena.curr_block = free_block.prev + sanitizer.address_poison(free_block.base[:free_block.committed]) memory_block_dealloc(free_block) } } // Deallocates all but the first memory block of the arena and resets the allocator's usage to 0. +@(no_sanitize_address) arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { switch arena.kind { case .Growing: @@ -208,7 +219,9 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { if arena.curr_block != nil { curr_block_used := int(arena.curr_block.used) arena.curr_block.used = 0 + sanitizer.address_unpoison(arena.curr_block.base[:curr_block_used]) mem.zero(arena.curr_block.base, curr_block_used) + sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed]) } arena.total_used = 0 case .Static, .Buffer: @@ -219,6 +232,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) { // Frees all of the memory allocated by the arena and zeros all of the values of an arena. // A buffer-based arena does not `delete` the provided `[]byte` buffer. +@(no_sanitize_address) arena_destroy :: proc(arena: ^Arena, loc := #caller_location) { sync.mutex_guard(&arena.mutex) switch arena.kind { @@ -250,7 +264,7 @@ arena_static_bootstrap_new :: proc{ } // Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy. -@(require_results) +@(require_results, no_sanitize_address) arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) { bootstrap: Arena bootstrap.kind = .Growing @@ -266,13 +280,13 @@ arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintp } // Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy. -@(require_results) +@(require_results, no_sanitize_address) arena_growing_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) { return arena_growing_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), minimum_block_size) } // Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy. 
-@(require_results) +@(require_results, no_sanitize_address) arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, reserved: uint) -> (ptr: ^T, err: Allocator_Error) { bootstrap: Arena bootstrap.kind = .Static @@ -288,19 +302,20 @@ arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintpt } // Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy. -@(require_results) +@(require_results, no_sanitize_address) arena_static_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, reserved: uint) -> (ptr: ^T, err: Allocator_Error) { return arena_static_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), reserved) } // Create an `Allocator` from the provided `Arena` -@(require_results) +@(require_results, no_sanitize_address) arena_allocator :: proc(arena: ^Arena) -> mem.Allocator { return mem.Allocator{arena_allocator_proc, arena} } // The allocator procedure used by an `Allocator` produced by `arena_allocator` +@(no_sanitize_address) arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, size, alignment: int, old_memory: rawptr, old_size: int, @@ -334,6 +349,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, if size < old_size { // shrink data in-place data = old_data[:size] + sanitizer.address_poison(old_data[size:old_size]) return } @@ -347,6 +363,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, _ = alloc_from_memory_block(block, new_end - old_end, 1, default_commit_size=arena.default_commit_size) or_return arena.total_used += block.used - prev_used data = block.base[start:new_end] + sanitizer.address_unpoison(data) return } } @@ -357,6 +374,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, return } copy(new_memory, old_data[:old_size]) + sanitizer.address_poison(old_data[:old_size]) return new_memory, nil case .Query_Features: set := (^mem.Allocator_Mode_Set)(old_memory) @@ -382,7 +400,7 @@ Arena_Temp :: struct { } // Begins the section of temporary arena memory. -@(require_results) +@(require_results, no_sanitize_address) arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena_Temp) { assert(arena != nil, "nil arena", loc) sync.mutex_guard(&arena.mutex) @@ -397,6 +415,7 @@ arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena } // Ends the section of temporary arena memory by resetting the memory to the stored position. +@(no_sanitize_address) arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) { assert(temp.arena != nil, "nil arena", loc) arena := temp.arena @@ -432,6 +451,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) { } // Ignore the use of an `arena_temp_begin` entirely by __not__ resetting to the stored position. 
+@(no_sanitize_address) arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) { assert(temp.arena != nil, "nil arena", loc) arena := temp.arena @@ -442,6 +462,7 @@ arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) { } // Asserts that all uses of `Arena_Temp` within an `Arena` have been ended +@(no_sanitize_address) arena_check_temp :: proc(arena: ^Arena, loc := #caller_location) { assert(arena.temp_count == 0, "Arena_Temp not been ended", loc) } diff --git a/core/mem/virtual/virtual.odin b/core/mem/virtual/virtual.odin index 4afc33813..031fb721a 100644 --- a/core/mem/virtual/virtual.odin +++ b/core/mem/virtual/virtual.odin @@ -2,6 +2,7 @@ package mem_virtual import "core:mem" import "base:intrinsics" +import "base:sanitizer" import "base:runtime" _ :: runtime @@ -14,27 +15,33 @@ platform_memory_init :: proc() { Allocator_Error :: mem.Allocator_Error -@(require_results) +@(require_results, no_sanitize_address) reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) { return _reserve(size) } +@(no_sanitize_address) commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error { + sanitizer.address_unpoison(data, size) return _commit(data, size) } -@(require_results) +@(require_results, no_sanitize_address) reserve_and_commit :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) { data = reserve(size) or_return commit(raw_data(data), size) or_return return } +@(no_sanitize_address) decommit :: proc "contextless" (data: rawptr, size: uint) { + sanitizer.address_poison(data, size) _decommit(data, size) } +@(no_sanitize_address) release :: proc "contextless" (data: rawptr, size: uint) { + sanitizer.address_unpoison(data, size) _release(data, size) } @@ -46,13 +53,11 @@ Protect_Flag :: enum u32 { Protect_Flags :: distinct bit_set[Protect_Flag; u32] Protect_No_Access :: Protect_Flags{} +@(no_sanitize_address) protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool { return _protect(data, size, flags) } - - - Memory_Block :: struct { prev: ^Memory_Block, base: [^]byte, @@ -66,13 +71,13 @@ Memory_Block_Flag :: enum u32 { Memory_Block_Flags :: distinct bit_set[Memory_Block_Flag; u32] -@(private="file", require_results) +@(private="file", require_results, no_sanitize_address) align_formula :: #force_inline proc "contextless" (size, align: uint) -> uint { result := size + align-1 return result - result%align } -@(require_results) +@(require_results, no_sanitize_address) memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags: Memory_Block_Flags = {}) -> (block: ^Memory_Block, err: Allocator_Error) { page_size := DEFAULT_PAGE_SIZE assert(mem.is_power_of_two(uintptr(page_size))) @@ -116,8 +121,9 @@ memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags return &pmblock.block, nil } -@(require_results) +@(require_results, no_sanitize_address) alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, default_commit_size: uint = 0) -> (data: []byte, err: Allocator_Error) { + @(no_sanitize_address) calc_alignment_offset :: proc "contextless" (block: ^Memory_Block, alignment: uintptr) -> uint { alignment_offset := uint(0) ptr := uintptr(block.base[block.used:]) @@ -128,6 +134,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, return alignment_offset } + @(no_sanitize_address) do_commit_if_necessary :: proc(block: ^Memory_Block, size: uint, default_commit_size: uint) -> (err: 
Allocator_Error) { if block.committed - block.used < size { pmblock := (^Platform_Memory_Block)(block) @@ -172,10 +179,12 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, data = block.base[block.used+alignment_offset:][:min_size] block.used += size + sanitizer.address_unpoison(data) return } +@(no_sanitize_address) memory_block_dealloc :: proc(block_to_free: ^Memory_Block) { if block := (^Platform_Memory_Block)(block_to_free); block != nil { platform_memory_free(block) diff --git a/core/mem/virtual/virtual_platform.odin b/core/mem/virtual/virtual_platform.odin index 31e9cfca8..c9dde4e9d 100644 --- a/core/mem/virtual/virtual_platform.odin +++ b/core/mem/virtual/virtual_platform.odin @@ -7,6 +7,7 @@ Platform_Memory_Block :: struct { reserved: uint, } +@(no_sanitize_address) platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (block: ^Platform_Memory_Block, err: Allocator_Error) { to_commit, to_reserve := to_commit, to_reserve to_reserve = max(to_commit, to_reserve) @@ -26,12 +27,14 @@ platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (bl } +@(no_sanitize_address) platform_memory_free :: proc "contextless" (block: ^Platform_Memory_Block) { if block != nil { release(block, block.reserved) } } +@(no_sanitize_address) platform_memory_commit :: proc "contextless" (block: ^Platform_Memory_Block, to_commit: uint) -> (err: Allocator_Error) { if to_commit < block.committed { return nil diff --git a/core/mem/virtual/virtual_windows.odin b/core/mem/virtual/virtual_windows.odin index acd30ae33..0da8498d5 100644 --- a/core/mem/virtual/virtual_windows.odin +++ b/core/mem/virtual/virtual_windows.odin @@ -83,6 +83,8 @@ foreign Kernel32 { dwNumberOfBytesToMap: uint, ) -> rawptr --- } + +@(no_sanitize_address) _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) { result := VirtualAlloc(nil, size, MEM_RESERVE, PAGE_READWRITE) if result == nil { @@ -93,6 +95,7 @@ _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Err return } +@(no_sanitize_address) _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error { result := VirtualAlloc(data, size, MEM_COMMIT, PAGE_READWRITE) if result == nil { @@ -107,12 +110,18 @@ _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error { } return nil } + +@(no_sanitize_address) _decommit :: proc "contextless" (data: rawptr, size: uint) { VirtualFree(data, size, MEM_DECOMMIT) } + +@(no_sanitize_address) _release :: proc "contextless" (data: rawptr, size: uint) { VirtualFree(data, 0, MEM_RELEASE) } + +@(no_sanitize_address) _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool { pflags: u32 pflags = PAGE_NOACCESS @@ -136,7 +145,7 @@ _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) } - +@(no_sanitize_address) _platform_memory_init :: proc() { sys_info: SYSTEM_INFO GetSystemInfo(&sys_info) @@ -147,6 +156,7 @@ _platform_memory_init :: proc() { } +@(no_sanitize_address) _map_file :: proc "contextless" (fd: uintptr, size: i64, flags: Map_File_Flags) -> (data: []byte, error: Map_File_Error) { page_flags: u32 if flags == {.Read} { diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 6177fcf6e..de6841ed8 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -383,6 +383,8 @@ struct lbProcedure { PtrMap selector_values; PtrMap selector_addr; PtrMap tuple_fix_map; + + Array asan_stack_locals; }; diff --git 
a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 421720c4c..dad5d4dd5 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -3070,6 +3070,13 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero if (e != nullptr) { lb_add_entity(p->module, e, val); lb_add_debug_local_variable(p, ptr, type, e->token); + + // NOTE(lucas): In LLVM 20 and below we do not have the option to have asan cleanup poisoned stack + // locals ourselves. So we need to manually track and unpoison these locals on proc return. + // LLVM 21 adds the 'use-after-scope' asan option which does this for us. + if (build_context.sanitizer_flags & SanitizerFlag_Address && !p->entity->Procedure.no_sanitize_address) { + array_add(&p->asan_stack_locals, val); + } } if (zero_init) { diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index 7bd8dea59..8f1619006 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -115,12 +115,13 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i p->is_entry_point = false; gbAllocator a = heap_allocator(); - p->children.allocator = a; - p->defer_stmts.allocator = a; - p->blocks.allocator = a; - p->branch_blocks.allocator = a; - p->context_stack.allocator = a; - p->scope_stack.allocator = a; + p->children.allocator = a; + p->defer_stmts.allocator = a; + p->blocks.allocator = a; + p->branch_blocks.allocator = a; + p->context_stack.allocator = a; + p->scope_stack.allocator = a; + p->asan_stack_locals.allocator = a; // map_init(&p->selector_values, 0); // map_init(&p->selector_addr, 0); // map_init(&p->tuple_fix_map, 0); @@ -385,11 +386,12 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name p->is_entry_point = false; gbAllocator a = permanent_allocator(); - p->children.allocator = a; - p->defer_stmts.allocator = a; - p->blocks.allocator = a; - p->branch_blocks.allocator = a; - p->context_stack.allocator = a; + p->children.allocator = a; + p->defer_stmts.allocator = a; + p->blocks.allocator = a; + p->branch_blocks.allocator = a; + p->context_stack.allocator = a; + p->asan_stack_locals.allocator = a; map_init(&p->tuple_fix_map, 0); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 96a5d0db1..d5e3e4c75 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -2917,6 +2917,22 @@ gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlo } defer (p->branch_location_pos = prev_token_pos); + // TODO(lucas): In LLVM 21 use the 'use-after-scope' asan option which does this for us. 
+ #if LLVM_VERSION_MAJOR < 21 + if (kind == lbDeferExit_Return) { + for_array(i, p->asan_stack_locals) { + lbValue local = p->asan_stack_locals[i]; + + auto args = array_make(temporary_allocator(), 2); + args[0] = lb_emit_conv(p, local, t_rawptr); + args[1] = lb_const_int(p->module, t_int, type_size_of(local.type->Pointer.elem)); + lb_emit_runtime_call(p, "__asan_unpoison_memory_region", args); + } + } + #else + #error "Need to implement LLVM 21 'use-after-scope' asan option" + #endif + isize count = p->defer_stmts.count; isize i = count; while (i --> 0) { -- cgit v1.2.3 From 46e0c7ad74d0868d473dfd95a455dbe8a64bacbf Mon Sep 17 00:00:00 2001 From: Lucas Perlind Date: Wed, 7 May 2025 11:30:58 +1000 Subject: Cleanup --- base/runtime/internal.odin | 4 +--- src/llvm_backend_stmt.cpp | 20 ++++++++------------ 2 files changed, 9 insertions(+), 15 deletions(-) (limited to 'src/llvm_backend_stmt.cpp') diff --git a/base/runtime/internal.odin b/base/runtime/internal.odin index 5d8c5a4de..bff5b8380 100644 --- a/base/runtime/internal.odin +++ b/base/runtime/internal.odin @@ -1107,11 +1107,9 @@ __read_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uin } } -@(no_sanitize_address) -__asan_unpoison_memory_region :: #force_inline proc "contextless" (address: rawptr, size: uint) { +when .Address in ODIN_SANITIZER_FLAGS { foreign { __asan_unpoison_memory_region :: proc "system" (address: rawptr, size: uint) --- } - __asan_unpoison_memory_region(address, size) } diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index d5e3e4c75..89737a454 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -2918,20 +2918,16 @@ gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlo defer (p->branch_location_pos = prev_token_pos); // TODO(lucas): In LLVM 21 use the 'use-after-scope' asan option which does this for us. 
- #if LLVM_VERSION_MAJOR < 21 - if (kind == lbDeferExit_Return) { - for_array(i, p->asan_stack_locals) { - lbValue local = p->asan_stack_locals[i]; + if (kind == lbDeferExit_Return) { + for_array(i, p->asan_stack_locals) { + lbValue local = p->asan_stack_locals[i]; - auto args = array_make(temporary_allocator(), 2); - args[0] = lb_emit_conv(p, local, t_rawptr); - args[1] = lb_const_int(p->module, t_int, type_size_of(local.type->Pointer.elem)); - lb_emit_runtime_call(p, "__asan_unpoison_memory_region", args); - } + auto args = array_make(temporary_allocator(), 2); + args[0] = lb_emit_conv(p, local, t_rawptr); + args[1] = lb_const_int(p->module, t_int, type_size_of(local.type->Pointer.elem)); + lb_emit_runtime_call(p, "__asan_unpoison_memory_region", args); } - #else - #error "Need to implement LLVM 21 'use-after-scope' asan option" - #endif + } isize count = p->defer_stmts.count; isize i = count; -- cgit v1.2.3 From 2f636886a565c113840d8e0b81454a24c6f7b9a0 Mon Sep 17 00:00:00 2001 From: tf2spi Date: Thu, 15 May 2025 16:11:06 -0400 Subject: Add debug info for labels (#4385) * Emit label debug info w/o location * Insert debug label call * Slight refactor for later fix * Improve debug labels for block statements * Improve debug info with for loops * Generate label lbBlocks w/ debug * Lightly refactor lb_add_debug_label * Revise comments, add null check assertion * Use LLVM-C API for debug labels * Prefer C DILabel API for POSIX, fallback to CPP * Use version check for LLVM-C DILabel --- src/llvm_backend_debug.cpp | 56 ++++++++++++++++++++++++++++++++++++ src/llvm_backend_stmt.cpp | 72 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 123 insertions(+), 5 deletions(-) (limited to 'src/llvm_backend_stmt.cpp') diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp index 53c007d8d..8339a021b 100644 --- a/src/llvm_backend_debug.cpp +++ b/src/llvm_backend_debug.cpp @@ -1295,3 +1295,59 @@ gb_internal void add_debug_info_for_global_constant_from_entity(lbGenerator *gen } } } + +gb_internal void lb_add_debug_label(lbProcedure *p, Ast *label, lbBlock *target) { +// NOTE(tf2spi): LLVM-C DILabel API used only existed for major versions 20+ +#if LLVM_VERSION_MAJOR >= 20 + if (p == nullptr || p->debug_info == nullptr) { + return; + } + if (target == nullptr || label == nullptr || label->kind != Ast_Label) { + return; + } + Token label_token = label->Label.token; + if (is_blank_ident(label_token.string)) { + return; + } + lbModule *m = p->module; + if (m == nullptr) { + return; + } + + AstFile *file = label->file(); + LLVMMetadataRef llvm_file = lb_get_llvm_metadata(m, file); + if (llvm_file == nullptr) { + debugf("llvm file not found for label\n"); + return; + } + LLVMMetadataRef llvm_scope = p->debug_info; + if(llvm_scope == nullptr) { + debugf("llvm scope not found for label\n"); + return; + } + LLVMMetadataRef llvm_debug_loc = lb_debug_location_from_token_pos(p, label_token.pos); + LLVMBasicBlockRef llvm_block = target->block; + if (llvm_block == nullptr || llvm_debug_loc == nullptr) { + return; + } + LLVMMetadataRef llvm_label = LLVMDIBuilderCreateLabel( + m->debug_builder, + llvm_scope, + (const char *)label_token.string.text, + (size_t)label_token.string.len, + llvm_file, + label_token.pos.line, + + // NOTE(tf2spi): Defaults to false in LLVM API, but I'd rather not take chances + // Always preserve the label no matter what when debugging + true + ); + GB_ASSERT(llvm_label != nullptr); + (void)LLVMDIBuilderInsertLabelAtEnd( + m->debug_builder, + llvm_label, + llvm_debug_loc, 
+ llvm_block + ); +#endif +} diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 89737a454..44a78b036 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -136,7 +136,6 @@ gb_internal lbBranchBlocks lb_lookup_branch_blocks(lbProcedure *p, Ast *ident) { return empty; } - gb_internal lbTargetList *lb_push_target_list(lbProcedure *p, Ast *label, lbBlock *break_, lbBlock *continue_, lbBlock *fallthrough_) { lbTargetList *tl = gb_alloc_item(permanent_allocator(), lbTargetList); tl->prev = p->target_list; @@ -688,6 +687,18 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node, lbBlock *body = lb_create_block(p, "for.interval.body"); lbBlock *done = lb_create_block(p, "for.interval.done"); + // TODO(tf2spi): This is inlined in more than several places. + // Putting this in a function might be preferred. + // LLVMSetCurrentDebugLocation2 has side effects, + // so I didn't want to hide that before it got reviewed. + if (rs->label != nullptr && p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "for.interval.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, rs->label)); + lb_add_debug_label(p, rs->label, label); + } lb_emit_jump(p, loop); lb_start_block(p, loop); @@ -893,6 +904,14 @@ gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs lbAddr index = lb_add_local_generated(p, t_int, false); + if (rs->label != nullptr && p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "for.soa.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, rs->label)); + lb_add_debug_label(p, rs->label, label); + } if (!is_reverse) { /* for x, i in array { @@ -970,7 +989,6 @@ gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs lb_store_range_stmt_val(p, val1, lb_addr_load(p, index)); } - lb_push_target_list(p, rs->label, done, loop, nullptr); lb_build_stmt(p, rs->body); @@ -1029,6 +1047,15 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc lbBlock *done = nullptr; bool is_map = false; + if (rs->label != nullptr && p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "for.range.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, rs->label)); + lb_add_debug_label(p, rs->label, label); + } + if (tav.mode == Addressing_Type) { lb_build_range_enum(p, type_deref(tav.type), val0_type, &val, &key, &loop, &done); } else { @@ -1530,6 +1557,14 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *scope) { lb_open_scope(p, scope); + if (ss->label != nullptr && p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "switch.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, ss->label)); + lb_add_debug_label(p, ss->label, label); + } if (ss->init != nullptr) { lb_build_stmt(p, ss->init); } @@ -1736,6 +1771,7 @@ gb_internal lbAddr lb_store_range_stmt_val(lbProcedure *p, Ast *stmt_val, lbValu gb_internal void lb_type_case_body(lbProcedure *p, Ast *label, Ast *clause, lbBlock *body, lbBlock *done) { ast_node(cc, CaseClause, clause); + // NOTE(tf2spi): Debug info for label not generated 
here on purpose lb_push_target_list(p, label, done, nullptr, nullptr); lb_build_stmt_list(p, cc->stmts); lb_close_scope(p, lbDeferExit_Default, body, clause); @@ -2307,6 +2343,14 @@ gb_internal void lb_build_if_stmt(lbProcedure *p, Ast *node) { else_ = lb_create_block(p, "if.else"); } if (is->label != nullptr) { + if (p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "if.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, is->label)); + lb_add_debug_label(p, is->label, label); + } lbTargetList *tl = lb_push_target_list(p, is->label, done, nullptr, nullptr); tl->is_block = true; } @@ -2399,12 +2443,19 @@ gb_internal void lb_build_for_stmt(lbProcedure *p, Ast *node) { lb_push_target_list(p, fs->label, done, post, nullptr); + if (fs->label != nullptr && p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "for.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, fs->label)); + lb_add_debug_label(p, fs->label, label); + } if (fs->init != nullptr) { - #if 1 lbBlock *init = lb_create_block(p, "for.init"); lb_emit_jump(p, init); lb_start_block(p, init); - #endif + lb_build_stmt(p, fs->init); } @@ -2420,7 +2471,6 @@ gb_internal void lb_build_for_stmt(lbProcedure *p, Ast *node) { lb_start_block(p, body); } - lb_build_stmt(p, fs->body); lb_pop_target_list(p); @@ -2694,9 +2744,21 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) { case_ast_node(bs, BlockStmt, node); + lbBlock *body = nullptr; lbBlock *done = nullptr; if (bs->label != nullptr) { + if (p->debug_info != nullptr) { + lbBlock *label = lb_create_block(p, "block.label"); + lb_emit_jump(p, label); + lb_start_block(p, label); + + LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, bs->label)); + lb_add_debug_label(p, bs->label, label); + } + body = lb_create_block(p, "block.body"); done = lb_create_block(p, "block.done"); + lb_emit_jump(p, body); + lb_start_block(p, body); lbTargetList *tl = lb_push_target_list(p, bs->label, done, nullptr, nullptr); tl->is_block = true; } -- cgit v1.2.3 From c35a45e823401a1d7a15f11c6fb07e4fe9e6007a Mon Sep 17 00:00:00 2001 From: Laytan Laats Date: Sat, 17 May 2025 16:28:34 +0200 Subject: fix global and static any Fixes #4627 --- src/llvm_backend.cpp | 25 ++++-------------- src/llvm_backend_general.cpp | 43 ++++++++++++++++-------------- src/llvm_backend_stmt.cpp | 52 ++++++++++++++++++++++--------------- tests/internal/test_global_any.odin | 40 ++++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 60 deletions(-) create mode 100644 tests/internal/test_global_any.odin (limited to 'src/llvm_backend_stmt.cpp') diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 7de147058..361a0c46b 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1973,14 +1973,14 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc gbString var_name = gb_string_make(permanent_allocator(), "__$global_any::"); gbString e_str = string_canonical_entity_name(temporary_allocator(), e); var_name = gb_string_append_length(var_name, e_str, gb_strlen(e_str)); - lbAddr g = lb_add_global_generated_with_name(main_module, var_type, var.init, make_string_c(var_name)); + lbAddr g = lb_add_global_generated_with_name(main_module, var_type, {}, make_string_c(var_name)); lb_addr_store(p, g, var.init); lbValue gp = lb_addr_get_ptr(p, g); lbValue data = lb_emit_struct_ep(p, 
var.var, 0); lbValue ti = lb_emit_struct_ep(p, var.var, 1); lb_emit_store(p, data, lb_emit_conv(p, gp, t_rawptr)); - lb_emit_store(p, ti, lb_type_info(p, var_type)); + lb_emit_store(p, ti, lb_typeid(p->module, var_type)); } else { LLVMTypeRef vt = llvm_addr_type(p->module, var.var); lbValue src0 = lb_emit_conv(p, var.init, t); @@ -3194,24 +3194,9 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { lbValue g = {}; g.value = LLVMAddGlobal(m->mod, lb_type(m, e->type), alloc_cstring(permanent_allocator(), name)); g.type = alloc_type_pointer(e->type); - if (e->Variable.thread_local_model != "") { - LLVMSetThreadLocal(g.value, true); - - String m = e->Variable.thread_local_model; - LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; - if (m == "default") { - mode = LLVMGeneralDynamicTLSModel; - } else if (m == "localdynamic") { - mode = LLVMLocalDynamicTLSModel; - } else if (m == "initialexec") { - mode = LLVMInitialExecTLSModel; - } else if (m == "localexec") { - mode = LLVMLocalExecTLSModel; - } else { - GB_PANIC("Unhandled thread local mode %.*s", LIT(m)); - } - LLVMSetThreadLocalMode(g.value, mode); - } + + lb_apply_thread_local_model(g.value, e->Variable.thread_local_model); + if (is_foreign) { LLVMSetLinkage(g.value, LLVMExternalLinkage); LLVMSetDLLStorageClass(g.value, LLVMDLLImportStorageClass); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 504c8234e..85a165de4 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2387,6 +2387,29 @@ gb_internal void lb_add_attribute_to_proc_with_string(lbModule *m, LLVMValueRef } +gb_internal bool lb_apply_thread_local_model(LLVMValueRef value, String model) { + if (model != "") { + LLVMSetThreadLocal(value, true); + + LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; + if (model == "default") { + mode = LLVMGeneralDynamicTLSModel; + } else if (model == "localdynamic") { + mode = LLVMLocalDynamicTLSModel; + } else if (model == "initialexec") { + mode = LLVMInitialExecTLSModel; + } else if (model == "localexec") { + mode = LLVMLocalExecTLSModel; + } else { + GB_PANIC("Unhandled thread local mode %.*s", LIT(model)); + } + LLVMSetThreadLocalMode(value, mode); + return true; + } + + return false; +} + gb_internal void lb_add_edge(lbBlock *from, lbBlock *to) { LLVMValueRef instr = LLVMGetLastInstruction(from->block); @@ -2990,25 +3013,7 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) { lb_set_entity_from_other_modules_linkage_correctly(other_module, e, name); - if (e->Variable.thread_local_model != "") { - LLVMSetThreadLocal(g.value, true); - - String m = e->Variable.thread_local_model; - LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; - if (m == "default") { - mode = LLVMGeneralDynamicTLSModel; - } else if (m == "localdynamic") { - mode = LLVMLocalDynamicTLSModel; - } else if (m == "initialexec") { - mode = LLVMInitialExecTLSModel; - } else if (m == "localexec") { - mode = LLVMLocalExecTLSModel; - } else { - GB_PANIC("Unhandled thread local mode %.*s", LIT(m)); - } - LLVMSetThreadLocalMode(g.value, mode); - } - + lb_apply_thread_local_model(g.value, e->Variable.thread_local_model); return g; } diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 44a78b036..9b5b14626 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -2022,33 +2022,43 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { LLVMValueRef global = LLVMAddGlobal(p->module->mod, lb_type(p->module, e->type), c_name); 
LLVMSetAlignment(global, cast(u32)type_align_of(e->type)); LLVMSetInitializer(global, LLVMConstNull(lb_type(p->module, e->type))); - if (value.value != nullptr) { - LLVMSetInitializer(global, value.value); - } + if (e->Variable.is_rodata) { LLVMSetGlobalConstant(global, true); } - if (e->Variable.thread_local_model != "") { - LLVMSetThreadLocal(global, true); - - String m = e->Variable.thread_local_model; - LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel; - if (m == "default") { - mode = LLVMGeneralDynamicTLSModel; - } else if (m == "localdynamic") { - mode = LLVMLocalDynamicTLSModel; - } else if (m == "initialexec") { - mode = LLVMInitialExecTLSModel; - } else if (m == "localexec") { - mode = LLVMLocalExecTLSModel; - } else { - GB_PANIC("Unhandled thread local mode %.*s", LIT(m)); - } - LLVMSetThreadLocalMode(global, mode); - } else { + + if (!lb_apply_thread_local_model(global, e->Variable.thread_local_model)) { LLVMSetLinkage(global, LLVMInternalLinkage); } + if (value.value != nullptr) { + if (is_type_any(e->type)) { + Type *var_type = default_type(value.type); + + gbString var_name = gb_string_make(temporary_allocator(), "__$static_any::"); + var_name = gb_string_append_length(var_name, mangled_name.text, mangled_name.len); + + lbAddr var_global = lb_add_global_generated_with_name(p->module, var_type, value, make_string_c(var_name), nullptr); + LLVMValueRef var_global_ref = var_global.addr.value; + + if (e->Variable.is_rodata) { + LLVMSetGlobalConstant(var_global_ref, true); + } + + if (!lb_apply_thread_local_model(var_global_ref, e->Variable.thread_local_model)) { + LLVMSetLinkage(var_global_ref, LLVMInternalLinkage); + } + + LLVMValueRef vals[2] = { + lb_emit_conv(p, var_global.addr, t_rawptr).value, + lb_typeid(p->module, var_type).value, + }; + LLVMValueRef init = llvm_const_named_struct(p->module, e->type, vals, gb_count_of(vals)); + LLVMSetInitializer(global, init); + } else { + LLVMSetInitializer(global, value.value); + } + } lbValue global_val = {global, alloc_type_pointer(e->type)}; lb_add_entity(p->module, e, global_val); diff --git a/tests/internal/test_global_any.odin b/tests/internal/test_global_any.odin new file mode 100644 index 000000000..73b70e0a4 --- /dev/null +++ b/tests/internal/test_global_any.odin @@ -0,0 +1,40 @@ +package test_internal + +@(private="file") +global_any_from_proc: any = from_proc() + +from_proc :: proc() -> f32 { + return 1.1 +} + +@(private="file") +global_any: any = 1 + +import "core:testing" + +@(test) +test_global_any :: proc(t: ^testing.T) { + as_f32, is_f32 := global_any_from_proc.(f32) + testing.expect(t, is_f32 == true) + testing.expect(t, as_f32 == 1.1) + + as_int, is_int := global_any.(int) + testing.expect(t, is_int == true) + testing.expect(t, as_int == 1) +} + +@(test) +test_static_any :: proc(t: ^testing.T) { + @(static) + var: any = 3 + + as_int, is_int := var.(int) + testing.expect(t, is_int == true) + testing.expect(t, as_int == 3) + + var = f32(1.1) + + as_f32, is_f32 := var.(f32) + testing.expect(t, is_f32 == true) + testing.expect(t, as_f32 == 1.1) +} -- cgit v1.2.3 From f94fc992d788696a4bc903a3f179307a28accbb9 Mon Sep 17 00:00:00 2001 From: Laytan Laats Date: Mon, 2 Jun 2025 16:53:18 +0200 Subject: fix swizzle in for in statement Fixes #1730 --- src/llvm_backend_general.cpp | 9 ++++++--- src/llvm_backend_stmt.cpp | 18 +++++++++++++++--- tests/issues/run.bat | 1 + tests/issues/run.sh | 1 + tests/issues/test_issue_1730.odin | 21 +++++++++++++++++++++ 5 files changed, 44 insertions(+), 6 deletions(-) create mode 100644 
tests/issues/test_issue_1730.odin (limited to 'src/llvm_backend_stmt.cpp') diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 3a099ec55..5aaa7f63a 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -546,8 +546,11 @@ gb_internal lbValue lb_addr_get_ptr(lbProcedure *p, lbAddr const &addr) { break; case lbAddr_Swizzle: + GB_PANIC("lbAddr_Swizzle should be handled elsewhere"); + break; + case lbAddr_SwizzleLarge: - // TOOD(bill): is this good enough logic? + GB_PANIC("lbAddr_SwizzleLarge should be handled elsewhere"); break; } @@ -922,7 +925,7 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) { GB_ASSERT(value.value != nullptr); value = lb_emit_conv(p, value, lb_addr_type(addr)); - lbValue dst = lb_addr_get_ptr(p, addr); + lbValue dst = addr.addr; lbValue src = lb_address_from_load_or_generate_local(p, value); { lbValue src_ptrs[4] = {}; @@ -948,7 +951,7 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) { GB_ASSERT(value.value != nullptr); value = lb_emit_conv(p, value, lb_addr_type(addr)); - lbValue dst = lb_addr_get_ptr(p, addr); + lbValue dst = addr.addr; lbValue src = lb_address_from_load_or_generate_local(p, value); for_array(i, addr.swizzle_large.indices) { lbValue src_ptr = lb_emit_array_epi(p, src, i); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 9b5b14626..027837f3f 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1072,10 +1072,22 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc break; } case Type_Array: { - lbValue array = lb_build_addr_ptr(p, expr); - if (is_type_pointer(type_deref(array.type))) { - array = lb_emit_load(p, array); + lbValue array; + lbAddr addr = lb_build_addr(p, expr); + switch (addr.kind) { + case lbAddr_Swizzle: + case lbAddr_SwizzleLarge: + // NOTE(laytan): apply the swizzle. 
+ array = lb_address_from_load(p, lb_addr_load(p, addr)); + break; + default: + array = lb_addr_get_ptr(p, addr); + if (is_type_pointer(type_deref(array.type))) { + array = lb_emit_load(p, array); + } + break; } + lbAddr count_ptr = lb_add_local_generated(p, t_int, false); lb_addr_store(p, count_ptr, lb_const_int(p->module, t_int, et->Array.count)); lb_build_range_indexed(p, array, val0_type, count_ptr.addr, &val, &key, &loop, &done, rs->reverse); diff --git a/tests/issues/run.bat b/tests/issues/run.bat index db941b55a..156b1754e 100644 --- a/tests/issues/run.bat +++ b/tests/issues/run.bat @@ -9,6 +9,7 @@ set COMMON=-define:ODIN_TEST_FANCY=false -file -vet -strict-style ..\..\..\odin test ..\test_issue_829.odin %COMMON% || exit /b ..\..\..\odin test ..\test_issue_1592.odin %COMMON% || exit /b +..\..\..\odin test ..\test_issue_1730.odin %COMMON% || exit /b ..\..\..\odin test ..\test_issue_2056.odin %COMMON% || exit /b ..\..\..\odin build ..\test_issue_2113.odin %COMMON% -debug || exit /b ..\..\..\odin test ..\test_issue_2466.odin %COMMON% || exit /b diff --git a/tests/issues/run.sh b/tests/issues/run.sh index db0864c3e..cd70f6401 100755 --- a/tests/issues/run.sh +++ b/tests/issues/run.sh @@ -10,6 +10,7 @@ set -x $ODIN test ../test_issue_829.odin $COMMON $ODIN test ../test_issue_1592.odin $COMMON +$ODIN test ../test_issue_1730.odin $COMMON $ODIN test ../test_issue_2056.odin $COMMON $ODIN build ../test_issue_2113.odin $COMMON -debug $ODIN test ../test_issue_2466.odin $COMMON diff --git a/tests/issues/test_issue_1730.odin b/tests/issues/test_issue_1730.odin new file mode 100644 index 000000000..c1c5c4cae --- /dev/null +++ b/tests/issues/test_issue_1730.odin @@ -0,0 +1,21 @@ +package test_issues + +import "core:testing" + +// Tests issue #1730 https://github.com/odin-lang/Odin/issues/1730 +@(test) +test_issue_1730 :: proc(t: ^testing.T) { + ll := [4]int{1, 2, 3, 4} + for l, i in ll.yz { + testing.expect(t, i <= 1) + if i == 0 { + testing.expect_value(t, l, 2) + } else if i == 1 { + testing.expect_value(t, l, 3) + } + } + + out: [4]int + out.yz = ll.yz + testing.expect_value(t, out, [4]int{0, 2, 3, 0}) +} -- cgit v1.2.3
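A usage-level sketch of the poison/unpoison protocol that the asan allocator patches above follow. This is an illustration, not code from the commits: Fixed_Arena and the fixed_arena_* procedures are hypothetical names, but the sanitizer.address_poison/address_unpoison calls and the @(no_sanitize_address) attribute are the same primitives the patched runtime uses. The idea is to keep backing memory poisoned except for the exact bytes currently handed out:

	package asan_arena_sketch

	import "base:sanitizer"

	// Hypothetical fixed-buffer arena, used only to illustrate the protocol.
	Fixed_Arena :: struct {
		data: []byte,
		used: int,
	}

	// As in the patched runtime allocators, the allocator itself is exempt
	// from asan instrumentation so it may manage poisoned regions.
	@(no_sanitize_address)
	fixed_arena_init :: proc(a: ^Fixed_Arena, backing: []byte) {
		a.data = backing
		a.used = 0
		// The whole backing region is unaddressable until allocated from.
		sanitizer.address_poison(a.data)
	}

	@(no_sanitize_address)
	fixed_arena_alloc :: proc(a: ^Fixed_Arena, size: int) -> []byte {
		if a.used + size > len(a.data) {
			return nil
		}
		result := a.data[a.used:][:size]
		a.used += size
		// Only the bytes handed to the caller become addressable.
		sanitizer.address_unpoison(result)
		return result
	}

	@(no_sanitize_address)
	fixed_arena_free_all :: proc(a: ^Fixed_Arena) {
		a.used = 0
		// Re-poison everything: stale pointers into the arena now fault under asan.
		sanitizer.address_poison(a.data)
	}

Shrinking reallocation in the arena patch works the same way: the tail that is no longer owned, old_data[size:old_size], is re-poisoned immediately.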
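The debug-label commit above gives each labelled statement its own basic block ("for.label", "if.label", "switch.label", "block.label", and the range variants) and attaches a DILabel through lb_add_debug_label, so debuggers built against LLVM 20+ can resolve the label by name. At the source level this applies to ordinary Odin labels; a minimal example of the construct involved (illustrative, not taken from the commits):

	package label_sketch

	main :: proc() {
		// When compiled with -debug, the backend now emits a "for.label"
		// block here and a DILabel named "outer" at this position.
		outer: for i in 0..<3 {
			for j in 0..<3 {
				if i * j == 4 {
					break outer
				}
			}
		}
	}

Blank-identifier labels are skipped (the is_blank_ident check), and type-switch case bodies deliberately generate no label debug info, per the NOTE in lb_type_case_body.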
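The global/static any commit also factors the duplicated thread-local handling into lb_apply_thread_local_model, which maps a variable's model string to an LLVM TLS mode: "default" to GeneralDynamic, "localdynamic" to LocalDynamic, "initialexec" to InitialExec, "localexec" to LocalExec, panicking on anything else, and returns false when no model is set (the static-variable path uses that to fall back to internal linkage). On the Odin side those strings come from the thread_local attribute; a small hedged example, assuming the usual attribute spelling:

	package tls_sketch

	// Accepted model strings, per lb_apply_thread_local_model:
	// "default", "localdynamic", "initialexec", "localexec".
	@(thread_local="initialexec")
	per_thread_counter: int

	main :: proc() {
		per_thread_counter += 1
	}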