| | | |
|---|---|---|
| author | Feoramund <161657516+Feoramund@users.noreply.github.com> | 2025-06-15 14:29:58 -0400 |
| committer | Feoramund <161657516+Feoramund@users.noreply.github.com> | 2025-06-15 15:06:28 -0400 |
| commit | efba9a1fb72040d527446cd262bdb59894d091c6 (patch) | |
| tree | 13525f69ab76652dddc8fd408db30e2505e84540 /base | |
| parent | 0b2cf9a4ca7e84fb7920a8d7b0d58b7d61f24a10 (diff) | |
Disable usage of AddressSanitizer pending a per-allocator review
It has been discovered that AddressSanitizer does not keep a 1:1 mapping
of which bytes are poisoned and which are not: each shadow byte tracks
the poison state of an 8-byte granule of application memory. This can
cause issues for allocations smaller than 8 bytes and for ranges whose
addresses straddle 8-byte boundaries.
See the following link for more information:
https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#mapping
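For context, here is a minimal Odin sketch of the shadow mapping described on that page. The `0x7fff8000` offset is the documented default for 64-bit x86 Linux; the `shadow_address` helper is hypothetical and for illustration only, not part of `base:sanitizer`.

```odin
package main

import "core:fmt"

// Default shadow offset for 64-bit x86 Linux, per the linked wiki page.
// Assumption for illustration; other platforms use different offsets.
ASAN_SHADOW_OFFSET :: uintptr(0x7fff8000)

// Each shadow byte describes 8 bytes of application memory, so poison
// state is tracked per 8-byte granule, not per byte.
shadow_address :: proc(addr: uintptr) -> uintptr {
	return (addr >> 3) + ASAN_SHADOW_OFFSET
}

main :: proc() {
	base := uintptr(0x1000)
	// Two addresses 4 bytes apart share one shadow byte, which is why a
	// 4-byte allocation, or a range straddling an 8-byte boundary,
	// cannot be poisoned exactly.
	fmt.println(shadow_address(base) == shadow_address(base + 4)) // true
	fmt.println(shadow_address(base + 4) == shadow_address(base + 8)) // false
}
```

Because a single shadow byte can only record how many leading bytes of its 8-byte granule are addressable, ranges that begin or end mid-granule cannot be poisoned precisely, which is what motivates the per-allocator review.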
Diffstat (limited to 'base')
| -rw-r--r-- | base/runtime/default_temp_allocator_arena.odin | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/base/runtime/default_temp_allocator_arena.odin b/base/runtime/default_temp_allocator_arena.odin
index 74994344a..ca144b66f 100644
--- a/base/runtime/default_temp_allocator_arena.odin
+++ b/base/runtime/default_temp_allocator_arena.odin
@@ -1,7 +1,7 @@
 package runtime
 
 import "base:intrinsics"
-import "base:sanitizer"
+// import "base:sanitizer"
 
 DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE :: uint(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE)
 
@@ -44,7 +44,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint
 	block.base = ([^]byte)(uintptr(block) + base_offset)
 	block.capacity = uint(end - uintptr(block.base))
 
-	sanitizer.address_poison(block.base, block.capacity)
+	// sanitizer.address_poison(block.base, block.capacity)
 
 	// Should be zeroed
 	assert(block.used == 0)
@@ -55,7 +55,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint
 memory_block_dealloc :: proc(block_to_free: ^Memory_Block, loc := #caller_location) {
 	if block_to_free != nil {
 		allocator := block_to_free.allocator
-		sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity)
+		// sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity)
 		mem_free(block_to_free, allocator, loc)
 	}
 }
@@ -87,7 +87,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint)
 		return
 	}
 	data = block.base[block.used+alignment_offset:][:min_size]
-	sanitizer.address_unpoison(block.base[block.used:block.used+size])
+	// sanitizer.address_unpoison(block.base[block.used:block.used+size])
 	block.used += size
 	return
 }
@@ -167,7 +167,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
 	if arena.curr_block != nil {
 		intrinsics.mem_zero(arena.curr_block.base, arena.curr_block.used)
 		arena.curr_block.used = 0
-		sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity)
+		// sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity)
 	}
 	arena.total_used = 0
 }
@@ -232,7 +232,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
 			// grow data in-place, adjusting next allocation
 			block.used = uint(new_end)
 			data = block.base[start:new_end]
-			sanitizer.address_unpoison(data)
+			// sanitizer.address_unpoison(data)
 			return
 		}
 	}
@@ -306,7 +306,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
 		assert(block.used >= temp.used, "out of order use of arena_temp_end", loc)
 		amount_to_zero := block.used-temp.used
 		intrinsics.mem_zero(block.base[temp.used:], amount_to_zero)
-		sanitizer.address_poison(block.base[temp.used:block.capacity])
+		// sanitizer.address_poison(block.base[temp.used:block.capacity])
 		block.used = temp.used
 		arena.total_used -= amount_to_zero
 	}