author    Jeroen van Rijn <Kelimion@users.noreply.github.com>  2025-06-19 18:35:17 +0200
committer GitHub <noreply@github.com>  2025-06-19 18:35:17 +0200
commit    69c0fe83054eb65ec84bd57a92d2dd7ee519fbd5 (patch)
tree      8f5693003679d4163011e059e4e3148646069faa
parent    0a45d4de0cf24451f8acb3d120a0aced655aa046 (diff)
parent    7526549e5106dbdd043c6057833a6351d1693afc (diff)

Merge pull request #5344 from Feoramund/fix-2694

Review `core/mem/allocators.odin`
-rw-r--r--  base/runtime/core_builtin.odin                  |   4
-rw-r--r--  base/runtime/default_temp_allocator_arena.odin  |  14
-rw-r--r--  core/mem/allocators.odin                        | 570
-rw-r--r--  core/mem/rollback_stack_allocator.odin          |   8
-rw-r--r--  core/mem/tlsf/tlsf_internal.odin                |  10
-rw-r--r--  core/mem/virtual/arena.odin                     |  24
-rw-r--r--  core/mem/virtual/virtual.odin                   |  10
-rw-r--r--  tests/core/mem/test_core_mem.odin               | 108
-rw-r--r--  tests/issues/run.bat                            |   1
-rwxr-xr-x  tests/issues/run.sh                             |   1
-rw-r--r--  tests/issues/test_issue_2694.odin               |  42
11 files changed, 534 insertions, 258 deletions
diff --git a/base/runtime/core_builtin.odin b/base/runtime/core_builtin.odin
index bc201b6e1..e2ba14f3a 100644
--- a/base/runtime/core_builtin.odin
+++ b/base/runtime/core_builtin.odin
@@ -67,7 +67,7 @@ init_global_temporary_allocator :: proc(size: int, backup_allocator := context.a
// Prefer the procedure group `copy`.
@builtin
copy_slice :: proc "contextless" (dst, src: $T/[]$E) -> int {
- n := max(0, min(len(dst), len(src)))
+ n := min(len(dst), len(src))
if n > 0 {
intrinsics.mem_copy(raw_data(dst), raw_data(src), n*size_of(E))
}
@@ -80,7 +80,7 @@ copy_slice :: proc "contextless" (dst, src: $T/[]$E) -> int {
// Prefer the procedure group `copy`.
@builtin
copy_from_string :: proc "contextless" (dst: $T/[]$E/u8, src: $S/string) -> int {
- n := max(0, min(len(dst), len(src)))
+ n := min(len(dst), len(src))
if n > 0 {
intrinsics.mem_copy(raw_data(dst), raw_data(src), n)
}
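
Both hunks above drop a redundant clamp: in Odin, `len` of a slice or string is never negative, so `min(len(dst), len(src))` is already non-negative and the outer `max(0, ...)` was dead code. A minimal sketch of the invariant (illustrative only):

package example

copy_len_invariant :: proc(dst: []u8, src: string) {
	// Slice and string lengths are always >= 0, so the minimum of the
	// two can never be negative; no extra clamp is needed.
	n := min(len(dst), len(src))
	assert(n >= 0)
}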
diff --git a/base/runtime/default_temp_allocator_arena.odin b/base/runtime/default_temp_allocator_arena.odin
index 74994344a..ca144b66f 100644
--- a/base/runtime/default_temp_allocator_arena.odin
+++ b/base/runtime/default_temp_allocator_arena.odin
@@ -1,7 +1,7 @@
package runtime
import "base:intrinsics"
-import "base:sanitizer"
+// import "base:sanitizer"
DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE :: uint(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE)
@@ -44,7 +44,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint
block.base = ([^]byte)(uintptr(block) + base_offset)
block.capacity = uint(end - uintptr(block.base))
- sanitizer.address_poison(block.base, block.capacity)
+ // sanitizer.address_poison(block.base, block.capacity)
// Should be zeroed
assert(block.used == 0)
@@ -55,7 +55,7 @@ memory_block_alloc :: proc(allocator: Allocator, capacity: uint, alignment: uint
memory_block_dealloc :: proc(block_to_free: ^Memory_Block, loc := #caller_location) {
if block_to_free != nil {
allocator := block_to_free.allocator
- sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity)
+ // sanitizer.address_unpoison(block_to_free.base, block_to_free.capacity)
mem_free(block_to_free, allocator, loc)
}
}
@@ -87,7 +87,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint)
return
}
data = block.base[block.used+alignment_offset:][:min_size]
- sanitizer.address_unpoison(block.base[block.used:block.used+size])
+ // sanitizer.address_unpoison(block.base[block.used:block.used+size])
block.used += size
return
}
@@ -167,7 +167,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
if arena.curr_block != nil {
intrinsics.mem_zero(arena.curr_block.base, arena.curr_block.used)
arena.curr_block.used = 0
- sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity)
+ // sanitizer.address_poison(arena.curr_block.base, arena.curr_block.capacity)
}
arena.total_used = 0
}
@@ -232,7 +232,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
// grow data in-place, adjusting next allocation
block.used = uint(new_end)
data = block.base[start:new_end]
- sanitizer.address_unpoison(data)
+ // sanitizer.address_unpoison(data)
return
}
}
@@ -306,7 +306,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
assert(block.used >= temp.used, "out of order use of arena_temp_end", loc)
amount_to_zero := block.used-temp.used
intrinsics.mem_zero(block.base[temp.used:], amount_to_zero)
- sanitizer.address_poison(block.base[temp.used:block.capacity])
+ // sanitizer.address_poison(block.base[temp.used:block.capacity])
block.used = temp.used
arena.total_used -= amount_to_zero
}
diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin
index 39665d341..21e69c463 100644
--- a/core/mem/allocators.odin
+++ b/core/mem/allocators.odin
@@ -2,7 +2,57 @@ package mem
import "base:intrinsics"
import "base:runtime"
-import "base:sanitizer"
+
+// NOTE(Feoramund): Sanitizer usage in this package has been temporarily
+// disabled pending a thorough review per allocator, as ASan is particular
+// about the addresses and ranges it receives.
+//
+// In short, it keeps track only of 8-byte blocks. This can cause issues if an
+// allocator poisons an entire range but an allocation for less than 8 bytes is
+// desired or if the next allocation address would not be 8-byte aligned.
+//
+// This must be handled carefully on a per-allocator basis and some allocators
+// may not be able to participate.
+//
+// Please see the following link for more information:
+//
+// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm#mapping
+//
+// import "base:sanitizer"
+
+
+/*
+This procedure checks if a byte slice `range` is poisoned and makes sure the
+root address of the poison range is the base pointer of `range`.
+
+This can help guard against buggy allocators returning memory that they have already returned.
+
+This has no effect if `-sanitize:address` is not enabled.
+*/
+// @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
+// ensure_poisoned :: proc(range: []u8, loc := #caller_location) {
+// cond := sanitizer.address_region_is_poisoned(range) == raw_data(range)
+// // If this fails, we've overlapped an allocation and it's our fault.
+// ensure(cond, `This allocator has sliced a block of memory of which some part is not poisoned before returning.
+// This is a bug in the core library and should be reported to the Odin developers with a stack trace and minimal example code if possible.`, loc)
+// }
+
+/*
+This procedure checks if a byte slice `range` is not poisoned.
+
+This can help guard against buggy allocators resizing memory that they should not.
+
+This has no effect if `-sanitize:address` is not enabled.
+*/
+// @(disabled=.Address not_in ODIN_SANITIZER_FLAGS, private)
+// ensure_not_poisoned :: proc(range: []u8, loc := #caller_location) {
+// cond := sanitizer.address_region_is_poisoned(range) == nil
+// // If this fails, we've tried to resize memory that is poisoned, which
+// // could be user error caused by an incorrect `old_memory` pointer.
+// ensure(cond, `This allocator has sliced a block of memory of which some part is poisoned before returning.
+// This may be a bug in the core library, or it could be user error due to an invalid pointer passed to a resize operation.
+// If after ensuring your own code is not responsible, report the problem to the Odin developers with a stack trace and minimal example code if possible.`, loc)
+// }
/*
Nil allocator.
@@ -108,11 +158,10 @@ The arena allocator (also known as a linear allocator, bump allocator,
region allocator) is an allocator that uses a single backing buffer for
allocations.
-The buffer is being used contiguously, from start by end. Each subsequent
-allocation occupies the next adjacent region of memory in the buffer. Since
-arena allocator does not keep track of any metadata associated with the
-allocations and their locations, it is impossible to free individual
-allocations.
+The buffer is used contiguously, from start to end. Each subsequent allocation
+occupies the next adjacent region of memory in the buffer. Since the arena
+allocator does not keep track of any metadata associated with the allocations
+and their locations, it is impossible to free individual allocations.
The arena allocator can be used for temporary allocations in frame-based memory
management. Games are one example of such applications. A global arena can be
@@ -131,7 +180,7 @@ arena_allocator :: proc(arena: ^Arena) -> Allocator {
/*
Initialize an arena.
-This procedure initializes the arena `a` with memory region `data` as it's
+This procedure initializes the arena `a` with memory region `data` as its
backing buffer.
*/
arena_init :: proc(a: ^Arena, data: []byte) {
@@ -139,7 +188,7 @@ arena_init :: proc(a: ^Arena, data: []byte) {
a.offset = 0
a.peak_used = 0
a.temp_count = 0
- sanitizer.address_poison(a.data)
+ // sanitizer.address_poison(a.data)
}
/*
@@ -216,7 +265,7 @@ arena_alloc_bytes_non_zeroed :: proc(
loc := #caller_location
) -> ([]byte, Allocator_Error) {
if a.data == nil {
- panic("Arena is not initialized", loc)
+ panic("Allocation on uninitialized Arena allocator.", loc)
}
#no_bounds_check end := &a.data[a.offset]
ptr := align_forward(end, uintptr(alignment))
@@ -227,16 +276,17 @@ arena_alloc_bytes_non_zeroed :: proc(
a.offset += total_size
a.peak_used = max(a.peak_used, a.offset)
result := byte_slice(ptr, size)
- sanitizer.address_unpoison(result)
+ // ensure_poisoned(result)
+ // sanitizer.address_unpoison(result)
return result, nil
}
/*
-Free all memory to an arena.
+Free all memory back to the arena allocator.
*/
arena_free_all :: proc(a: ^Arena) {
a.offset = 0
- sanitizer.address_poison(a.data)
+ // sanitizer.address_poison(a.data)
}
arena_allocator_proc :: proc(
@@ -275,12 +325,12 @@ arena_allocator_proc :: proc(
}
/*
-Temporary memory region of arena.
+Temporary memory region of an `Arena` allocator.
-Temporary memory regions of arena act as "savepoints" for arena. When one is
-created, the subsequent allocations are done inside the temporary memory
-region. When `end_arena_temp_memory` is called, the arena is rolled back, and
-all of the memory that was allocated from the arena will be freed.
+Temporary memory regions of an arena act as "save-points" for the allocator.
+When one is created, the subsequent allocations are done inside the temporary
+memory region. When `end_arena_temp_memory` is called, the arena is rolled
+back, and all of the memory that was allocated from the arena will be freed.
Multiple temporary memory regions can exist at the same time for an arena.
*/
@@ -314,7 +364,7 @@ allocations *inside* the temporary memory region will be freed to the arena.
end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
assert(tmp.arena.offset >= tmp.prev_offset)
assert(tmp.arena.temp_count > 0)
- sanitizer.address_poison(tmp.arena.data[tmp.prev_offset:tmp.arena.offset])
+ // sanitizer.address_poison(tmp.arena.data[tmp.prev_offset:tmp.arena.offset])
tmp.arena.offset = tmp.prev_offset
tmp.arena.temp_count -= 1
}
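
For context, a sketch of how these save-points are used in practice (`begin_arena_temp_memory` is the counterpart of the `end_arena_temp_memory` shown above; `arena_alloc_bytes` is assumed from this file's naming pattern):

package example

import "core:mem"

with_savepoint :: proc(arena: ^mem.Arena) {
	tmp := mem.begin_arena_temp_memory(arena)
	// Every allocation from here on lives inside the temporary region.
	buf, _ := mem.arena_alloc_bytes(arena, 128)
	_ = buf
	// Roll the arena back; `buf` and anything allocated after it is freed.
	mem.end_arena_temp_memory(tmp)
}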
@@ -328,29 +378,37 @@ scratch_allocator_destroy :: scratch_destroy
Scratch allocator data.
*/
Scratch :: struct {
- data: []byte,
- curr_offset: int,
- prev_allocation: rawptr,
- backup_allocator: Allocator,
- leaked_allocations: [dynamic][]byte,
+ data: []byte,
+ curr_offset: int,
+ prev_allocation: rawptr,
+ prev_allocation_root: rawptr,
+ backup_allocator: Allocator,
+ leaked_allocations: [dynamic][]byte,
}
/*
Scratch allocator.
The scratch allocator works in a similar way to the `Arena` allocator. The
-scratch allocator has a backing buffer, that is being allocated in
-contiguous regions, from start to end.
+scratch allocator has a backing buffer that is allocated in contiguous regions,
+from start to end.
Each subsequent allocation will be the next adjacent region of memory in the
backing buffer. If the allocation doesn't fit into the remaining space of the
backing buffer, this allocation is put at the start of the buffer, and all
-previous allocations will become invalidated. If the allocation doesn't fit
-into the backing buffer as a whole, it will be allocated using a backing
-allocator, and pointer to the allocated memory region will be put into the
-`leaked_allocations` array.
+previous allocations will become invalidated.
+
+If the allocation doesn't fit into the backing buffer as a whole, it will be
+allocated using a backing allocator, and the pointer to the allocated memory
+region will be put into the `leaked_allocations` array. A `Warning`-level log
+message will be sent as well.
-The `leaked_allocations` array is managed by the `context` allocator.
+Allocations that are resized will be resized in-place if they were the last
+allocation. Otherwise, they are re-allocated to avoid overwriting any
+allocations made after them.
+
+The `leaked_allocations` array is managed by the `context` allocator if no
+`backup_allocator` is specified in `scratch_init`.
*/
@(require_results)
scratch_allocator :: proc(allocator: ^Scratch) -> Allocator {
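
A short usage sketch of the scratch allocator as documented (error handling elided; `Megabyte` is the same constant used by `DEFAULT_BACKING_SIZE` below):

package example

import "core:mem"

use_scratch :: proc() {
	s: mem.Scratch
	_ = mem.scratch_init(&s, 1 * mem.Megabyte)
	defer mem.scratch_destroy(&s)

	a := mem.scratch_allocator(&s)
	// Fits in the backing buffer: served linearly from `s.data`.
	small, _ := mem.scratch_alloc(&s, 256)
	// Larger than the backing buffer: falls through to the backup
	// allocator and is recorded in `s.leaked_allocations`.
	big, _ := mem.scratch_alloc(&s, 2 * mem.Megabyte)
	_, _, _ = a, small, big
}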
@@ -361,20 +419,24 @@ scratch_allocator :: proc(allocator: ^Scratch) -> Allocator {
}
/*
-Initialize scratch allocator.
+Initialize a scratch allocator.
*/
scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error {
s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
s.curr_offset = 0
s.prev_allocation = nil
+ s.prev_allocation_root = nil
s.backup_allocator = backup_allocator
s.leaked_allocations.allocator = backup_allocator
- sanitizer.address_poison(s.data)
+ // sanitizer.address_poison(s.data)
return nil
}
/*
Free all data associated with a scratch allocator.
+
+This is distinct from `scratch_free_all` in that it deallocates all memory used
+to set up the allocator, as opposed to all allocations made from that space.
*/
scratch_destroy :: proc(s: ^Scratch) {
if s == nil {
@@ -384,13 +446,13 @@ scratch_destroy :: proc(s: ^Scratch) {
free_bytes(ptr, s.backup_allocator)
}
delete(s.leaked_allocations)
- sanitizer.address_unpoison(s.data)
+ // sanitizer.address_unpoison(s.data)
delete(s.data, s.backup_allocator)
s^ = {}
}
/*
-Allocate memory from scratch allocator.
+Allocate memory from a scratch allocator.
This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is zero-initialized. This procedure
@@ -408,7 +470,7 @@ scratch_alloc :: proc(
}
/*
-Allocate memory from scratch allocator.
+Allocate memory from a scratch allocator.
This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is zero-initialized. This procedure
@@ -429,7 +491,7 @@ scratch_alloc_bytes :: proc(
}
/*
-Allocate non-initialized memory from scratch allocator.
+Allocate non-initialized memory from a scratch allocator.
This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is not explicitly zero-initialized.
@@ -447,7 +509,7 @@ scratch_alloc_non_zeroed :: proc(
}
/*
-Allocate non-initialized memory from scratch allocator.
+Allocate non-initialized memory from a scratch allocator.
This procedure allocates `size` bytes of memory aligned on a boundary specified
by `alignment`. The allocated memory region is not explicitly zero-initialized.
@@ -463,39 +525,47 @@ scratch_alloc_bytes_non_zeroed :: proc(
if s.data == nil {
DEFAULT_BACKING_SIZE :: 4 * Megabyte
if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
- panic("cyclic initialization of the scratch allocator with itself", loc)
+ panic("Cyclic initialization of the scratch allocator with itself.", loc)
}
scratch_init(s, DEFAULT_BACKING_SIZE)
}
- size := size
- size = align_forward_int(size, alignment)
- if size <= len(s.data) {
+ aligned_size := size
+ if alignment > 1 {
+ // It is possible to do this with fewer bytes, but this is the
+ // mathematically simpler solution, and this being a Scratch allocator,
+ // we don't need to be so strict about every byte.
+ aligned_size += alignment - 1
+ }
+ if aligned_size <= len(s.data) {
offset := uintptr(0)
- if s.curr_offset+size <= len(s.data) {
+ if s.curr_offset+aligned_size <= len(s.data) {
offset = uintptr(s.curr_offset)
} else {
+ // The allocation would overrun the end of the available space,
+ // so reset to the starting offset.
offset = 0
}
start := uintptr(raw_data(s.data))
- ptr := align_forward_uintptr(offset+start, uintptr(alignment))
- s.prev_allocation = rawptr(ptr)
- s.curr_offset = int(offset) + size
- result := byte_slice(rawptr(ptr), size)
- sanitizer.address_unpoison(result)
+ ptr := rawptr(offset+start)
+ // We keep track of the original base pointer without extra alignment
+ // in order to later allow the free operation to work from that point.
+ s.prev_allocation_root = ptr
+ if !is_aligned(ptr, alignment) {
+ ptr = align_forward(ptr, uintptr(alignment))
+ }
+ s.prev_allocation = ptr
+ s.curr_offset = int(offset) + aligned_size
+ result := byte_slice(ptr, size)
+ // ensure_poisoned(result)
+ // sanitizer.address_unpoison(result)
return result, nil
} else {
+ // NOTE: No need to use `aligned_size` here, as the backup allocator will handle alignment for us.
a := s.backup_allocator
- if a.procedure == nil {
- a = context.allocator
- s.backup_allocator = a
- }
ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc)
if err != nil {
return ptr, err
}
- if s.leaked_allocations == nil {
- s.leaked_allocations, err = make([dynamic][]byte, a)
- }
append(&s.leaked_allocations, ptr)
if logger := context.logger; logger.lowest_level <= .Warning {
if logger.procedure != nil {
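
The `aligned_size` padding above reserves the worst case: aligning any address forward to an `alignment` boundary skips at most `alignment - 1` bytes, so `size + alignment - 1` bytes always suffice. A small sketch of that bound (power-of-two alignments assumed, as elsewhere in `core:mem`):

package example

// Bytes consumed by aligning `ptr` forward; always in [0, alignment-1].
align_forward_cost :: proc(ptr, alignment: uintptr) -> uintptr {
	aligned := (ptr + alignment - 1) &~ (alignment - 1)
	return aligned - ptr
}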
@@ -507,7 +577,7 @@ scratch_alloc_bytes_non_zeroed :: proc(
}
/*
-Free memory to the scratch allocator.
+Free memory back to the scratch allocator.
This procedure frees the memory region allocated at pointer `ptr`.
@@ -516,7 +586,7 @@ operation is a no-op.
*/
scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error {
if s.data == nil {
- panic("Free on an uninitialized scratch allocator", loc)
+ panic("Free on an uninitialized Scratch allocator.", loc)
}
if ptr == nil {
return nil
@@ -525,9 +595,10 @@ scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Alloc
end := start + uintptr(len(s.data))
old_ptr := uintptr(ptr)
if s.prev_allocation == ptr {
- s.curr_offset = int(uintptr(s.prev_allocation) - start)
- sanitizer.address_poison(s.data[s.curr_offset:])
+ s.curr_offset = int(uintptr(s.prev_allocation_root) - start)
+ // sanitizer.address_poison(s.data[s.curr_offset:])
s.prev_allocation = nil
+ s.prev_allocation_root = nil
return nil
}
if start <= old_ptr && old_ptr < end {
@@ -548,7 +619,7 @@ scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Alloc
}
/*
-Free all memory to the scratch allocator.
+Free all memory back to the scratch allocator.
*/
scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) {
s.curr_offset = 0
@@ -557,15 +628,15 @@ scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) {
free_bytes(ptr, s.backup_allocator, loc)
}
clear(&s.leaked_allocations)
- sanitizer.address_poison(s.data)
+ // sanitizer.address_poison(s.data)
}
/*
-Resize an allocation.
+Resize an allocation owned by a scratch allocator.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -590,10 +661,10 @@ scratch_resize :: proc(
}
/*
-Resize an allocation.
+Resize an allocation owned by a scratch allocator.
-This procedure resizes a memory region, specified by `old_data`, to have a size
-`size` and alignment `alignment`. The newly allocated memory, if any is
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
@@ -621,11 +692,11 @@ scratch_resize_bytes :: proc(
}
/*
-Resize an allocation without zero-initialization.
+Resize an allocation owned by a scratch allocator, without zero-initialization.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is not explicitly zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -650,10 +721,10 @@ scratch_resize_non_zeroed :: proc(
}
/*
-Resize an allocation.
+Resize an allocation owned by a scratch allocator.
-This procedure resizes a memory region, specified by `old_data`, to have a size
-`size` and alignment `alignment`. The newly allocated memory, if any is not
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
@@ -678,17 +749,24 @@ scratch_resize_bytes_non_zeroed :: proc(
if s.data == nil {
DEFAULT_BACKING_SIZE :: 4 * Megabyte
if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
- panic("cyclic initialization of the scratch allocator with itself", loc)
+ panic("Cyclic initialization of the scratch allocator with itself.", loc)
}
scratch_init(s, DEFAULT_BACKING_SIZE)
}
begin := uintptr(raw_data(s.data))
end := begin + uintptr(len(s.data))
old_ptr := uintptr(old_memory)
- if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
+ // We can only sanely resize the last allocation; to do otherwise may
+ // overwrite memory that could very well just have been allocated.
+ //
+ // Also, the alignments must match, otherwise we must re-allocate to
+ // guarantee the user's request.
+ if s.prev_allocation == old_memory && is_aligned(old_memory, alignment) && old_ptr+uintptr(size) < end {
+ // ensure_not_poisoned(old_data)
+ // sanitizer.address_poison(old_memory)
s.curr_offset = int(old_ptr-begin)+size
result := byte_slice(old_memory, size)
- sanitizer.address_unpoison(result)
+ // sanitizer.address_unpoison(result)
return result, nil
}
data, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
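
In effect, only the most recent allocation can grow in place; resizing anything older re-allocates and copies. A behavioral sketch under the semantics described in the comment above (`mem.alloc`/`mem.resize` are the generic `core:mem` helpers; exact signatures assumed):

package example

import "core:mem"

resize_behavior :: proc(s: ^mem.Scratch) {
	a := mem.scratch_allocator(s)
	p1, _ := mem.alloc(16, allocator = a)
	p2, _ := mem.alloc(16, allocator = a)
	// `p2` is the last allocation: it may be resized in place.
	p2, _ = mem.resize(p2, 16, 32, allocator = a)
	// `p1` is not: it is re-allocated so `p2` is not overwritten.
	p1, _ = mem.resize(p1, 16, 64, allocator = a)
	_, _ = p1, p2
}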
@@ -780,7 +858,7 @@ stack_allocator :: proc(stack: ^Stack) -> Allocator {
}
/*
-Initialize the stack allocator.
+Initialize a stack allocator.
This procedure initializes the stack allocator with a backing buffer specified
by `data` parameter.
@@ -790,11 +868,11 @@ stack_init :: proc(s: ^Stack, data: []byte) {
s.prev_offset = 0
s.curr_offset = 0
s.peak_used = 0
- sanitizer.address_poison(data)
+ // sanitizer.address_poison(data)
}
/*
-Allocate memory from stack.
+Allocate memory from a stack allocator.
This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is zero-initialized. This
@@ -812,7 +890,7 @@ stack_alloc :: proc(
}
/*
-Allocate memory from stack.
+Allocate memory from a stack allocator.
This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is zero-initialized. This
@@ -833,7 +911,7 @@ stack_alloc_bytes :: proc(
}
/*
-Allocate memory from stack.
+Allocate memory from a stack allocator.
This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is not explicitly
@@ -851,13 +929,13 @@ stack_alloc_non_zeroed :: proc(
}
/*
-Allocate memory from stack.
+Allocate memory from a stack allocator.
This procedure allocates `size` bytes of memory, aligned to the boundary
specified by `alignment`. The allocated memory is not explicitly
zero-initialized. This procedure returns the slice of the allocated memory.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
stack_alloc_bytes_non_zeroed :: proc(
s: ^Stack,
size: int,
@@ -865,7 +943,7 @@ stack_alloc_bytes_non_zeroed :: proc(
loc := #caller_location
) -> ([]byte, Allocator_Error) {
if s.data == nil {
- panic("Stack allocation on an uninitialized stack allocator", loc)
+ panic("Allocation on an uninitialized Stack allocator.", loc)
}
curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
padding := calc_padding_with_header(
@@ -881,21 +959,21 @@ stack_alloc_bytes_non_zeroed :: proc(
s.curr_offset += padding
next_addr := curr_addr + uintptr(padding)
header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
- sanitizer.address_unpoison(header)
header.padding = padding
header.prev_offset = old_offset
s.curr_offset += size
s.peak_used = max(s.peak_used, s.curr_offset)
result := byte_slice(rawptr(next_addr), size)
- sanitizer.address_unpoison(result)
+ // ensure_poisoned(result)
+ // sanitizer.address_unpoison(result)
return result, nil
}
/*
-Free memory to the stack.
+Free memory back to the stack allocator.
This procedure frees the memory region starting at `old_memory` to the stack.
-If the freeing does is an out of order freeing, the `.Invalid_Pointer` error
+If the free is out of order, the `.Invalid_Pointer` error
is returned.
*/
stack_free :: proc(
@@ -904,7 +982,7 @@ stack_free :: proc(
loc := #caller_location,
) -> (Allocator_Error) {
if s.data == nil {
- panic("Stack free on an uninitialized stack allocator", loc)
+ panic("Free on an uninitialized Stack allocator.", loc)
}
if old_memory == nil {
return nil
@@ -913,7 +991,7 @@ stack_free :: proc(
end := start + uintptr(len(s.data))
curr_addr := uintptr(old_memory)
if !(start <= curr_addr && curr_addr < end) {
- panic("Out of bounds memory address passed to stack allocator (free)", loc)
+ panic("Out of bounds memory address passed to Stack allocator. (free)", loc)
}
if curr_addr >= start+uintptr(s.curr_offset) {
// NOTE(bill): Allow double frees
@@ -922,32 +1000,31 @@ stack_free :: proc(
header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
if old_offset != s.prev_offset {
- // panic("Out of order stack allocator free");
return .Invalid_Pointer
}
s.prev_offset = header.prev_offset
- sanitizer.address_poison(s.data[old_offset:s.curr_offset])
+ // sanitizer.address_poison(s.data[old_offset:s.curr_offset])
s.curr_offset = old_offset
return nil
}
/*
-Free all allocations to the stack.
+Free all memory back to the stack allocator.
*/
stack_free_all :: proc(s: ^Stack, loc := #caller_location) {
s.prev_offset = 0
s.curr_offset = 0
- sanitizer.address_poison(s.data)
+ // sanitizer.address_poison(s.data)
}
/*
-Resize an allocation.
+Resize an allocation owned by a stack allocator.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -972,11 +1049,11 @@ stack_resize :: proc(
}
/*
-Resize an allocation.
+Resize an allocation owned by a stack allocator.
-This procedure resizes a memory region, specified by the `old_data` parameter
-to have a size `size` and alignment `alignment`. The newly allocated memory,
-if any is zero-initialized.
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
+zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -995,8 +1072,8 @@ stack_resize_bytes :: proc(
alignment := DEFAULT_ALIGNMENT,
loc := #caller_location,
) -> ([]byte, Allocator_Error) {
- bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
- if bytes != nil {
+ bytes, err := stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
+ if err == nil {
if old_data == nil {
zero_slice(bytes)
} else if size > len(old_data) {
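
The hunk above fixes a delegation bug: `stack_resize_bytes` previously called `stack_alloc_bytes_non_zeroed`, pushing a brand-new allocation instead of resizing `old_data`, and it tested `bytes != nil` instead of the error. A sketch of the now-correct caller-visible behavior (illustrative sizes):

package example

import "core:mem"

stack_resize_example :: proc() {
	s: mem.Stack
	backing: [1024]u8
	mem.stack_init(&s, backing[:])

	old, _ := mem.stack_alloc_bytes(&s, 16)
	// Resizes the existing region (in place when it is the top
	// allocation) and zeroes only the newly grown bytes.
	grown, _ := mem.stack_resize_bytes(&s, old, 32)
	_ = grown
}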
@@ -1007,11 +1084,11 @@ stack_resize_bytes :: proc(
}
/*
-Resize an allocation without zero-initialization.
+Resize an allocation owned by a stack allocator, without zero-initialization.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is not explicitly zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -1036,11 +1113,11 @@ stack_resize_non_zeroed :: proc(
}
/*
-Resize an allocation without zero-initialization.
+Resize an allocation owned by a stack allocator, without zero-initialization.
-This procedure resizes a memory region, specified by the `old_data` parameter
-to have a size `size` and alignment `alignment`. The newly allocated memory,
-if any is not explicitly zero-initialized.
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
+explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -1062,24 +1139,34 @@ stack_resize_bytes_non_zeroed :: proc(
old_memory := raw_data(old_data)
old_size := len(old_data)
if s.data == nil {
- panic("Stack free all on an uninitialized stack allocator", loc)
+ panic("Resize on an uninitialized Stack allocator.", loc)
}
if old_memory == nil {
return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
}
if size == 0 {
- return nil, nil
+ return nil, stack_free(s, old_memory, loc)
}
start := uintptr(raw_data(s.data))
end := start + uintptr(len(s.data))
curr_addr := uintptr(old_memory)
if !(start <= curr_addr && curr_addr < end) {
- panic("Out of bounds memory address passed to stack allocator (resize)")
+ panic("Out of bounds memory address passed to Stack allocator. (resize)")
}
if curr_addr >= start+uintptr(s.curr_offset) {
// NOTE(bill): Allow double frees
return nil, nil
}
+ if uintptr(old_memory) & uintptr(alignment-1) != 0 {
+ // A different alignment has been requested and the current address
+ // does not satisfy it.
+ data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if err == nil {
+ runtime.copy(data, byte_slice(old_memory, old_size))
+ // sanitizer.address_poison(old_memory)
+ }
+ return data, err
+ }
if old_size == size {
return byte_slice(old_memory, size), nil
}
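
The new `old_memory & (alignment-1)` test relies on `alignment` being a power of two (a standing requirement in `core:mem`): the mask isolates the low bits that must be zero for the address to satisfy the alignment. For example:

package example

mask_check :: proc() {
	addr := uintptr(0x1004)
	assert(addr & uintptr(4-1) == 0)  // 4-byte aligned
	assert(addr & uintptr(16-1) != 0) // not 16-byte aligned: must re-allocate
}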
@@ -1089,6 +1176,7 @@ stack_resize_bytes_non_zeroed :: proc(
data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
if err == nil {
runtime.copy(data, byte_slice(old_memory, old_size))
+ // sanitizer.address_poison(old_memory)
}
return data, err
}
@@ -1098,9 +1186,12 @@ stack_resize_bytes_non_zeroed :: proc(
s.curr_offset += diff // works for smaller sizes too
if diff > 0 {
zero(rawptr(curr_addr + uintptr(diff)), diff)
+ } else {
+ // sanitizer.address_poison(old_data[size:])
}
result := byte_slice(old_memory, size)
- sanitizer.address_unpoison(result)
+ // ensure_poisoned(result)
+ // sanitizer.address_unpoison(result)
return result, nil
}
@@ -1160,7 +1251,7 @@ Small_Stack :: struct {
}
/*
-Initialize small stack.
+Initialize a small stack allocator.
This procedure initializes the small stack allocator with `data` as its backing
buffer.
@@ -1169,15 +1260,18 @@ small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
s.data = data
s.offset = 0
s.peak_used = 0
- sanitizer.address_poison(data)
+ // sanitizer.address_poison(data)
}
/*
Small stack allocator.
-The small stack allocator is just like a stack allocator, with the only
+The small stack allocator is just like a `Stack` allocator, with the only
difference being an extremely small header size. Unlike the stack allocator,
-small stack allows out-of order freeing of memory.
+the small stack allows out-of-order freeing of memory, with the stipulation
+that all allocations made after the freed allocation become invalidated by
+subsequent allocations, which begin to overwrite the memory formerly
+used by the freed allocation.
The memory is allocated in the backing buffer linearly, from start to end.
Each subsequent allocation will get the next adjacent memory region.
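
A sketch of that stipulation: an out-of-order free rewinds the offset, so allocations made after the freed one stay readable only until the next allocation overwrites them.

package example

import "core:mem"

out_of_order_free :: proc() {
	s: mem.Small_Stack
	backing: [256]u8
	mem.small_stack_init(&s, backing[:])

	a, _ := mem.small_stack_alloc(&s, 32)
	b, _ := mem.small_stack_alloc(&s, 32)
	// Out-of-order free: rewinds the offset back to `a`...
	_ = mem.small_stack_free(&s, a)
	// ...so this allocation reuses that space and invalidates `b` too.
	c, _ := mem.small_stack_alloc(&s, 32)
	_, _, _ = a, b, c
}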
@@ -1195,7 +1289,7 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
}
/*
-Allocate memory from small stack.
+Allocate memory from a small stack allocator.
This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is zero-initialized. This procedure
@@ -1213,7 +1307,7 @@ small_stack_alloc :: proc(
}
/*
-Allocate memory from small stack.
+Allocate memory from a small stack allocator.
This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is zero-initialized. This procedure
@@ -1234,7 +1328,7 @@ small_stack_alloc_bytes :: proc(
}
/*
-Allocate memory from small stack.
+Allocate memory from a small stack allocator.
This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
@@ -1252,13 +1346,13 @@ small_stack_alloc_non_zeroed :: proc(
}
/*
-Allocate memory from small stack.
+Allocate memory from a small stack allocator.
This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
procedure returns a slice of the allocated memory region.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
small_stack_alloc_bytes_non_zeroed :: proc(
s: ^Small_Stack,
size: int,
@@ -1266,7 +1360,7 @@ small_stack_alloc_bytes_non_zeroed :: proc(
loc := #caller_location,
) -> ([]byte, Allocator_Error) {
if s.data == nil {
- panic("Small stack is not initialized", loc)
+ panic("Allocation on an uninitialized Small Stack allocator.", loc)
}
alignment := alignment
alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
@@ -1278,17 +1372,21 @@ small_stack_alloc_bytes_non_zeroed :: proc(
s.offset += padding
next_addr := curr_addr + uintptr(padding)
header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
- sanitizer.address_unpoison(header)
- header.padding = auto_cast padding
+ header.padding = cast(u8)padding
+ // We must poison the header, no matter what its state is, because there
+ // may have been an out-of-order free before this point.
+ // sanitizer.address_poison(header)
s.offset += size
s.peak_used = max(s.peak_used, s.offset)
result := byte_slice(rawptr(next_addr), size)
- sanitizer.address_unpoison(result)
+ // NOTE: We cannot ensure the poison state of this allocation, because this
+ // allocator allows out-of-order frees with overwriting.
+ // sanitizer.address_unpoison(result)
return result, nil
}
/*
-Allocate memory from small stack.
+Allocate memory from a small stack allocator.
This procedure allocates `size` bytes of memory aligned to a boundary specified
by `alignment`. The allocated memory is not explicitly zero-initialized. This
@@ -1300,7 +1398,7 @@ small_stack_free :: proc(
loc := #caller_location,
) -> Allocator_Error {
if s.data == nil {
- panic("Small stack is not initialized", loc)
+ panic("Free on an uninitialized Small Stack allocator.", loc)
}
if old_memory == nil {
return nil
@@ -1309,8 +1407,7 @@ small_stack_free :: proc(
end := start + uintptr(len(s.data))
curr_addr := uintptr(old_memory)
if !(start <= curr_addr && curr_addr < end) {
- // panic("Out of bounds memory address passed to stack allocator (free)");
- return .Invalid_Pointer
+ panic("Out of bounds memory address passed to Small Stack allocator. (free)", loc)
}
if curr_addr >= start+uintptr(s.offset) {
// NOTE(bill): Allow double frees
@@ -1318,25 +1415,25 @@ small_stack_free :: proc(
}
header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
- sanitizer.address_poison(s.data[old_offset:s.offset])
+ // sanitizer.address_poison(s.data[old_offset:s.offset])
s.offset = old_offset
return nil
}
/*
-Free all memory to small stack.
+Free all memory back to the small stack allocator.
*/
small_stack_free_all :: proc(s: ^Small_Stack) {
s.offset = 0
- sanitizer.address_poison(s.data)
+ // sanitizer.address_poison(s.data)
}
/*
-Resize an allocation.
+Resize an allocation owned by a small stack allocator.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -1361,11 +1458,11 @@ small_stack_resize :: proc(
}
/*
-Resize an allocation.
+Resize an allocation owned by a small stack allocator.
-This procedure resizes a memory region, specified by the `old_data` parameter
-to have a size `size` and alignment `alignment`. The newly allocated memory,
-if any is zero-initialized.
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
+zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -1396,11 +1493,11 @@ small_stack_resize_bytes :: proc(
}
/*
-Resize an allocation without zero-initialization.
+Resize an allocation owned by a small stack allocator, without zero-initialization.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is not explicitly zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -1425,11 +1522,11 @@ small_stack_resize_non_zeroed :: proc(
}
/*
-Resize an allocation without zero-initialization.
+Resize an allocation owned by a small stack allocator, without zero-initialization.
-This procedure resizes a memory region, specified by the `old_data` parameter
-to have a size `size` and alignment `alignment`. The newly allocated memory,
-if any is not explicitly zero-initialized.
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
+explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
@@ -1449,7 +1546,7 @@ small_stack_resize_bytes_non_zeroed :: proc(
loc := #caller_location,
) -> ([]byte, Allocator_Error) {
if s.data == nil {
- panic("Small stack is not initialized", loc)
+ panic("Resize on an uninitialized Small Stack allocator.", loc)
}
old_memory := raw_data(old_data)
old_size := len(old_data)
@@ -1459,22 +1556,31 @@ small_stack_resize_bytes_non_zeroed :: proc(
return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
}
if size == 0 {
- return nil, nil
+ return nil, small_stack_free(s, old_memory, loc)
}
start := uintptr(raw_data(s.data))
end := start + uintptr(len(s.data))
curr_addr := uintptr(old_memory)
if !(start <= curr_addr && curr_addr < end) {
- // panic("Out of bounds memory address passed to stack allocator (resize)");
- return nil, .Invalid_Pointer
+ panic("Out of bounds memory address passed to Small Stack allocator. (resize)", loc)
}
if curr_addr >= start+uintptr(s.offset) {
// NOTE(bill): Treat as a double free
return nil, nil
}
+ if uintptr(old_memory) & uintptr(alignment-1) != 0 {
+ // A different alignment has been requested and the current address
+ // does not satisfy it.
+ data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if err == nil {
+ runtime.copy(data, byte_slice(old_memory, old_size))
+ // sanitizer.address_poison(old_memory)
+ }
+ return data, err
+ }
if old_size == size {
result := byte_slice(old_memory, size)
- sanitizer.address_unpoison(result)
+ // sanitizer.address_unpoison(result)
return result, nil
}
data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
@@ -1592,18 +1698,18 @@ dynamic_arena_init :: proc(
Dynamic arena allocator.
The dynamic arena allocator uses blocks of a specific size, allocated on-demand
-using the block allocator. This allocator acts similarly to arena. All
+using the block allocator. This allocator acts similarly to `Arena`. All
allocations in a block happen contiguously, from start to end. If an allocation
-does not fit into the remaining space of the block, and its size is smaller
+does not fit into the remaining space of the block and its size is smaller
than the specified out-band size, a new block is allocated using the
`block_allocator` and the allocation is performed from a newly-allocated block.
-If an allocation has bigger size than the specified out-band size, a new block
+If an allocation is larger than the specified out-band size, a new block
is allocated such that the allocation fits into this new block. This is referred
to as an *out-band allocation*. The out-band blocks are kept separately from
normal blocks.
-Just like arena, the dynamic arena does not support freeing of individual
+Just like `Arena`, the dynamic arena does not support freeing of individual
objects.
*/
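
For context, a minimal usage sketch of the dynamic arena as documented (the zero-argument `dynamic_arena_init` call assumes its parameters all have defaults, as the hunk header above suggests):

package example

import "core:mem"

use_dynamic_arena :: proc() {
	arena: mem.Dynamic_Arena
	mem.dynamic_arena_init(&arena)
	defer mem.dynamic_arena_destroy(&arena)

	// Small allocations are carved from the current block; anything larger
	// than the out-band size gets its own dedicated block.
	ptr, _ := mem.dynamic_arena_alloc(&arena, 128)
	_ = ptr
	// No individual frees: reclaim everything at once.
	mem.dynamic_arena_free_all(&arena)
}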
@(require_results)
@@ -1617,7 +1723,7 @@ dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator {
/*
Destroy a dynamic arena.
-This procedure frees all allocations, made on a dynamic arena, including the
+This procedure frees all allocations made on a dynamic arena, including the
unused blocks, as well as the arrays for storing blocks.
*/
dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) {
@@ -1631,7 +1737,7 @@ dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) {
@(private="file")
_dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) {
if a.block_allocator.procedure == nil {
- panic("You must call arena_init on a Pool before using it", loc)
+ panic("You must call `dynamic_arena_init` on a Dynamic Arena before using it.", loc)
}
if a.current_block != nil {
append(&a.used_blocks, a.current_block, loc=loc)
@@ -1649,6 +1755,7 @@ _dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_locatio
nil,
0,
)
+ // sanitizer.address_poison(data)
new_block = raw_data(data)
}
a.bytes_left = a.block_size
@@ -1665,7 +1772,7 @@ by `alignment` from a dynamic arena `a`. The allocated memory is
zero-initialized. This procedure returns a pointer to the newly allocated memory
region.
*/
-@(private, require_results)
+@(require_results)
dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
data, err := dynamic_arena_alloc_bytes(a, size, loc)
return raw_data(data), err
@@ -1736,21 +1843,26 @@ dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc :
memory := a.current_pos
a.current_pos = ([^]byte)(a.current_pos)[n:]
a.bytes_left -= n
- return ([^]byte)(memory)[:size], nil
+ result := ([^]byte)(memory)[:size]
+ // ensure_poisoned(result)
+ // sanitizer.address_unpoison(result)
+ return result, nil
}
/*
-Reset the dynamic arena.
+Reset a dynamic arena allocator.
-This procedure frees all the allocations, owned by the dynamic arena, excluding
+This procedure frees all the allocations owned by the dynamic arena, excluding
the unused blocks.
*/
dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
if a.current_block != nil {
+ // sanitizer.address_poison(a.current_block, a.block_size)
append(&a.unused_blocks, a.current_block, loc=loc)
a.current_block = nil
}
for block in a.used_blocks {
+ // sanitizer.address_poison(block, a.block_size)
append(&a.unused_blocks, block, loc=loc)
}
clear(&a.used_blocks)
@@ -1762,33 +1874,31 @@ dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
}
/*
-Free all memory from a dynamic arena.
+Free all memory back to the dynamic arena allocator.
-This procedure frees all the allocations, owned by the dynamic arena, including
+This procedure frees all the allocations owned by the dynamic arena, including
the unused blocks.
*/
dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
dynamic_arena_reset(a)
for block in a.unused_blocks {
+ // sanitizer.address_unpoison(block, a.block_size)
free(block, a.block_allocator, loc)
}
clear(&a.unused_blocks)
}
/*
-Resize an allocation.
+Resize an allocation owned by a dynamic arena allocator.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.
-If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing
-the memory region located at an address specified by `old_memory`.
-
This procedure returns the pointer to the resized memory region.
*/
@(require_results)
@@ -1804,19 +1914,16 @@ dynamic_arena_resize :: proc(
}
/*
-Resize an allocation.
+Resize an allocation owned by a dynamic arena allocator.
-This procedure resizes a memory region, specified by `old_data`, to have a size
-`size` and alignment `alignment`. The newly allocated memory, if any is
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.
-If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing the
-memory region located at an address specified by `old_memory`.
-
This procedure returns the slice of the resized memory region.
*/
@(require_results)
@@ -1826,6 +1933,10 @@ dynamic_arena_resize_bytes :: proc(
size: int,
loc := #caller_location,
) -> ([]byte, Allocator_Error) {
+ if size == 0 {
+ // NOTE: This allocator has no Free mode.
+ return nil, nil
+ }
bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_data, size, loc)
if bytes != nil {
if old_data == nil {
@@ -1838,19 +1949,16 @@ dynamic_arena_resize_bytes :: proc(
}
/*
-Resize an allocation without zero-initialization.
+Resize an allocation owned by a dynamic arena allocator, without zero-initialization.
-This procedure resizes a memory region, defined by its location, `old_memory`,
-and its size, `old_size` to have a size `size` and alignment `alignment`. The
-newly allocated memory, if any is not explicitly zero-initialized.
+This procedure resizes a memory region defined by its location `old_memory`
+and its size `old_size` to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.
-If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing the
-memory region located at an address specified by `old_memory`.
-
This procedure returns the pointer to the resized memory region.
*/
@(require_results)
@@ -1866,19 +1974,16 @@ dynamic_arena_resize_non_zeroed :: proc(
}
/*
-Resize an allocation.
+Resize an allocation owned by a dynamic arena allocator, without zero-initialization.
-This procedure resizes a memory region, specified by `old_data`, to have a size
-`size` and alignment `alignment`. The newly allocated memory, if any is not
+This procedure resizes a memory region specified by `old_data` to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
explicitly zero-initialized.
If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
allocating a memory region `size` bytes in size, aligned on a boundary specified
by `alignment`.
-If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing
-the memory region located at an address specified by `old_memory`.
-
This procedure returns the slice of the resized memory region.
*/
@(require_results)
@@ -1888,11 +1993,18 @@ dynamic_arena_resize_bytes_non_zeroed :: proc(
size: int,
loc := #caller_location,
) -> ([]byte, Allocator_Error) {
+ if size == 0 {
+ // NOTE: This allocator has no Free mode.
+ return nil, nil
+ }
old_memory := raw_data(old_data)
old_size := len(old_data)
if old_size >= size {
+ // sanitizer.address_poison(old_data[size:])
return byte_slice(old_memory, size), nil
}
+ // No information is kept about allocations in this allocator, thus we
+ // cannot truly resize anything and must reallocate.
data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
if err == nil {
runtime.copy(data, byte_slice(old_memory, old_size))
@@ -1953,7 +2065,7 @@ Buddy_Block :: struct #align(align_of(uint)) {
/*
Obtain the next buddy block.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
return (^Buddy_Block)(([^]byte)(block)[block.size:])
}
@@ -1961,7 +2073,7 @@ buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
/*
Split the block into two, by truncating the given block to a given size.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
block := block
if block != nil && size != 0 {
@@ -1984,6 +2096,7 @@ buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
/*
Coalesce contiguous blocks in a range of blocks into one.
*/
+@(no_sanitize_address)
buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
for {
// Keep looping until there are no more buddies to coalesce
@@ -2020,7 +2133,7 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
/*
Find the best block for storing a given size in a range of blocks.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
assert(size != 0)
best_block: ^Buddy_Block
@@ -2106,7 +2219,7 @@ buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
}
/*
-Initialize the buddy allocator.
+Initialize a buddy allocator.
This procedure initializes the buddy allocator `b` with a backing buffer `data`
and block alignment specified by `alignment`.
@@ -2126,6 +2239,7 @@ buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint,
b.head.is_free = true
b.tail = buddy_block_next(b.head)
b.alignment = alignment
+ // sanitizer.address_poison(data)
}
/*
@@ -2146,11 +2260,12 @@ buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
/*
Allocate memory from a buddy allocator.
-This procedure allocates `size` bytes of memory aligned on a boundary specified
-by `alignment`. The allocated memory region is zero-initialized. This procedure
-returns a pointer to the allocated memory region.
+This procedure allocates `size` bytes of memory. The allocation's alignment is
+fixed to the `alignment` specified at initialization. The allocated memory
+region is zero-initialized. This procedure returns a pointer to the allocated
+memory region.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
bytes, err := buddy_allocator_alloc_bytes(b, size)
return raw_data(bytes), err
@@ -2159,11 +2274,12 @@ buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Alloc
/*
Allocate memory from a buddy allocator.
-This procedure allocates `size` bytes of memory aligned on a boundary specified
-by `alignment`. The allocated memory region is zero-initialized. This procedure
-returns a slice of the allocated memory region.
+This procedure allocates `size` bytes of memory. The allocation's alignment is
+fixed to the `alignment` specified at initialization. The allocated memory
+region is zero-initialized. This procedure returns a slice of the allocated
+memory region.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
if bytes != nil {
@@ -2175,11 +2291,12 @@ buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte,
/*
Allocate non-initialized memory from a buddy allocator.
-This procedure allocates `size` bytes of memory aligned on a boundary specified
-by `alignment`. The allocated memory region is not explicitly zero-initialized.
-This procedure returns a pointer to the allocated memory region.
+This procedure allocates `size` bytes of memory. The allocation's alignment is
+fixed to the `alignment` specified at initialization. The allocated memory
+region is not explicitly zero-initialized. This procedure returns a pointer to
+the allocated memory region.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
return raw_data(bytes), err
@@ -2188,11 +2305,12 @@ buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (ra
/*
Allocate non-initialized memory from a buddy allocator.
-This procedure allocates `size` bytes of memory aligned on a boundary specified
-by `alignment`. The allocated memory region is not explicitly zero-initialized.
-This procedure returns a slice of the allocated memory region.
+This procedure allocates `size` bytes of memory. The allocation's alignment is
+fixed to the `alignment` specified at initialization. The allocated memory
+region is not explicitly zero-initialized. This procedure returns a slice of
+the allocated memory region.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
if size != 0 {
actual_size := buddy_block_size_required(b, size)
@@ -2207,25 +2325,29 @@ buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint)
}
found.is_free = false
data := ([^]byte)(found)[b.alignment:][:size]
+ // ensure_poisoned(data)
+ // sanitizer.address_unpoison(data)
return data, nil
}
return nil, nil
}
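As the wrappers above show, the zeroed variants call this procedure and then
zero the returned slice; the non-zeroed variants skip that step. A minimal
sketch, continuing the setup from the earlier example:

	bytes, err := mem.buddy_allocator_alloc_bytes_non_zeroed(&b, 100)
	// len(bytes) == 100, but the contents are whatever the block held before.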
/*
-Free memory to the buddy allocator.
+Free memory back to the buddy allocator.
This procedure frees the memory region allocated at pointer `ptr`.
If `ptr` is `nil`, this operation is a no-op. If `ptr` lies outside the
allocator's range, `.Invalid_Pointer` is returned.
*/
+@(no_sanitize_address)
buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
if ptr != nil {
if !(b.head <= ptr && ptr <= b.tail) {
return .Invalid_Pointer
}
block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
+ // sanitizer.address_poison(ptr, block.size)
block.is_free = true
buddy_block_coalescence(b.head, b.tail)
}
@@ -2233,16 +2355,18 @@ buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Erro
}
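A short sketch of the free path, continuing the same setup (the error values
follow the checks visible in the procedure body above):

	p, _ := mem.buddy_allocator_alloc(&b, 64)
	err := mem.buddy_allocator_free(&b, p) // nil on success
	x: int
	// A pointer outside the allocator's buffer is rejected:
	assert(mem.buddy_allocator_free(&b, &x) == .Invalid_Pointer)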
/*
-Free all memory to the buddy allocator.
+Free all memory back to the buddy allocator.
*/
+@(no_sanitize_address)
buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) {
alignment := b.alignment
head := ([^]byte)(b.head)
tail := ([^]byte)(b.tail)
data := head[:ptr_sub(tail, head)]
- buddy_allocator_init(b, data, alignment)
+ buddy_allocator_init(b, data, alignment)
}
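Since `buddy_allocator_free_all` resets the allocator by re-running
`buddy_allocator_init` over the original buffer, it may be called repeatedly;
afterwards the full capacity is available again:

	mem.buddy_allocator_free_all(&b)
	p2, _ := mem.buddy_allocator_alloc(&b, 128) // succeeds as if freshly initialized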
+@(no_sanitize_address)
buddy_allocator_proc :: proc(
allocator_data: rawptr,
mode: Allocator_Mode,
@@ -2324,8 +2448,8 @@ compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
@(no_sanitize_address)
get_unpoisoned_header :: #force_inline proc(ptr: rawptr) -> Header {
header := ([^]Header)(ptr)[-1]
- a := max(header.alignment, size_of(Header))
- sanitizer.address_unpoison(rawptr(uintptr(ptr)-uintptr(a)), a)
+ // a := max(header.alignment, size_of(Header))
+ // sanitizer.address_unpoison(rawptr(uintptr(ptr)-uintptr(a)), a)
return header
}
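The compat allocator stores a `Header` record immediately before every pointer
it returns, which is what the `[-1]` index above reads back. An illustrative
sketch of the assumed layout (the amount of padding depends on the requested
alignment):

	// [ ...padding... ][ Header ][ user data ... ]
	//                            ^ ptr handed to the caller
	header := ([^]Header)(ptr)[-1] // step back one Header-sized slot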
@@ -2344,7 +2468,7 @@ compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
alignment = alignment,
}
- sanitizer.address_poison(raw_data(allocation), a)
+ // sanitizer.address_poison(raw_data(allocation), a)
return
case .Free:
@@ -2375,7 +2499,7 @@ compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
alignment = new_alignment,
}
- sanitizer.address_poison(raw_data(allocation), a)
+ // sanitizer.address_poison(raw_data(allocation), a)
return
case .Free_All:
diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin
index a00131b7f..3f16a2897 100644
--- a/core/mem/rollback_stack_allocator.odin
+++ b/core/mem/rollback_stack_allocator.odin
@@ -1,7 +1,7 @@
package mem
import "base:runtime"
-import "base:sanitizer"
+// import "base:sanitizer"
/*
Rollback stack default block size.
@@ -134,7 +134,7 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
stack.head.next_block = nil
stack.head.last_alloc = nil
stack.head.offset = 0
- sanitizer.address_poison(stack.head.buffer)
+ // sanitizer.address_poison(stack.head.buffer)
}
/*
@@ -241,7 +241,7 @@ rb_alloc_bytes_non_zeroed :: proc(
block.offset = cast(uintptr)len(block.buffer)
}
res := ptr[:size]
- sanitizer.address_unpoison(res)
+ // sanitizer.address_unpoison(res)
return res, nil
}
return nil, .Out_Of_Memory
@@ -338,7 +338,7 @@ rb_resize_bytes_non_zeroed :: proc(
block.offset += cast(uintptr)size - cast(uintptr)old_size
}
res := (ptr)[:size]
- sanitizer.address_unpoison(res)
+ // sanitizer.address_unpoison(res)
#no_bounds_check return res, nil
}
}
diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin
index 1f85c27dc..e53d76d61 100644
--- a/core/mem/tlsf/tlsf_internal.odin
+++ b/core/mem/tlsf/tlsf_internal.odin
@@ -10,7 +10,7 @@
package mem_tlsf
import "base:intrinsics"
-import "base:sanitizer"
+// import "base:sanitizer"
import "base:runtime"
// log2 of number of linear subdivisions of block sizes.
@@ -210,7 +210,7 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) ->
return nil, .Out_Of_Memory
}
- sanitizer.address_poison(new_pool_buf)
+ // sanitizer.address_poison(new_pool_buf)
// Allocate a new link in the `control.pool` tracking structure.
new_pool := new_clone(Pool{
@@ -277,7 +277,7 @@ free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
block := block_from_ptr(ptr)
assert(!block_is_free(block), "block already marked as free") // double free
- sanitizer.address_poison(ptr, block.size)
+ // sanitizer.address_poison(ptr, block.size)
block_mark_as_free(block)
block = block_merge_prev(control, block)
block = block_merge_next(control, block)
@@ -321,7 +321,7 @@ resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, align
block_trim_used(control, block, adjust)
res = ([^]byte)(ptr)[:new_size]
- sanitizer.address_unpoison(res)
+ // sanitizer.address_unpoison(res)
if min_size < new_size {
to_zero := ([^]byte)(ptr)[min_size:new_size]
@@ -789,7 +789,7 @@ block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint
block_trim_free(control, block, size)
block_mark_as_used(block)
res = ([^]byte)(block_to_ptr(block))[:size]
- sanitizer.address_unpoison(res)
+ // sanitizer.address_unpoison(res)
}
return
}
diff --git a/core/mem/virtual/arena.odin b/core/mem/virtual/arena.odin
index 4e1cc2466..4f7bd445d 100644
--- a/core/mem/virtual/arena.odin
+++ b/core/mem/virtual/arena.odin
@@ -3,7 +3,7 @@ package mem_virtual
import "core:mem"
import "core:sync"
-import "base:sanitizer"
+// import "base:sanitizer"
Arena_Kind :: enum uint {
Growing = 0, // Chained memory blocks (singly linked list).
@@ -55,7 +55,7 @@ arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING
if arena.minimum_block_size == 0 {
arena.minimum_block_size = reserved
}
- sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
+ // sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
return
}
@@ -68,7 +68,7 @@ arena_init_static :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_STATIC_R
arena.curr_block = memory_block_alloc(commit_size, reserved, {}) or_return
arena.total_used = 0
arena.total_reserved = arena.curr_block.reserved
- sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
+ // sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
return
}
@@ -82,7 +82,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro
arena.kind = .Buffer
- sanitizer.address_poison(buffer[:])
+ // sanitizer.address_poison(buffer[:])
block_base := raw_data(buffer)
block := (^Memory_Block)(block_base)
@@ -163,7 +163,7 @@ arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_l
arena.total_used = arena.curr_block.used
}
- sanitizer.address_unpoison(data)
+ // sanitizer.address_unpoison(data)
return
}
@@ -182,7 +182,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location)
mem.zero_slice(arena.curr_block.base[arena.curr_block.used:][:prev_pos-pos])
}
arena.total_used = arena.curr_block.used
- sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
+ // sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
return true
} else if pos == 0 {
arena.total_used = 0
@@ -200,7 +200,7 @@ arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_locat
arena.total_reserved -= free_block.reserved
arena.curr_block = free_block.prev
- sanitizer.address_poison(free_block.base[:free_block.committed])
+ // sanitizer.address_poison(free_block.base[:free_block.committed])
memory_block_dealloc(free_block)
}
}
@@ -219,9 +219,9 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
if arena.curr_block != nil {
curr_block_used := int(arena.curr_block.used)
arena.curr_block.used = 0
- sanitizer.address_unpoison(arena.curr_block.base[:curr_block_used])
+ // sanitizer.address_unpoison(arena.curr_block.base[:curr_block_used])
mem.zero(arena.curr_block.base, curr_block_used)
- sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
+ // sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
}
arena.total_used = 0
case .Static, .Buffer:
@@ -349,7 +349,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
if size < old_size {
// shrink data in-place
data = old_data[:size]
- sanitizer.address_poison(old_data[size:old_size])
+ // sanitizer.address_poison(old_data[size:old_size])
return
}
@@ -363,7 +363,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
_ = alloc_from_memory_block(block, new_end - old_end, 1, default_commit_size=arena.default_commit_size) or_return
arena.total_used += block.used - prev_used
data = block.base[start:new_end]
- sanitizer.address_unpoison(data)
+ // sanitizer.address_unpoison(data)
return
}
}
@@ -374,7 +374,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
return
}
copy(new_memory, old_data[:old_size])
- sanitizer.address_poison(old_data[:old_size])
+ // sanitizer.address_poison(old_data[:old_size])
return new_memory, nil
case .Query_Features:
set := (^mem.Allocator_Mode_Set)(old_memory)
diff --git a/core/mem/virtual/virtual.odin b/core/mem/virtual/virtual.odin
index 031fb721a..c4c3b1727 100644
--- a/core/mem/virtual/virtual.odin
+++ b/core/mem/virtual/virtual.odin
@@ -2,7 +2,7 @@ package mem_virtual
import "core:mem"
import "base:intrinsics"
-import "base:sanitizer"
+// import "base:sanitizer"
import "base:runtime"
_ :: runtime
@@ -22,7 +22,7 @@ reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Erro
@(no_sanitize_address)
commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
- sanitizer.address_unpoison(data, size)
+ // sanitizer.address_unpoison(data, size)
return _commit(data, size)
}
@@ -35,13 +35,13 @@ reserve_and_commit :: proc "contextless" (size: uint) -> (data: []byte, err: All
@(no_sanitize_address)
decommit :: proc "contextless" (data: rawptr, size: uint) {
- sanitizer.address_poison(data, size)
+ // sanitizer.address_poison(data, size)
_decommit(data, size)
}
@(no_sanitize_address)
release :: proc "contextless" (data: rawptr, size: uint) {
- sanitizer.address_unpoison(data, size)
+ // sanitizer.address_unpoison(data, size)
_release(data, size)
}
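A minimal sketch of the reserve/commit cycle these wrappers expose, assuming
`import "core:mem/virtual"` (the sizes are arbitrary):

	data, err := virtual.reserve(1 << 20)    // reserve 1 MiB of address space
	_ = virtual.commit(raw_data(data), 4096) // back the first 4 KiB with memory
	virtual.decommit(raw_data(data), 4096)   // drop the backing, keep the reservation
	virtual.release(raw_data(data), 1 << 20) // release the whole reservation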
@@ -179,7 +179,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint,
data = block.base[block.used+alignment_offset:][:min_size]
block.used += size
- sanitizer.address_unpoison(data)
+ // sanitizer.address_unpoison(data)
return
}
diff --git a/tests/core/mem/test_core_mem.odin b/tests/core/mem/test_core_mem.odin
index bd072b4e9..c1cb59c68 100644
--- a/tests/core/mem/test_core_mem.odin
+++ b/tests/core/mem/test_core_mem.odin
@@ -193,3 +193,111 @@ fail_if_allocations_overlap :: proc(t: ^testing.T, a, b: []byte) {
testing.fail_now(t, "Allocations overlapped")
}
}
+
+
+// This merely does a few simple operations to test basic sanity.
+//
+// A serious test of an allocator would require hooking it up to a benchmark or
+// a large, complicated program in order to get all manner of usage patterns.
+basic_sanity_test :: proc(t: ^testing.T, allocator: mem.Allocator, limit: int, loc := #caller_location) -> bool {
+ context.allocator = allocator
+
+ {
+ a := make([dynamic]u8)
+ for i in 0..<limit {
+ append(&a, u8(i))
+ }
+ testing.expect_value(t, len(a), limit, loc) or_return
+ for i in 0..<limit {
+ testing.expect_value(t, a[i], u8(i), loc) or_return
+ }
+ delete(a)
+ }
+
+ {
+ v := make([]u8, limit)
+ testing.expect_value(t, len(v), limit, loc) or_return
+ for i in 0..<limit {
+ v[i] = u8(i)
+ testing.expect_value(t, v[i], u8(i), loc) or_return
+ }
+ delete(v)
+ }
+
+ {
+ for i in 0..<limit {
+ v := make([]u8, 1)
+ v[0] = u8(i)
+ testing.expect_value(t, v[0], u8(i), loc) or_return
+ delete(v)
+ }
+ }
+
+ return true
+}
+
+@test
+test_scratch :: proc(t: ^testing.T) {
+ N :: 4096
+ sa: mem.Scratch_Allocator
+ mem.scratch_init(&sa, N)
+ defer mem.scratch_destroy(&sa)
+ basic_sanity_test(t, mem.scratch_allocator(&sa), N / 4)
+ basic_sanity_test(t, mem.scratch_allocator(&sa), N / 4)
+}
+
+@test
+test_stack :: proc(t: ^testing.T) {
+ N :: 4096
+ buf: [N]u8
+
+ sa: mem.Stack
+ mem.stack_init(&sa, buf[:])
+ basic_sanity_test(t, mem.stack_allocator(&sa), N / 4)
+ basic_sanity_test(t, mem.stack_allocator(&sa), N / 4)
+}
+
+@test
+test_small_stack :: proc(t: ^testing.T) {
+ N :: 4096
+ buf: [N]u8
+
+ ss: mem.Small_Stack
+ mem.small_stack_init(&ss, buf[:])
+ basic_sanity_test(t, mem.small_stack_allocator(&ss), N / 4)
+	// The test cannot be run a second time at full size on a Small Stack:
+	// resizing the dynamic array during the first run leaves a gap in the
+	// buffer, reducing the space available to the second run.
+ basic_sanity_test(t, mem.small_stack_allocator(&ss), N / 8)
+}
+
+@test
+test_dynamic_arena :: proc(t: ^testing.T) {
+ da: mem.Dynamic_Arena
+ mem.dynamic_arena_init(&da)
+ defer mem.dynamic_arena_destroy(&da)
+ basic_sanity_test(t, mem.dynamic_arena_allocator(&da), da.block_size / 4)
+ basic_sanity_test(t, mem.dynamic_arena_allocator(&da), da.block_size / 4)
+}
+
+@test
+test_buddy :: proc(t: ^testing.T) {
+ N :: 4096
+ buf: [N]u8
+
+ ba: mem.Buddy_Allocator
+ mem.buddy_allocator_init(&ba, buf[:], align_of(u8))
+ basic_sanity_test(t, mem.buddy_allocator(&ba), N / 8)
+ basic_sanity_test(t, mem.buddy_allocator(&ba), N / 8)
+}
+
+@test
+test_rollback :: proc(t: ^testing.T) {
+ N :: 4096
+ buf: [N]u8
+
+ rb: mem.Rollback_Stack
+ mem.rollback_stack_init(&rb, buf[:])
+ basic_sanity_test(t, mem.rollback_stack_allocator(&rb), N / 8)
+ basic_sanity_test(t, mem.rollback_stack_allocator(&rb), N / 8)
+}
diff --git a/tests/issues/run.bat b/tests/issues/run.bat
index 8e71c3f3d..76d8f58b6 100644
--- a/tests/issues/run.bat
+++ b/tests/issues/run.bat
@@ -16,6 +16,7 @@ set COMMON=-define:ODIN_TEST_FANCY=false -file -vet -strict-style
..\..\..\odin test ..\test_issue_2615.odin %COMMON% || exit /b
..\..\..\odin test ..\test_issue_2637.odin %COMMON% || exit /b
..\..\..\odin test ..\test_issue_2666.odin %COMMON% || exit /b
+..\..\..\odin test ..\test_issue_2694.odin %COMMON% || exit /b
..\..\..\odin test ..\test_issue_4210.odin %COMMON% || exit /b
..\..\..\odin test ..\test_issue_4364.odin %COMMON% || exit /b
..\..\..\odin test ..\test_issue_4584.odin %COMMON% || exit /b
diff --git a/tests/issues/run.sh b/tests/issues/run.sh
index fc8ab513f..305329e7d 100755
--- a/tests/issues/run.sh
+++ b/tests/issues/run.sh
@@ -17,6 +17,7 @@ $ODIN test ../test_issue_2466.odin $COMMON
$ODIN test ../test_issue_2615.odin $COMMON
$ODIN test ../test_issue_2637.odin $COMMON
$ODIN test ../test_issue_2666.odin $COMMON
+$ODIN test ../test_issue_2694.odin $COMMON
$ODIN test ../test_issue_4210.odin $COMMON
$ODIN test ../test_issue_4364.odin $COMMON
$ODIN test ../test_issue_4584.odin $COMMON
diff --git a/tests/issues/test_issue_2694.odin b/tests/issues/test_issue_2694.odin
new file mode 100644
index 000000000..01860d603
--- /dev/null
+++ b/tests/issues/test_issue_2694.odin
@@ -0,0 +1,42 @@
+package test_issues
+
+import "core:fmt"
+import "core:encoding/json"
+import "core:log"
+import "core:mem"
+import "core:testing"
+
+// This is a minimal reproduction of the code in #2694.
+// It exemplifies the original problem as briefly as possible.
+
+SAMPLE_JSON :: `
+{
+ "foo": 0,
+ "things": [
+ { "a": "ZZZZ"},
+ ]
+}
+`
+
+@test
+test_issue_2694 :: proc(t: ^testing.T) {
+ into: struct {
+ foo: int,
+ things: []json.Object,
+ }
+
+ scratch := new(mem.Scratch_Allocator)
+ defer free(scratch)
+ if mem.scratch_allocator_init(scratch, 4 * mem.Megabyte) != .None {
+ log.error("unable to initialize scratch allocator")
+ return
+ }
+ defer mem.scratch_allocator_destroy(scratch)
+
+ err := json.unmarshal_string(SAMPLE_JSON, &into, allocator = mem.scratch_allocator(scratch))
+ testing.expect(t, err == nil)
+
+ output := fmt.tprintf("%v", into)
+ expected := `{foo = 0, things = [map[a="ZZZZ"]]}`
+ testing.expectf(t, output == expected, "\n\texpected: %q\n\tgot: %q", expected, output)
+}