author     flysand7 <thebumboni@gmail.com>    2024-09-14 10:46:35 +1100
committer  flysand7 <thebumboni@gmail.com>    2024-09-14 10:46:35 +1100
commit     016d1a84d4e09ea06491d9e5a4661e313906e3aa (patch)
tree       c14ce4a701d9fffc2ec67af25b9bac7a902e0890 /core/mem/rollback_stack_allocator.odin
parent     3ed2ab6e2c18081d80961168a57155e6f31ac573 (diff)
[mem]: Document mutex, rollback stack and tracking allocators
Diffstat (limited to 'core/mem/rollback_stack_allocator.odin')
-rw-r--r--    core/mem/rollback_stack_allocator.odin    135
1 file changed, 74 insertions(+), 61 deletions(-)
diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin
index 761435552..61ec73546 100644
--- a/core/mem/rollback_stack_allocator.odin
+++ b/core/mem/rollback_stack_allocator.odin
@@ -1,39 +1,15 @@
package mem
-/*
-The Rollback Stack Allocator was designed for the test runner to be fast,
-able to grow, and respect the Tracking Allocator's requirement for
-individual frees. It is not overly concerned with fragmentation, however.
-
-It has support for expansion when configured with a block allocator and
-limited support for out-of-order frees.
-
-Allocation has constant-time best and usual case performance.
-At worst, it is linear according to the number of memory blocks.
-
-Allocation follows a first-fit strategy when there are multiple memory
-blocks.
-
-Freeing has constant-time best and usual case performance.
-At worst, it is linear according to the number of memory blocks and number
-of freed items preceding the last item in a block.
-
-Resizing has constant-time performance, if it's the last item in a block, or
-the new size is smaller. Naturally, this becomes linear-time if there are
-multiple blocks to search for the pointer's owning block. Otherwise, the
-allocator defaults to a combined alloc & free operation internally.
-
-Out-of-order freeing is accomplished by collapsing a run of freed items
-from the last allocation backwards.
-
-Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
-the requested alignment.
-*/
import "base:runtime"
+/*
+Rollback stack default block size.
+*/
ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
/*
+Rollback stack max head block size.
+
This limitation is due to the size of `prev_ptr`, but it is only for the
head block; any allocation in excess of the allocator's `block_size` is
valid, so long as the block allocator can handle it.
@@ -43,12 +19,18 @@ within is freed; they are immediately returned to the block allocator.
*/
ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
+/*
+Allocation header of the rollback stack allocator.
+*/
Rollback_Stack_Header :: bit_field u64 {
prev_offset: uintptr | 32,
is_free: bool | 1,
prev_ptr: uintptr | 31,
}
+/*
+Block header of the rollback stack allocator.
+*/
Rollback_Stack_Block :: struct {
next_block: ^Rollback_Stack_Block,
last_alloc: rawptr,
@@ -56,6 +38,9 @@ Rollback_Stack_Block :: struct {
buffer: []byte,
}
+/*
+Rollback stack allocator data.
+*/
Rollback_Stack :: struct {
head: ^Rollback_Stack_Block,
block_size: int,
@@ -111,6 +96,9 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_
}
}
+/*
+Free memory back to a rollback stack allocator.
+*/
@(private="file", require_results)
rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
parent, block, header := rb_find_ptr(stack, ptr) or_return
@@ -129,6 +117,9 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
return nil
}
+/*
+Free all memory owned by the rollback stack allocator.
+*/
@(private="file")
rb_free_all :: proc(stack: ^Rollback_Stack) {
for block := stack.head.next_block; block != nil; /**/ {
@@ -142,14 +133,16 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
stack.head.offset = 0
}
+/*
+Resize an allocation made on a rollback stack allocator.
+*/
@(private="file", require_results)
-rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+rb_resize_non_zeroed :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
if ptr != nil {
if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
// `block.offset` should never underflow because it is contingent
// on `old_size` in the first place, assuming sane arguments.
assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
-
if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
// Prevent singleton allocations from fragmenting by forbidding
// them to shrink, removing the possibility of overflow bugs.
@@ -160,27 +153,26 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment
}
}
}
-
- result = rb_alloc(stack, size, alignment) or_return
+ result = rb_alloc_non_zeroed(stack, size, alignment) or_return
runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
err = rb_free(stack, ptr)
-
return
}
+/*
+Allocate memory using the rollback stack allocator.
+*/
@(private="file", require_results)
-rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+rb_alloc_non_zeroed :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
parent: ^Rollback_Stack_Block
for block := stack.head; /**/; block = block.next_block {
when !ODIN_DISABLE_ASSERT {
allocated_new_block: bool
}
-
if block == nil {
if stack.block_allocator.procedure == nil {
return nil, .Out_Of_Memory
}
-
minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
new_block_size := max(minimum_size_required, stack.block_size)
block = rb_make_block(new_block_size, stack.block_allocator) or_return
@@ -189,10 +181,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
allocated_new_block = true
}
}
-
start := raw_data(block.buffer)[block.offset:]
padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
-
if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
when !ODIN_DISABLE_ASSERT {
if allocated_new_block {
@@ -202,54 +192,50 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
parent = block
continue
}
-
header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
ptr := start[padding:]
-
header^ = {
prev_offset = block.offset,
prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
is_free = false,
}
-
block.last_alloc = ptr
block.offset += padding + cast(uintptr)size
-
if len(block.buffer) > stack.block_size {
// This block exceeds the allocator's standard block size and is considered a singleton.
// Prevent any further allocations on it.
block.offset = cast(uintptr)len(block.buffer)
}
-
#no_bounds_check return ptr[:size], nil
}
-
return nil, .Out_Of_Memory
}
@(private="file", require_results)
rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
-
block = cast(^Rollback_Stack_Block)raw_data(buffer)
#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
return
}
-
+/*
+Initialize the rollback stack allocator using a fixed backing buffer.
+*/
rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
-
block := cast(^Rollback_Stack_Block)raw_data(buffer)
block^ = {}
#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
-
stack^ = {}
stack.head = block
stack.block_size = len(block.buffer)
}
+/*
+Initialize the rollback stack allocator using a backing block allocator.
+*/
rollback_stack_init_dynamic :: proc(
stack: ^Rollback_Stack,
block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -262,22 +248,25 @@ rollback_stack_init_dynamic :: proc(
// size is insufficient; check only on platforms with big enough ints.
assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
}
-
block := rb_make_block(block_size, block_allocator) or_return
-
stack^ = {}
stack.head = block
stack.block_size = block_size
stack.block_allocator = block_allocator
-
return nil
}
+/*
+Initialize the rollback stack.
+*/
rollback_stack_init :: proc {
rollback_stack_init_buffered,
rollback_stack_init_dynamic,
}
+/*
+Destroy a rollback stack.
+*/
rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
if stack.block_allocator.procedure != nil {
rb_free_all(stack)
@@ -286,6 +275,37 @@ rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
stack^ = {}
}
+/*
+Rollback stack allocator.
+
+The Rollback Stack Allocator was designed for the test runner: it is fast,
+able to grow, and respects the Tracking Allocator's requirement for
+individual frees. It is not overly concerned with fragmentation, however.
+
+It has support for expansion when configured with a block allocator and
+limited support for out-of-order frees.
+
+Allocation has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks.
+
+Allocation follows a first-fit strategy when there are multiple memory
+blocks.
+
+Freeing has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks and number
+of freed items preceding the last item in a block.
+
+Resizing has constant-time performance if the allocation is the last item
+in a block or the new size is smaller, though finding the pointer's owning
+block is linear when there are multiple blocks to search. Otherwise, the
+allocator falls back to a combined alloc & free operation internally.
+
+Out-of-order freeing is accomplished by collapsing a run of freed items
+from the last allocation backwards.
+
+Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
+the requested alignment.
+*/
@(require_results)
rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
return Allocator {
@@ -309,38 +329,31 @@ rollback_stack_allocator_proc :: proc(
case .Alloc, .Alloc_Non_Zeroed:
assert(size >= 0, "Size must be positive or zero.", location)
assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
- result = rb_alloc(stack, size, alignment) or_return
-
+ result = rb_alloc_non_zeroed(stack, size, alignment) or_return
if mode == .Alloc {
zero_slice(result)
}
-
case .Free:
err = rb_free(stack, old_memory)
case .Free_All:
rb_free_all(stack)
-
case .Resize, .Resize_Non_Zeroed:
assert(size >= 0, "Size must be positive or zero.", location)
assert(old_size >= 0, "Old size must be positive or zero.", location)
assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
- result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
-
+ result = rb_resize_non_zeroed(stack, old_memory, old_size, size, alignment) or_return
#no_bounds_check if mode == .Resize && size > old_size {
zero_slice(result[old_size:])
}
-
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
}
return nil, nil
-
case .Query_Info:
return nil, .Mode_Not_Implemented
}
-
return
}
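
As a cross-check on the numbers in the documentation above: the header bit_field packs prev_offset (32 bits), is_free (1 bit), and prev_ptr (31 bits) into a single u64, which is the stated 8-byte per-allocation overhead, and the 31-bit prev_ptr is what caps head blocks at 2 Gigabytes (1 << 31 bytes). A compile-time sketch, not part of this patch:

package main

import "core:mem"

// Both facts follow directly from the declarations in this file:
// the header is exactly one u64 (32 + 1 + 31 bits), and the head
// block cap equals 2 * Gigabyte, i.e. 1 << 31 bytes.
#assert(size_of(mem.Rollback_Stack_Header) == 8)
#assert(mem.ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE == 1 << 31)

main :: proc() {}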
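The buffered initializer trades growth for a fixed footprint. A minimal sketch of that configuration, not part of this patch, assuming the standard core:mem and core:fmt APIs; without a block allocator configured, exhausting the buffer yields .Out_Of_Memory rather than expansion:

package main

import "core:fmt"
import "core:mem"

main :: proc() {
	// Per the assert in rollback_stack_init_buffered, the buffer must
	// hold at least one block header, one allocation header, and one
	// pointer-sized payload.
	buffer: [4096]byte

	stack: mem.Rollback_Stack
	mem.rollback_stack_init_buffered(&stack, buffer[:])
	defer mem.rollback_stack_destroy(&stack)

	allocator := mem.rollback_stack_allocator(&stack)

	// A request larger than the fixed buffer cannot be satisfied.
	data, err := mem.alloc_bytes(1 << 20, 16, allocator)
	fmt.println(len(data), err) // 0 Out_Of_Memory
}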
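Taken together, the procedures documented in this patch form a small lifecycle: initialize the stack, wrap it in an Allocator, allocate, free, destroy. A usage sketch of the dynamic configuration, not part of this patch, assuming the defaults visible above (4 MiB blocks, context.allocator as the backing block allocator):

package main

import "core:fmt"
import "core:mem"

main :: proc() {
	stack: mem.Rollback_Stack

	// Grow on demand by chaining blocks from context.allocator;
	// block_size defaults to ROLLBACK_STACK_DEFAULT_BLOCK_SIZE (4 MiB).
	if err := mem.rollback_stack_init_dynamic(&stack); err != nil {
		fmt.eprintln("init failed:", err)
		return
	}
	defer mem.rollback_stack_destroy(&stack)

	allocator := mem.rollback_stack_allocator(&stack)

	// Each allocation pays the 8-byte header plus alignment padding.
	data, err := make([]int, 128, allocator)
	if err != nil {
		fmt.eprintln("alloc failed:", err)
		return
	}
	fmt.println("allocated", len(data), "ints")

	// Individual frees are supported, which is what the Tracking
	// Allocator requires when it wraps this allocator.
	delete(data, allocator)
}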