From 50dffaf131452f64a2b28477718850ad2a8b78f2 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Mon, 27 May 2024 16:29:34 -0400 Subject: Add `mem.Rollback_Stack` --- core/mem/rollback_stack_allocator.odin | 319 +++++++++++++++++++++++++++++++++ 1 file changed, 319 insertions(+) create mode 100644 core/mem/rollback_stack_allocator.odin (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin new file mode 100644 index 000000000..bf397d2c8 --- /dev/null +++ b/core/mem/rollback_stack_allocator.odin @@ -0,0 +1,319 @@ +package mem + +// The Rollback Stack Allocator was designed for the test runner to be fast, +// able to grow, and respect the Tracking Allocator's requirement for +// individual frees. It is not overly concerned with fragmentation, however. +// +// It has support for expansion when configured with a block allocator and +// limited support for out-of-order frees. +// +// Allocation has constant-time best and usual case performance. +// At worst, it is linear according to the number of memory blocks. +// +// Allocation follows a first-fit strategy when there are multiple memory +// blocks. +// +// Freeing has constant-time best and usual case performance. +// At worst, it is linear according to the number of memory blocks and number +// of freed items preceding the last item in a block. +// +// Resizing has constant-time performance, if it's the last item in a block, or +// the new size is smaller. Naturally, this becomes linear-time if there are +// multiple blocks to search for the pointer's owning block. Otherwise, the +// allocator defaults to a combined alloc & free operation internally. +// +// Out-of-order freeing is accomplished by collapsing a run of freed items +// from the last allocation backwards. +// +// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy +// the requested alignment. + +import "base:runtime" + +ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte + +// This limitation is due to the size of `prev_ptr`, but it is only for the +// head block; any allocation in excess of the allocator's `block_size` is +// valid, so long as the block allocator can handle it. +// +// This is because allocations over the block size are not split up if the item +// within is freed; they are immediately returned to the block allocator. 
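+//
+// (`prev_ptr` occupies only 31 bits of the 64-bit `Rollback_Stack_Header`,
+// which is what bounds how far back into the head block a previous allocation
+// can be addressed; hence the cap below.)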
+ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 1 * Gigabyte + + +Rollback_Stack_Header :: bit_field u64 { + prev_offset: int | 32, + is_free: bool | 1, + prev_ptr: int | 31, +} + +Rollback_Stack_Block :: struct { + next_block: ^Rollback_Stack_Block, + last_alloc: rawptr, + offset: int, + buffer: []byte, +} + +Rollback_Stack :: struct { + head: ^Rollback_Stack_Block, + block_size: int, + block_allocator: Allocator, +} + + +@(private="file") +@(require_results) +rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { + start := cast(uintptr)raw_data(block.buffer) + end := cast(uintptr)raw_data(block.buffer) + cast(uintptr)block.offset + return start < cast(uintptr)ptr && cast(uintptr)ptr <= end +} + +@(private="file") +@(require_results) +rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( + parent: ^Rollback_Stack_Block, + block: ^Rollback_Stack_Block, + header: ^Rollback_Stack_Header, + err: Allocator_Error, +) { + for block = stack.head; block != nil; block = block.next_block { + if rb_ptr_in_bounds(block, ptr) { + header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header)) + return + } + parent = block + } + return nil, nil, nil, .Invalid_Pointer +} + +@(private="file") +@(require_results) +rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( + block: ^Rollback_Stack_Block, + header: ^Rollback_Stack_Header, + ok: bool, +) { + for block = stack.head; block != nil; block = block.next_block { + if block.last_alloc == ptr { + header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header)) + return block, header, true + } + } + return nil, nil, false +} + +@(private="file") +rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) { + header := header + for block.offset > 0 && header.is_free { + block.offset = header.prev_offset + block.last_alloc = cast(rawptr)(cast(uintptr)raw_data(block.buffer) + cast(uintptr)header.prev_ptr) + header = cast(^Rollback_Stack_Header)(cast(uintptr)raw_data(block.buffer) + cast(uintptr)header.prev_ptr - size_of(Rollback_Stack_Header)) + } +} + +@(private="file") +@(require_results) +rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { + parent, block, header := rb_find_ptr(stack, ptr) or_return + if header.is_free { + return .Invalid_Pointer + } + header.is_free = true + if block.last_alloc == ptr { + block.offset = header.prev_offset + rb_rollback_block(block, header) + } + if parent != nil && block.offset == 0 { + parent.next_block = block.next_block + runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator) + } + return nil +} + +@(private="file") +rb_free_all :: proc(stack: ^Rollback_Stack) { + for block := stack.head.next_block; block != nil; /**/ { + next_block := block.next_block + runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator) + block = next_block + } + + stack.head.next_block = nil + stack.head.last_alloc = nil + stack.head.offset = 0 +} + +@(private="file") +@(require_results) +rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { + if ptr != nil { + if block, _, ok := rb_find_last_alloc(stack, ptr); ok { + if block.offset + (size - old_size) < len(block.buffer) { + block.offset += (size - old_size) + #no_bounds_check return (cast([^]byte)ptr)[:size], nil + } + } + } + + result = rb_alloc(stack, size, alignment) or_return 
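+	// `rb_alloc` has already succeeded here thanks to `or_return`; copy the
+	// old contents into the fresh allocation, then release the old one.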
+ runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size) + err = rb_free(stack, ptr) + + return +} + +@(private="file") +@(require_results) +rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) { + parent: ^Rollback_Stack_Block + for block := stack.head; /**/; block = block.next_block { + if block == nil { + if stack.block_allocator.procedure == nil { + return nil, .Out_Of_Memory + } + + minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1 + new_block_size := max(minimum_size_required, stack.block_size) + block = rb_make_block(new_block_size, stack.block_allocator) or_return + parent.next_block = block + } + + start := cast(uintptr)raw_data(block.buffer) + cast(uintptr)block.offset + padding := calc_padding_with_header(start, cast(uintptr)alignment, size_of(Rollback_Stack_Header)) + + if block.offset + padding + size > len(block.buffer) { + parent = block + continue + } + + header := cast(^Rollback_Stack_Header)(start + cast(uintptr)padding - size_of(Rollback_Stack_Header)) + ptr := (cast([^]byte)(start + cast(uintptr)padding)) + + header^ = { + prev_offset = block.offset, + prev_ptr = max(0, cast(int)(cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer))), + is_free = false, + } + + block.last_alloc = ptr + block.offset += padding + size + + if len(block.buffer) > stack.block_size { + // This block exceeds the allocator's standard block size and is considered a singleton. + // Prevent any further allocations on it. + block.offset = len(block.buffer) + } + + #no_bounds_check return ptr[:size], nil + } + + return nil, .Out_Of_Memory +} + +@(private="file") +@(require_results) +rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) { + buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return + + block = cast(^Rollback_Stack_Block)raw_data(buffer) + #no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):] + return +} + + +rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte) { + MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr) + assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.") + + block := cast(^Rollback_Stack_Block)raw_data(buffer) + block^ = {} + #no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):] + + stack^ = {} + stack.head = block + stack.block_size = len(block.buffer) +} + +rollback_stack_init_dynamic :: proc( + stack: ^Rollback_Stack, + block_size := ROLLBACK_STACK_DEFAULT_BLOCK_SIZE, + block_allocator := context.allocator, +) -> Allocator_Error { + assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.") + assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 1 gigabyte.") + + block := rb_make_block(block_size, block_allocator) or_return + + stack^ = {} + stack.head = block + stack.block_size = block_size + stack.block_allocator = block_allocator + + return nil +} + +rollback_stack_init :: proc { + rollback_stack_init_buffered, + rollback_stack_init_dynamic, +} + +rollback_stack_destroy :: proc(stack: ^Rollback_Stack) { + if stack.block_allocator.procedure != nil { + rb_free_all(stack) + free(stack.head, stack.block_allocator) + } + stack^ = {} +} + +@(require_results) 
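+// Wraps the given `Rollback_Stack` in an `Allocator`. The stack should be
+// initialized with one of the `rollback_stack_init` procedures before the
+// returned allocator is used.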
+rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { + return Allocator { + data = stack, + procedure = rollback_stack_allocator_proc, + } +} + +@(require_results) +rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, old_size: int, location := #caller_location, +) -> (result: []byte, err: Allocator_Error) { + stack := cast(^Rollback_Stack)allocator_data + + switch mode { + case .Alloc, .Alloc_Non_Zeroed: + assert(is_power_of_two(cast(uintptr)alignment), "alignment must be a power of two", location) + result = rb_alloc(stack, size, alignment) or_return + + if mode == .Alloc { + zero_slice(result) + } + + case .Free: + err = rb_free(stack, old_memory) + + case .Free_All: + rb_free_all(stack) + + case .Resize, .Resize_Non_Zeroed: + result = rb_resize(stack, old_memory, old_size, size, alignment) or_return + + #no_bounds_check if mode == .Resize && size > old_size { + zero_slice(result[old_size:]) + } + + case .Query_Features: + set := (^Allocator_Mode_Set)(old_memory) + if set != nil { + set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed} + } + return nil, nil + + case .Query_Info: + return nil, .Mode_Not_Implemented + } + + return +} -- cgit v1.2.3 From 568b746c9897ea4b343d454309d6d1275a4a5320 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Tue, 28 May 2024 16:39:20 -0400 Subject: Fix indentation --- core/mem/rollback_stack_allocator.odin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index bf397d2c8..6fa86ab0b 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -292,7 +292,7 @@ rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mo } case .Free: - err = rb_free(stack, old_memory) + err = rb_free(stack, old_memory) case .Free_All: rb_free_all(stack) -- cgit v1.2.3 From 0f675fa4368253e8ebdf9dad325bbba2101ecd22 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Tue, 28 May 2024 19:36:20 -0400 Subject: Use `uintptr` where applicable in `mem.Rollback_Stack` --- core/mem/rollback_stack_allocator.odin | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 6fa86ab0b..104ad0e95 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -38,19 +38,19 @@ ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte // // This is because allocations over the block size are not split up if the item // within is freed; they are immediately returned to the block allocator. 
-ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 1 * Gigabyte +ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte Rollback_Stack_Header :: bit_field u64 { - prev_offset: int | 32, - is_free: bool | 1, - prev_ptr: int | 31, + prev_offset: uintptr | 32, + is_free: bool | 1, + prev_ptr: uintptr | 31, } Rollback_Stack_Block :: struct { next_block: ^Rollback_Stack_Block, last_alloc: rawptr, - offset: int, + offset: uintptr, buffer: []byte, } @@ -65,7 +65,7 @@ Rollback_Stack :: struct { @(require_results) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { start := cast(uintptr)raw_data(block.buffer) - end := cast(uintptr)raw_data(block.buffer) + cast(uintptr)block.offset + end := cast(uintptr)raw_data(block.buffer) + block.offset return start < cast(uintptr)ptr && cast(uintptr)ptr <= end } @@ -150,8 +150,8 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { if ptr != nil { if block, _, ok := rb_find_last_alloc(stack, ptr); ok { - if block.offset + (size - old_size) < len(block.buffer) { - block.offset += (size - old_size) + if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) { + block.offset += cast(uintptr)size - cast(uintptr)old_size #no_bounds_check return (cast([^]byte)ptr)[:size], nil } } @@ -180,10 +180,10 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt parent.next_block = block } - start := cast(uintptr)raw_data(block.buffer) + cast(uintptr)block.offset - padding := calc_padding_with_header(start, cast(uintptr)alignment, size_of(Rollback_Stack_Header)) + start := cast(uintptr)raw_data(block.buffer) + block.offset + padding := cast(uintptr)calc_padding_with_header(start, cast(uintptr)alignment, size_of(Rollback_Stack_Header)) - if block.offset + padding + size > len(block.buffer) { + if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) { parent = block continue } @@ -193,17 +193,17 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt header^ = { prev_offset = block.offset, - prev_ptr = max(0, cast(int)(cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer))), + prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer), is_free = false, } block.last_alloc = ptr - block.offset += padding + size + block.offset += padding + cast(uintptr)size if len(block.buffer) > stack.block_size { // This block exceeds the allocator's standard block size and is considered a singleton. // Prevent any further allocations on it. 
- block.offset = len(block.buffer) + block.offset = cast(uintptr)len(block.buffer) } #no_bounds_check return ptr[:size], nil @@ -242,7 +242,7 @@ rollback_stack_init_dynamic :: proc( block_allocator := context.allocator, ) -> Allocator_Error { assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.") - assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 1 gigabyte.") + assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.") block := rb_make_block(block_size, block_allocator) or_return -- cgit v1.2.3 From 09ef08f0354cd68ca589e3c6d094f08d08a48d78 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Tue, 28 May 2024 18:23:05 -0400 Subject: Add more sanity checking to `mem.Rollback_Stack` --- core/mem/rollback_stack_allocator.odin | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 104ad0e95..a3f6647cf 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -150,6 +150,10 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { if ptr != nil { if block, _, ok := rb_find_last_alloc(stack, ptr); ok { + // `block.offset` should never underflow because it is contingent + // on `old_size` in the first place, assuming sane arguments. + assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.") + if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) { block.offset += cast(uintptr)size - cast(uintptr)old_size #no_bounds_check return (cast([^]byte)ptr)[:size], nil @@ -169,6 +173,10 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) { parent: ^Rollback_Stack_Block for block := stack.head; /**/; block = block.next_block { + when !ODIN_DISABLE_ASSERT { + allocated_new_block: bool + } + if block == nil { if stack.block_allocator.procedure == nil { return nil, .Out_Of_Memory @@ -178,12 +186,20 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt new_block_size := max(minimum_size_required, stack.block_size) block = rb_make_block(new_block_size, stack.block_allocator) or_return parent.next_block = block + when !ODIN_DISABLE_ASSERT { + allocated_new_block = true + } } start := cast(uintptr)raw_data(block.buffer) + block.offset padding := cast(uintptr)calc_padding_with_header(start, cast(uintptr)alignment, size_of(Rollback_Stack_Header)) if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) { + when !ODIN_DISABLE_ASSERT { + if allocated_new_block { + panic("Rollback Stack Allocator allocated a new block but did not use it.") + } + } parent = block continue } @@ -223,9 +239,9 @@ rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stac } -rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte) { +rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := 
#caller_location) { MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr) - assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.") + assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location) block := cast(^Rollback_Stack_Block)raw_data(buffer) block^ = {} @@ -240,9 +256,10 @@ rollback_stack_init_dynamic :: proc( stack: ^Rollback_Stack, block_size := ROLLBACK_STACK_DEFAULT_BLOCK_SIZE, block_allocator := context.allocator, + location := #caller_location, ) -> Allocator_Error { - assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.") - assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.") + assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location) + assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location) block := rb_make_block(block_size, block_allocator) or_return @@ -284,7 +301,8 @@ rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mo switch mode { case .Alloc, .Alloc_Non_Zeroed: - assert(is_power_of_two(cast(uintptr)alignment), "alignment must be a power of two", location) + assert(size >= 0, "Size must be positive or zero.", location) + assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location) result = rb_alloc(stack, size, alignment) or_return if mode == .Alloc { @@ -298,6 +316,9 @@ rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mo rb_free_all(stack) case .Resize, .Resize_Non_Zeroed: + assert(size >= 0, "Size must be positive or zero.", location) + assert(old_size >= 0, "Old size must be positive or zero.", location) + assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location) result = rb_resize(stack, old_memory, old_size, size, alignment) or_return #no_bounds_check if mode == .Resize && size > old_size { -- cgit v1.2.3 From eadfbb13185a94a30d87c52eb50e83fcec8e0d52 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Tue, 28 May 2024 19:49:56 -0400 Subject: Forbid singleton allocations from shrinking their block offset --- core/mem/rollback_stack_allocator.odin | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index a3f6647cf..b86f514ec 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -155,7 +155,11 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.") if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) { - block.offset += cast(uintptr)size - cast(uintptr)old_size + // Prevent singleton allocations from fragmenting by forbidding + // them to shrink, removing the possibility of overflow bugs. 
+ if len(block.buffer) <= stack.block_size { + block.offset += cast(uintptr)size - cast(uintptr)old_size + } #no_bounds_check return (cast([^]byte)ptr)[:size], nil } } -- cgit v1.2.3 From 89d8df28bec7b5aebaac1e38ef642754ef57e1f1 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Tue, 28 May 2024 19:58:35 -0400 Subject: Combine multi-line attributes onto one line --- core/mem/rollback_stack_allocator.odin | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index b86f514ec..75b3cd745 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -61,16 +61,14 @@ Rollback_Stack :: struct { } -@(private="file") -@(require_results) +@(private="file", require_results) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { start := cast(uintptr)raw_data(block.buffer) end := cast(uintptr)raw_data(block.buffer) + block.offset return start < cast(uintptr)ptr && cast(uintptr)ptr <= end } -@(private="file") -@(require_results) +@(private="file", require_results) rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( parent: ^Rollback_Stack_Block, block: ^Rollback_Stack_Block, @@ -87,8 +85,7 @@ rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( return nil, nil, nil, .Invalid_Pointer } -@(private="file") -@(require_results) +@(private="file", require_results) rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> ( block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header, @@ -113,8 +110,7 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_ } } -@(private="file") -@(require_results) +@(private="file", require_results) rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { parent, block, header := rb_find_ptr(stack, ptr) or_return if header.is_free { @@ -145,8 +141,7 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { stack.head.offset = 0 } -@(private="file") -@(require_results) +@(private="file", require_results) rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { if ptr != nil { if block, _, ok := rb_find_last_alloc(stack, ptr); ok { @@ -172,8 +167,7 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment return } -@(private="file") -@(require_results) +@(private="file", require_results) rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) { parent: ^Rollback_Stack_Block for block := stack.head; /**/; block = block.next_block { @@ -232,8 +226,7 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt return nil, .Out_Of_Memory } -@(private="file") -@(require_results) +@(private="file", require_results) rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) { buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return -- cgit v1.2.3 From bf42e39b1ce77da0bc45f64ff35322e618a59b9c Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Wed, 29 May 2024 16:19:06 -0400 Subject: Be specific about `int` size for `Rollback_Stack` asserts This should fix tests failing on 32-bit platforms. 
--- core/mem/rollback_stack_allocator.odin | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 75b3cd745..d159897cd 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -251,12 +251,16 @@ rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, loc rollback_stack_init_dynamic :: proc( stack: ^Rollback_Stack, - block_size := ROLLBACK_STACK_DEFAULT_BLOCK_SIZE, + block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE, block_allocator := context.allocator, location := #caller_location, ) -> Allocator_Error { assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location) - assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location) + when size_of(int) > 4 { + // It's impossible to specify an argument in excess when your integer + // size is insufficient; check only on platforms with big enough ints. + assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location) + } block := rb_make_block(block_size, block_allocator) or_return -- cgit v1.2.3 From ccdbd4b6cecfb5712edac97d9b22cc37c141a542 Mon Sep 17 00:00:00 2001 From: Feoramund <161657516+Feoramund@users.noreply.github.com> Date: Sat, 1 Jun 2024 07:30:02 -0400 Subject: Simplify casts in `mem.Rollback_Stack` procs --- core/mem/rollback_stack_allocator.odin | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'core/mem/rollback_stack_allocator.odin') diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index d159897cd..f5e428d87 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -63,9 +63,9 @@ Rollback_Stack :: struct { @(private="file", require_results) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { - start := cast(uintptr)raw_data(block.buffer) - end := cast(uintptr)raw_data(block.buffer) + block.offset - return start < cast(uintptr)ptr && cast(uintptr)ptr <= end + start := raw_data(block.buffer) + end := start[block.offset:] + return start < ptr && ptr <= end } @(private="file", require_results) @@ -105,8 +105,8 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_ header := header for block.offset > 0 && header.is_free { block.offset = header.prev_offset - block.last_alloc = cast(rawptr)(cast(uintptr)raw_data(block.buffer) + cast(uintptr)header.prev_ptr) - header = cast(^Rollback_Stack_Header)(cast(uintptr)raw_data(block.buffer) + cast(uintptr)header.prev_ptr - size_of(Rollback_Stack_Header)) + block.last_alloc = raw_data(block.buffer)[header.prev_ptr:] + header = cast(^Rollback_Stack_Header)(raw_data(block.buffer)[header.prev_ptr - size_of(Rollback_Stack_Header):]) } } @@ -189,8 +189,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt } } - start := cast(uintptr)raw_data(block.buffer) + block.offset - padding := cast(uintptr)calc_padding_with_header(start, cast(uintptr)alignment, size_of(Rollback_Stack_Header)) + start := raw_data(block.buffer)[block.offset:] + padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, 
size_of(Rollback_Stack_Header)) if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) { when !ODIN_DISABLE_ASSERT { @@ -202,8 +202,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt continue } - header := cast(^Rollback_Stack_Header)(start + cast(uintptr)padding - size_of(Rollback_Stack_Header)) - ptr := (cast([^]byte)(start + cast(uintptr)padding)) + header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):]) + ptr := start[padding:] header^ = { prev_offset = block.offset, -- cgit v1.2.3
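For reference, a minimal usage sketch of the allocator introduced by these patches; the `example` package, `main` procedure, and allocation sizes are illustrative assumptions rather than code taken from the commits:

	package example

	import "core:mem"

	main :: proc() {
		// Dynamically growing stack backed by 4-Megabyte blocks (the default),
		// with blocks obtained from `context.allocator`.
		stack: mem.Rollback_Stack
		if mem.rollback_stack_init_dynamic(&stack) != .None {
			return
		}
		defer mem.rollback_stack_destroy(&stack)

		context.allocator = mem.rollback_stack_allocator(&stack)

		a := new(int)         // individually tracked allocation
		b := make([]byte, 64) // a later allocation on top of it
		free(a)               // out-of-order free: `a` is only marked as free
		delete(b)             // freeing the last allocation rolls back over `a` too
	}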