author    Jeroen van Rijn <Kelimion@users.noreply.github.com>  2025-04-14 17:13:27 +0200
committer Jeroen van Rijn <Kelimion@users.noreply.github.com>  2025-04-14 17:13:27 +0200
commit    7088284ff43d0c69553a4619b37dfdf85616cf45
tree      d67700146569c2956cadbded6ea290a2db07d704 /core/mem
parent    0e9cd0fb6abab5b0ac8ef8328423594ebd8f2060
Refactor `core:mem/tlsf`, add `free_all` support.
TODO: Allow the TLSF allocator to add additional pools by calling its backing allocator when it would otherwise OOM.
Diffstat (limited to 'core/mem')
-rw-r--r--  core/mem/tlsf/tlsf.odin          |  40
-rw-r--r--  core/mem/tlsf/tlsf_internal.odin | 508
2 files changed, 285 insertions, 263 deletions
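
A short usage sketch may help put the commit in context. The example is hypothetical and not part of the commit: it assumes the package is imported as `core:mem/tlsf`, that the `Error` enum's zero value is named `None`, and that wrapping the handle together with the public `allocator_proc` in a `runtime.Allocator` is the intended entry point (only `allocator_proc` itself appears in this diff).

    package tlsf_example

    import "base:runtime"
    import "core:mem/tlsf"

    main :: proc() {
        // Initialise from a backing allocator; note that `new_pool_size`
        // was dropped from this signature by the commit below.
        t: tlsf.Allocator
        if err := tlsf.init_from_allocator(&t, context.allocator, 64 * 1024); err != .None {
            return
        }
        defer tlsf.destroy(&t)

        // Hypothetical wrapper around the package's public `allocator_proc`.
        a := runtime.Allocator{procedure = tlsf.allocator_proc, data = &t}

        buf, _ := runtime.mem_alloc(128, 16, a)
        _ = buf

        // As of this commit, .Free_All resets the free lists and re-adds the
        // allocator's pools instead of returning .Mode_Not_Implemented.
        free_all(a)
    }
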
diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin
index a525f5113..e5ab95947 100644
--- a/core/mem/tlsf/tlsf.odin
+++ b/core/mem/tlsf/tlsf.odin
@@ -10,6 +10,7 @@
// package mem_tlsf implements a Two Level Segregated Fit memory allocator.
package mem_tlsf
+import "base:intrinsics"
import "base:runtime"
Error :: enum byte {
@@ -42,7 +43,7 @@ Allocator :: struct {
// If we're expected to grow when we run out of memory,
// how much should we ask the backing allocator for?
- new_pool_size: int,
+ new_pool_size: uint,
}
#assert(size_of(Allocator) % ALIGN_SIZE == 0)
@@ -74,12 +75,11 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
allocator = {},
}
- clear(control)
- return pool_add(control, buf[:])
+ return free_all(control)
}
@(require_results)
-init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
+init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int) -> Error {
assert(control != nil)
pool_bytes := align_up(uint(initial_pool_size), ALIGN_SIZE) + INITIAL_POOL_OVERHEAD
if pool_bytes < BLOCK_SIZE_MIN {
@@ -98,8 +98,9 @@ init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, ini
allocator = backing,
}
- clear(control)
- return pool_add(control, buf[:])
+ // TODO(Jeroen): Add automatically growing the pools from the backing allocator
+
+ return free_all(control)
}
init :: proc{init_from_buffer, init_from_allocator}
@@ -112,8 +113,6 @@ destroy :: proc(control: ^Allocator) {
	// No need to call `pool_remove` or anything, as they're embedded in the backing memory.
// We do however need to free the `Pool` tracking entities and the backing memory itself.
- // As `Allocator` is embedded in the first backing slice, the `control` pointer will be
- // invalid after this call.
for p := control.pool.next; p != nil; {
next := p.next
@@ -145,9 +144,8 @@ allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
return nil, nil
case .Free_All:
- // NOTE: this doesn't work right at the moment, Jeroen has it on his to-do list :)
- // clear(control)
- return nil, .Mode_Not_Implemented
+ free_all(control)
+ return nil, nil
case .Resize:
return resize(control, old_memory, uint(old_size), uint(size), uint(alignment))
@@ -168,3 +166,23 @@ allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
return nil, nil
}
+
+// Exported solely to facilitate testing
+@(require_results)
+ffs :: proc "contextless" (word: u32) -> (bit: i32) {
+ return -1 if word == 0 else i32(intrinsics.count_trailing_zeros(word))
+}
+
+// Exported solely to facilitate testing
+@(require_results)
+fls :: proc "contextless" (word: u32) -> (bit: i32) {
+ N :: (size_of(u32) * 8) - 1
+ return i32(N - intrinsics.count_leading_zeros(word))
+}
+
+// Exported solely to facilitate testing
+@(require_results)
+fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
+ N :: (size_of(uint) * 8) - 1
+ return i32(N - intrinsics.count_leading_zeros(size))
+}
\ No newline at end of file
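
Since `ffs`, `fls`, and `fls_uint` are now exported solely for the test suite, their contract is worth spelling out. A minimal sketch of what a test might assert, assuming the import path `core:mem/tlsf` (the test file itself is not part of this diff):

    package tlsf_bits_test

    import "core:mem/tlsf"

    main :: proc() {
        assert(tlsf.ffs(0) == -1)            // find-first-set: no bit set
        assert(tlsf.ffs(0b1000) == 3)        // lowest set bit
        assert(tlsf.fls(0) == -1)            // 31 - count_leading_zeros(0) wraps to -1
        assert(tlsf.fls(0b1000) == 3)        // highest set bit
        assert(tlsf.fls_uint(1 << 20) == 20) // same, for the platform word size
    }
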
diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin
index 1f04515bf..53c658f90 100644
--- a/core/mem/tlsf/tlsf_internal.odin
+++ b/core/mem/tlsf/tlsf_internal.odin
@@ -7,7 +7,6 @@
Jeroen van Rijn: Source port
*/
-
package mem_tlsf
import "base:intrinsics"
@@ -106,6 +105,220 @@ bits for `FL_INDEX`.
BLOCK_SIZE_MIN :: uint(size_of(Block_Header) - size_of(^Block_Header))
BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
+// Clear control structure and point all empty lists at the null block
+@(private)
+free_all :: proc(control: ^Allocator) -> (err: Error) {
+ // Clear internal structures
+ control.block_null.next_free = &control.block_null
+ control.block_null.prev_free = &control.block_null
+ control.fl_bitmap = 0
+ for i in 0..<FL_INDEX_COUNT {
+ control.sl_bitmap[i] = 0
+ for j in 0..<SL_INDEX_COUNT {
+ control.blocks[i][j] = &control.block_null
+ }
+ }
+
+ // Add backing pool(s)
+ for p := &control.pool; p != nil; p = p.next {
+ pool_add(control, p.data) or_return
+ }
+ return
+}
+
+@(private, require_results)
+pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
+ assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
+
+ pool_overhead := POOL_OVERHEAD
+ pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
+
+ if pool_bytes < BLOCK_SIZE_MIN {
+ return .Backing_Buffer_Too_Small
+ } else if pool_bytes > BLOCK_SIZE_MAX {
+ return .Backing_Buffer_Too_Large
+ }
+
+ // Create the main free block. Offset the start of the block slightly,
+ // so that the `prev_phys_block` field falls outside of the pool -
+ // it will never be used.
+ block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+ block_set_size(block, pool_bytes)
+ block_set_free(block)
+ block_set_prev_used(block)
+ block_insert(control, block)
+
+ // Split the block to create a zero-size sentinel block
+ next := block_link_next(block)
+ block_set_size(next, 0)
+ block_set_used(next)
+ block_set_prev_free(next)
+
+ return
+}
+
+@(private)
+pool_remove :: proc(control: ^Allocator, pool: []u8) {
+ block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+ assert(block_is_free(block), "Block should be free")
+ assert(!block_is_free(block_next(block)), "Next block should not be free")
+ assert(block_size(block_next(block)) == 0, "Next block size should be zero")
+
+ fl, sl := mapping_insert(block_size(block))
+ remove_free_block(control, block, fl, sl)
+}
+
+@(private, require_results)
+alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ assert(control != nil)
+ adjust := adjust_request_size(size, ALIGN_SIZE)
+
+ GAP_MINIMUM :: size_of(Block_Header)
+ size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
+
+ aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
+ if aligned_size == 0 && size > 0 {
+ return nil, .Out_Of_Memory
+ }
+
+ block := block_locate_free(control, aligned_size)
+ if block == nil {
+ return nil, .Out_Of_Memory
+ }
+ ptr := block_to_ptr(block)
+ aligned := align_ptr(ptr, align)
+ gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+
+ if gap != 0 && gap < GAP_MINIMUM {
+ gap_remain := GAP_MINIMUM - gap
+ offset := uintptr(max(gap_remain, align))
+ next_aligned := rawptr(uintptr(aligned) + offset)
+
+ aligned = align_ptr(next_aligned, align)
+
+ gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+ }
+
+ if gap != 0 {
+ assert(gap >= GAP_MINIMUM, "gap size too small")
+ block = block_trim_free_leading(control, block, gap)
+ }
+
+ return block_prepare_used(control, block, adjust)
+}
+
+@(private, require_results)
+alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ res, err = alloc_bytes_non_zeroed(control, size, align)
+ if err == nil {
+ intrinsics.mem_zero(raw_data(res), len(res))
+ }
+ return
+}
+
+
+free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
+ assert(control != nil)
+ // `size` is currently ignored
+ if ptr == nil {
+ return
+ }
+
+ block := block_from_ptr(ptr)
+ assert(!block_is_free(block), "block already marked as free") // double free
+ block_mark_as_free(block)
+ block = block_merge_prev(control, block)
+ block = block_merge_next(control, block)
+ block_insert(control, block)
+}
+
+
+@(private, require_results)
+resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ assert(control != nil)
+ if ptr != nil && new_size == 0 {
+ free_with_size(control, ptr, old_size)
+ return
+ } else if ptr == nil {
+ return alloc_bytes(control, new_size, alignment)
+ }
+
+ block := block_from_ptr(ptr)
+ next := block_next(block)
+
+ curr_size := block_size(block)
+ combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+ adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+ assert(!block_is_free(block), "block already marked as free") // double free
+
+ min_size := min(curr_size, new_size, old_size)
+
+ if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+ res = alloc_bytes(control, new_size, alignment) or_return
+ if res != nil {
+ copy(res, ([^]byte)(ptr)[:min_size])
+ free_with_size(control, ptr, curr_size)
+ }
+ return
+ }
+ if adjust > curr_size {
+ _ = block_merge_next(control, block)
+ block_mark_as_used(block)
+ }
+
+ block_trim_used(control, block, adjust)
+ res = ([^]byte)(ptr)[:new_size]
+
+ if min_size < new_size {
+ to_zero := ([^]byte)(ptr)[min_size:new_size]
+ runtime.mem_zero(raw_data(to_zero), len(to_zero))
+ }
+ return
+}
+
+@(private, require_results)
+resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ assert(control != nil)
+ if ptr != nil && new_size == 0 {
+ free_with_size(control, ptr, old_size)
+ return
+ } else if ptr == nil {
+ return alloc_bytes_non_zeroed(control, new_size, alignment)
+ }
+
+ block := block_from_ptr(ptr)
+ next := block_next(block)
+
+ curr_size := block_size(block)
+ combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+ adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+ assert(!block_is_free(block), "block already marked as free") // double free
+
+ min_size := min(curr_size, new_size, old_size)
+
+ if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+ res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
+ if res != nil {
+ copy(res, ([^]byte)(ptr)[:min_size])
+ free_with_size(control, ptr, old_size)
+ }
+ return
+ }
+
+ if adjust > curr_size {
+ _ = block_merge_next(control, block)
+ block_mark_as_used(block)
+ }
+
+ block_trim_used(control, block, adjust)
+ res = ([^]byte)(ptr)[:new_size]
+ return
+}
+
/*
TLSF achieves O(1) cost for `alloc` and `free` operations by limiting
the search for a free block to a free list of guaranteed size
@@ -116,106 +329,95 @@ BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
NOTE: TLSF spec relies on ffs/fls returning a value in the range 0..31.
*/
-@(require_results)
-ffs :: proc "contextless" (word: u32) -> (bit: i32) {
- return -1 if word == 0 else i32(intrinsics.count_trailing_zeros(word))
-}
-
-@(require_results)
-fls :: proc "contextless" (word: u32) -> (bit: i32) {
- N :: (size_of(u32) * 8) - 1
- return i32(N - intrinsics.count_leading_zeros(word))
-}
-
-@(require_results)
-fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
- N :: (size_of(uint) * 8) - 1
- return i32(N - intrinsics.count_leading_zeros(size))
-}
-
-@(require_results)
+@(private, require_results)
block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
}
+@(private)
block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
old_size := block.size
block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
}
-@(require_results)
+@(private, require_results)
block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
return block_size(block) == 0
}
-@(require_results)
+@(private, require_results)
block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
}
+@(private)
block_set_free :: proc "contextless" (block: ^Block_Header) {
block.size |= BLOCK_HEADER_FREE
}
+@(private)
block_set_used :: proc "contextless" (block: ^Block_Header) {
block.size &~= BLOCK_HEADER_FREE
}
-@(require_results)
+@(private, require_results)
block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
}
+@(private)
block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
block.size |= BLOCK_HEADER_PREV_FREE
}
+@(private)
block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
block.size &~= BLOCK_HEADER_PREV_FREE
}
-@(require_results)
+@(private, require_results)
block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
}
-@(require_results)
+@(private, require_results)
block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) {
return rawptr(uintptr(block) + BLOCK_START_OFFSET)
}
// Return location of next block after block of given size.
-@(require_results)
+@(private, require_results)
offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
return (^Block_Header)(uintptr(ptr) + uintptr(size))
}
-@(require_results)
+@(private, require_results)
offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
return (^Block_Header)(uintptr(ptr) - uintptr(size))
}
// Return location of previous block.
-@(require_results)
+@(private, require_results)
block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
assert(block_is_prev_free(block), "previous block must be free")
return block.prev_phys_block
}
// Return location of next existing block.
-@(require_results)
+@(private, require_results)
block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
}
// Link a new block with its physical neighbor, return the neighbor.
-@(require_results)
+@(private, require_results)
block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
next = block_next(block)
next.prev_phys_block = block
return
}
+@(private)
block_mark_as_free :: proc(block: ^Block_Header) {
// Link the block to the next block, first.
next := block_link_next(block)
@@ -223,25 +425,26 @@ block_mark_as_free :: proc(block: ^Block_Header) {
block_set_free(block)
}
+@(private)
block_mark_as_used :: proc(block: ^Block_Header) {
next := block_next(block)
block_set_prev_used(next)
block_set_used(block)
}
-@(require_results)
+@(private, require_results)
align_up :: proc(x, align: uint) -> (aligned: uint) {
assert(0 == (align & (align - 1)), "must align to a power of two")
return (x + (align - 1)) &~ (align - 1)
}
-@(require_results)
+@(private, require_results)
align_down :: proc(x, align: uint) -> (aligned: uint) {
assert(0 == (align & (align - 1)), "must align to a power of two")
return x - (x & (align - 1))
}
-@(require_results)
+@(private, require_results)
align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
assert(0 == (align & (align - 1)), "must align to a power of two")
align_mask := uintptr(align) - 1
@@ -251,7 +454,7 @@ align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
}
// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(require_results)
+@(private, require_results)
adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
if size == 0 {
return 0
@@ -265,7 +468,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
}
// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(require_results)
+@(private, require_results)
adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
if size == 0 {
return 0, nil
@@ -283,7 +486,7 @@ adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err:
// TLSF utility functions. In most cases these are direct translations of
// the documentation in the research paper.
-@(optimization_mode="favor_size", require_results)
+@(optimization_mode="favor_size", private, require_results)
mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
if size < SMALL_BLOCK_SIZE {
// Store small blocks in first list.
@@ -296,7 +499,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
return
}
-@(optimization_mode="favor_size", require_results)
+@(optimization_mode="favor_size", private, require_results)
mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
rounded = size
if size >= SMALL_BLOCK_SIZE {
@@ -307,12 +510,12 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
}
// This version rounds up to the next block size (for allocations)
-@(optimization_mode="favor_size", require_results)
+@(optimization_mode="favor_size", private, require_results)
mapping_search :: proc(size: uint) -> (fl, sl: i32) {
return mapping_insert(mapping_round(size))
}
-@(require_results)
+@(private, require_results)
search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
// First, search for a block in the list associated with the given fl/sl index.
fl := fli^; sl := sli^
@@ -339,6 +542,7 @@ search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^B
}
// Remove a free block from the free list.
+@(private)
remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
prev := block.prev_free
next := block.next_free
@@ -364,6 +568,7 @@ remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
}
// Insert a free block into the free block list.
+@(private)
insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
current := control.blocks[fl][sl]
assert(current != nil, "free lists cannot have a nil entry")
@@ -381,24 +586,26 @@ insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
}
// Remove a given block from the free list.
+@(private)
block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
fl, sl := mapping_insert(block_size(block))
remove_free_block(control, block, fl, sl)
}
// Insert a given block into the free list.
+@(private)
block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
fl, sl := mapping_insert(block_size(block))
insert_free_block(control, block, fl, sl)
}
-@(require_results)
+@(private, require_results)
block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
return block_size(block) >= size_of(Block_Header) + size
}
// Split a block into two, the second of which is free.
-@(require_results)
+@(private, require_results)
block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
// Calculate the amount of space left in the remaining block.
remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
@@ -419,7 +626,7 @@ block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Head
}
// Absorb a free block's storage into an adjacent previous free block.
-@(require_results)
+@(private, require_results)
block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
assert(!block_is_last(prev), "previous block can't be last")
// Note: Leaves flags untouched.
@@ -429,7 +636,7 @@ block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^B
}
// Merge a just-freed block with an adjacent previous free block.
-@(require_results)
+@(private, require_results)
block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
merged = block
if (block_is_prev_free(block)) {
@@ -443,7 +650,7 @@ block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
}
// Merge a just-freed block with an adjacent free block.
-@(require_results)
+@(private, require_results)
block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
merged = block
next := block_next(block)
@@ -458,6 +665,7 @@ block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
}
// Trim any trailing block space off the end of a free block, return to pool.
+@(private)
block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
assert(block_is_free(block), "block must be free")
if (block_can_split(block, size)) {
@@ -469,6 +677,7 @@ block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
}
// Trim any trailing block space off the end of a used block, return to pool.
+@(private)
block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
assert(!block_is_free(block), "Block must be used")
if (block_can_split(block, size)) {
@@ -482,7 +691,7 @@ block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
}
// Trim leading block space, return to pool.
-@(require_results)
+@(private, require_results)
block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
remaining = block
if block_can_split(block, size) {
@@ -496,7 +705,7 @@ block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size:
return remaining
}
-@(require_results)
+@(private, require_results)
block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
fl, sl: i32
if size != 0 {
@@ -520,7 +729,7 @@ block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Hea
return block
}
-@(require_results)
+@(private, require_results)
block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
if block != nil {
assert(size != 0, "Size must be non-zero")
@@ -529,209 +738,4 @@ block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint
res = ([^]byte)(block_to_ptr(block))[:size]
}
return
-}
-
-// Clear control structure and point all empty lists at the null block
-clear :: proc(control: ^Allocator) {
- control.block_null.next_free = &control.block_null
- control.block_null.prev_free = &control.block_null
-
- control.fl_bitmap = 0
- for i in 0..<FL_INDEX_COUNT {
- control.sl_bitmap[i] = 0
- for j in 0..<SL_INDEX_COUNT {
- control.blocks[i][j] = &control.block_null
- }
- }
-}
-
-@(require_results)
-pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
- assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
-
- pool_overhead := POOL_OVERHEAD
- pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
-
- if pool_bytes < BLOCK_SIZE_MIN {
- return .Backing_Buffer_Too_Small
- } else if pool_bytes > BLOCK_SIZE_MAX {
- return .Backing_Buffer_Too_Large
- }
-
- // Create the main free block. Offset the start of the block slightly,
- // so that the `prev_phys_block` field falls outside of the pool -
- // it will never be used.
- block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
-
- block_set_size(block, pool_bytes)
- block_set_free(block)
- block_set_prev_used(block)
- block_insert(control, block)
-
- // Split the block to create a zero-size sentinel block
- next := block_link_next(block)
- block_set_size(next, 0)
- block_set_used(next)
- block_set_prev_free(next)
- return
-}
-
-pool_remove :: proc(control: ^Allocator, pool: []u8) {
- block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
-
- assert(block_is_free(block), "Block should be free")
- assert(!block_is_free(block_next(block)), "Next block should not be free")
- assert(block_size(block_next(block)) == 0, "Next block size should be zero")
-
- fl, sl := mapping_insert(block_size(block))
- remove_free_block(control, block, fl, sl)
-}
-
-@(require_results)
-alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
- assert(control != nil)
- adjust := adjust_request_size(size, ALIGN_SIZE)
-
- GAP_MINIMUM :: size_of(Block_Header)
- size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
-
- aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
- if aligned_size == 0 && size > 0 {
- return nil, .Out_Of_Memory
- }
-
- block := block_locate_free(control, aligned_size)
- if block == nil {
- return nil, .Out_Of_Memory
- }
- ptr := block_to_ptr(block)
- aligned := align_ptr(ptr, align)
- gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
-
- if gap != 0 && gap < GAP_MINIMUM {
- gap_remain := GAP_MINIMUM - gap
- offset := uintptr(max(gap_remain, align))
- next_aligned := rawptr(uintptr(aligned) + offset)
-
- aligned = align_ptr(next_aligned, align)
-
- gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
- }
-
- if gap != 0 {
- assert(gap >= GAP_MINIMUM, "gap size too small")
- block = block_trim_free_leading(control, block, gap)
- }
-
- return block_prepare_used(control, block, adjust)
-}
-
-@(require_results)
-alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
- res, err = alloc_bytes_non_zeroed(control, size, align)
- if err == nil {
- intrinsics.mem_zero(raw_data(res), len(res))
- }
- return
-}
-
-
-free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
- assert(control != nil)
- // `size` is currently ignored
- if ptr == nil {
- return
- }
-
- block := block_from_ptr(ptr)
- assert(!block_is_free(block), "block already marked as free") // double free
- block_mark_as_free(block)
- block = block_merge_prev(control, block)
- block = block_merge_next(control, block)
- block_insert(control, block)
-}
-
-
-@(require_results)
-resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
- assert(control != nil)
- if ptr != nil && new_size == 0 {
- free_with_size(control, ptr, old_size)
- return
- } else if ptr == nil {
- return alloc_bytes(control, new_size, alignment)
- }
-
- block := block_from_ptr(ptr)
- next := block_next(block)
-
- curr_size := block_size(block)
- combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
- adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
-
- assert(!block_is_free(block), "block already marked as free") // double free
-
- min_size := min(curr_size, new_size, old_size)
-
- if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
- res = alloc_bytes(control, new_size, alignment) or_return
- if res != nil {
- copy(res, ([^]byte)(ptr)[:min_size])
- free_with_size(control, ptr, curr_size)
- }
- return
- }
- if adjust > curr_size {
- _ = block_merge_next(control, block)
- block_mark_as_used(block)
- }
-
- block_trim_used(control, block, adjust)
- res = ([^]byte)(ptr)[:new_size]
-
- if min_size < new_size {
- to_zero := ([^]byte)(ptr)[min_size:new_size]
- runtime.mem_zero(raw_data(to_zero), len(to_zero))
- }
- return
-}
-
-@(require_results)
-resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
- assert(control != nil)
- if ptr != nil && new_size == 0 {
- free_with_size(control, ptr, old_size)
- return
- } else if ptr == nil {
- return alloc_bytes_non_zeroed(control, new_size, alignment)
- }
-
- block := block_from_ptr(ptr)
- next := block_next(block)
-
- curr_size := block_size(block)
- combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
- adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
-
- assert(!block_is_free(block), "block already marked as free") // double free
-
- min_size := min(curr_size, new_size, old_size)
-
- if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
- res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
- if res != nil {
- copy(res, ([^]byte)(ptr)[:min_size])
- free_with_size(control, ptr, old_size)
- }
- return
- }
-
- if adjust > curr_size {
- _ = block_merge_next(control, block)
- block_mark_as_used(block)
- }
-
- block_trim_used(control, block, adjust)
- res = ([^]byte)(ptr)[:new_size]
- return
-}
+}
\ No newline at end of file
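
The comment block above ("TLSF achieves O(1) cost for `alloc` and `free`...") is the heart of the allocator, but `mapping_insert` is only partially visible in this diff. As a reference, here is a self-contained sketch of the classic two-level mapping from the TLSF paper and the C implementation this package ports. The constants are assumptions mirroring a typical configuration; the real values are defined earlier in `tlsf_internal.odin`.

    package tlsf_mapping_sketch

    import "core:fmt"

    // Assumed constants; the real ones live in tlsf_internal.odin.
    SL_INDEX_COUNT_LOG2 :: 5
    SL_INDEX_COUNT      :: 1 << SL_INDEX_COUNT_LOG2  // 32 second-level lists
    FL_INDEX_SHIFT      :: SL_INDEX_COUNT_LOG2 + 3
    SMALL_BLOCK_SIZE    :: uint(1) << FL_INDEX_SHIFT // 256

    fls_uint :: proc(size: uint) -> (bit: i32) {
        for s := size; s != 0; s >>= 1 { bit += 1 }
        return bit - 1 // -1 when size == 0, like the helper above
    }

    // Classic TLSF mapping: `fl` selects the power-of-two range holding `size`,
    // and `sl` subdivides that range into SL_INDEX_COUNT linear steps.
    mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
        if size < SMALL_BLOCK_SIZE {
            // Small blocks all live in the first list, linearly subdivided.
            fl = 0
            sl = i32(size) / i32(SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
        } else {
            fl = fls_uint(size)
            sl = i32(size >> uint(fl - SL_INDEX_COUNT_LOG2)) ~ i32(1 << SL_INDEX_COUNT_LOG2)
            fl -= FL_INDEX_SHIFT - 1
        }
        return
    }

    main :: proc() {
        fl, sl := mapping_insert(1000)
        fmt.println(fl, sl) // 2 30: range [512, 1024), 30th of 32 subdivisions
    }

Because every size class maps to a fixed (fl, sl) pair, locating a free block costs two bitmap scans (the `ffs`/`fls` helpers) plus one list pop, which is what keeps `alloc` and `free` O(1).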