diff options
| author | Jeroen van Rijn <Kelimion@users.noreply.github.com> | 2025-04-14 19:49:55 +0200 |
|---|---|---|
| committer | Jeroen van Rijn <Kelimion@users.noreply.github.com> | 2025-04-14 19:49:55 +0200 |
| commit | beee27dec06d59898e0e71d12a9d77e9d15d8e61 (patch) | |
| tree | 2bcfbb64905de7aaa32aeb33342439348deae167 /core/mem/tlsf | |
| parent | 66d99c1be39cb1d20003c56e02d26795fb6e0cea (diff) | |
Allow `core:mem/tlsf` to automatically add new pools.
New features:
- If TLSF can't service an allocation made on it, and it's initialized with `new_pool_size` > 0, it will ask the backing allocator for additional memory.
- `estimate_pool_size` can tell you what your initial pool size (and `new_pool_size`) ought to be if you want to make `count` allocations of `size` and `alignment`, or in its other form, how much backing memory is needed for `count` allocations of `type` and its corresponding size and alignment.
Diffstat (limited to 'core/mem/tlsf')
| -rw-r--r-- | core/mem/tlsf/tlsf.odin | 25 | ||||
| -rw-r--r-- | core/mem/tlsf/tlsf_internal.odin | 35 |
2 files changed, 54 insertions, 6 deletions
diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin index e5ab95947..4bfcc22d2 100644 --- a/core/mem/tlsf/tlsf.odin +++ b/core/mem/tlsf/tlsf.odin @@ -22,7 +22,6 @@ Error :: enum byte { Backing_Allocator_Error = 5, } - Allocator :: struct { // Empty lists point at this block to indicate they are free. block_null: Block_Header, @@ -44,7 +43,6 @@ Allocator :: struct { // If we're expected to grow when we run out of memory, // how much should we ask the backing allocator for? new_pool_size: uint, - } #assert(size_of(Allocator) % ALIGN_SIZE == 0) @@ -56,6 +54,21 @@ allocator :: proc(t: ^Allocator) -> runtime.Allocator { } } +// Tries to estimate a pool size sufficient for `count` allocations, each of `size` and with `alignment`. +estimate_pool_from_size_alignment :: proc(count: int, size: int, alignment: int) -> (pool_size: int) { + per_allocation := align_up(uint(size + alignment) + BLOCK_HEADER_OVERHEAD, ALIGN_SIZE) + return count * int(per_allocation) + int(INITIAL_POOL_OVERHEAD) +} + +// Tries to estimate a pool size sufficient for `count` allocations of `type`. 
+estimate_pool_from_typeid :: proc(count: int, type: typeid) -> (pool_size: int) { + ti := type_info_of(type) + return estimate_pool_size(count, ti.size, ti.align) +} + +estimate_pool_size :: proc{estimate_pool_from_size_alignment, estimate_pool_from_typeid} + + @(require_results) init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error { assert(control != nil) @@ -63,7 +76,7 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error { return .Invalid_Alignment } - pool_bytes := align_down(len(buf), ALIGN_SIZE) - INITIAL_POOL_OVERHEAD + pool_bytes := align_down(len(buf) - INITIAL_POOL_OVERHEAD, ALIGN_SIZE) if pool_bytes < BLOCK_SIZE_MIN { return .Backing_Buffer_Too_Small } else if pool_bytes > BLOCK_SIZE_MAX { @@ -79,9 +92,9 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error { } @(require_results) -init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int) -> Error { +init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error { assert(control != nil) - pool_bytes := align_up(uint(initial_pool_size), ALIGN_SIZE) + INITIAL_POOL_OVERHEAD + pool_bytes := uint(estimate_pool_size(1, initial_pool_size, ALIGN_SIZE)) if pool_bytes < BLOCK_SIZE_MIN { return .Backing_Buffer_Too_Small } else if pool_bytes > BLOCK_SIZE_MAX { @@ -98,6 +111,8 @@ init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, ini allocator = backing, } + control.new_pool_size = uint(new_pool_size) + // TODO(Jeroen): Add automatically growing the pools from the backing allocator return free_all(control) diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin index 53c658f90..9aee2097d 100644 --- a/core/mem/tlsf/tlsf_internal.odin +++ b/core/mem/tlsf/tlsf_internal.odin @@ -185,7 +185,40 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> block := block_locate_free(control, 
aligned_size) if block == nil { - return nil, .Out_Of_Memory + // OOM: Couldn't find block of `aligned_size` bytes. + if control.new_pool_size > 0 && control.pool.allocator.procedure != nil { + // TLSF is configured to grow. Trying to allocate a new pool of `control.new_pool_size` bytes. + + new_pool_buf := runtime.make_aligned([]byte, control.new_pool_size, ALIGN_SIZE, control.pool.allocator) or_return + + // Add new pool to control structure + if pool_add_err := pool_add(control, new_pool_buf); pool_add_err != .None { + delete(new_pool_buf, control.pool.allocator) + return nil, .Out_Of_Memory + } + + // Allocate a new link in the `control.pool` tracking structure. + new_pool := new_clone(Pool{ + data = new_pool_buf, + allocator = control.pool.allocator, + next = nil, + }, control.pool.allocator) or_return + + p := &control.pool + for p.next != nil { + p = p.next + } + p.next = new_pool + + // Try again to find free block + block = block_locate_free(control, aligned_size) + if block == nil { + return nil, .Out_Of_Memory + } + } else { + // TLSF is non-growing. We're done. + return nil, .Out_Of_Memory + } } ptr := block_to_ptr(block) aligned := align_ptr(ptr, align) |