| field | value | date |
|---|---|---|
| author | gingerBill <bill@gingerbill.org> | 2021-04-19 12:31:31 +0100 |
| committer | gingerBill <bill@gingerbill.org> | 2021-04-19 12:31:31 +0100 |
| commit | f98c4d683791e34a077e628bdcac0a0ed32dc065 (patch) | |
| tree | 976ec0dc20463352aafb4833637ce24529cbcd90 /core/runtime | |
| parent | a4d0092b160cbdd0c5796f1f74e6f1407cb074b6 (diff) | |
Improve the `Allocator` interface to support returning `Allocator_Error` to allow for safer calls
Virtually all code (except for user-written custom allocators) should continue to work as normal. Extra features will need to be added so that the current procedures can support the `Allocator_Error` return value (akin to `#optional_ok`).
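For user-written custom allocators, the change that matters is the `Allocator_Proc` signature: the `flags: u64` parameter is gone and every call now returns `([]byte, Allocator_Error)` instead of a bare `rawptr`. Below is a minimal sketch of a custom allocator against the new shape, assuming the usual `core:runtime` import; the names `my_nil_allocator_proc` and `my_nil_allocator` are illustrative only and not part of this commit.

```odin
package allocator_example

import "core:runtime"

// Sketch only: a do-nothing allocator written against the new Allocator_Proc
// shape. Note the missing `flags: u64` parameter and the
// ([]byte, Allocator_Error) return value; failure is reported through the
// error value rather than by silently returning a nil pointer.
my_nil_allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
                              size, alignment: int,
                              old_memory: rawptr, old_size: int,
                              loc := #caller_location) -> ([]byte, runtime.Allocator_Error) {
    if mode == .Alloc || mode == .Resize {
        // This allocator never provides memory.
        return nil, .Out_Of_Memory;
    }
    return nil, .None;
}

my_nil_allocator :: proc() -> runtime.Allocator {
    return runtime.Allocator{procedure = my_nil_allocator_proc, data = nil};
}
```

Installing such an allocator is unchanged, e.g. `context.allocator = my_nil_allocator();`.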
Diffstat (limited to 'core/runtime')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | core/runtime/core.odin | 10 |
| -rw-r--r-- | core/runtime/core_builtin.odin | 26 |
| -rw-r--r-- | core/runtime/core_builtin_soa.odin | 11 |
| -rw-r--r-- | core/runtime/default_allocators.odin | 68 |
| -rw-r--r-- | core/runtime/dynamic_array_internal.odin | 9 |
| -rw-r--r-- | core/runtime/dynamic_map_internal.odin | 4 |
| -rw-r--r-- | core/runtime/internal.odin | 43 |
| -rw-r--r-- | core/runtime/os_specific_windows.odin | 18 |
8 files changed, 116 insertions, 73 deletions
```diff
diff --git a/core/runtime/core.odin b/core/runtime/core.odin
index 8763b9b8b..05f7a1e57 100644
--- a/core/runtime/core.odin
+++ b/core/runtime/core.odin
@@ -252,7 +252,6 @@ Source_Code_Location :: struct {
 
 Assertion_Failure_Proc :: #type proc(prefix, message: string, loc: Source_Code_Location);
 
-
 // Allocation Stuff
 Allocator_Mode :: enum byte {
     Alloc,
@@ -271,9 +270,16 @@ Allocator_Query_Info :: struct {
     alignment: Maybe(int),
 }
 
+Allocator_Error :: enum byte {
+    None            = 0,
+    Out_Of_Memory   = 1,
+    Invalid_Pointer = 2,
+}
+
 Allocator_Proc :: #type proc(allocator_data: rawptr, mode: Allocator_Mode,
                              size, alignment: int,
-                             old_memory: rawptr, old_size: int, flags: u64 = 0, location: Source_Code_Location = #caller_location) -> rawptr;
+                             old_memory: rawptr, old_size: int,
+                             location: Source_Code_Location = #caller_location) -> ([]byte, Allocator_Error);
 Allocator :: struct {
     procedure: Allocator_Proc,
     data:      rawptr,
diff --git a/core/runtime/core_builtin.odin b/core/runtime/core_builtin.odin
index 9f6d1b35b..7446b496b 100644
--- a/core/runtime/core_builtin.odin
+++ b/core/runtime/core_builtin.odin
@@ -180,12 +180,14 @@ DEFAULT_RESERVE_CAPACITY :: 16;
 make_aligned :: proc($T: typeid/[]$E, auto_cast len: int, alignment: int, allocator := context.allocator, loc := #caller_location) -> T {
     make_slice_error_loc(loc, len);
-    data := mem_alloc(size_of(E)*len, alignment, allocator, loc);
-    if data == nil && size_of(E) != 0 {
+    data, err := mem_alloc_bytes(size_of(E)*len, alignment, allocator, loc);
+    switch {
+    case err != nil:
+        return nil;
+    case data == nil && size_of(E) != 0:
         return nil;
     }
-    // mem_zero(data, size_of(E)*len);
-    s := Raw_Slice{data, len};
+    s := Raw_Slice{raw_data(data), len};
     return transmute(T)s;
 }
@@ -449,15 +451,15 @@ reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #cal
     new_size  := capacity * size_of(E);
     allocator := a.allocator;
 
-    new_data := allocator.procedure(
+    new_data, err := allocator.procedure(
         allocator.data, .Resize, new_size, align_of(E),
-        a.data, old_size, 0, loc,
+        a.data, old_size, loc,
     );
-    if new_data == nil {
+    if new_data == nil || err != nil {
         return false;
     }
 
-    a.data = new_data;
+    a.data = raw_data(new_data);
     a.cap = capacity;
     return true;
 }
@@ -483,15 +485,15 @@ resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller
     new_size  := length * size_of(E);
     allocator := a.allocator;
 
-    new_data := allocator.procedure(
+    new_data, err := allocator.procedure(
         allocator.data, .Resize, new_size, align_of(E),
-        a.data, old_size, 0, loc,
+        a.data, old_size, loc,
     );
-    if new_data == nil {
+    if new_data == nil || err != nil {
         return false;
     }
 
-    a.data = new_data;
+    a.data = raw_data(new_data);
     a.len = length;
     a.cap = length;
     return true;
diff --git a/core/runtime/core_builtin_soa.odin b/core/runtime/core_builtin_soa.odin
index fea0d7305..d7de057d2 100644
--- a/core/runtime/core_builtin_soa.odin
+++ b/core/runtime/core_builtin_soa.odin
@@ -226,13 +226,14 @@ reserve_soa :: proc(array: ^$T/#soa[dynamic]$E, capacity: int, loc := #caller_lo
     old_data := (^rawptr)(array)^;
 
-    new_data := array.allocator.procedure(
+    new_bytes, err := array.allocator.procedure(
         array.allocator.data, .Alloc, new_size, max_align,
-        nil, old_size, 0, loc,
+        nil, old_size, loc,
     );
-    if new_data == nil {
+    if new_bytes == nil || err != nil {
         return false;
     }
+    new_data := raw_data(new_bytes);
 
     footer.cap = capacity;
 
@@ -256,9 +257,9 @@ reserve_soa :: proc(array: ^$T/#soa[dynamic]$E, capacity: int, loc := #caller_lo
         new_offset += type.size * capacity;
     }
 
-    array.allocator.procedure(
+    _, err = array.allocator.procedure(
         array.allocator.data, .Free, 0, max_align,
-        old_data, old_size, 0, loc,
+        old_data, old_size, loc,
     );
 
     return true;
diff --git a/core/runtime/default_allocators.odin b/core/runtime/default_allocators.odin
index 03bc454d0..64229ec6c 100644
--- a/core/runtime/default_allocators.odin
+++ b/core/runtime/default_allocators.odin
@@ -5,8 +5,8 @@ when ODIN_DEFAULT_TO_NIL_ALLOCATOR || ODIN_OS == "freestanding" {
     default_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
                                    size, alignment: int,
-                                   old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
-        return nil;
+                                   old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+        return nil, .None;
     }
 
     default_allocator :: proc() -> Allocator {
@@ -26,6 +26,13 @@ when ODIN_DEFAULT_TO_NIL_ALLOCATOR || ODIN_OS == "freestanding" {
     }
 }
 
+@(private)
+byte_slice :: #force_inline proc "contextless" (data: rawptr, len: int) -> (res: []byte) {
+    r := (^Raw_Slice)(&res);
+    r.data, r.len = data, len;
+    return;
+}
+
 DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE: int : #config(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE, 1<<22);
 
@@ -35,7 +42,7 @@ Default_Temp_Allocator :: struct {
     curr_offset:        int,
     prev_allocation:    rawptr,
     backup_allocator:   Allocator,
-    leaked_allocations: [dynamic]rawptr,
+    leaked_allocations: [dynamic][]byte,
 }
 
 default_temp_allocator_init :: proc(s: ^Default_Temp_Allocator, size: int, backup_allocator := context.allocator) {
@@ -51,7 +58,7 @@ default_temp_allocator_destroy :: proc(s: ^Default_Temp_Allocator) {
         return;
     }
     for ptr in s.leaked_allocations {
-        free(ptr, s.backup_allocator);
+        free(raw_data(ptr), s.backup_allocator);
     }
     delete(s.leaked_allocations);
     delete(s.data, s.backup_allocator);
@@ -60,7 +67,7 @@
 default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                     size, alignment: int,
-                                    old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
+                                    old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
 
     s := (^Default_Temp_Allocator)(allocator_data);
 
@@ -84,7 +91,7 @@ default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode
             s.prev_allocation = rawptr(ptr);
             offset := int(ptr - start);
             s.curr_offset = offset + size;
-            return rawptr(ptr);
+            return byte_slice(rawptr(ptr), size), .None;
 
         case size <= len(s.data):
             start := uintptr(raw_data(s.data));
@@ -94,7 +101,7 @@ default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode
             s.prev_allocation = rawptr(ptr);
            offset := int(ptr - start);
             s.curr_offset = offset + size;
-            return rawptr(ptr);
+            return byte_slice(rawptr(ptr), size), .None;
         }
         a := s.backup_allocator;
         if a.procedure == nil {
@@ -102,11 +109,14 @@ default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode
             s.backup_allocator = a;
         }
 
-        ptr := mem_alloc(size, alignment, a, loc);
+        data, err := mem_alloc_bytes(size, alignment, a, loc);
+        if err != nil {
+            return data, err;
+        }
         if s.leaked_allocations == nil {
-            s.leaked_allocations = make([dynamic]rawptr, a);
+            s.leaked_allocations = make([dynamic][]byte, a);
         }
-        append(&s.leaked_allocations, ptr);
+        append(&s.leaked_allocations, data);
 
         // TODO(bill): Should leaks be notified about?
         if logger := context.logger; logger.lowest_level <= .Warning {
@@ -115,11 +125,11 @@ default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode
             }
         }
 
-        return ptr;
+        return data, .None;
 
     case .Free:
         if old_memory == nil {
-            return nil;
+            return nil, .None;
         }
 
         start := uintptr(raw_data(s.data));
@@ -129,30 +139,32 @@ default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode
         if s.prev_allocation == old_memory {
             s.curr_offset = int(uintptr(s.prev_allocation) - start);
             s.prev_allocation = nil;
-            return nil;
+            return nil, .None;
         }
 
         if start <= old_ptr && old_ptr < end {
             // NOTE(bill): Cannot free this pointer but it is valid
-            return nil;
+            return nil, .None;
         }
 
         if len(s.leaked_allocations) != 0 {
-            for ptr, i in s.leaked_allocations {
+            for data, i in s.leaked_allocations {
+                ptr := raw_data(data);
                 if ptr == old_memory {
                     free(ptr, s.backup_allocator);
                     ordered_remove(&s.leaked_allocations, i);
-                    return nil;
+                    return nil, .None;
                 }
             }
         }
-        panic("invalid pointer passed to default_temp_allocator");
+        return nil, .Invalid_Pointer;
+        // panic("invalid pointer passed to default_temp_allocator");
 
     case .Free_All:
         s.curr_offset = 0;
         s.prev_allocation = nil;
-        for ptr in s.leaked_allocations {
-            free(ptr, s.backup_allocator);
+        for data in s.leaked_allocations {
+            free(raw_data(data), s.backup_allocator);
         }
         clear(&s.leaked_allocations);
 
@@ -163,26 +175,28 @@ default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode
         if old_memory == s.prev_allocation && old_ptr & uintptr(alignment)-1 == 0 {
             if old_ptr+uintptr(size) < end {
                 s.curr_offset = int(old_ptr-begin)+size;
-                return old_memory;
+                return byte_slice(old_memory, size), .None;
             }
         }
 
-        ptr := default_temp_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, flags, loc);
-        mem_copy(ptr, old_memory, old_size);
-        default_temp_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, flags, loc);
-        return ptr;
+        ptr, err := default_temp_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc);
+        if err == .None {
+            copy(ptr, byte_slice(old_memory, old_size));
+            _, err = default_temp_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc);
+        }
+        return ptr, err;
 
     case .Query_Features:
         set := (^Allocator_Mode_Set)(old_memory);
         if set != nil {
             set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
         }
-        return set;
+        return nil, nil;
 
     case .Query_Info:
-        return nil;
+        return nil, .None;
     }
 
-    return nil;
+    return nil, .None;
 }
 
 default_temp_allocator :: proc(allocator: ^Default_Temp_Allocator) -> Allocator {
diff --git a/core/runtime/dynamic_array_internal.odin b/core/runtime/dynamic_array_internal.odin
index 55289bbe4..340f3be5e 100644
--- a/core/runtime/dynamic_array_internal.odin
+++ b/core/runtime/dynamic_array_internal.odin
@@ -29,10 +29,13 @@ __dynamic_array_reserve :: proc(array_: rawptr, elem_size, elem_align: int, cap:
     new_size := cap * elem_size;
     allocator := array.allocator;
 
-    new_data := allocator.procedure(allocator.data, .Resize, new_size, elem_align, array.data, old_size, 0, loc);
+    new_data, err := allocator.procedure(allocator.data, .Resize, new_size, elem_align, array.data, old_size, loc);
+    if err != nil {
+        return false;
+    }
     if new_data != nil || elem_size == 0 {
-        array.data = new_data;
-        array.cap = cap;
+        array.data = raw_data(new_data);
+        array.cap = min(cap, len(new_data)/elem_size);
         return true;
     }
     return false;
diff --git a/core/runtime/dynamic_map_internal.odin b/core/runtime/dynamic_map_internal.odin
index 8b63e6a7b..aff3b9859 100644
--- a/core/runtime/dynamic_map_internal.odin
+++ b/core/runtime/dynamic_map_internal.odin
@@ -173,8 +173,8 @@ __slice_resize :: proc(array_: ^$T/[]$E, new_count: int, allocator: Allocator, l
     old_size := array.len*size_of(T);
     new_size := new_count*size_of(T);
 
-    new_data := mem_resize(array.data, old_size, new_size, align_of(T), allocator, loc);
-    if new_data == nil {
+    new_data, err := mem_resize(array.data, old_size, new_size, align_of(T), allocator, loc);
+    if new_data == nil || err != nil {
         return false;
     }
     array.data = new_data;
diff --git a/core/runtime/internal.odin b/core/runtime/internal.odin
index 5381ee276..51f478a67 100644
--- a/core/runtime/internal.odin
+++ b/core/runtime/internal.odin
@@ -159,6 +159,16 @@ mem_copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> r
 
 DEFAULT_ALIGNMENT :: 2*align_of(rawptr);
 
+mem_alloc_bytes :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+    if size == 0 {
+        return nil, nil;
+    }
+    if allocator.procedure == nil {
+        return nil, nil;
+    }
+    return allocator.procedure(allocator.data, .Alloc, size, alignment, nil, 0, loc);
+}
+
 mem_alloc :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> rawptr {
     if size == 0 {
         return nil;
@@ -166,36 +176,43 @@ mem_alloc :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, a
     if allocator.procedure == nil {
         return nil;
     }
-    return allocator.procedure(allocator.data, .Alloc, size, alignment, nil, 0, 0, loc);
+    data, err := allocator.procedure(allocator.data, .Alloc, size, alignment, nil, 0, loc);
+    _ = err;
+    return raw_data(data);
 }
 
-mem_free :: #force_inline proc(ptr: rawptr, allocator := context.allocator, loc := #caller_location) {
+mem_free :: #force_inline proc(ptr: rawptr, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
     if ptr == nil {
-        return;
+        return .None;
     }
     if allocator.procedure == nil {
-        return;
+        return .None;
     }
-    allocator.procedure(allocator.data, .Free, 0, 0, ptr, 0, 0, loc);
+    _, err := allocator.procedure(allocator.data, .Free, 0, 0, ptr, 0, loc);
+    return err;
 }
 
-mem_free_all :: #force_inline proc(allocator := context.allocator, loc := #caller_location) {
+mem_free_all :: #force_inline proc(allocator := context.allocator, loc := #caller_location) -> (err: Allocator_Error) {
     if allocator.procedure != nil {
-        allocator.procedure(allocator.data, .Free_All, 0, 0, nil, 0, 0, loc);
+        _, err = allocator.procedure(allocator.data, .Free_All, 0, 0, nil, 0, loc);
     }
+    return;
 }
 
-mem_resize :: #force_inline proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> rawptr {
+mem_resize :: #force_inline proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (new_ptr: rawptr, err: Allocator_Error) {
+    new_data: []byte;
     switch {
     case allocator.procedure == nil:
-        return nil;
+        return;
     case new_size == 0:
-        allocator.procedure(allocator.data, .Free, 0, 0, ptr, 0, 0, loc);
-        return nil;
+        new_data, err = allocator.procedure(allocator.data, .Free, 0, 0, ptr, 0, loc);
     case ptr == nil:
-        return allocator.procedure(allocator.data, .Alloc, new_size, alignment, nil, 0, 0, loc);
+        new_data, err = allocator.procedure(allocator.data, .Alloc, new_size, alignment, nil, 0, loc);
+    case:
+        new_data, err = allocator.procedure(allocator.data, .Resize, new_size, alignment, ptr, old_size, loc);
     }
-    return allocator.procedure(allocator.data, .Resize, new_size, alignment, ptr, old_size, 0, loc);
+    new_ptr = raw_data(new_data);
+    return;
 }
 
 memory_equal :: proc "contextless" (a, b: rawptr, n: int) -> bool { return memory_compare(a, b, n) == 0; }
diff --git a/core/runtime/os_specific_windows.odin b/core/runtime/os_specific_windows.odin
index ad01196ed..0cf9d28f4 100644
--- a/core/runtime/os_specific_windows.odin
+++ b/core/runtime/os_specific_windows.odin
@@ -88,7 +88,7 @@ heap_free :: proc "contextless" (ptr: rawptr) {
 
 default_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                size, alignment: int,
-                               old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
+                               old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
 
     //
     // NOTE(tetra, 2020-01-14): The heap doesn't respect alignment.
@@ -97,7 +97,7 @@ default_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
     // the pointer we return to the user.
     //
 
-    aligned_alloc :: proc "contextless" (size, alignment: int, old_ptr: rawptr = nil) -> rawptr {
+    aligned_alloc :: proc "contextless" (size, alignment: int, old_ptr: rawptr = nil) -> ([]byte, Allocator_Error) {
         a := max(alignment, align_of(rawptr));
         space := size + a - 1;
 
@@ -114,13 +114,13 @@ default_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
         aligned_ptr := (ptr - 1 + uintptr(a)) & -uintptr(a);
         diff := int(aligned_ptr - ptr);
         if (size + diff) > space {
-            return nil;
+            return nil, .Out_Of_Memory;
         }
 
         aligned_mem = rawptr(aligned_ptr);
         ptr_offset((^rawptr)(aligned_mem), -1)^ = allocated_mem;
 
-        return aligned_mem;
+        return byte_slice(aligned_mem, size), nil;
     }
 
     aligned_free :: proc "contextless" (p: rawptr) {
@@ -129,9 +129,9 @@ default_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
         }
     }
 
-    aligned_resize :: proc "contextless" (p: rawptr, old_size: int, new_size: int, new_alignment: int) -> rawptr {
+    aligned_resize :: proc "contextless" (p: rawptr, old_size: int, new_size: int, new_alignment: int) -> ([]byte, Allocator_Error) {
         if p == nil {
-            return nil;
+            return nil, nil;
         }
         return aligned_alloc(new_size, new_alignment, p);
     }
@@ -157,13 +157,13 @@ default_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
         if set != nil {
             set^ = {.Alloc, .Free, .Resize, .Query_Features};
         }
-        return set;
+        return nil, nil;
 
     case .Query_Info:
-        return nil;
+        return nil, nil;
     }
 
-    return nil;
+    return nil, nil;
 }
 
 default_allocator :: proc() -> Allocator {
```
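On the caller side, the new `mem_alloc_bytes` in `core/runtime/internal.odin` above shows the intended pattern: an allocation yields a byte slice plus an `Allocator_Error` that can be checked explicitly. A rough sketch follows; the wrapper name `alloc_checked` is hypothetical and not part of this commit.

```odin
package allocator_example

import "core:runtime"

// Sketch only: explicit error handling against the new allocator interface.
alloc_checked :: proc(n: int) -> []byte {
    data, err := runtime.mem_alloc_bytes(n);
    if err != nil {
        // e.g. .Out_Of_Memory from the default heap allocator, or
        // .Invalid_Pointer reported by the default temp allocator on .Free.
        return nil;
    }
    return data;
}
```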