Diffstat (limited to 'src/common')
-rw-r--r--  src/common/allocator.odin        251
-rw-r--r--  src/common/track_allocator.odin  192
2 files changed, 130 insertions, 313 deletions
diff --git a/src/common/allocator.odin b/src/common/allocator.odin
index 4f72f81..ca0abe6 100644
--- a/src/common/allocator.odin
+++ b/src/common/allocator.odin
@@ -1,121 +1,130 @@
-package common
-
-import "core:mem"
-
-Scratch_Allocator :: struct {
- data: []byte,
- curr_offset: int,
- prev_allocation: rawptr,
- backup_allocator: mem.Allocator,
- leaked_allocations: [dynamic]rawptr,
-}
-
-scratch_allocator_init :: proc (s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) {
- s.data = mem.make_aligned([]byte, size, 2 * align_of(rawptr), backup_allocator);
- s.curr_offset = 0;
- s.prev_allocation = nil;
- s.backup_allocator = backup_allocator;
- s.leaked_allocations.allocator = backup_allocator;
-}
-
-scratch_allocator_destroy :: proc (s: ^Scratch_Allocator) {
- if s == nil {
- return;
- }
- for ptr in s.leaked_allocations {
- free(ptr, s.backup_allocator);
- }
- delete(s.leaked_allocations);
- delete(s.data, s.backup_allocator);
- s^ = {};
-}
-
-scratch_allocator_proc :: proc (allocator_data: rawptr, mode: mem.Allocator_Mode,
-size, alignment: int,
-old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
-
- s := (^Scratch_Allocator)(allocator_data);
-
- if s.data == nil {
- DEFAULT_BACKING_SIZE :: 1 << 22;
- if !(context.allocator.procedure != scratch_allocator_proc &&
- context.allocator.data != allocator_data) {
- panic("cyclic initialization of the scratch allocator with itself");
- }
- scratch_allocator_init(s, DEFAULT_BACKING_SIZE);
- }
-
- size := size;
-
- switch mode {
- case .Alloc:
- size = mem.align_forward_int(size, alignment);
-
- switch {
- case s.curr_offset + size <= len(s.data):
- start := uintptr(raw_data(s.data));
- ptr := start + uintptr(s.curr_offset);
- ptr = mem.align_forward_uintptr(ptr, uintptr(alignment));
- mem.zero(rawptr(ptr), size);
-
- s.prev_allocation = rawptr(ptr);
- offset := int(ptr - start);
- s.curr_offset = offset + size;
- return rawptr(ptr);
- }
- a := s.backup_allocator;
- if a.procedure == nil {
- a = context.allocator;
- s.backup_allocator = a;
- }
-
- ptr := mem.alloc(size, alignment, a, loc);
- if s.leaked_allocations == nil {
- s.leaked_allocations = make([dynamic]rawptr, a);
- }
- append(&s.leaked_allocations, ptr);
-
- return ptr;
-
- case .Free:
- case .Free_All:
- s.curr_offset = 0;
- s.prev_allocation = nil;
- for ptr in s.leaked_allocations {
- free(ptr, s.backup_allocator);
- }
- clear(&s.leaked_allocations);
-
- case .Resize:
- begin := uintptr(raw_data(s.data));
- end := begin + uintptr(len(s.data));
- old_ptr := uintptr(old_memory);
- //if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
- // s.curr_offset = int(old_ptr-begin)+size;
- // return old_memory;
- //}
- ptr := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, flags, loc);
- mem.copy(ptr, old_memory, old_size);
- scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, flags, loc);
- return ptr;
-
- case .Query_Features:
- set := (^mem.Allocator_Mode_Set)(old_memory);
- if set != nil {
- set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
- }
- return set;
-
- case .Query_Info:
- return nil;
- }
-
- return nil;
-}
-
-scratch_allocator :: proc (allocator: ^Scratch_Allocator) -> mem.Allocator {
- return mem.Allocator {
- procedure = scratch_allocator_proc,
- data = allocator,
- };
-}
+package common
+
+import "core:mem"
+import "core:runtime"
+
+Scratch_Allocator :: struct {
+ data: []byte,
+ curr_offset: int,
+ prev_allocation: rawptr,
+ backup_allocator: mem.Allocator,
+ leaked_allocations: [dynamic][]byte,
+}
+
+scratch_allocator_init :: proc (s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) {
+ s.data = mem.make_aligned([]byte, size, 2 * align_of(rawptr), backup_allocator);
+ s.curr_offset = 0;
+ s.prev_allocation = nil;
+ s.backup_allocator = backup_allocator;
+ s.leaked_allocations.allocator = backup_allocator;
+}
+
+scratch_allocator_destroy :: proc (s: ^Scratch_Allocator) {
+ if s == nil {
+ return;
+ }
+ for ptr in s.leaked_allocations {
+ mem.free_bytes(ptr, s.backup_allocator);
+ }
+ delete(s.leaked_allocations);
+ delete(s.data, s.backup_allocator);
+ s^ = {};
+}
+
+scratch_allocator_proc :: proc (allocator_data: rawptr, mode: mem.Allocator_Mode, size, alignment: int, old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, mem.Allocator_Error) {
+
+ s := (^Scratch_Allocator)(allocator_data);
+
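+ // Lazily allocate a default backing buffer on first use; refuse to
+ // initialize from this scratch allocator itself, since that would recurse.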
+ if s.data == nil {
+ DEFAULT_BACKING_SIZE :: 1 << 22;
+ if !(context.allocator.procedure != scratch_allocator_proc &&
+ context.allocator.data != allocator_data) {
+ panic("cyclic initialization of the scratch allocator with itself");
+ }
+ scratch_allocator_init(s, DEFAULT_BACKING_SIZE);
+ }
+
+ size := size;
+
+ switch mode {
+ case .Alloc:
+ size = mem.align_forward_int(size, alignment);
+
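+ // Fast path: bump-allocate out of the fixed backing buffer while it has room.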
+ switch {
+ case s.curr_offset + size <= len(s.data):
+ start := uintptr(raw_data(s.data));
+ ptr := start + uintptr(s.curr_offset);
+ ptr = mem.align_forward_uintptr(ptr, uintptr(alignment));
+ mem.zero(rawptr(ptr), size);
+
+ s.prev_allocation = rawptr(ptr);
+ offset := int(ptr - start);
+ s.curr_offset = offset + size;
+ return mem.byte_slice(rawptr(ptr), size), nil;
+ }
+
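+ // Backing buffer is full: fall back to the backup allocator and remember
+ // the allocation so Free_All and scratch_allocator_destroy can release it.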
+ a := s.backup_allocator;
+ if a.procedure == nil {
+ a = context.allocator;
+ s.backup_allocator = a;
+ }
+
+ ptr, err := mem.alloc_bytes(size, alignment, a, loc);
+ if err != nil {
+ return ptr, err;
+ }
+ if s.leaked_allocations == nil {
+ s.leaked_allocations = make([dynamic][]byte, a);
+ }
+ append(&s.leaked_allocations, ptr);
+
+ if logger := context.logger; logger.lowest_level <= .Warning {
+ if logger.procedure != nil {
+ logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator", logger.options, loc);
+ }
+ }
+
+ return ptr, err;
+
+ case .Free:
+ case .Free_All:
+ s.curr_offset = 0;
+ s.prev_allocation = nil;
+ for ptr in s.leaked_allocations {
+ mem.free_bytes(ptr, s.backup_allocator);
+ }
+ clear(&s.leaked_allocations);
+
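+ // Resize allocates a fresh block and copies the old contents; the
+ // follow-up Free request is currently a no-op in this allocator.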
+ case .Resize:
+
+ data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc);
+ if err != nil {
+ return data, err;
+ }
+
+ runtime.copy(data, mem.byte_slice(old_memory, old_size));
+ _, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc);
+ return data, err;
+
+ case .Query_Features:
+ set := (^mem.Allocator_Mode_Set)(old_memory);
+ if set != nil {
+ set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
+ }
+ return nil, nil;
+ case .Query_Info:
+ return nil, nil;
+ }
+
+ return nil, nil;
+}
+
+scratch_allocator :: proc (allocator: ^Scratch_Allocator) -> mem.Allocator {
+ return mem.Allocator {
+ procedure = scratch_allocator_proc,
+ data = allocator,
+ };
+}
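
A minimal usage sketch of the scratch allocator defined above (not part of the commit): the import path "src:common", the procedure name example_scratch_usage, and the 1 MiB backing size are assumptions for illustration; only Scratch_Allocator, scratch_allocator_init, scratch_allocator_destroy, and scratch_allocator come from the file itself.

package scratch_usage_example

import "core:mem"
import "src:common" // assumed collection/import path for this repository

example_scratch_usage :: proc() {
	scratch: common.Scratch_Allocator;
	common.scratch_allocator_init(&scratch, 1 * mem.Megabyte);
	defer common.scratch_allocator_destroy(&scratch);

	context.allocator = common.scratch_allocator(&scratch);

	// Small allocations are bump-allocated out of the fixed backing buffer;
	// anything that does not fit is forwarded to the backup allocator and tracked.
	buffer := make([]byte, 256);
	_ = buffer;

	// Reset the arena and release any tracked backup allocations in one call.
	free_all(context.allocator);
}
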
diff --git a/src/common/track_allocator.odin b/src/common/track_allocator.odin
deleted file mode 100644
index 978a74e..0000000
--- a/src/common/track_allocator.odin
+++ /dev/null
@@ -1,192 +0,0 @@
-package common
-
-/*
- https://gist.github.com/jharler/7ee9a4d5b46e31f7f9399da49cfabe72
-*/
-
-import "core:mem"
-import "core:fmt"
-import "core:runtime"
-import "core:sync"
-import "core:log"
-
-// ----------------------------------------------------------------------------------------------------
-
-ThreadSafe_Allocator_Data :: struct {
- actual_allocator: mem.Allocator,
- mutex: sync.Mutex,
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-threadsafe_allocator :: proc(allocator: mem.Allocator) -> mem.Allocator {
- data := new(ThreadSafe_Allocator_Data);
- data.actual_allocator = allocator;
- sync.mutex_init(&data.mutex);
-
- return mem.Allocator {procedure = threadsafe_allocator_proc, data = data};
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-threadsafe_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, size, alignment: int,
-old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
-
- data := cast(^ThreadSafe_Allocator_Data)allocator_data;
-
- sync.mutex_lock(&data.mutex);
- defer sync.mutex_unlock(&data.mutex);
-
- return data.actual_allocator.procedure(data.actual_allocator.data, mode, size, alignment, old_memory, old_size, flags, loc);
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-Memleak_Allocator_Data :: struct {
- actual_allocator: mem.Allocator,
- allocations: map[rawptr]Memleak_Entry,
- frees: map[rawptr]Memleak_Entry,
- allocation_count: u32,
- unexpected_frees: u32,
- mutex: sync.Mutex,
- track_frees: bool,
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-Memleak_Entry :: struct {
- location: runtime.Source_Code_Location,
- size: int,
- index: u32,
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-memleak_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode, size, alignment: int,
-old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
-
- memleak := cast(^Memleak_Allocator_Data)allocator_data;
-
- sync.mutex_lock(&memleak.mutex);
- defer sync.mutex_unlock(&memleak.mutex);
-
- if mode == .Free {
- if old_memory not_in memleak.allocations {
- if memleak.track_frees {
- if old_memory in memleak.frees {
- fmt.println(fmt.tprintf("{0}({1}:{2}) {3} freed memory already freed by this memleak allocator", loc.file_path, loc.line, loc.column, loc.procedure));
- free_loc := memleak.frees[old_memory].location;
- fmt.println(fmt.tprintf("{0}({1}:{2}) {3} <<< freed here", loc.file_path, loc.line, loc.column, loc.procedure));
- } else {
- fmt.println(fmt.tprintf("{0}({1}:{2}) {3} freed memory not allocated or previously freed by this memleak allocator", loc.file_path, loc.line, loc.column, loc.procedure));
- }
- } else {
- fmt.println(fmt.tprintf("{0}({1}:{2}) {3} freed memory not allocated by this memleak allocator", loc.file_path, loc.line, loc.column, loc.procedure));
- }
- memleak.unexpected_frees += 1;
- return nil;
- } else {
- //entry := &memleak.allocations[old_memory];
- delete_key(&memleak.allocations, old_memory);
-
- if memleak.track_frees {
- memleak.frees[old_memory] = Memleak_Entry {
- location = loc,
- size = size,
- index = 0,
- };
- }
- }
- }
-
- result := memleak.actual_allocator.procedure(memleak.actual_allocator.data, mode, size, alignment, old_memory, old_size, flags, loc);
-
- if mode == .Resize && result != old_memory {
- delete_key(&memleak.allocations, old_memory);
- }
-
- if mode != .Free {
- // using a conditional breakpoint with memleak.allocation_count in the condition
- // can be very useful for inspecting the stack trace of a particular allocation
-
- memleak.allocations[result] = Memleak_Entry {
- location = loc,
- size = size,
- index = memleak.allocation_count,
- };
-
- memleak.allocation_count += 1;
-
- if memleak.track_frees {
- if result in memleak.frees {
- delete_key(&memleak.frees, result);
- }
- }
- }
-
- return result;
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-memleak_allocator :: proc(track_frees: bool) -> mem.Allocator {
-
- make([]byte, 1, context.temp_allocator); // so the temp allocation doesn't clutter our results
-
- data := new(Memleak_Allocator_Data);
- data.actual_allocator = context.allocator;
- data.allocations = make(map[rawptr]Memleak_Entry);
-
- if track_frees {
- data.track_frees = true;
- data.frees = make(map[rawptr]Memleak_Entry);
- }
-
- sync.mutex_init(&data.mutex);
-
- return mem.Allocator {procedure = memleak_allocator_proc, data = data};
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-memleak_detected_leaks :: proc() -> bool {
- if context.allocator.procedure == memleak_allocator_proc {
- memleak := cast(^Memleak_Allocator_Data)context.allocator.data;
- return len(memleak.allocations) > 0;
- }
-
- return false;
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-memleak_dump :: proc(memleak_alloc: mem.Allocator, dump_proc: proc(message: string, user_data: rawptr), user_data: rawptr) {
- memleak := cast(^Memleak_Allocator_Data)memleak_alloc.data;
-
- context.allocator = memleak.actual_allocator;
-
- // check for and ignore default_temp_allocator_proc allocations
- tmp_check := 0;
- for _, leak in &memleak.allocations {
- if leak.location.procedure == "default_temp_allocator_proc" {
- tmp_check += 1;
- }
- }
-
- dump_proc(fmt.tprintf("{0} memory leaks detected!", len(memleak.allocations) - tmp_check), user_data);
- dump_proc(fmt.tprintf("{0} unexpected frees", memleak.unexpected_frees), user_data);
-
- for _, leak in &memleak.allocations {
- if leak.location.procedure != "default_temp_allocator_proc" {
- dump_proc(fmt.tprintf("{0}({1}:{2}) {3} allocated {4} bytes [{5}]", leak.location.file_path, leak.location.line, leak.location.column, leak.location.procedure, leak.size, leak.index), user_data);
- }
- }
-
- context.allocator = mem.Allocator {procedure = memleak_allocator_proc, data = memleak};
-}
-
-// ----------------------------------------------------------------------------------------------------
-
-log_dump :: proc(message: string, user_data: rawptr) {
- log.info(message);
-}
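
A sketch of one way the memleak allocator removed above could have been wired up, based only on the procedures in the deleted file; the import path, run_application, and the console-logger setup are placeholders added for illustration.

package memleak_usage_example

import "core:log"
import "src:common" // assumed collection/import path; these procs lived in the deleted track_allocator.odin

example_memleak_usage :: proc() {
	context.logger = log.create_console_logger();

	// Route every allocation through the leak-tracking wrapper.
	memleak_alloc := common.memleak_allocator(track_frees = true);
	context.allocator = memleak_alloc;

	run_application(); // placeholder for the real program entry point

	// Report anything that was allocated but never freed.
	if common.memleak_detected_leaks() {
		common.memleak_dump(memleak_alloc, common.log_dump, nil);
	}
}

run_application :: proc() {
	// placeholder body
}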