diff options
| author | Karl Zylinski <karl@zylinski.se> | 2024-12-21 15:47:26 +0100 |
|---|---|---|
| committer | Karl Zylinski <karl@zylinski.se> | 2024-12-21 15:49:48 +0100 |
| commit | e5f32e145512a1fb0db142a214dee2d3b42ac073 (patch) | |
| tree | 58b35cc719a679a11125f9bc47bc248ffd9735ef /core | |
| parent | 314c41ef33a2d11e4313ecca0c708b8d02cd59d7 (diff) | |
Makes tracking allocator default to crashing on a bad free instead of adding to bad_free_array. The bad_free_array remains so as not to break old code. The new default behavior is implemented in a callback that you can override; a second provided callback restores the old behavior, where an element was added to bad_free_array. Rationale: Many people are just checking the allocation_map, but don't check the bad free array. Several examples throughout core that use the tracking allocator don't check bad_free_array either, so people have been taught not to check it.
Diffstat (limited to 'core')
| -rw-r--r-- | core/mem/tracking_allocator.odin | 50 | ||||
| -rw-r--r-- | core/testing/runner.odin | 1 |
2 files changed, 42 insertions, 9 deletions
diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index cf780de3f..25c547471 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -35,11 +35,17 @@ Tracking_Allocator_Bad_Free_Entry :: struct { } /* +Callback type for when tracking allocator runs into a bad free. +*/ +Tracking_Allocator_Bad_Free_Callback :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) + +/* Tracking allocator data. */ Tracking_Allocator :: struct { backing: Allocator, allocation_map: map[rawptr]Tracking_Allocator_Entry, + bad_free_callback: Tracking_Allocator_Bad_Free_Callback, bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry, mutex: sync.Mutex, clear_on_free_all: bool, @@ -61,6 +67,7 @@ allocate the tracked data. tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) { t.backing = backing_allocator t.allocation_map.allocator = internals_allocator + t.bad_free_callback = tracking_allocator_bad_free_callback_panic t.bad_free_array.allocator = internals_allocator if .Free_All in query_features(t.backing) { t.clear_on_free_all = true @@ -110,14 +117,43 @@ tracking_allocator_reset :: proc(t: ^Tracking_Allocator) { } /* +Default behavior for a bad free: Crash with error message that says where the +bad free happened. + +Override Tracking_Allocator.bad_free_callback to have something else happen. For +example, you can use tracking_allocator_bad_free_callback_add_to_array to return +the tracking allocator to the old behavior, where the bad_free_array was used. 
+*/ +tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) { + runtime.print_caller_location(location) + runtime.print_string(" Tracking allocator error: Bad free of pointer ") + runtime.print_uintptr(uintptr(memory)) + runtime.print_string("\n") + runtime.trap() +} + +/* +Alternative behavior for a bad free: Store in `bad_free_array`. If you use this, +then you must make sure to check Tracking_Allocator.bad_free_array at some point. +*/ +tracking_allocator_bad_free_callback_add_to_array :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) { + append(&t.bad_free_array, Tracking_Allocator_Bad_Free_Entry { + memory = memory, + location = location, + }) +} + +/* Tracking allocator. The tracking allocator is an allocator wrapper that tracks memory allocations. This allocator stores all the allocations in a map. Whenever a pointer that's not inside of the map is freed, the `bad_free_array` entry is added. -An example of how to use the `Tracking_Allocator` to track subsequent allocations -in your program and report leaks and bad frees: +Here follows an example of how to use the `Tracking_Allocator` to track +subsequent allocations in your program and report leaks. By default, the +tracking allocator will crash on bad frees. You can override that behavior by +overriding `track.bad_free_callback`. 
Example: @@ -137,9 +173,6 @@ Example: for _, leak in track.allocation_map { fmt.printf("%v leaked %m\n", leak.location, leak.size) } - for bad_free in track.bad_free_array { - fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory) - } } */ @(require_results) @@ -191,10 +224,9 @@ tracking_allocator_proc :: proc( } if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map { - append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{ - memory = old_memory, - location = loc, - }) + if data.bad_free_callback != nil { + data.bad_free_callback(data, old_memory, loc) + } } else { result = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc) or_return } diff --git a/core/testing/runner.odin b/core/testing/runner.odin index 6b9d610ed..83a5ac4e7 100644 --- a/core/testing/runner.odin +++ b/core/testing/runner.odin @@ -391,6 +391,7 @@ runner :: proc(internal_tests: []Internal_Test) -> bool { fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error) when TRACKING_MEMORY { mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i])) + task_memory_trackers[i].bad_free_callback = mem.tracking_allocator_bad_free_callback_add_to_array } } |