author    Lucas Perlind <perlindluca@gmail.com>  2025-04-30 19:21:00 +1000
committer Lucas Perlind <perlindluca@gmail.com>  2025-05-06 14:55:50 +1000
commit    83bc2d3c4a186d6a8c362eed901acd6bc6363a8d (patch)
tree      ea5c4b6bccb6aa226e6a3ed8036ae3fd24a1915c /core/mem
parent    8032db348411ae85397441de7f2ce9ebd1029112 (diff)
Add asan support for various allocators
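
This threads AddressSanitizer poisoning through the rollback stack, TLSF,
tracking, and virtual arena allocators: memory an allocator owns but has not
handed out is poisoned, slices returned to the caller are unpoisoned, and
allocator-internal procedures are annotated @(no_sanitize_address) so their
own bookkeeping reads and writes into poisoned regions are not reported.

A minimal sketch of the pattern against a hypothetical bump allocator
(`Bump` and the `bump_*` procedures are illustrative only, not part of
this change):

    package example

    import "base:sanitizer"

    Bump :: struct {
        buf:  []byte,
        used: int,
    }

    // Bookkeeping may touch poisoned memory, so opt out of ASan here.
    @(no_sanitize_address)
    bump_init :: proc(b: ^Bump, buf: []byte) {
        b.buf  = buf
        b.used = 0
        sanitizer.address_poison(b.buf) // nothing handed out yet
    }

    @(require_results, no_sanitize_address)
    bump_alloc :: proc(b: ^Bump, size: int) -> (res: []byte, ok: bool) {
        if b.used + size > len(b.buf) {
            return nil, false
        }
        res = b.buf[b.used:][:size]
        b.used += size
        sanitizer.address_unpoison(res) // the caller may now use this range
        return res, true
    }

    @(no_sanitize_address)
    bump_free_all :: proc(b: ^Bump) {
        b.used = 0
        sanitizer.address_poison(b.buf) // everything is off-limits again
    }

The annotations are inert in normal builds; they only take effect when
compiling with address sanitization enabled (e.g. `odin build .
-sanitize:address`, assuming a target where ASan is supported).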
Diffstat (limited to 'core/mem')
-rw-r--r--  core/mem/rollback_stack_allocator.odin  |  50
-rw-r--r--  core/mem/tlsf/tlsf.odin                 |   2
-rw-r--r--  core/mem/tlsf/tlsf_internal.odin        |  96
-rw-r--r--  core/mem/tracking_allocator.odin        |  11
-rw-r--r--  core/mem/virtual/arena.odin             |  43
-rw-r--r--  core/mem/virtual/virtual.odin           |  25
-rw-r--r--  core/mem/virtual/virtual_platform.odin  |   3
-rw-r--r--  core/mem/virtual/virtual_windows.odin   |  12
8 files changed, 157 insertions, 85 deletions
diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin
index 43ef10fe9..a00131b7f 100644
--- a/core/mem/rollback_stack_allocator.odin
+++ b/core/mem/rollback_stack_allocator.odin
@@ -1,6 +1,7 @@
package mem
import "base:runtime"
+import "base:sanitizer"
/*
Rollback stack default block size.
@@ -47,14 +48,14 @@ Rollback_Stack :: struct {
block_allocator: Allocator,
}
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
start := raw_data(block.buffer)
end := start[block.offset:]
return start < ptr && ptr <= end
}
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
parent: ^Rollback_Stack_Block,
block: ^Rollback_Stack_Block,
@@ -71,7 +72,7 @@ rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
return nil, nil, nil, .Invalid_Pointer
}
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
block: ^Rollback_Stack_Block,
header: ^Rollback_Stack_Header,
@@ -86,9 +87,10 @@ rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
return nil, nil, false
}
-@(private="file")
+@(private="file", no_sanitize_address)
rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
header := header
+
for block.offset > 0 && header.is_free {
block.offset = header.prev_offset
block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
@@ -99,9 +101,10 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_
/*
Free memory to a rollback stack allocator.
*/
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
parent, block, header := rb_find_ptr(stack, ptr) or_return
+
if header.is_free {
return .Invalid_Pointer
}
@@ -120,7 +123,7 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
/*
Free all memory owned by the rollback stack allocator.
*/
-@(private="file")
+@(private="file", no_sanitize_address)
rb_free_all :: proc(stack: ^Rollback_Stack) {
for block := stack.head.next_block; block != nil; /**/ {
next_block := block.next_block
@@ -131,12 +134,13 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
stack.head.next_block = nil
stack.head.last_alloc = nil
stack.head.offset = 0
+ sanitizer.address_poison(stack.head.buffer)
}
/*
Allocate memory using the rollback stack allocator.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_alloc :: proc(
stack: ^Rollback_Stack,
size: int,
@@ -153,7 +157,7 @@ rb_alloc :: proc(
/*
Allocate memory using the rollback stack allocator.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_alloc_bytes :: proc(
stack: ^Rollback_Stack,
size: int,
@@ -170,7 +174,7 @@ rb_alloc_bytes :: proc(
/*
Allocate non-initialized memory using the rollback stack allocator.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_alloc_non_zeroed :: proc(
stack: ^Rollback_Stack,
size: int,
@@ -184,7 +188,7 @@ rb_alloc_non_zeroed :: proc(
/*
Allocate non-initialized memory using the rollback stack allocator.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_alloc_bytes_non_zeroed :: proc(
stack: ^Rollback_Stack,
size: int,
@@ -194,6 +198,7 @@ rb_alloc_bytes_non_zeroed :: proc(
assert(size >= 0, "Size must be positive or zero.", loc)
assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc)
parent: ^Rollback_Stack_Block
+
for block := stack.head; /**/; block = block.next_block {
when !ODIN_DISABLE_ASSERT {
allocated_new_block: bool
@@ -235,7 +240,9 @@ rb_alloc_bytes_non_zeroed :: proc(
// Prevent any further allocations on it.
block.offset = cast(uintptr)len(block.buffer)
}
- #no_bounds_check return ptr[:size], nil
+ res := ptr[:size]
+ sanitizer.address_unpoison(res)
+ return res, nil
}
return nil, .Out_Of_Memory
}
@@ -243,7 +250,7 @@ rb_alloc_bytes_non_zeroed :: proc(
/*
Resize an allocation owned by the rollback stack allocator.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_resize :: proc(
stack: ^Rollback_Stack,
old_ptr: rawptr,
@@ -266,7 +273,7 @@ rb_resize :: proc(
/*
Resize an allocation owned by the rollback stack allocator.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_resize_bytes :: proc(
stack: ^Rollback_Stack,
old_memory: []byte,
@@ -289,7 +296,7 @@ rb_resize_bytes :: proc(
Resize an allocation owned by the rollback stack allocator without explicit
zero-initialization.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_resize_non_zeroed :: proc(
stack: ^Rollback_Stack,
old_ptr: rawptr,
@@ -306,7 +313,7 @@ rb_resize_non_zeroed :: proc(
Resize an allocation owned by the rollback stack allocator without explicit
zero-initialization.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rb_resize_bytes_non_zeroed :: proc(
stack: ^Rollback_Stack,
old_memory: []byte,
@@ -330,7 +337,9 @@ rb_resize_bytes_non_zeroed :: proc(
if len(block.buffer) <= stack.block_size {
block.offset += cast(uintptr)size - cast(uintptr)old_size
}
- #no_bounds_check return (ptr)[:size], nil
+ res := (ptr)[:size]
+ sanitizer.address_unpoison(res)
+ #no_bounds_check return res, nil
}
}
}
@@ -340,7 +349,7 @@ rb_resize_bytes_non_zeroed :: proc(
return
}
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
block = cast(^Rollback_Stack_Block)raw_data(buffer)
@@ -351,6 +360,7 @@ rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stac
/*
Initialize the rollback stack allocator using a fixed backing buffer.
*/
+@(no_sanitize_address)
rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
@@ -365,6 +375,7 @@ rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, loc
/*
Initialize the rollback stack allocator using a backing block allocator.
*/
+@(no_sanitize_address)
rollback_stack_init_dynamic :: proc(
stack: ^Rollback_Stack,
block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -396,6 +407,7 @@ rollback_stack_init :: proc {
/*
Destroy a rollback stack.
*/
+@(no_sanitize_address)
rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
if stack.block_allocator.procedure != nil {
rb_free_all(stack)
@@ -435,7 +447,7 @@ from the last allocation backwards.
Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
the requested alignment.
*/
-@(require_results)
+@(require_results, no_sanitize_address)
rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
return Allocator {
data = stack,
@@ -443,7 +455,7 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
}
}
-@(require_results)
+@(require_results, no_sanitize_address)
rollback_stack_allocator_proc :: proc(
allocator_data: rawptr,
mode: Allocator_Mode,
diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin
index 4ce6e54d9..0ae8c28e0 100644
--- a/core/mem/tlsf/tlsf.odin
+++ b/core/mem/tlsf/tlsf.odin
@@ -198,4 +198,4 @@ fls :: proc "contextless" (word: u32) -> (bit: i32) {
fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
N :: (size_of(uint) * 8) - 1
return i32(N - intrinsics.count_leading_zeros(size))
-}
\ No newline at end of file
+}
diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin
index f8a9bf60c..89b875679 100644
--- a/core/mem/tlsf/tlsf_internal.odin
+++ b/core/mem/tlsf/tlsf_internal.odin
@@ -10,6 +10,7 @@
package mem_tlsf
import "base:intrinsics"
+import "base:sanitizer"
import "base:runtime"
// log2 of number of linear subdivisions of block sizes.
@@ -209,6 +210,8 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) ->
return nil, .Out_Of_Memory
}
+ sanitizer.address_poison(new_pool_buf)
+
// Allocate a new link in the `control.pool` tracking structure.
new_pool := new_clone(Pool{
data = new_pool_buf,
@@ -254,7 +257,7 @@ alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) ->
return block_prepare_used(control, block, adjust)
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
res, err = alloc_bytes_non_zeroed(control, size, align)
if err == nil {
@@ -273,6 +276,7 @@ free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
block := block_from_ptr(ptr)
assert(!block_is_free(block), "block already marked as free") // double free
+ sanitizer.address_poison(ptr, block.size)
block_mark_as_free(block)
block = block_merge_prev(control, block)
block = block_merge_next(control, block)
@@ -316,6 +320,7 @@ resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, align
block_trim_used(control, block, adjust)
res = ([^]byte)(ptr)[:new_size]
+ sanitizer.address_unpoison(res)
if min_size < new_size {
to_zero := ([^]byte)(ptr)[min_size:new_size]
@@ -374,95 +379,96 @@ resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size:
NOTE: TLSF spec relies on ffs/fls returning a value in the range 0..31.
*/
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
}
-@(private)
+@(private, no_sanitize_address)
block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
old_size := block.size
block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
return block_size(block) == 0
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
}
-@(private)
+@(private, no_sanitize_address)
block_set_free :: proc "contextless" (block: ^Block_Header) {
block.size |= BLOCK_HEADER_FREE
}
-@(private)
+@(private, no_sanitize_address)
block_set_used :: proc "contextless" (block: ^Block_Header) {
block.size &~= BLOCK_HEADER_FREE
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
}
-@(private)
+@(private, no_sanitize_address)
block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
block.size |= BLOCK_HEADER_PREV_FREE
}
-@(private)
+@(private, no_sanitize_address)
block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
block.size &~= BLOCK_HEADER_PREV_FREE
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) {
return rawptr(uintptr(block) + BLOCK_START_OFFSET)
}
// Return location of next block after block of given size.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
return (^Block_Header)(uintptr(ptr) + uintptr(size))
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
return (^Block_Header)(uintptr(ptr) - uintptr(size))
}
// Return location of previous block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
assert(block_is_prev_free(block), "previous block must be free")
+
return block.prev_phys_block
}
// Return location of next existing block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
}
// Link a new block with its physical neighbor, return the neighbor.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
next = block_next(block)
next.prev_phys_block = block
return
}
-@(private)
+@(private, no_sanitize_address)
block_mark_as_free :: proc(block: ^Block_Header) {
// Link the block to the next block, first.
next := block_link_next(block)
@@ -470,26 +476,26 @@ block_mark_as_free :: proc(block: ^Block_Header) {
block_set_free(block)
}
-@(private)
+@(private, no_sanitize_address)
block_mark_as_used :: proc(block: ^Block_Header) {
next := block_next(block)
block_set_prev_used(next)
block_set_used(block)
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
align_up :: proc(x, align: uint) -> (aligned: uint) {
assert(0 == (align & (align - 1)), "must align to a power of two")
return (x + (align - 1)) &~ (align - 1)
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
align_down :: proc(x, align: uint) -> (aligned: uint) {
assert(0 == (align & (align - 1)), "must align to a power of two")
return x - (x & (align - 1))
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
assert(0 == (align & (align - 1)), "must align to a power of two")
align_mask := uintptr(align) - 1
@@ -499,7 +505,7 @@ align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
}
// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
if size == 0 {
return 0
@@ -513,7 +519,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
}
// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
if size == 0 {
return 0, nil
@@ -531,7 +537,7 @@ adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err:
// TLSF utility functions. In most cases these are direct translations of
// the documentation in the research paper.
-@(optimization_mode="favor_size", private, require_results)
+@(optimization_mode="favor_size", private, require_results, no_sanitize_address)
mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
if size < SMALL_BLOCK_SIZE {
// Store small blocks in first list.
@@ -544,7 +550,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
return
}
-@(optimization_mode="favor_size", private, require_results)
+@(optimization_mode="favor_size", private, require_results, no_sanitize_address)
mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
rounded = size
if size >= SMALL_BLOCK_SIZE {
@@ -555,12 +561,12 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
}
// This version rounds up to the next block size (for allocations)
-@(optimization_mode="favor_size", private, require_results)
+@(optimization_mode="favor_size", private, require_results, no_sanitize_address)
mapping_search :: proc(size: uint) -> (fl, sl: i32) {
return mapping_insert(mapping_round(size))
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
// First, search for a block in the list associated with the given fl/sl index.
fl := fli^; sl := sli^
@@ -587,7 +593,7 @@ search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^B
}
// Remove a free block from the free list.
-@(private)
+@(private, no_sanitize_address)
remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
prev := block.prev_free
next := block.next_free
@@ -613,7 +619,7 @@ remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
}
// Insert a free block into the free block list.
-@(private)
+@(private, no_sanitize_address)
insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
current := control.blocks[fl][sl]
assert(current != nil, "free lists cannot have a nil entry")
@@ -631,26 +637,26 @@ insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl
}
// Remove a given block from the free list.
-@(private)
+@(private, no_sanitize_address)
block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
fl, sl := mapping_insert(block_size(block))
remove_free_block(control, block, fl, sl)
}
// Insert a given block into the free list.
-@(private)
+@(private, no_sanitize_address)
block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
fl, sl := mapping_insert(block_size(block))
insert_free_block(control, block, fl, sl)
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
return block_size(block) >= size_of(Block_Header) + size
}
// Split a block into two, the second of which is free.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
// Calculate the amount of space left in the remaining block.
remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
@@ -671,9 +677,10 @@ block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Head
}
// Absorb a free block's storage into an adjacent previous free block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
assert(!block_is_last(prev), "previous block can't be last")
+
// Note: Leaves flags untouched.
prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD
_ = block_link_next(prev)
@@ -681,7 +688,7 @@ block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^B
}
// Merge a just-freed block with an adjacent previous free block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
merged = block
if (block_is_prev_free(block)) {
@@ -695,7 +702,7 @@ block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
}
// Merge a just-freed block with an adjacent free block.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
merged = block
next := block_next(block)
@@ -710,7 +717,7 @@ block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged:
}
// Trim any trailing block space off the end of a free block, return to pool.
-@(private)
+@(private, no_sanitize_address)
block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
assert(block_is_free(block), "block must be free")
if (block_can_split(block, size)) {
@@ -722,7 +729,7 @@ block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
}
// Trim any trailing block space off the end of a used block, return to pool.
-@(private)
+@(private, no_sanitize_address)
block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
assert(!block_is_free(block), "Block must be used")
if (block_can_split(block, size)) {
@@ -736,7 +743,7 @@ block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
}
// Trim leading block space, return to pool.
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
remaining = block
if block_can_split(block, size) {
@@ -750,7 +757,7 @@ block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size:
return remaining
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
fl, sl: i32
if size != 0 {
@@ -774,13 +781,14 @@ block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Hea
return block
}
-@(private, require_results)
+@(private, require_results, no_sanitize_address)
block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
if block != nil {
assert(size != 0, "Size must be non-zero")
block_trim_free(control, block, size)
block_mark_as_used(block)
res = ([^]byte)(block_to_ptr(block))[:size]
+ sanitizer.address_unpoison(res)
}
return
-}
\ No newline at end of file
+}
diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin
index 25c547471..01080075e 100644
--- a/core/mem/tracking_allocator.odin
+++ b/core/mem/tracking_allocator.odin
@@ -64,6 +64,7 @@ This procedure initializes the tracking allocator `t` with a backing allocator
specified with `backing_allocator`. The `internals_allocator` will be used to
allocate the tracked data.
*/
+@(no_sanitize_address)
tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
t.backing = backing_allocator
t.allocation_map.allocator = internals_allocator
@@ -77,6 +78,7 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc
/*
Destroy the tracking allocator.
*/
+@(no_sanitize_address)
tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
delete(t.allocation_map)
delete(t.bad_free_array)
@@ -90,6 +92,7 @@ This procedure clears the tracked data from a tracking allocator.
**Note**: This procedure clears only the current allocation data while keeping
the totals intact.
*/
+@(no_sanitize_address)
tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -103,6 +106,7 @@ Reset the tracking allocator.
Reset all of a Tracking Allocator's allocation data back to zero.
*/
+@(no_sanitize_address)
tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -124,6 +128,7 @@ Override Tracking_Allocator.bad_free_callback to have something else happen. For
example, you can use tracking_allocator_bad_free_callback_add_to_array to return
the tracking allocator to the old behavior, where the bad_free_array was used.
*/
+@(no_sanitize_address)
tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) {
runtime.print_caller_location(location)
runtime.print_string(" Tracking allocator error: Bad free of pointer ")
@@ -136,6 +141,7 @@ tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memor
Alternative behavior for a bad free: Store in `bad_free_array`. If you use this,
then you must make sure to check Tracking_Allocator.bad_free_array at some point.
*/
+@(no_sanitize_address)
tracking_allocator_bad_free_callback_add_to_array :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) {
append(&t.bad_free_array, Tracking_Allocator_Bad_Free_Entry {
memory = memory,
@@ -175,7 +181,7 @@ Example:
}
}
*/
-@(require_results)
+@(require_results, no_sanitize_address)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
return Allocator{
data = data,
@@ -183,6 +189,7 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
}
}
+@(no_sanitize_address)
tracking_allocator_proc :: proc(
allocator_data: rawptr,
mode: Allocator_Mode,
@@ -191,6 +198,7 @@ tracking_allocator_proc :: proc(
old_size: int,
loc := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
+ @(no_sanitize_address)
track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
data.total_memory_allocated += i64(entry.size)
data.total_allocation_count += 1
@@ -200,6 +208,7 @@ tracking_allocator_proc :: proc(
}
}
+ @(no_sanitize_address)
track_free :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
data.total_memory_freed += i64(entry.size)
data.total_free_count += 1
diff --git a/core/mem/virtual/arena.odin b/core/mem/virtual/arena.odin
index 4fc2e0e35..4e1cc2466 100644
--- a/core/mem/virtual/arena.odin
+++ b/core/mem/virtual/arena.odin
@@ -3,6 +3,8 @@ package mem_virtual
import "core:mem"
import "core:sync"
+import "base:sanitizer"
+
Arena_Kind :: enum uint {
Growing = 0, // Chained memory blocks (singly linked list).
Static = 1, // Fixed reservation sized.
@@ -43,7 +45,7 @@ DEFAULT_ARENA_STATIC_RESERVE_SIZE :: mem.Gigabyte when size_of(uintptr) == 8 els
// Initialization of an `Arena` to be a `.Growing` variant.
// A growing arena is a linked list of `Memory_Block`s allocated with virtual memory.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (err: Allocator_Error) {
arena.kind = .Growing
arena.curr_block = memory_block_alloc(0, reserved, {}) or_return
@@ -53,24 +55,26 @@ arena_init_growing :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_GROWING
if arena.minimum_block_size == 0 {
arena.minimum_block_size = reserved
}
+ sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
return
}
// Initialization of an `Arena` to be a `.Static` variant.
// A static arena contains a single `Memory_Block` allocated with virtual memory.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_init_static :: proc(arena: ^Arena, reserved: uint = DEFAULT_ARENA_STATIC_RESERVE_SIZE, commit_size: uint = DEFAULT_ARENA_STATIC_COMMIT_SIZE) -> (err: Allocator_Error) {
arena.kind = .Static
arena.curr_block = memory_block_alloc(commit_size, reserved, {}) or_return
arena.total_used = 0
arena.total_reserved = arena.curr_block.reserved
+ sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
return
}
// Initialization of an `Arena` to be a `.Buffer` variant.
// A buffer arena contains a single `Memory_Block` created from a user-provided []byte.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Error) {
if len(buffer) < size_of(Memory_Block) {
return .Out_Of_Memory
@@ -78,7 +82,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro
arena.kind = .Buffer
- mem.zero_slice(buffer)
+ sanitizer.address_poison(buffer[:])
block_base := raw_data(buffer)
block := (^Memory_Block)(block_base)
@@ -94,7 +98,7 @@ arena_init_buffer :: proc(arena: ^Arena, buffer: []byte) -> (err: Allocator_Erro
}
// Allocates memory from the provided arena.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_location) -> (data: []byte, err: Allocator_Error) {
assert(alignment & (alignment-1) == 0, "non-power of two alignment", loc)
@@ -158,10 +162,13 @@ arena_alloc :: proc(arena: ^Arena, size: uint, alignment: uint, loc := #caller_l
data, err = alloc_from_memory_block(arena.curr_block, size, alignment, default_commit_size=0)
arena.total_used = arena.curr_block.used
}
+
+ sanitizer.address_unpoison(data)
return
}
// Resets the memory of a Static or Buffer arena to a specific `position` (offset) and zeroes the previously used memory.
+@(no_sanitize_address)
arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location) -> bool {
sync.mutex_guard(&arena.mutex)
@@ -175,6 +182,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location)
mem.zero_slice(arena.curr_block.base[arena.curr_block.used:][:prev_pos-pos])
}
arena.total_used = arena.curr_block.used
+ sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
return true
} else if pos == 0 {
arena.total_used = 0
@@ -184,6 +192,7 @@ arena_static_reset_to :: proc(arena: ^Arena, pos: uint, loc := #caller_location)
}
// Frees the last memory block of a Growing Arena
+@(no_sanitize_address)
arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_location) {
if free_block := arena.curr_block; free_block != nil {
assert(arena.kind == .Growing, "expected a .Growing arena", loc)
@@ -191,11 +200,13 @@ arena_growing_free_last_memory_block :: proc(arena: ^Arena, loc := #caller_locat
arena.total_reserved -= free_block.reserved
arena.curr_block = free_block.prev
+ sanitizer.address_poison(free_block.base[:free_block.committed])
memory_block_dealloc(free_block)
}
}
// Deallocates all but the first memory block of the arena and resets the allocator's usage to 0.
+@(no_sanitize_address)
arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
switch arena.kind {
case .Growing:
@@ -208,7 +219,9 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
if arena.curr_block != nil {
curr_block_used := int(arena.curr_block.used)
arena.curr_block.used = 0
+ sanitizer.address_unpoison(arena.curr_block.base[:curr_block_used])
mem.zero(arena.curr_block.base, curr_block_used)
+ sanitizer.address_poison(arena.curr_block.base[:arena.curr_block.committed])
}
arena.total_used = 0
case .Static, .Buffer:
@@ -219,6 +232,7 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
// Frees all of the memory allocated by the arena and zeroes all of the arena's values.
// A buffer-based arena does not `delete` the provided `[]byte` buffer.
+@(no_sanitize_address)
arena_destroy :: proc(arena: ^Arena, loc := #caller_location) {
sync.mutex_guard(&arena.mutex)
switch arena.kind {
@@ -250,7 +264,7 @@ arena_static_bootstrap_new :: proc{
}
// Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) {
bootstrap: Arena
bootstrap.kind = .Growing
@@ -266,13 +280,13 @@ arena_growing_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintp
}
// Ability to bootstrap allocate a struct with an arena within the struct itself using the growing variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_growing_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, minimum_block_size: uint = DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE) -> (ptr: ^T, err: Allocator_Error) {
return arena_growing_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), minimum_block_size)
}
// Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintptr, reserved: uint) -> (ptr: ^T, err: Allocator_Error) {
bootstrap: Arena
bootstrap.kind = .Static
@@ -288,19 +302,20 @@ arena_static_bootstrap_new_by_offset :: proc($T: typeid, offset_to_arena: uintpt
}
// Ability to bootstrap allocate a struct with an arena within the struct itself using the static variant strategy.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_static_bootstrap_new_by_name :: proc($T: typeid, $field_name: string, reserved: uint) -> (ptr: ^T, err: Allocator_Error) {
return arena_static_bootstrap_new_by_offset(T, offset_of_by_string(T, field_name), reserved)
}
// Create an `Allocator` from the provided `Arena`
-@(require_results)
+@(require_results, no_sanitize_address)
arena_allocator :: proc(arena: ^Arena) -> mem.Allocator {
return mem.Allocator{arena_allocator_proc, arena}
}
// The allocator procedure used by an `Allocator` produced by `arena_allocator`
+@(no_sanitize_address)
arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
size, alignment: int,
old_memory: rawptr, old_size: int,
@@ -334,6 +349,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
if size < old_size {
// shrink data in-place
data = old_data[:size]
+ sanitizer.address_poison(old_data[size:old_size])
return
}
@@ -347,6 +363,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
_ = alloc_from_memory_block(block, new_end - old_end, 1, default_commit_size=arena.default_commit_size) or_return
arena.total_used += block.used - prev_used
data = block.base[start:new_end]
+ sanitizer.address_unpoison(data)
return
}
}
@@ -357,6 +374,7 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
return
}
copy(new_memory, old_data[:old_size])
+ sanitizer.address_poison(old_data[:old_size])
return new_memory, nil
case .Query_Features:
set := (^mem.Allocator_Mode_Set)(old_memory)
@@ -382,7 +400,7 @@ Arena_Temp :: struct {
}
// Begins the section of temporary arena memory.
-@(require_results)
+@(require_results, no_sanitize_address)
arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena_Temp) {
assert(arena != nil, "nil arena", loc)
sync.mutex_guard(&arena.mutex)
@@ -397,6 +415,7 @@ arena_temp_begin :: proc(arena: ^Arena, loc := #caller_location) -> (temp: Arena
}
// Ends the section of temporary arena memory by resetting the memory to the stored position.
+@(no_sanitize_address)
arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
assert(temp.arena != nil, "nil arena", loc)
arena := temp.arena
@@ -432,6 +451,7 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
}
// Ignore the use of an `arena_temp_begin` entirely by __not__ resetting to the stored position.
+@(no_sanitize_address)
arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) {
assert(temp.arena != nil, "nil arena", loc)
arena := temp.arena
@@ -442,6 +462,7 @@ arena_temp_ignore :: proc(temp: Arena_Temp, loc := #caller_location) {
}
// Asserts that all uses of `Arena_Temp` on an `Arena` have been ended
+@(no_sanitize_address)
arena_check_temp :: proc(arena: ^Arena, loc := #caller_location) {
assert(arena.temp_count == 0, "Arena_Temp has not been ended", loc)
}
diff --git a/core/mem/virtual/virtual.odin b/core/mem/virtual/virtual.odin
index 4afc33813..031fb721a 100644
--- a/core/mem/virtual/virtual.odin
+++ b/core/mem/virtual/virtual.odin
@@ -2,6 +2,7 @@ package mem_virtual
import "core:mem"
import "base:intrinsics"
+import "base:sanitizer"
import "base:runtime"
_ :: runtime
@@ -14,27 +15,33 @@ platform_memory_init :: proc() {
Allocator_Error :: mem.Allocator_Error
-@(require_results)
+@(require_results, no_sanitize_address)
reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
return _reserve(size)
}
+@(no_sanitize_address)
commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
+ sanitizer.address_unpoison(data, size)
return _commit(data, size)
}
-@(require_results)
+@(require_results, no_sanitize_address)
reserve_and_commit :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
data = reserve(size) or_return
commit(raw_data(data), size) or_return
return
}
+@(no_sanitize_address)
decommit :: proc "contextless" (data: rawptr, size: uint) {
+ sanitizer.address_poison(data, size)
_decommit(data, size)
}
+@(no_sanitize_address)
release :: proc "contextless" (data: rawptr, size: uint) {
+ sanitizer.address_unpoison(data, size)
_release(data, size)
}
@@ -46,13 +53,11 @@ Protect_Flag :: enum u32 {
Protect_Flags :: distinct bit_set[Protect_Flag; u32]
Protect_No_Access :: Protect_Flags{}
+@(no_sanitize_address)
protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool {
return _protect(data, size, flags)
}
-
-
-
Memory_Block :: struct {
prev: ^Memory_Block,
base: [^]byte,
@@ -66,13 +71,13 @@ Memory_Block_Flag :: enum u32 {
Memory_Block_Flags :: distinct bit_set[Memory_Block_Flag; u32]
-@(private="file", require_results)
+@(private="file", require_results, no_sanitize_address)
align_formula :: #force_inline proc "contextless" (size, align: uint) -> uint {
result := size + align-1
return result - result%align
}
-@(require_results)
+@(require_results, no_sanitize_address)
memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags: Memory_Block_Flags = {}) -> (block: ^Memory_Block, err: Allocator_Error) {
page_size := DEFAULT_PAGE_SIZE
assert(mem.is_power_of_two(uintptr(page_size)))
@@ -116,8 +121,9 @@ memory_block_alloc :: proc(committed, reserved: uint, alignment: uint = 0, flags
return &pmblock.block, nil
}
-@(require_results)
+@(require_results, no_sanitize_address)
alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint, default_commit_size: uint = 0) -> (data: []byte, err: Allocator_Error) {
+ @(no_sanitize_address)
calc_alignment_offset :: proc "contextless" (block: ^Memory_Block, alignment: uintptr) -> uint {
alignment_offset := uint(0)
ptr := uintptr(block.base[block.used:])
@@ -128,6 +134,7 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint,
return alignment_offset
}
+ @(no_sanitize_address)
do_commit_if_necessary :: proc(block: ^Memory_Block, size: uint, default_commit_size: uint) -> (err: Allocator_Error) {
if block.committed - block.used < size {
pmblock := (^Platform_Memory_Block)(block)
@@ -172,10 +179,12 @@ alloc_from_memory_block :: proc(block: ^Memory_Block, min_size, alignment: uint,
data = block.base[block.used+alignment_offset:][:min_size]
block.used += size
+ sanitizer.address_unpoison(data)
return
}
+@(no_sanitize_address)
memory_block_dealloc :: proc(block_to_free: ^Memory_Block) {
if block := (^Platform_Memory_Block)(block_to_free); block != nil {
platform_memory_free(block)
diff --git a/core/mem/virtual/virtual_platform.odin b/core/mem/virtual/virtual_platform.odin
index 31e9cfca8..c9dde4e9d 100644
--- a/core/mem/virtual/virtual_platform.odin
+++ b/core/mem/virtual/virtual_platform.odin
@@ -7,6 +7,7 @@ Platform_Memory_Block :: struct {
reserved: uint,
}
+@(no_sanitize_address)
platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (block: ^Platform_Memory_Block, err: Allocator_Error) {
to_commit, to_reserve := to_commit, to_reserve
to_reserve = max(to_commit, to_reserve)
@@ -26,12 +27,14 @@ platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint) -> (bl
}
+@(no_sanitize_address)
platform_memory_free :: proc "contextless" (block: ^Platform_Memory_Block) {
if block != nil {
release(block, block.reserved)
}
}
+@(no_sanitize_address)
platform_memory_commit :: proc "contextless" (block: ^Platform_Memory_Block, to_commit: uint) -> (err: Allocator_Error) {
if to_commit < block.committed {
return nil
diff --git a/core/mem/virtual/virtual_windows.odin b/core/mem/virtual/virtual_windows.odin
index acd30ae33..0da8498d5 100644
--- a/core/mem/virtual/virtual_windows.odin
+++ b/core/mem/virtual/virtual_windows.odin
@@ -83,6 +83,8 @@ foreign Kernel32 {
dwNumberOfBytesToMap: uint,
) -> rawptr ---
}
+
+@(no_sanitize_address)
_reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Error) {
result := VirtualAlloc(nil, size, MEM_RESERVE, PAGE_READWRITE)
if result == nil {
@@ -93,6 +95,7 @@ _reserve :: proc "contextless" (size: uint) -> (data: []byte, err: Allocator_Err
return
}
+@(no_sanitize_address)
_commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
result := VirtualAlloc(data, size, MEM_COMMIT, PAGE_READWRITE)
if result == nil {
@@ -107,12 +110,18 @@ _commit :: proc "contextless" (data: rawptr, size: uint) -> Allocator_Error {
}
return nil
}
+
+@(no_sanitize_address)
_decommit :: proc "contextless" (data: rawptr, size: uint) {
VirtualFree(data, size, MEM_DECOMMIT)
}
+
+@(no_sanitize_address)
_release :: proc "contextless" (data: rawptr, size: uint) {
VirtualFree(data, 0, MEM_RELEASE)
}
+
+@(no_sanitize_address)
_protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags) -> bool {
pflags: u32
pflags = PAGE_NOACCESS
@@ -136,7 +145,7 @@ _protect :: proc "contextless" (data: rawptr, size: uint, flags: Protect_Flags)
}
-
+@(no_sanitize_address)
_platform_memory_init :: proc() {
sys_info: SYSTEM_INFO
GetSystemInfo(&sys_info)
@@ -147,6 +156,7 @@ _platform_memory_init :: proc() {
}
+@(no_sanitize_address)
_map_file :: proc "contextless" (fd: uintptr, size: i64, flags: Map_File_Flags) -> (data: []byte, error: Map_File_Error) {
page_flags: u32
if flags == {.Read} {