author    Karl Zylinski <karl@zylinski.se>  2024-09-17 19:36:17 +0200
committer Karl Zylinski <karl@zylinski.se>  2024-09-17 19:36:17 +0200
commit    093ade050445b3e348177e30fb1fc9d726f7b289 (patch)
tree      5122cfefa5fefbab9d27d5d8adacd8739eeeb5de /core
parent    3d7b92426081cd9f3197b13f7384a52dbac5379a (diff)
parent    6ef779cd5c8260b2e6979e676d28489fd53dd599 (diff)
Merge branch 'master' into file-tags-without-comments
Diffstat (limited to 'core')
-rw-r--r--  core/bytes/bytes.odin                   4
-rw-r--r--  core/mem/alloc.odin                     1002
-rw-r--r--  core/mem/allocators.odin                2322
-rw-r--r--  core/mem/doc.odin                       126
-rw-r--r--  core/mem/mem.odin                       485
-rw-r--r--  core/mem/mutex_allocator.odin           27
-rw-r--r--  core/mem/raw.odin                       96
-rw-r--r--  core/mem/rollback_stack_allocator.odin  347
-rw-r--r--  core/mem/tracking_allocator.odin        119
-rw-r--r--  core/odin/parser/parser.odin            10
-rw-r--r--  core/os/os_freebsd.odin                 2
-rw-r--r--  core/os/os_js.odin                      89
-rw-r--r--  core/os/os_netbsd.odin                  2
-rw-r--r--  core/strings/strings.odin               14
-rw-r--r--  core/sync/chan/chan.odin                83
-rw-r--r--  core/sync/extended.odin                 75
-rw-r--r--  core/sync/futex_darwin.odin             25
-rw-r--r--  core/sync/futex_freebsd.odin            8
-rw-r--r--  core/sync/futex_linux.odin              8
-rw-r--r--  core/sync/futex_netbsd.odin             8
-rw-r--r--  core/sync/futex_openbsd.odin            8
-rw-r--r--  core/sync/futex_wasm.odin               8
-rw-r--r--  core/sync/primitives.odin               20
-rw-r--r--  core/sync/primitives_atomic.odin        4
-rw-r--r--  core/sys/darwin/sync.odin               11
-rw-r--r--  core/sys/info/platform_darwin.odin      4
-rw-r--r--  core/testing/runner.odin                4
-rw-r--r--  core/testing/runner_windows.odin        22
-rw-r--r--  core/testing/signal_handler_libc.odin   11
-rw-r--r--  core/testing/testing.odin               12
-rw-r--r--  core/thread/thread.odin                 12
-rw-r--r--  core/thread/thread_pool.odin            1
-rw-r--r--  core/thread/thread_unix.odin            32
-rw-r--r--  core/thread/thread_windows.odin         6
34 files changed, 3926 insertions, 1081 deletions
diff --git a/core/bytes/bytes.odin b/core/bytes/bytes.odin
index 45eb44307..c0d25bcce 100644
--- a/core/bytes/bytes.odin
+++ b/core/bytes/bytes.odin
@@ -334,7 +334,7 @@ Inputs:
Returns:
- index: The index of the byte `c`, or -1 if it was not found.
*/
-index_byte :: proc(s: []byte, c: byte) -> (index: int) #no_bounds_check {
+index_byte :: proc "contextless" (s: []byte, c: byte) -> (index: int) #no_bounds_check {
i, l := 0, len(s)
// Guard against small strings. On modern systems, it is ALWAYS
@@ -469,7 +469,7 @@ Inputs:
Returns:
- index: The index of the byte `c`, or -1 if it was not found.
*/
-last_index_byte :: proc(s: []byte, c: byte) -> int #no_bounds_check {
+last_index_byte :: proc "contextless" (s: []byte, c: byte) -> int #no_bounds_check {
i := len(s)
// Guard against small strings. On modern systems, it is ALWAYS
diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin
index e51d971e1..fac58daaf 100644
--- a/core/mem/alloc.odin
+++ b/core/mem/alloc.odin
@@ -2,109 +2,711 @@ package mem
import "base:runtime"
-// NOTE(bill, 2019-12-31): These are defined in `package runtime` as they are used in the `context`. This is to prevent an import definition cycle.
-Allocator_Mode :: runtime.Allocator_Mode
+// NOTE(bill, 2019-12-31): These are defined in `package runtime` as they are used in the `context`. This is to prevent an import definition cycle.
+
/*
-Allocator_Mode :: enum byte {
- Alloc,
- Free,
- Free_All,
- Resize,
- Query_Features,
- Alloc_Non_Zeroed,
- Resize_Non_Zeroed,
-}
+A request to an allocator procedure.
+
+This type represents the kind of allocation request made to an allocator
+procedure. There is one allocator procedure per allocator, and this value is
+used to discriminate between the different functions of the allocator.
+
+The type is defined as follows:
+
+ Allocator_Mode :: enum byte {
+ Alloc,
+ Alloc_Non_Zeroed,
+ Free,
+ Free_All,
+ Resize,
+ Resize_Non_Zeroed,
+ Query_Features,
+ }
+
+Depending on which value is used, the allocator procedure will perform different
+functions:
+
+- `Alloc`: Allocates a memory region with a given `size` and `alignment`.
+- `Alloc_Non_Zeroed`: Same as `Alloc`, without explicit zero-initialization of
+  the memory region.
+- `Free`: Frees a memory region located at address `ptr` with a given `size`.
+- `Free_All`: Frees all memory allocated using this allocator.
+- `Resize`: Resizes a memory region located at address `old_ptr` with size
+  `old_size` to be `size` bytes in length and have the specified `alignment`,
+  in case a re-allocation occurs.
+- `Resize_Non_Zeroed`: Same as `Resize`, without explicit zero-initialization.
*/
+Allocator_Mode :: runtime.Allocator_Mode
-Allocator_Mode_Set :: runtime.Allocator_Mode_Set
/*
-Allocator_Mode_Set :: distinct bit_set[Allocator_Mode];
+A set of allocator features.
+
+This type represents values that contain a set of features an allocator has.
+Currently the type is defined as follows:
+
+ Allocator_Mode_Set :: distinct bit_set[Allocator_Mode];
*/
+Allocator_Mode_Set :: runtime.Allocator_Mode_Set
-Allocator_Query_Info :: runtime.Allocator_Query_Info
/*
-Allocator_Query_Info :: struct {
- pointer: rawptr,
- size: Maybe(int),
- alignment: Maybe(int),
-}
+Allocator information.
+
+This type represents information about a given allocator at a specific point
+in time. Currently the type is defined as follows:
+
+ Allocator_Query_Info :: struct {
+ pointer: rawptr,
+ size: Maybe(int),
+ alignment: Maybe(int),
+ }
+
+- `pointer`: Pointer to a backing buffer.
+- `size`: Size of the backing buffer.
+- `alignment`: The allocator's alignment.
+
+If not applicable, any of these fields may be `nil`.
*/
+Allocator_Query_Info :: runtime.Allocator_Query_Info
+
+/*
+An allocation request error.
+
+This type represents the error values allocators may return upon requests.
+
+ Allocator_Error :: enum byte {
+ None = 0,
+ Out_Of_Memory = 1,
+ Invalid_Pointer = 2,
+ Invalid_Argument = 3,
+ Mode_Not_Implemented = 4,
+ }
+
+The meaning of the errors is as follows:
+
+- `None`: No error.
+- `Out_Of_Memory`: Either:
+  1. The allocator has run out of its backing buffer, or the requested
+     allocation size is too large to fit into a backing buffer.
+  2. An operating system error occurred during memory allocation.
+  3. The backing allocator was used to allocate a new backing buffer and the
+     backing allocator returned `Out_Of_Memory`.
+- `Invalid_Pointer`: The pointer referring to a memory region does not belong
+  to any of the allocator's backing buffers or does not point to a valid start
+  of an allocation made in that allocator.
+- `Invalid_Argument`: Can occur if one of the arguments makes it impossible to
+  satisfy a request (e.g. an alignment larger than the backing buffer of
+  the allocation).
+- `Mode_Not_Implemented`: The allocator does not support the specified
+  operation. For example, an arena does not support freeing individual
+  allocations.
+*/
Allocator_Error :: runtime.Allocator_Error
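
A caller-side sketch of handling these error values (illustrative only; assumes `import "core:fmt"` and `import "core:mem"`):

	report_alloc_error :: proc(err: mem.Allocator_Error) {
		switch err {
		case .None:
			// Success; nothing to report.
		case .Out_Of_Memory:
			fmt.eprintln("allocation failed: out of memory")
		case .Invalid_Pointer:
			fmt.eprintln("allocation failed: invalid pointer")
		case .Invalid_Argument:
			fmt.eprintln("allocation failed: invalid argument")
		case .Mode_Not_Implemented:
			fmt.eprintln("allocation failed: mode not implemented")
		}
	}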
+
/*
-Allocator_Error :: enum byte {
- None = 0,
- Out_Of_Memory = 1,
- Invalid_Pointer = 2,
- Invalid_Argument = 3,
- Mode_Not_Implemented = 4,
-}
+The allocator procedure.
+
+This type represents allocator procedures. An allocator procedure is a single
+procedure implementing all of the allocator's functions, such as allocating
+and freeing memory.
+
+Currently the type is defined as follows:
+
+ Allocator_Proc :: #type proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size: int,
+ alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ location: Source_Code_Location = #caller_location,
+ ) -> ([]byte, Allocator_Error);
+
+The function of this procedure and the meaning of parameters depends on the
+value of the `mode` parameter. For any operation the following constraints
+apply:
+
+- The `alignment` must be a power of two.
+- The `size` must be a positive integer.
+
+## 1. `.Alloc`, `.Alloc_Non_Zeroed`
+
+Allocates a memory region of size `size`, aligned on a boundary specified by
+`alignment`.
+
+**Inputs**:
+- `allocator_data`: Pointer to the allocator data.
+- `mode`: `.Alloc` or `.Alloc_Non_Zeroed`.
+- `size`: The desired size of the memory region.
+- `alignment`: The desired alignment of the allocation.
+- `old_memory`: Unused, should be `nil`.
+- `old_size`: Unused, should be 0.
+
+**Returns**:
+1. The memory region, if allocated successfully, or `nil` otherwise.
+2. An error, if allocation failed.
+
+**Note**: The nil allocator may return `nil`, even if no error is returned.
+Always check both the error and the allocated buffer.
+
+**Note**: The `.Alloc` mode is required to be implemented for an allocator
+and cannot return a `.Mode_Not_Implemented` error.
+
+## 2. `.Free`
+
+Frees a memory region located at the address specified by `old_memory`. If the
+allocator does not track sizes of allocations, the size should be specified in
+the `old_size` parameter.
+
+**Inputs**:
+- `allocator_data`: Pointer to the allocator data.
+- `mode`: `.Free`.
+- `size`: Unused, should be 0.
+- `alignment`: Unused, should be 0.
+- `old_memory`: Pointer to the memory region to free.
+- `old_size`: The size of the memory region to free. This parameter is optional
+ if the allocator keeps track of the sizes of allocations.
+
+**Returns**:
+1. `nil`.
+2. Error, if freeing failed.
+
+## 3. `.Free_All`
+
+Frees all allocations, associated with the allocator, making it available for
+further allocations using the same backing buffers.
+
+**Inputs**:
+- `allocator_data`: Pointer to the allocator data.
+- `mode`: `.Free_All`.
+- `size`: Unused, should be 0.
+- `alignment`: Unused, should be 0.
+- `old_memory`: Unused, should be `nil`.
+- `old_size`: Unused, should be `0`.
+
+**Returns**:
+1. `nil`.
+2. Error, if freeing failed.
+
+## 4. `.Resize`, `.Resize_Non_Zeroed`
+
+Resizes the memory region of size `old_size`, located at the address specified
+by `old_memory`, to have the new size `size`. The slice of the new memory
+region is returned from the procedure. The allocator may attempt to keep the
+new memory region at the same address as the previous allocation, however no
+such guarantee is made. Do not assume the new memory region will be at the
+same address as the old memory region.
+
+If the `old_memory` pointer is `nil`, this function acts just like `.Alloc` or
+`.Alloc_Non_Zeroed`, using `size` and `alignment` to allocate a new memory
+region.
+
+If `size` is `0`, the procedure acts just like `.Free`, freeing the
+memory region `old_size` bytes in length, located at the address specified by
+`old_memory`.
+
+If the `old_memory` pointer is not aligned to the boundary specified by
+`alignment`, the procedure relocates the buffer such that the reallocated
+buffer is aligned to the boundary specified by `alignment`.
+
+**Inputs**:
+- `allocator_data`: Pointer to the allocator data.
+- `mode`: `.Resize` or `.Resize_Non_Zeroed`.
+- `size`: The desired new size of the memory region.
+- `alignment`: The alignment of the new memory region, in case a re-allocation
+  occurs.
+- `old_memory`: The pointer to the memory region to resize.
+- `old_size`: The size of the memory region to resize. If the allocator
+  keeps track of the sizes of allocations, this parameter is optional.
+
+**Returns**:
+1. The slice of the memory region after the resize operation, if successful,
+   `nil` otherwise.
+2. An error, if the resize failed.
+
+**Note**: Some allocators may return `nil`, even if no error is returned.
+Always check both the error and the allocated buffer.
+
+**Note**: If `old_size` is `0` and `old_memory` is `nil`, this operation is a
+no-op, and should not return errors.
*/
Allocator_Proc :: runtime.Allocator_Proc
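
For illustration, a custom allocator procedure dispatches on `mode`. A minimal sketch (hypothetical, not part of this commit) that supports only `.Alloc` and `.Free` by delegating to `context.allocator`:

	my_allocator_proc :: proc(
		allocator_data: rawptr,
		mode: mem.Allocator_Mode,
		size, alignment: int,
		old_memory: rawptr,
		old_size: int,
		loc := #caller_location,
	) -> ([]byte, mem.Allocator_Error) {
		#partial switch mode {
		case .Alloc:
			return mem.alloc_bytes(size, alignment, context.allocator, loc)
		case .Free:
			return nil, mem.free_bytes(mem.byte_slice(old_memory, old_size), context.allocator, loc)
		}
		// Everything else is unsupported in this sketch.
		return nil, .Mode_Not_Implemented
	}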
+
/*
-Allocator_Proc :: #type proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, location: Source_Code_Location = #caller_location) -> ([]byte, Allocator_Error);
-*/
+Allocator.
+
+This type represents a generic interface for all allocators. Currently this
+type is defined as follows:
+
+	Allocator :: struct {
+ procedure: Allocator_Proc,
+ data: rawptr,
+ }
+
+- `procedure`: Pointer to the allocation procedure.
+- `data`: Pointer to the allocator data.
+*/
Allocator :: runtime.Allocator
+
/*
-Allocator :: struct {
- procedure: Allocator_Proc,
- data: rawptr,
-}
-*/
+Default alignment.
+
+This value is the default alignment used on all platforms, when the alignment
+is not specified explicitly.
+*/
DEFAULT_ALIGNMENT :: 2*align_of(rawptr)
+/*
+Default page size.
+
+This value is the default page size for the current platform.
+*/
DEFAULT_PAGE_SIZE ::
64 * 1024 when ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32 else
16 * 1024 when ODIN_OS == .Darwin && ODIN_ARCH == .arm64 else
4 * 1024
+/*
+Allocate memory.
+
+This function allocates `size` bytes of memory, aligned to a boundary specified
+by `alignment` using the allocator specified by `allocator`.
+
+If the `size` parameter is `0`, the operation is a no-op.
+
+**Inputs**:
+- `size`: The desired size of the allocated memory region.
+- `alignment`: The desired alignment of the allocated memory region.
+- `allocator`: The allocator to allocate from.
+
+**Returns**:
+1. Pointer to the allocated memory, or `nil` if allocation failed.
+2. Error, if the allocation failed.
+
+**Errors**:
+- `None`: If no error occurred.
+- `Out_Of_Memory`: Occurs when the allocator runs out of space in any of its
+  backing buffers, the backing allocator has run out of space, or an operating
+  system failure occurred.
+- `Invalid_Argument`: If the supplied `size` is negative or the alignment is
+  not a power of two.
+*/
@(require_results)
-alloc :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (rawptr, Allocator_Error) {
+alloc :: proc(
+ size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
data, err := runtime.mem_alloc(size, alignment, allocator, loc)
return raw_data(data), err
}
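
A short usage sketch pairing `alloc` with `free` (illustrative only; assumes `import "core:mem"`):

	alloc_example :: proc() {
		ptr, err := mem.alloc(64, 16)
		if ptr == nil || err != nil {
			return // handle the failure appropriately in real code
		}
		defer mem.free(ptr)
		// ... use the 64 zero-initialized bytes at ptr ...
	}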
+/*
+Allocate memory.
+
+This function allocates `size` bytes of memory, aligned to a boundary specified
+by `alignment` using the allocator specified by `allocator`.
+
+**Inputs**:
+- `size`: The desired size of the allocated memory region.
+- `alignment`: The desired alignment of the allocated memory region.
+- `allocator`: The allocator to allocate from.
+
+**Returns**:
+1. Slice of the allocated memory region, or `nil` if allocation failed.
+2. Error, if the allocation failed.
+
+**Errors**:
+- `None`: If no error occurred.
+- `Out_Of_Memory`: Occurs when the allocator runs out of space in any of its
+  backing buffers, the backing allocator has run out of space, or an operating
+  system failure occurred.
+- `Invalid_Argument`: If the supplied `size` is negative or the alignment is
+  not a power of two.
+*/
@(require_results)
-alloc_bytes :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+alloc_bytes :: proc(
+ size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
return runtime.mem_alloc(size, alignment, allocator, loc)
}
+/*
+Allocate non-zeroed memory.
+
+This function allocates `size` bytes of memory, aligned to a boundary specified
+by `alignment` using the allocator specified by `allocator`. This procedure
+does not explicitly zero-initialize the allocated memory region.
+
+**Inputs**:
+- `size`: The desired size of the allocated memory region.
+- `alignment`: The desired alignment of the allocated memory region.
+- `allocator`: The allocator to allocate from.
+
+**Returns**:
+1. Slice of the allocated memory region, or `nil` if allocation failed.
+2. Error, if the allocation failed.
+
+**Errors**:
+- `None`: If no error occurred.
+- `Out_Of_Memory`: Occurs when the allocator runs out of space in any of its
+  backing buffers, the backing allocator has run out of space, or an operating
+  system failure occurred.
+- `Invalid_Argument`: If the supplied `size` is negative or the alignment is
+  not a power of two.
+*/
@(require_results)
-alloc_bytes_non_zeroed :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+alloc_bytes_non_zeroed :: proc(
+ size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
return runtime.mem_alloc_non_zeroed(size, alignment, allocator, loc)
}
-free :: proc(ptr: rawptr, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
+/*
+Free memory.
+
+This procedure frees the memory region located at the address specified by `ptr`,
+allocated from the allocator specified by `allocator`.
+
+**Inputs**:
+- `ptr`: Pointer to the memory region to free.
+- `allocator`: The allocator to free to.
+
+**Returns**:
+- The error, if freeing failed.
+
+**Errors**:
+- `None`: When no error has occurred.
+- `Invalid_Pointer`: The specified pointer is not owned by the specified allocator,
+ or does not point to a valid allocation.
+- `Mode_Not_Implemented`: If the specified allocator does not support the
+  `.Free` mode.
+*/
+free :: proc(
+ ptr: rawptr,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.mem_free(ptr, allocator, loc)
}
-free_with_size :: proc(ptr: rawptr, byte_count: int, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
- return runtime.mem_free_with_size(ptr, byte_count, allocator, loc)
+/*
+Free a memory region.
+
+This procedure frees a memory region of `size` bytes, located at the address
+specified by `ptr`, allocated from the allocator specified by `allocator`.
+
+If the `size` parameter is `0`, this call is equivalent to `free()`.
+
+**Inputs**:
+- `ptr`: Pointer to the memory region to free.
+- `size`: The size of the memory region to free.
+- `allocator`: The allocator to free to.
+
+**Returns**:
+- The error, if freeing failed.
+
+**Errors**:
+- `None`: When no error has occurred.
+- `Invalid_Pointer`: The specified pointer is not owned by the specified allocator,
+ or does not point to a valid allocation.
+- `Mode_Not_Implemented`: If the specified allocator does not support the
+  `.Free` mode.
+*/
+free_with_size :: proc(
+ ptr: rawptr,
+ size: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> Allocator_Error {
+ return runtime.mem_free_with_size(ptr, size, allocator, loc)
}
-free_bytes :: proc(bytes: []byte, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
+/*
+Free a memory region.
+
+This procedure frees the memory region specified by `bytes`, allocated from the
+allocator specified by `allocator`.
+
+If the length of the specified slice is zero, the `.Invalid_Argument` error
+is returned.
+
+**Inputs**:
+- `bytes`: The memory region to free.
+- `allocator`: The allocator to free to.
+
+**Returns**:
+- The error, if freeing failed.
+
+**Errors**:
+- `None`: When no error has occurred.
+- `Invalid_Pointer`: The specified pointer is not owned by the specified allocator,
+ or does not point to a valid allocation.
+- `Mode_Not_Implemented`: If the specified allocator does not support the
+  `.Free` mode.
+*/
+free_bytes :: proc(
+ bytes: []byte,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.mem_free_bytes(bytes, allocator, loc)
}
+/*
+Free all allocations.
+
+This procedure frees all allocations made with the allocator specified by
+`allocator` back to that allocator, making it available for further allocations.
+
+**Inputs**:
+- `allocator`: The allocator to free to.
+
+**Errors**:
+- `None`: When no error has occurred.
+- `Mode_Not_Implemented`: If the specified allocator does not support the
+  `.Free_All` mode.
+*/
free_all :: proc(allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
return runtime.mem_free_all(allocator, loc)
}
+/*
+Resize a memory region.
+
+This procedure resizes a memory region, `old_size` bytes in size, located at
+the address specified by `ptr`, such that it has a new size, specified by
+`new_size`, and is aligned on a boundary specified by `alignment`.
+
+If the `ptr` parameter is `nil`, `resize()` acts just like `alloc()`, allocating
+`new_size` bytes, aligned on a boundary specified by `alignment`.
+
+If the `new_size` parameter is `0`, `resize()` acts just like `free()`, freeing
+the memory region `old_size` bytes in length, located at the address specified
+by `ptr`.
+
+If the pointer `ptr` is not aligned to the boundary specified by
+`alignment`, the procedure relocates the buffer such that the reallocated
+buffer is aligned to the boundary specified by `alignment`.
+
+**Inputs**:
+- `ptr`: Pointer to the memory region to resize.
+- `old_size`: Size of the memory region to resize.
+- `new_size`: The desired size of the resized memory region.
+- `alignment`: The desired alignment of the resized memory region.
+- `allocator`: The owner of the memory region to resize.
+
+**Returns**:
+1. The pointer to the resized memory region, if successful, `nil` otherwise.
+2. Error, if resize failed.
+
+**Errors**:
+- `None`: No error.
+- `Out_Of_Memory`: When the allocator's backing buffer or its backing
+  allocator does not have enough space to fit in an allocation with the new
+  size, or an operating system failure occurs.
+- `Invalid_Pointer`: The pointer referring to a memory region does not belong
+  to any of the allocator's backing buffers or does not point to a valid start
+  of an allocation made in that allocator.
+- `Invalid_Argument`: When `new_size` is negative, the alignment is not a
+  power of two, or the `old_size` argument is incorrect.
+- `Mode_Not_Implemented`: The allocator does not support the `.Resize` mode.
+
+**Note**: If `old_size` is `0` and `ptr` is `nil`, this operation is a
+no-op, and should not return errors.
+*/
@(require_results)
-resize :: proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (rawptr, Allocator_Error) {
+resize :: proc(
+ ptr: rawptr,
+ old_size: int,
+ new_size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
data, err := runtime.mem_resize(ptr, old_size, new_size, alignment, allocator, loc)
return raw_data(data), err
}
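
A usage sketch (illustrative only) showing that the region may move; only the returned pointer is valid afterwards:

	grow_example :: proc(old_ptr: rawptr, old_size: int) -> rawptr {
		new_ptr, err := mem.resize(old_ptr, old_size, 2*old_size)
		if err != nil {
			return nil
		}
		// new_ptr may differ from old_ptr; drop all pointers into the old region.
		return new_ptr
	}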
+/*
+Resize a memory region without zero-initialization.
+
+This procedure resizes a memory region, `old_size` bytes in size, located at
+the address specified by `ptr`, such that it has a new size, specified by
+`new_size`, and is aligned on a boundary specified by `alignment`.
+
+If the `ptr` parameter is `nil`, `resize_non_zeroed()` acts just like a
+non-zeroed `alloc()`, allocating `new_size` bytes, aligned on a boundary
+specified by `alignment`.
+
+If the `new_size` parameter is `0`, `resize_non_zeroed()` acts just like
+`free()`, freeing the memory region `old_size` bytes in length, located at the
+address specified by `ptr`.
+
+If the pointer `ptr` is not aligned to the boundary specified by
+`alignment`, the procedure relocates the buffer such that the reallocated
+buffer is aligned to the boundary specified by `alignment`.
+
+Unlike `resize()`, this procedure does not explicitly zero-initialize any new
+memory.
+
+**Inputs**:
+- `ptr`: Pointer to the memory region to resize.
+- `old_size`: Size of the memory region to resize.
+- `new_size`: The desired size of the resized memory region.
+- `alignment`: The desired alignment of the resized memory region.
+- `allocator`: The owner of the memory region to resize.
+
+**Returns**:
+1. The pointer to the resized memory region, if successful, `nil` otherwise.
+2. Error, if resize failed.
+
+**Errors**:
+- `None`: No error.
+- `Out_Of_Memory`: When the allocator's backing buffer or its backing
+  allocator does not have enough space to fit in an allocation with the new
+  size, or an operating system failure occurs.
+- `Invalid_Pointer`: The pointer referring to a memory region does not belong
+  to any of the allocator's backing buffers or does not point to a valid start
+  of an allocation made in that allocator.
+- `Invalid_Argument`: When `new_size` is negative, the alignment is not a
+  power of two, or the `old_size` argument is incorrect.
+- `Mode_Not_Implemented`: The allocator does not support the `.Resize` mode.
+
+**Note**: If `old_size` is `0` and `ptr` is `nil`, this operation is a
+no-op, and should not return errors.
+*/
@(require_results)
-resize_bytes :: proc(old_data: []byte, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+resize_non_zeroed :: proc(
+ ptr: rawptr,
+ old_size: int,
+ new_size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ data, err := runtime.non_zero_mem_resize(ptr, old_size, new_size, alignment, allocator, loc)
+ return raw_data(data), err
+}
+
+/*
+Resize a memory region.
+
+This procedure resizes the memory region specified by `old_data`, such that it
+has a new size, specified by `new_size`, and is aligned on a boundary
+specified by `alignment`.
+
+If the `old_data` parameter is `nil`, `resize_bytes()` acts just like
+`alloc_bytes()`, allocating `new_size` bytes, aligned on a boundary specified
+by `alignment`.
+
+If the `new_size` parameter is `0`, `resize_bytes()` acts just like
+`free_bytes()`, freeing the memory region specified by `old_data`.
+
+If the `old_data` region is not aligned to the boundary specified by
+`alignment`, the procedure relocates the buffer such that the reallocated
+buffer is aligned to the boundary specified by `alignment`.
+
+**Inputs**:
+- `old_data`: The memory region to resize.
+- `new_size`: The desired size of the resized memory region.
+- `alignment`: The desired alignment of the resized memory region.
+- `allocator`: The owner of the memory region to resize.
+
+**Returns**:
+1. The resized memory region, if successful, `nil` otherwise.
+2. Error, if resize failed.
+
+**Errors**:
+- `None`: No error.
+- `Out_Of_Memory`: When the allocator's backing buffer or its backing
+  allocator does not have enough space to fit in an allocation with the new
+  size, or an operating system failure occurs.
+- `Invalid_Pointer`: The pointer referring to a memory region does not belong
+  to any of the allocator's backing buffers or does not point to a valid start
+  of an allocation made in that allocator.
+- `Invalid_Argument`: When `new_size` is negative, the alignment is not a
+  power of two, or the length of `old_data` is incorrect.
+- `Mode_Not_Implemented`: The allocator does not support the `.Resize` mode.
+
+**Note**: If `old_data` is `nil`, this operation is a
+no-op, and should not return errors.
+*/
+@(require_results)
+resize_bytes :: proc(
+ old_data: []byte,
+ new_size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
return runtime.mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc)
}
+/*
+Resize a memory region without zero-initialization.
+
+This procedure resizes the memory region specified by `old_data`, such that it
+has a new size, specified by `new_size`, and is aligned on a boundary
+specified by `alignment`.
+
+If the `old_data` parameter is `nil`, `resize_bytes_non_zeroed()` acts just
+like `alloc_bytes_non_zeroed()`, allocating `new_size` bytes, aligned on a
+boundary specified by `alignment`.
+
+If the `new_size` parameter is `0`, `resize_bytes_non_zeroed()` acts just like
+`free_bytes()`, freeing the memory region specified by `old_data`.
+
+If the `old_data` region is not aligned to the boundary specified by
+`alignment`, the procedure relocates the buffer such that the reallocated
+buffer is aligned to the boundary specified by `alignment`.
+
+Unlike `resize_bytes()`, this procedure does not explicitly zero-initialize
+any new memory.
+
+**Inputs**:
+- `old_data`: The memory region to resize.
+- `new_size`: The desired size of the resized memory region.
+- `alignment`: The desired alignment of the resized memory region.
+- `allocator`: The owner of the memory region to resize.
+
+**Returns**:
+1. The resized memory region, if successful, `nil` otherwise.
+2. Error, if resize failed.
+
+**Errors**:
+- `None`: No error.
+- `Out_Of_Memory`: When the allocator's backing buffer or its backing
+  allocator does not have enough space to fit in an allocation with the new
+  size, or an operating system failure occurs.
+- `Invalid_Pointer`: The pointer referring to a memory region does not belong
+  to any of the allocator's backing buffers or does not point to a valid start
+  of an allocation made in that allocator.
+- `Invalid_Argument`: When `new_size` is negative, the alignment is not a
+  power of two, or the length of `old_data` is incorrect.
+- `Mode_Not_Implemented`: The allocator does not support the `.Resize` mode.
+
+**Note**: If `old_data` is `nil`, this operation is a
+no-op, and should not return errors.
+*/
+@(require_results)
+resize_bytes_non_zeroed :: proc(
+ old_data: []byte,
+ new_size: int,
+ alignment: int = DEFAULT_ALIGNMENT,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ return runtime.non_zero_mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc)
+}
+
+/*
+Query allocator features.
+*/
@(require_results)
query_features :: proc(allocator: Allocator, loc := #caller_location) -> (set: Allocator_Mode_Set) {
if allocator.procedure != nil {
@@ -114,8 +716,15 @@ query_features :: proc(allocator: Allocator, loc := #caller_location) -> (set: A
return nil
}
+/*
+Query allocator information.
+*/
@(require_results)
-query_info :: proc(pointer: rawptr, allocator: Allocator, loc := #caller_location) -> (props: Allocator_Query_Info) {
+query_info :: proc(
+ pointer: rawptr,
+ allocator: Allocator,
+ loc := #caller_location,
+) -> (props: Allocator_Query_Info) {
props.pointer = pointer
if allocator.procedure != nil {
allocator.procedure(allocator.data, .Query_Info, 0, 0, &props, 0, loc)
@@ -123,25 +732,62 @@ query_info :: proc(pointer: rawptr, allocator: Allocator, loc := #caller_locatio
return
}
-
-
-delete_string :: proc(str: string, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
+/*
+Free a string.
+*/
+delete_string :: proc(
+ str: string,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.delete_string(str, allocator, loc)
}
-delete_cstring :: proc(str: cstring, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
+
+/*
+Free a cstring.
+*/
+delete_cstring :: proc(
+ str: cstring,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.delete_cstring(str, allocator, loc)
}
-delete_dynamic_array :: proc(array: $T/[dynamic]$E, loc := #caller_location) -> Allocator_Error {
+
+/*
+Free a dynamic array.
+*/
+delete_dynamic_array :: proc(
+ array: $T/[dynamic]$E,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.delete_dynamic_array(array, loc)
}
-delete_slice :: proc(array: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> Allocator_Error {
+
+/*
+Free a slice.
+*/
+delete_slice :: proc(
+ array: $T/[]$E,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.delete_slice(array, allocator, loc)
}
-delete_map :: proc(m: $T/map[$K]$V, loc := #caller_location) -> Allocator_Error {
+
+/*
+Free a map.
+*/
+delete_map :: proc(
+ m: $T/map[$K]$V,
+ loc := #caller_location,
+) -> Allocator_Error {
return runtime.delete_map(m, loc)
}
-
+/*
+Free.
+*/
delete :: proc{
delete_string,
delete_cstring,
@@ -150,49 +796,177 @@ delete :: proc{
delete_map,
}
+/*
+Allocate a new object.
+
+This procedure allocates a new object of type `T` using an allocator specified
+by `allocator`, and returns a pointer to the allocated object, if allocated
+successfully, or `nil` otherwise.
+*/
@(require_results)
-new :: proc($T: typeid, allocator := context.allocator, loc := #caller_location) -> (^T, Allocator_Error) {
+new :: proc(
+ $T: typeid,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (^T, Allocator_Error) {
return new_aligned(T, align_of(T), allocator, loc)
}
+
+/*
+Allocate a new object with alignment.
+
+This procedure allocates a new object of type `T` using an allocator specified
+by `allocator`, and returns a pointer to the allocated object, aligned on a
+boundary specified by `alignment`, if allocated successfully, or `nil`
+otherwise.
+*/
@(require_results)
-new_aligned :: proc($T: typeid, alignment: int, allocator := context.allocator, loc := #caller_location) -> (t: ^T, err: Allocator_Error) {
+new_aligned :: proc(
+ $T: typeid,
+ alignment: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (t: ^T, err: Allocator_Error) {
return runtime.new_aligned(T, alignment, allocator, loc)
}
+
+/*
+Allocate a new object and initialize it with a value.
+
+This procedure allocates a new object of type `T` using an allocator specified
+by `allocator`, and returns a pointer to the allocated object, if allocated
+successfully, or `nil` otherwise. The allocated object is initialized with
+the value of `data`.
+*/
@(require_results)
-new_clone :: proc(data: $T, allocator := context.allocator, loc := #caller_location) -> (t: ^T, err: Allocator_Error) {
+new_clone :: proc(
+ data: $T,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (t: ^T, err: Allocator_Error) {
return runtime.new_clone(data, allocator, loc)
}
+/*
+Allocate a new slice with alignment.
+
+This procedure allocates a new slice of type `T` with length `len`, aligned
+on a boundary specified by `alignment` from an allocator specified by
+`allocator`, and returns the allocated slice.
+*/
@(require_results)
-make_aligned :: proc($T: typeid/[]$E, #any_int len: int, alignment: int, allocator := context.allocator, loc := #caller_location) -> (slice: T, err: Allocator_Error) {
+make_aligned :: proc(
+ $T: typeid/[]$E,
+ #any_int len: int,
+ alignment: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (slice: T, err: Allocator_Error) {
return runtime.make_aligned(T, len, alignment, allocator, loc)
}
+
+/*
+Allocate a new slice.
+
+This procedure allocates a new slice of type `T` with length `len`, from an
+allocator specified by `allocator`, and returns the allocated slice.
+*/
@(require_results)
-make_slice :: proc($T: typeid/[]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) {
+make_slice :: proc(
+ $T: typeid/[]$E,
+ #any_int len: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (T, Allocator_Error) {
return runtime.make_slice(T, len, allocator, loc)
}
+
+/*
+Allocate a dynamic array.
+
+This procedure creates a dynamic array of type `T`, with `allocator` as its
+backing allocator, and initial length and capacity of `0`.
+*/
@(require_results)
-make_dynamic_array :: proc($T: typeid/[dynamic]$E, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) {
+make_dynamic_array :: proc(
+ $T: typeid/[dynamic]$E,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (T, Allocator_Error) {
return runtime.make_dynamic_array(T, allocator, loc)
}
+
+/*
+Allocate a dynamic array with initial length.
+
+This procedure creates a dynamic array of type `T`, with `allocator` as its
+backing allocator, and the initial length and capacity specified by `len`.
+*/
@(require_results)
-make_dynamic_array_len :: proc($T: typeid/[dynamic]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) {
+make_dynamic_array_len :: proc(
+ $T: typeid/[dynamic]$E,
+ #any_int len: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (T, Allocator_Error) {
return runtime.make_dynamic_array_len_cap(T, len, len, allocator, loc)
}
+
+/*
+Allocate a dynamic array with initial length and capacity.
+
+This procedure creates a dynamic array of type `T`, with `allocator` as its
+backing allocator, and initial capacity specified by `cap`, and initial length
+specified by `len`.
+*/
@(require_results)
-make_dynamic_array_len_cap :: proc($T: typeid/[dynamic]$E, #any_int len: int, #any_int cap: int, allocator := context.allocator, loc := #caller_location) -> (array: T, err: Allocator_Error) {
+make_dynamic_array_len_cap :: proc(
+ $T: typeid/[dynamic]$E,
+ #any_int len: int,
+ #any_int cap: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (array: T, err: Allocator_Error) {
return runtime.make_dynamic_array_len_cap(T, len, cap, allocator, loc)
}
+
+/*
+Allocate a map.
+
+This procedure creates a map of type `T` with the initial capacity specified
+by `cap`, using the allocator specified by `allocator` as its backing
+allocator.
+*/
@(require_results)
-make_map :: proc($T: typeid/map[$K]$E, #any_int cap: int = 1<<runtime.MAP_MIN_LOG2_CAPACITY, allocator := context.allocator, loc := #caller_location) -> (m: T, err: Allocator_Error) {
+make_map :: proc(
+ $T: typeid/map[$K]$E,
+ #any_int cap: int = 1<<runtime.MAP_MIN_LOG2_CAPACITY,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (m: T, err: Allocator_Error) {
return runtime.make_map(T, cap, allocator, loc)
}
+
+/*
+Allocate a multi-pointer.
+
+This procedure allocates a multi-pointer of type `T` pointing to `len` elements,
+from an allocator specified by `allocator`.
+*/
@(require_results)
-make_multi_pointer :: proc($T: typeid/[^]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (mp: T, err: Allocator_Error) {
+make_multi_pointer :: proc(
+ $T: typeid/[^]$E,
+ #any_int len: int,
+ allocator := context.allocator,
+ loc := #caller_location
+) -> (mp: T, err: Allocator_Error) {
return runtime.make_multi_pointer(T, len, allocator, loc)
}
+/*
+Allocate.
+*/
make :: proc{
make_slice,
make_dynamic_array,
@@ -202,26 +976,112 @@ make :: proc{
make_multi_pointer,
}
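
A caller-side sketch of the `new`/`make` family and the matching `delete`/`free` calls (illustrative only; assumes `import "core:mem"`):

	make_example :: proc() {
		nums, err := mem.make_slice([]int, 8)
		if err != nil {
			return
		}
		defer mem.delete_slice(nums)

		counter, new_err := mem.new(int)
		if new_err == nil {
			counter^ = 1
			mem.free(counter)
		}
	}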
+/*
+Default resize procedure.
+
+When an allocator does not support the resize operation, but supports `.Alloc`
+and `.Free`, this procedure is used to implement the allocator's default
+behavior on resize.
+
+The behavior of the function is as follows:
+
+- If `new_size` is `0`, the function acts like `free()`, freeing the memory
+ region of `old_size` bytes located at `old_memory`.
+- If `old_memory` is `nil`, the function acts like `alloc()`, allocating
+ `new_size` bytes of memory aligned on a boundary specified by `alignment`.
+- Otherwise, a new memory region of size `new_size` is allocated, then the
+ data from the old memory region is copied and the old memory region is
+ freed.
+*/
@(require_results)
-default_resize_align :: proc(old_memory: rawptr, old_size, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> (res: rawptr, err: Allocator_Error) {
+default_resize_align :: proc(
+ old_memory: rawptr,
+ old_size: int,
+ new_size: int,
+ alignment: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> (res: rawptr, err: Allocator_Error) {
data: []byte
- data, err = default_resize_bytes_align(([^]byte)(old_memory)[:old_size], new_size, alignment, allocator, loc)
+ data, err = default_resize_bytes_align(
+		([^]byte)(old_memory)[:old_size],
+ new_size,
+ alignment,
+ allocator,
+ loc,
+ )
res = raw_data(data)
return
}
+/*
+Default resize procedure.
+
+When an allocator does not support the resize operation, but supports
+`.Alloc_Non_Zeroed` and `.Free`, this procedure is used to implement the
+allocator's default behavior on resize.
+
+Unlike `default_resize_align`, new memory is not explicitly
+zero-initialized.
+
+The behavior of the function is as follows:
+
+- If `new_size` is `0`, the function acts like `free()`, freeing the memory
+ region of `old_size` bytes located at `old_memory`.
+- If `old_memory` is `nil`, the function acts like `alloc()`, allocating
+ `new_size` bytes of memory aligned on a boundary specified by `alignment`.
+- Otherwise, a new memory region of size `new_size` is allocated, then the
+ data from the old memory region is copied and the old memory region is
+ freed.
+*/
@(require_results)
-default_resize_bytes_align_non_zeroed :: proc(old_data: []byte, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+default_resize_bytes_align_non_zeroed :: proc(
+ old_data: []byte,
+ new_size: int,
+ alignment: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
return _default_resize_bytes_align(old_data, new_size, alignment, false, allocator, loc)
}
+
+/*
+Default resize procedure.
+
+When an allocator does not support the resize operation, but supports `.Alloc`
+and `.Free`, this procedure is used to implement the allocator's default
+behavior on resize.
+
+The behavior of the function is as follows:
+
+- If `new_size` is `0`, the function acts like `free()`, freeing the memory
+ region specified by `old_data`.
+- If `old_data` is `nil`, the function acts like `alloc()`, allocating
+ `new_size` bytes of memory aligned on a boundary specified by `alignment`.
+- Otherwise, a new memory region of size `new_size` is allocated, then the
+ data from the old memory region is copied and the old memory region is
+ freed.
+*/
@(require_results)
-default_resize_bytes_align :: proc(old_data: []byte, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+default_resize_bytes_align :: proc(
+ old_data: []byte,
+ new_size: int,
+ alignment: int,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
return _default_resize_bytes_align(old_data, new_size, alignment, true, allocator, loc)
}
@(require_results)
-_default_resize_bytes_align :: #force_inline proc(old_data: []byte, new_size, alignment: int, should_zero: bool, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+_default_resize_bytes_align :: #force_inline proc(
+ old_data: []byte,
+ new_size: int,
+ alignment: int,
+ should_zero: bool,
+ allocator := context.allocator,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
old_memory := raw_data(old_data)
old_size := len(old_data)
if old_memory == nil {
@@ -231,16 +1091,13 @@ _default_resize_bytes_align :: #force_inline proc(old_data: []byte, new_size, al
return alloc_bytes_non_zeroed(new_size, alignment, allocator, loc)
}
}
-
if new_size == 0 {
err := free_bytes(old_data, allocator, loc)
return nil, err
}
-
- if new_size == old_size {
+ if new_size == old_size && is_aligned(old_memory, alignment) {
return old_data, .None
}
-
new_memory : []byte
err : Allocator_Error
if should_zero {
@@ -251,7 +1108,6 @@ _default_resize_bytes_align :: #force_inline proc(old_data: []byte, new_size, al
if new_memory == nil || err != nil {
return nil, err
}
-
runtime.copy(new_memory, old_data)
free_bytes(old_data, allocator, loc)
return new_memory, err
diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin
index a5b93ad05..d729b902c 100644
--- a/core/mem/allocators.odin
+++ b/core/mem/allocators.odin
@@ -3,12 +3,14 @@ package mem
import "base:intrinsics"
import "base:runtime"
-nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
- return nil, nil
-}
+/*
+Nil allocator.
+
+The `nil` allocator returns `nil` on every allocation attempt. This type of
+allocator can be used in scenarios where memory doesn't need to be allocated,
+but an attempt to allocate memory is not an error.
+*/
+@(require_results)
nil_allocator :: proc() -> Allocator {
return Allocator{
procedure = nil_allocator_proc,
@@ -16,8 +18,81 @@ nil_allocator :: proc() -> Allocator {
}
}
-// Custom allocators
+nil_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ return nil, nil
+}
+
+
+/*
+Panic allocator.
+
+The panic allocator is a type of allocator that panics on any allocation
+attempt. This type of allocator can be used in scenarios where memory should
+not be allocated, and an attempt to allocate memory is an error.
+*/
+@(require_results)
+panic_allocator :: proc() -> Allocator {
+ return Allocator{
+ procedure = panic_allocator_proc,
+ data = nil,
+ }
+}
+
+panic_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ switch mode {
+ case .Alloc:
+ if size > 0 {
+ panic("mem: panic allocator, .Alloc called", loc=loc)
+ }
+ case .Alloc_Non_Zeroed:
+ if size > 0 {
+ panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
+ }
+ case .Resize:
+ if size > 0 {
+ panic("mem: panic allocator, .Resize called", loc=loc)
+ }
+ case .Resize_Non_Zeroed:
+ if size > 0 {
+ panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
+ }
+ case .Free:
+ if old_memory != nil {
+ panic("mem: panic allocator, .Free called", loc=loc)
+ }
+ case .Free_All:
+ panic("mem: panic allocator, .Free_All called", loc=loc)
+ case .Query_Features:
+ set := (^Allocator_Mode_Set)(old_memory)
+ if set != nil {
+ set^ = {.Query_Features}
+ }
+ return nil, nil
+
+ case .Query_Info:
+ panic("mem: panic allocator, .Query_Info called", loc=loc)
+ }
+ return nil, nil
+}
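
A brief sketch (illustrative only) of where the panic allocator is useful: installing it as `context.allocator` turns any hidden allocation in a scope into a panic:

	no_alloc_section :: proc() {
		context.allocator = mem.panic_allocator()
		// Any call below that implicitly allocates will now panic,
		// making accidental allocations easy to find.
	}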
+
+/*
+Arena allocator data.
+*/
Arena :: struct {
data: []byte,
offset: int,
@@ -25,12 +100,39 @@ Arena :: struct {
temp_count: int,
}
-Arena_Temp_Memory :: struct {
- arena: ^Arena,
- prev_offset: int,
+/*
+Arena allocator.
+
+The arena allocator (also known as a linear allocator, bump allocator,
+region allocator) is an allocator that uses a single backing buffer for
+allocations.
+
+The buffer is used contiguously, from start to end. Each subsequent
+allocation occupies the next adjacent region of memory in the buffer. Since
+the arena allocator does not keep track of any metadata associated with the
+allocations and their locations, it is impossible to free individual
+allocations.
+
+The arena allocator can be used for temporary allocations in frame-based memory
+management. Games are one example of such applications. A global arena can be
+used for any temporary memory allocations, and at the end of each frame all
+temporary allocations are freed. Since no temporary object is going to live
+longer than a frame, no lifetimes are violated.
+*/
+@(require_results)
+arena_allocator :: proc(arena: ^Arena) -> Allocator {
+ return Allocator{
+ procedure = arena_allocator_proc,
+ data = arena,
+ }
}
+/*
+Initialize an arena.
+
+This procedure initializes the arena `a` with the memory region `data` as its
+backing buffer.
+*/
arena_init :: proc(a: ^Arena, data: []byte) {
a.data = data
a.offset = 0
@@ -46,64 +148,157 @@ init_arena :: proc(a: ^Arena, data: []byte) {
a.temp_count = 0
}
+/*
+Allocate memory from an arena.
+
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment` from an arena `a`. The allocated memory is zero-initialized.
+This procedure returns a pointer to the newly allocated memory region.
+*/
@(require_results)
-arena_allocator :: proc(arena: ^Arena) -> Allocator {
- return Allocator{
- procedure = arena_allocator_proc,
- data = arena,
- }
+arena_alloc :: proc(
+ a: ^Arena,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := arena_alloc_bytes(a, size, alignment, loc)
+ return raw_data(bytes), err
}
-arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
- arena := cast(^Arena)allocator_data
+/*
+Allocate memory from an arena.
- switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- #no_bounds_check end := &arena.data[arena.offset]
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment` from an arena `a`. The allocated memory is zero-initialized.
+This procedure returns a slice of the newly allocated memory region.
+*/
+@(require_results)
+arena_alloc_bytes :: proc(
+ a: ^Arena,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
+ if bytes != nil {
+ zero_slice(bytes)
+ }
+ return bytes, err
+}
- ptr := align_forward(end, uintptr(alignment))
+/*
+Allocate non-initialized memory from an arena.
- total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment` from an arena `a`. The allocated memory is not explicitly
+zero-initialized. This procedure returns a pointer to the newly allocated
+memory region.
+*/
+@(require_results)
+arena_alloc_non_zeroed :: proc(
+ a: ^Arena,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc)
+ return raw_data(bytes), err
+}
- if arena.offset + total_size > len(arena.data) {
- return nil, .Out_Of_Memory
- }
+/*
+Allocate non-initialized memory from an arena.
- arena.offset += total_size
- arena.peak_used = max(arena.peak_used, arena.offset)
- if mode != .Alloc_Non_Zeroed {
- zero(ptr, size)
- }
- return byte_slice(ptr, size), nil
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment` from an arena `a`. The allocated memory is not explicitly
+zero-initialized. This procedure returns a slice of the newly allocated
+memory region.
+*/
+@(require_results)
+arena_alloc_bytes_non_zeroed :: proc(
+ a: ^Arena,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> ([]byte, Allocator_Error) {
+ if a.data == nil {
+ panic("Arena is not initialized", loc)
+ }
+ #no_bounds_check end := &a.data[a.offset]
+ ptr := align_forward(end, uintptr(alignment))
+ total_size := size + ptr_sub((^byte)(ptr), (^byte)(end))
+ if a.offset + total_size > len(a.data) {
+ return nil, .Out_Of_Memory
+ }
+ a.offset += total_size
+ a.peak_used = max(a.peak_used, a.offset)
+ return byte_slice(ptr, size), nil
+}
+
+/*
+Free all memory allocated from an arena, making it available for further allocations.
+*/
+arena_free_all :: proc(a: ^Arena) {
+ a.offset = 0
+}
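
Taken together, a minimal arena workflow looks like this (illustrative only; assumes `import "core:mem"`):

	arena_example :: proc() {
		backing: [1024]byte
		arena: mem.Arena
		mem.arena_init(&arena, backing[:])

		context.allocator = mem.arena_allocator(&arena)
		x, err := mem.new(int) // allocated inside `backing`
		if err == nil {
			x^ = 7
		}
		mem.arena_free_all(&arena) // individual frees are not supported
	}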
+arena_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size: int,
+ alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ arena := cast(^Arena)allocator_data
+ switch mode {
+ case .Alloc:
+ return arena_alloc_bytes(arena, size, alignment, loc)
+ case .Alloc_Non_Zeroed:
+ return arena_alloc_bytes_non_zeroed(arena, size, alignment, loc)
case .Free:
return nil, .Mode_Not_Implemented
-
case .Free_All:
- arena.offset = 0
-
+ arena_free_all(arena)
case .Resize:
- return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))
-
+ return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
case .Resize_Non_Zeroed:
- return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena))
-
+ return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
}
return nil, nil
-
case .Query_Info:
return nil, .Mode_Not_Implemented
}
-
return nil, nil
}
+/*
+Temporary memory region of an arena.
+
+Temporary memory regions of an arena act as savepoints for the arena. When one
+is created, subsequent allocations are done inside the temporary memory
+region. When `end_arena_temp_memory` is called, the arena is rolled back, and
+all of the memory that was allocated inside the region is freed.
+
+Multiple temporary memory regions can exist at the same time for an arena.
+*/
+Arena_Temp_Memory :: struct {
+ arena: ^Arena,
+ prev_offset: int,
+}
+
+/*
+Start a temporary memory region.
+
+This procedure creates a temporary memory region. After a temporary memory
+region is created, all allocations are said to be *inside* the temporary memory
+region, until `end_arena_temp_memory` is called.
+*/
@(require_results)
begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
tmp: Arena_Temp_Memory
@@ -113,6 +308,12 @@ begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory {
return tmp
}
+/*
+End a temporary memory region.
+
+This procedure ends the temporary memory region for an arena. All of the
+allocations *inside* the temporary memory region will be freed to the arena.
+*/
end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
assert(tmp.arena.offset >= tmp.prev_offset)
assert(tmp.arena.temp_count > 0)
@@ -120,9 +321,15 @@ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) {
tmp.arena.temp_count -= 1
}
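
A sketch of the savepoint pattern (illustrative only), rolling back all temporary allocations on scope exit:

	with_temp_memory :: proc(arena: ^mem.Arena) {
		tmp := mem.begin_arena_temp_memory(arena)
		defer mem.end_arena_temp_memory(tmp)

		// Allocations made from `arena` here are rolled back
		// when this procedure returns.
	}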
+/* Preserved for compatibility */
+Scratch_Allocator :: Scratch
+scratch_allocator_init :: scratch_init
+scratch_allocator_destroy :: scratch_destroy
-
-Scratch_Allocator :: struct {
+/*
+Scratch allocator data.
+*/
+Scratch :: struct {
data: []byte,
curr_offset: int,
prev_allocation: rawptr,
@@ -130,7 +337,35 @@ Scratch_Allocator :: struct {
leaked_allocations: [dynamic][]byte,
}
-scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error {
+/*
+Scratch allocator.
+
+The scratch allocator works in a similar way to the `Arena` allocator. The
+scratch allocator has a backing buffer that is allocated in contiguous
+regions, from start to end.
+
+Each subsequent allocation will be the next adjacent region of memory in the
+backing buffer. If the allocation doesn't fit into the remaining space of the
+backing buffer, this allocation is put at the start of the buffer, and all
+previous allocations will become invalidated. If the allocation doesn't fit
+into the backing buffer as a whole, it will be allocated using a backing
+allocator, and pointer to the allocated memory region will be put into the
+`leaked_allocations` array.
+
+The `leaked_allocations` array is managed by the `context` allocator.
+*/
+@(require_results)
+scratch_allocator :: proc(allocator: ^Scratch) -> Allocator {
+ return Allocator{
+ procedure = scratch_allocator_proc,
+ data = allocator,
+ }
+}
+
+/*
+Initialize scratch allocator.
+*/
+scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error {
s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return
s.curr_offset = 0
s.prev_allocation = nil
@@ -139,7 +374,10 @@ scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocato
return nil
}
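
A usage sketch (illustrative only; assumes `import "core:mem"` and the `mem.Kilobyte` constant):

	scratch_example :: proc() {
		s: mem.Scratch
		if err := mem.scratch_init(&s, 64 * mem.Kilobyte); err != nil {
			return
		}
		defer mem.scratch_destroy(&s)

		ptr, _ := mem.scratch_alloc(&s, 128)
		_ = ptr // older allocations may be invalidated as the buffer wraps
	}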
-scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
+/*
+Free all data associated with a scratch allocator.
+*/
+scratch_destroy :: proc(s: ^Scratch) {
if s == nil {
return
}
@@ -151,60 +389,105 @@ scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) {
s^ = {}
}
-scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+/*
+Allocate memory from scratch allocator.
- s := (^Scratch_Allocator)(allocator_data)
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment`. The allocated memory region is zero-initialized. This procedure
+returns a pointer to the allocated memory region.
+*/
+@(require_results)
+scratch_alloc :: proc(
+ s: ^Scratch,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := scratch_alloc_bytes(s, size, alignment, loc)
+ return raw_data(bytes), err
+}
- if s.data == nil {
- DEFAULT_BACKING_SIZE :: 4 * Megabyte
- if !(context.allocator.procedure != scratch_allocator_proc &&
- context.allocator.data != allocator_data) {
- panic("cyclic initialization of the scratch allocator with itself")
- }
- scratch_allocator_init(s, DEFAULT_BACKING_SIZE)
- }
+/*
+Allocate memory from scratch allocator.
- size := size
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment`. The allocated memory region is zero-initialized. This procedure
+returns a slice of the allocated memory region.
+*/
+@(require_results)
+scratch_alloc_bytes :: proc(
+ s: ^Scratch,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if bytes != nil {
+ zero_slice(bytes)
+ }
+ return bytes, err
+}
- switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- size = align_forward_int(size, alignment)
-
- switch {
- case s.curr_offset+size <= len(s.data):
- start := uintptr(raw_data(s.data))
- ptr := start + uintptr(s.curr_offset)
- ptr = align_forward_uintptr(ptr, uintptr(alignment))
- if mode != .Alloc_Non_Zeroed {
- zero(rawptr(ptr), size)
- }
+/*
+Allocate non-initialized memory from scratch allocator.
- s.prev_allocation = rawptr(ptr)
- offset := int(ptr - start)
- s.curr_offset = offset + size
- return byte_slice(rawptr(ptr), size), nil
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment`. The allocated memory region is not explicitly zero-initialized.
+This procedure returns a pointer to the allocated memory region.
+*/
+@(require_results)
+scratch_alloc_non_zeroed :: proc(
+ s: ^Scratch,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ return raw_data(bytes), err
+}
- case size <= len(s.data):
- start := uintptr(raw_data(s.data))
- ptr := align_forward_uintptr(start, uintptr(alignment))
- if mode != .Alloc_Non_Zeroed {
- zero(rawptr(ptr), size)
- }
+/*
+Allocate non-initialized memory from scratch allocator.
- s.prev_allocation = rawptr(ptr)
- offset := int(ptr - start)
- s.curr_offset = offset + size
- return byte_slice(rawptr(ptr), size), nil
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by `alignment`. The allocated memory region is not explicitly zero-initialized.
+This procedure returns a slice of the allocated memory region.
+*/
+@(require_results)
+scratch_alloc_bytes_non_zeroed :: proc(
+ s: ^Scratch,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ if s.data == nil {
+ DEFAULT_BACKING_SIZE :: 4 * Megabyte
+ if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
+ panic("cyclic initialization of the scratch allocator with itself", loc)
}
+ scratch_init(s, DEFAULT_BACKING_SIZE)
+ }
+ size := size
+ size = align_forward_int(size, alignment)
+ if size <= len(s.data) {
+		offset := uintptr(0)
+		if s.curr_offset+size <= len(s.data) {
+			offset = uintptr(s.curr_offset)
+		}
+		// Otherwise wrap around to the start of the buffer,
+		// invalidating all previous allocations.
+		start := uintptr(raw_data(s.data))
+		ptr := align_forward_uintptr(offset+start, uintptr(alignment))
+		s.prev_allocation = rawptr(ptr)
+		s.curr_offset = int(ptr - start) + size
+ return byte_slice(rawptr(ptr), size), nil
+ } else {
a := s.backup_allocator
if a.procedure == nil {
a = context.allocator
s.backup_allocator = a
}
-
- ptr, err := alloc_bytes(size, alignment, a, loc)
+ ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc)
if err != nil {
return ptr, err
}
@@ -212,110 +495,290 @@ scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
s.leaked_allocations, err = make([dynamic][]byte, a)
}
append(&s.leaked_allocations, ptr)
-
if logger := context.logger; logger.lowest_level <= .Warning {
if logger.procedure != nil {
- logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator" , logger.options, loc)
+			logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator", logger.options, loc)
}
}
-
return ptr, err
+ }
+}
- case .Free:
- if old_memory == nil {
- return nil, nil
- }
- start := uintptr(raw_data(s.data))
- end := start + uintptr(len(s.data))
- old_ptr := uintptr(old_memory)
-
- if s.prev_allocation == old_memory {
- s.curr_offset = int(uintptr(s.prev_allocation) - start)
- s.prev_allocation = nil
- return nil, nil
- }
+/*
+Free memory to the scratch allocator.
- if start <= old_ptr && old_ptr < end {
- // NOTE(bill): Cannot free this pointer but it is valid
- return nil, nil
- }
+This procedure frees the memory region allocated at pointer `ptr`.
- if len(s.leaked_allocations) != 0 {
- for data, i in s.leaked_allocations {
- ptr := raw_data(data)
- if ptr == old_memory {
- free_bytes(data, s.backup_allocator)
- ordered_remove(&s.leaked_allocations, i)
- return nil, nil
- }
+If `ptr` is not the latest allocation and is not a leaked allocation, this
+operation is a no-op.
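+
+A minimal sketch (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		s: mem.Scratch
+		mem.scratch_init(&s, mem.Kilobyte)
+		defer mem.scratch_destroy(&s)
+		ptr, _ := mem.scratch_alloc(&s, 64)
+		// `ptr` is the latest allocation, so its space is reclaimed.
+		mem.scratch_free(&s, ptr)
+	}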
+*/
+scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error {
+ if s.data == nil {
+ panic("Free on an uninitialized scratch allocator", loc)
+ }
+ if ptr == nil {
+ return nil
+ }
+ start := uintptr(raw_data(s.data))
+ end := start + uintptr(len(s.data))
+ old_ptr := uintptr(ptr)
+ if s.prev_allocation == ptr {
+ s.curr_offset = int(uintptr(s.prev_allocation) - start)
+ s.prev_allocation = nil
+ return nil
+ }
+ if start <= old_ptr && old_ptr < end {
+ // NOTE(bill): Cannot free this pointer but it is valid
+ return nil
+ }
+ if len(s.leaked_allocations) != 0 {
+ for data, i in s.leaked_allocations {
+			if raw_data(data) == ptr {
+ free_bytes(data, s.backup_allocator, loc)
+ ordered_remove(&s.leaked_allocations, i, loc)
+ return nil
}
}
- return nil, .Invalid_Pointer
- // panic("invalid pointer passed to default_temp_allocator");
+ }
+ return .Invalid_Pointer
+}
- case .Free_All:
- s.curr_offset = 0
- s.prev_allocation = nil
- for ptr in s.leaked_allocations {
- free_bytes(ptr, s.backup_allocator)
- }
- clear(&s.leaked_allocations)
+/*
+Free all memory to the scratch allocator.
+*/
+scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) {
+ s.curr_offset = 0
+ s.prev_allocation = nil
+ for ptr in s.leaked_allocations {
+ free_bytes(ptr, s.backup_allocator, loc)
+ }
+ clear(&s.leaked_allocations)
+}
- case .Resize, .Resize_Non_Zeroed:
- begin := uintptr(raw_data(s.data))
- end := begin + uintptr(len(s.data))
- old_ptr := uintptr(old_memory)
- if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
- s.curr_offset = int(old_ptr-begin)+size
- return byte_slice(old_memory, size), nil
- }
- data, err := scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc)
- if err != nil {
- return data, err
+/*
+Resize an allocation.
+
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
+
+If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
+memory region located at an address specified by `old_memory`.
+
+This procedure returns the pointer to the resized memory region.
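+
+A minimal sketch (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		s: mem.Scratch
+		mem.scratch_init(&s, mem.Kilobyte)
+		defer mem.scratch_destroy(&s)
+		ptr, _ := mem.scratch_alloc(&s, 64)
+		// Grow the region; contents are copied if the region moves.
+		ptr, _ = mem.scratch_resize(&s, ptr, 64, 128)
+	}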
+*/
+@(require_results)
+scratch_resize :: proc(
+ s: ^Scratch,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> (rawptr, Allocator_Error) {
+ bytes, err := scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ return raw_data(bytes), err
+}
+
+/*
+Resize an allocation.
+
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
+zero-initialized.
+
+If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
+memory region specified by `old_data`.
+
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+scratch_resize_bytes :: proc(
+ s: ^Scratch,
+ old_data: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> ([]byte, Allocator_Error) {
+ bytes, err := scratch_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
+ if bytes != nil && size > len(old_data) {
+		zero_slice(bytes[len(old_data):])
+ }
+ return bytes, err
+}
+
+/*
+Resize an allocation without zero-initialization.
+
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
+
+If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
+memory region located at an address specified by `old_memory`.
+
+This procedure returns the pointer to the resized memory region.
+*/
+@(require_results)
+scratch_resize_non_zeroed :: proc(
+ s: ^Scratch,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> (rawptr, Allocator_Error) {
+ bytes, err := scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ return raw_data(bytes), err
+}
+
+/*
+Resize an allocation without zero-initialization.
+
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
+explicitly zero-initialized.
+
+If `old_data` is `nil`, this procedure acts just like `scratch_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `scratch_free()`, freeing the
+memory region specified by `old_data`.
+
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+scratch_resize_bytes_non_zeroed :: proc(
+ s: ^Scratch,
+ old_data: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> ([]byte, Allocator_Error) {
+ old_memory := raw_data(old_data)
+ old_size := len(old_data)
+ if s.data == nil {
+ DEFAULT_BACKING_SIZE :: 4 * Megabyte
+ if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) {
+ panic("cyclic initialization of the scratch allocator with itself", loc)
}
- runtime.copy(data, byte_slice(old_memory, old_size))
- _, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc)
+ scratch_init(s, DEFAULT_BACKING_SIZE)
+ }
+ begin := uintptr(raw_data(s.data))
+ end := begin + uintptr(len(s.data))
+ old_ptr := uintptr(old_memory)
+ if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end {
+ s.curr_offset = int(old_ptr-begin)+size
+ return byte_slice(old_memory, size), nil
+ }
+ data, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if err != nil {
return data, err
+ }
+ runtime.copy(data, byte_slice(old_memory, old_size))
+ err = scratch_free(s, old_memory, loc)
+ return data, err
+}
+scratch_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ s := (^Scratch)(allocator_data)
+ switch mode {
+ case .Alloc:
+ return scratch_alloc_bytes(s, size, alignment, loc)
+ case .Alloc_Non_Zeroed:
+ return scratch_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ case .Free:
+ return nil, scratch_free(s, old_memory, loc)
+ case .Free_All:
+ scratch_free_all(s, loc)
+ case .Resize:
+ return scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ case .Resize_Non_Zeroed:
+ return scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
}
return nil, nil
-
case .Query_Info:
return nil, .Mode_Not_Implemented
}
-
return nil, nil
}
-@(require_results)
-scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator {
- return Allocator{
- procedure = scratch_allocator_proc,
- data = allocator,
- }
-}
-
-
+/*
+Stack allocator data.
+*/
+Stack :: struct {
+ data: []byte,
+ prev_offset: int,
+ curr_offset: int,
+ peak_used: int,
+}
+/*
+Header of a stack allocation.
+*/
Stack_Allocation_Header :: struct {
prev_offset: int,
padding: int,
}
-// Stack is a stack-like allocator which has a strict memory freeing order
-Stack :: struct {
- data: []byte,
- prev_offset: int,
- curr_offset: int,
- peak_used: int,
+/*
+Stack allocator.
+
+The stack allocator is an allocator that allocates data in the backing buffer
+linearly, from start to end. Each subsequent allocation will get the next
+adjacent memory region.
+
+Unlike the arena allocator, the stack allocator stores allocation metadata and
+has a strict freeing order: only the most recent allocation can be freed. Once
+it is freed, the allocation made before it becomes available for freeing.
+
+The metadata is stored in allocation headers, which are located before the
+start of each allocated memory region. Each header records the stack offset
+to restore when its allocation is freed.
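+
+A minimal sketch of typical use (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		buf: [1024]byte
+		s: mem.Stack
+		mem.stack_init(&s, buf[:])
+		a, _ := mem.stack_alloc(&s, 64)
+		b, _ := mem.stack_alloc(&s, 64)
+		// Free in reverse (LIFO) order.
+		mem.stack_free(&s, b)
+		mem.stack_free(&s, a)
+	}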
+*/
+@(require_results)
+stack_allocator :: proc(stack: ^Stack) -> Allocator {
+ return Allocator{
+ procedure = stack_allocator_proc,
+ data = stack,
+ }
}
+/*
+Initialize the stack allocator.
+
+This procedure initializes the stack allocator with a backing buffer specified
+by the `data` parameter.
+*/
stack_init :: proc(s: ^Stack, data: []byte) {
s.data = data
s.prev_offset = 0
@@ -331,129 +794,333 @@ init_stack :: proc(s: ^Stack, data: []byte) {
s.peak_used = 0
}
+/*
+Allocate memory from stack.
+
+This procedure allocates `size` bytes of memory, aligned to the boundary
+specified by `alignment`. The allocated memory is zero-initialized. This
+procedure returns the pointer to the allocated memory.
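+
+For instance (illustrative sketch):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		buf: [256]byte
+		s: mem.Stack
+		mem.stack_init(&s, buf[:])
+		// Allocate a 64-byte block aligned to a 64-byte boundary.
+		ptr, err := mem.stack_alloc(&s, 64, 64)
+		assert(err == nil && uintptr(ptr) % 64 == 0)
+	}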
+*/
@(require_results)
-stack_allocator :: proc(stack: ^Stack) -> Allocator {
- return Allocator{
- procedure = stack_allocator_proc,
- data = stack,
+stack_alloc :: proc(
+ s: ^Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> (rawptr, Allocator_Error) {
+ bytes, err := stack_alloc_bytes(s, size, alignment, loc)
+ return raw_data(bytes), err
+}
+
+/*
+Allocate memory from stack.
+
+This procedure allocates `size` bytes of memory, aligned to the boundary
+specified by `alignment`. The allocated memory is zero-initialized. This
+procedure returns the slice of the allocated memory.
+*/
+@(require_results)
+stack_alloc_bytes :: proc(
+ s: ^Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> ([]byte, Allocator_Error) {
+ bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if bytes != nil {
+ zero_slice(bytes)
}
+ return bytes, err
}
+/*
+Allocate non-initialized memory from stack.
+
-stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
- s := cast(^Stack)allocator_data
+This procedure allocates `size` bytes of memory, aligned to the boundary
+specified by `alignment`. The allocated memory is not explicitly
+zero-initialized. This procedure returns the pointer to the allocated memory.
+*/
+@(require_results)
+stack_alloc_non_zeroed :: proc(
+ s: ^Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> (rawptr, Allocator_Error) {
+ bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ return raw_data(bytes), err
+}
+
+/*
+Allocate non-initialized memory from stack.
+
+This procedure allocates `size` bytes of memory, aligned to the boundary
+specified by `alignment`. The allocated memory is not explicitly
+zero-initialized. This procedure returns the slice of the allocated memory.
+*/
+@(require_results)
+stack_alloc_bytes_non_zeroed :: proc(
+ s: ^Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location
+) -> ([]byte, Allocator_Error) {
if s.data == nil {
- return nil, .Invalid_Argument
+ panic("Stack allocation on an uninitialized stack allocator", loc)
}
+ curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
+ padding := calc_padding_with_header(
+ curr_addr,
+ uintptr(alignment),
+ size_of(Stack_Allocation_Header),
+ )
+ if s.curr_offset + padding + size > len(s.data) {
+ return nil, .Out_Of_Memory
+ }
+ s.prev_offset = s.curr_offset
+ s.curr_offset += padding
+ next_addr := curr_addr + uintptr(padding)
+ header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
+ header.padding = padding
+ header.prev_offset = s.prev_offset
+ s.curr_offset += size
+ s.peak_used = max(s.peak_used, s.curr_offset)
+ return byte_slice(rawptr(next_addr), size), nil
+}
- raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
- curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset)
- padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header))
- if s.curr_offset + padding + size > len(s.data) {
- return nil, .Out_Of_Memory
- }
- s.prev_offset = s.curr_offset
- s.curr_offset += padding
-
- next_addr := curr_addr + uintptr(padding)
- header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header))
- header.padding = padding
- header.prev_offset = s.prev_offset
+/*
+Free memory to the stack.
+
+This procedure frees the memory region starting at `old_memory` to the stack.
+If the free is performed out of order, the `.Invalid_Pointer` error is
+returned.
+*/
+stack_free :: proc(
+ s: ^Stack,
+ old_memory: rawptr,
+ loc := #caller_location,
+) -> (Allocator_Error) {
+ if s.data == nil {
+ panic("Stack free on an uninitialized stack allocator", loc)
+ }
+ if old_memory == nil {
+ return nil
+ }
+ start := uintptr(raw_data(s.data))
+ end := start + uintptr(len(s.data))
+ curr_addr := uintptr(old_memory)
+ if !(start <= curr_addr && curr_addr < end) {
+ panic("Out of bounds memory address passed to stack allocator (free)", loc)
+ }
+ if curr_addr >= start+uintptr(s.curr_offset) {
+ // NOTE(bill): Allow double frees
+ return nil
+ }
+ header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
+ old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
+ if old_offset != header.prev_offset {
+ // panic("Out of order stack allocator free");
+ return .Invalid_Pointer
+ }
+ s.curr_offset = old_offset
+ s.prev_offset = header.prev_offset
+ return nil
+}
- s.curr_offset += size
+/*
+Free all allocations to the stack.
+*/
+stack_free_all :: proc(s: ^Stack, loc := #caller_location) {
+ s.prev_offset = 0
+ s.curr_offset = 0
+}
- s.peak_used = max(s.peak_used, s.curr_offset)
+/*
+Resize an allocation.
- if zero_memory {
- zero(rawptr(next_addr), size)
- }
- return byte_slice(rawptr(next_addr), size), nil
- }
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
- switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- return raw_alloc(s, size, alignment, mode == .Alloc)
- case .Free:
- if old_memory == nil {
- return nil, nil
- }
- start := uintptr(raw_data(s.data))
- end := start + uintptr(len(s.data))
- curr_addr := uintptr(old_memory)
+If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
- if !(start <= curr_addr && curr_addr < end) {
- panic("Out of bounds memory address passed to stack allocator (free)")
- }
+If `size` is 0, this procedure acts just like `stack_free()`, freeing the
+memory region located at an address specified by `old_memory`.
- if curr_addr >= start+uintptr(s.curr_offset) {
- // NOTE(bill): Allow double frees
- return nil, nil
- }
+This procedure returns the pointer to the resized memory region.
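+
+A minimal sketch (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		buf: [1024]byte
+		s: mem.Stack
+		mem.stack_init(&s, buf[:])
+		ptr, _ := mem.stack_alloc(&s, 64)
+		// The topmost allocation can be grown in place.
+		ptr, _ = mem.stack_resize(&s, ptr, 64, 128)
+	}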
+*/
+@(require_results)
+stack_resize :: proc(
+ s: ^Stack,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+	bytes, err := stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ return raw_data(bytes), err
+}
- header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
- old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
+/*
+Resize an allocation.
- if old_offset != header.prev_offset {
- // panic("Out of order stack allocator free");
- return nil, .Invalid_Pointer
- }
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
+zero-initialized.
- s.curr_offset = old_offset
- s.prev_offset = header.prev_offset
+If `old_data` is `nil`, this procedure acts just like `stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
- case .Free_All:
- s.prev_offset = 0
- s.curr_offset = 0
+If `size` is 0, this procedure acts just like `stack_free()`, freeing the
+memory region specified by `old_data`.
- case .Resize, .Resize_Non_Zeroed:
- if old_memory == nil {
- return raw_alloc(s, size, alignment, mode == .Resize)
- }
- if size == 0 {
- return nil, nil
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+stack_resize_bytes :: proc(
+ s: ^Stack,
+ old_data: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+	bytes, err := stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
+ if bytes != nil {
+ if old_data == nil {
+ zero_slice(bytes)
+ } else if size > len(old_data) {
+ zero_slice(bytes[len(old_data):])
}
+ }
+ return bytes, err
+}
- start := uintptr(raw_data(s.data))
- end := start + uintptr(len(s.data))
- curr_addr := uintptr(old_memory)
- if !(start <= curr_addr && curr_addr < end) {
- panic("Out of bounds memory address passed to stack allocator (resize)")
- }
+/*
+Resize an allocation without zero-initialization.
- if curr_addr >= start+uintptr(s.curr_offset) {
- // NOTE(bill): Allow double frees
- return nil, nil
- }
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
- if old_size == size {
- return byte_slice(old_memory, size), nil
- }
+If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
- header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
- old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
+If `size` is 0, this procedure acts just like `stack_free()`, freeing the
+memory region located at an address specified by `old_memory`.
- if old_offset != header.prev_offset {
- data, err := raw_alloc(s, size, alignment, mode == .Resize)
- if err == nil {
- runtime.copy(data, byte_slice(old_memory, old_size))
- }
- return data, err
- }
+This procedure returns the pointer to the resized memory region.
+*/
+@(require_results)
+stack_resize_non_zeroed :: proc(
+ s: ^Stack,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+	bytes, err := stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ return raw_data(bytes), err
+}
- old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
- assert(old_memory_size == uintptr(old_size))
+/*
+Resize an allocation without zero-initialization.
- diff := size - old_size
- s.curr_offset += diff // works for smaller sizes too
- if diff > 0 {
- zero(rawptr(curr_addr + uintptr(diff)), diff)
- }
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
+explicitly zero-initialized.
+
+If `old_data` is `nil`, this procedure acts just like `stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `stack_free()`, freeing the
+memory region specified by `old_data`.
+
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+stack_resize_bytes_non_zeroed :: proc(
+ s: ^Stack,
+ old_data: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ old_memory := raw_data(old_data)
+ old_size := len(old_data)
+ if s.data == nil {
+		panic("Stack resize on an uninitialized stack allocator", loc)
+ }
+ if old_memory == nil {
+ return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ }
+ if size == 0 {
+ return nil, nil
+ }
+ start := uintptr(raw_data(s.data))
+ end := start + uintptr(len(s.data))
+ curr_addr := uintptr(old_memory)
+ if !(start <= curr_addr && curr_addr < end) {
+		panic("Out of bounds memory address passed to stack allocator (resize)", loc)
+ }
+ if curr_addr >= start+uintptr(s.curr_offset) {
+ // NOTE(bill): Allow double frees
+ return nil, nil
+ }
+ if old_size == size {
return byte_slice(old_memory, size), nil
+ }
+ header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header))
+ old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
+ if old_offset != header.prev_offset {
+ data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if err == nil {
+ runtime.copy(data, byte_slice(old_memory, old_size))
+ }
+ return data, err
+ }
+ old_memory_size := uintptr(s.curr_offset) - (curr_addr - start)
+ assert(old_memory_size == uintptr(old_size))
+ diff := size - old_size
+ s.curr_offset += diff // works for smaller sizes too
+ if diff > 0 {
+		zero(rawptr(curr_addr + uintptr(old_size)), diff)
+ }
+ return byte_slice(old_memory, size), nil
+}
+stack_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size: int,
+ alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ s := cast(^Stack)allocator_data
+ if s.data == nil {
+ return nil, .Invalid_Argument
+ }
+ switch mode {
+ case .Alloc:
+ return stack_alloc_bytes(s, size, alignment, loc)
+ case .Alloc_Non_Zeroed:
+ return stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ case .Free:
+ return nil, stack_free(s, old_memory, loc)
+ case .Free_All:
+ stack_free_all(s, loc)
+ case .Resize:
+ return stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ case .Resize_Non_Zeroed:
+ return stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
@@ -463,27 +1130,32 @@ stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
case .Query_Info:
return nil, .Mode_Not_Implemented
}
-
return nil, nil
}
-
-
-
-
-
+/*
+Allocation header of the small stack allocator.
+*/
Small_Stack_Allocation_Header :: struct {
padding: u8,
}
-// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order
+/*
+Small stack allocator data.
+*/
Small_Stack :: struct {
data: []byte,
offset: int,
peak_used: int,
}
+/*
+Initialize small stack.
+
+This procedure initializes the small stack allocator with `data` as its backing
+buffer.
+*/
small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
s.data = data
s.offset = 0
@@ -497,6 +1169,20 @@ init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
s.peak_used = 0
}
+/*
+Small stack allocator.
+
+The small stack allocator is just like a stack allocator, with the only
+difference being an extremely small header size. Unlike the stack allocator,
+small stack allows out-of order freeing of memory.
+
+The memory is allocated in the backing buffer linearly, from start to end.
+Each subsequent allocation will get the next adjacent memory region.
+
+The metadata is stored in allocation headers, which are located before the
+start of each allocated memory region. Each header stores the number of
+padding bytes between the end of the previous allocation and the current one.
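+
+A minimal sketch of typical use (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		buf: [512]byte
+		s: mem.Small_Stack
+		mem.small_stack_init(&s, buf[:])
+		a, _ := mem.small_stack_alloc(&s, 64)
+		b, _ := mem.small_stack_alloc(&s, 64)
+		// Out-of-order freeing is allowed; freeing `a` rewinds the
+		// stack past `b` as well.
+		mem.small_stack_free(&s, a)
+		_ = b
+	}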
+*/
@(require_results)
small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
return Allocator{
@@ -505,375 +1191,766 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator {
}
}
-small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) {
- s := cast(^Small_Stack)allocator_data
+/*
+Allocate memory from small stack.
- if s.data == nil {
- return nil, .Invalid_Argument
- }
+This procedure allocates `size` bytes of memory aligned to a boundary specified
+by `alignment`. The allocated memory is zero-initialized. This procedure
+returns a pointer to the allocated memory region.
+*/
+@(require_results)
+small_stack_alloc :: proc(
+ s: ^Small_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := small_stack_alloc_bytes(s, size, alignment, loc)
+ return raw_data(bytes), err
+}
- align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
+/*
+Allocate memory from small stack.
- raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) {
- curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
- padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
- if s.offset + padding + size > len(s.data) {
- return nil, .Out_Of_Memory
- }
- s.offset += padding
+This procedure allocates `size` bytes of memory aligned to a boundary specified
+by `alignment`. The allocated memory is zero-initialized. This procedure
+returns a slice of the allocated memory region.
+*/
+@(require_results)
+small_stack_alloc_bytes :: proc(
+ s: ^Small_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if bytes != nil {
+ zero_slice(bytes)
+ }
+ return bytes, err
+}
- next_addr := curr_addr + uintptr(padding)
- header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
- header.padding = auto_cast padding
+/*
+Allocate non-initialized memory from small stack.
+
- s.offset += size
+This procedure allocates `size` bytes of memory aligned to a boundary specified
+by `alignment`. The allocated memory is not explicitly zero-initialized. This
+procedure returns a pointer to the allocated memory region.
+*/
+@(require_results)
+small_stack_alloc_non_zeroed :: proc(
+ s: ^Small_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ return raw_data(bytes), err
+}
- s.peak_used = max(s.peak_used, s.offset)
+/*
+Allocate non-initialized memory from small stack.
+
- if zero_memory {
- zero(rawptr(next_addr), size)
- }
- return byte_slice(rawptr(next_addr), size), nil
+This procedure allocates `size` bytes of memory aligned to a boundary specified
+by `alignment`. The allocated memory is not explicitly zero-initialized. This
+procedure returns a slice of the allocated memory region.
+*/
+@(require_results)
+small_stack_alloc_bytes_non_zeroed :: proc(
+ s: ^Small_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ if s.data == nil {
+ panic("Small stack is not initialized", loc)
}
+ alignment := alignment
+ alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
+ curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset)
+ padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header))
+ if s.offset + padding + size > len(s.data) {
+ return nil, .Out_Of_Memory
+ }
+ s.offset += padding
+ next_addr := curr_addr + uintptr(padding)
+ header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header))
+ header.padding = auto_cast padding
+ s.offset += size
+ s.peak_used = max(s.peak_used, s.offset)
+ return byte_slice(rawptr(next_addr), size), nil
+}
- switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- return raw_alloc(s, size, align, mode == .Alloc)
- case .Free:
- if old_memory == nil {
- return nil, nil
- }
- start := uintptr(raw_data(s.data))
- end := start + uintptr(len(s.data))
- curr_addr := uintptr(old_memory)
+/*
+Free memory to the small stack.
+
+This procedure frees the memory region starting at `old_memory`. Freeing
+rewinds the stack offset to the start of the freed region, which also
+invalidates any allocations made after it.
+*/
+small_stack_free :: proc(
+ s: ^Small_Stack,
+ old_memory: rawptr,
+ loc := #caller_location,
+) -> Allocator_Error {
+ if s.data == nil {
+ panic("Small stack is not initialized", loc)
+ }
+ if old_memory == nil {
+ return nil
+ }
+ start := uintptr(raw_data(s.data))
+ end := start + uintptr(len(s.data))
+ curr_addr := uintptr(old_memory)
+ if !(start <= curr_addr && curr_addr < end) {
+ // panic("Out of bounds memory address passed to stack allocator (free)");
+ return .Invalid_Pointer
+ }
+ if curr_addr >= start+uintptr(s.offset) {
+ // NOTE(bill): Allow double frees
+ return nil
+ }
+ header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
+ old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
+ s.offset = old_offset
+ return nil
+}
- if !(start <= curr_addr && curr_addr < end) {
- // panic("Out of bounds memory address passed to stack allocator (free)");
- return nil, .Invalid_Pointer
- }
+/*
+Free all memory to small stack.
+*/
+small_stack_free_all :: proc(s: ^Small_Stack) {
+ s.offset = 0
+}
- if curr_addr >= start+uintptr(s.offset) {
- // NOTE(bill): Allow double frees
- return nil, nil
- }
+/*
+Resize an allocation.
- header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header))
- old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data)))
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is zero-initialized.
- s.offset = old_offset
+If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
- case .Free_All:
- s.offset = 0
+If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
+memory region located at an address specified by `old_memory`.
- case .Resize, .Resize_Non_Zeroed:
- if old_memory == nil {
- return raw_alloc(s, size, align, mode == .Resize)
- }
- if size == 0 {
- return nil, nil
- }
+This procedure returns the pointer to the resized memory region.
+*/
+@(require_results)
+small_stack_resize :: proc(
+ s: ^Small_Stack,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := small_stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ return raw_data(bytes), err
+}
- start := uintptr(raw_data(s.data))
- end := start + uintptr(len(s.data))
- curr_addr := uintptr(old_memory)
- if !(start <= curr_addr && curr_addr < end) {
- // panic("Out of bounds memory address passed to stack allocator (resize)");
- return nil, .Invalid_Pointer
- }
+/*
+Resize an allocation.
- if curr_addr >= start+uintptr(s.offset) {
- // NOTE(bill): Treat as a double free
- return nil, nil
- }
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is
+zero-initialized.
- if old_size == size {
- return byte_slice(old_memory, size), nil
- }
+If `old_data` is `nil`, this procedure acts just like `small_stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
- data, err := raw_alloc(s, size, align, mode == .Resize)
- if err == nil {
- runtime.copy(data, byte_slice(old_memory, old_size))
- }
- return data, err
+If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
+memory region specified by `old_data`.
- case .Query_Features:
- set := (^Allocator_Mode_Set)(old_memory)
- if set != nil {
- set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+small_stack_resize_bytes :: proc(
+ s: ^Small_Stack,
+ old_data: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := small_stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc)
+ if bytes != nil {
+ if old_data == nil {
+ zero_slice(bytes)
+ } else if size > len(old_data) {
+ zero_slice(bytes[len(old_data):])
}
- return nil, nil
-
- case .Query_Info:
- return nil, .Mode_Not_Implemented
}
-
- return nil, nil
+ return bytes, err
}
+/*
+Resize an allocation without zero-initialization.
+
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size` and alignment `alignment`. The
+newly allocated memory, if any, is not explicitly zero-initialized.
+
+If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
+memory region located at an address specified by `old_memory`.
-Dynamic_Pool :: struct {
- block_size: int,
- out_band_size: int,
- alignment: int,
-
- unused_blocks: [dynamic]rawptr,
- used_blocks: [dynamic]rawptr,
- out_band_allocations: [dynamic]rawptr,
-
- current_block: rawptr,
- current_pos: rawptr,
- bytes_left: int,
-
- block_allocator: Allocator,
+This procedure returns the pointer to the resized memory region.
+*/
+@(require_results)
+small_stack_resize_non_zeroed :: proc(
+ s: ^Small_Stack,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := small_stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ return raw_data(bytes), err
}
+/*
+Resize an allocation without zero-initialization.
-DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536
-DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size` and alignment `alignment`. The newly allocated memory, if any, is not
+explicitly zero-initialized.
+
+If `old_data` is `nil`, this procedure acts just like `small_stack_alloc()`,
+allocating a memory region `size` bytes in size, aligned on a boundary specified
+by `alignment`.
+
+If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the
+memory region specified by `old_data`.
-dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
- pool := (^Dynamic_Pool)(allocator_data)
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+small_stack_resize_bytes_non_zeroed :: proc(
+ s: ^Small_Stack,
+ old_data: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ if s.data == nil {
+ panic("Small stack is not initialized", loc)
+ }
+ old_memory := raw_data(old_data)
+ old_size := len(old_data)
+ alignment := alignment
+ alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2)
+ if old_memory == nil {
+ return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ }
+ if size == 0 {
+ return nil, nil
+ }
+ start := uintptr(raw_data(s.data))
+ end := start + uintptr(len(s.data))
+ curr_addr := uintptr(old_memory)
+ if !(start <= curr_addr && curr_addr < end) {
+ // panic("Out of bounds memory address passed to stack allocator (resize)");
+ return nil, .Invalid_Pointer
+ }
+ if curr_addr >= start+uintptr(s.offset) {
+ // NOTE(bill): Treat as a double free
+ return nil, nil
+ }
+ if old_size == size {
+ return byte_slice(old_memory, size), nil
+ }
+ data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
+ if err == nil {
+ runtime.copy(data, byte_slice(old_memory, old_size))
+ }
+ return data, err
+}
+small_stack_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ s := cast(^Small_Stack)allocator_data
+ if s.data == nil {
+ return nil, .Invalid_Argument
+ }
switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- return dynamic_pool_alloc_bytes(pool, size)
+ case .Alloc:
+ return small_stack_alloc_bytes(s, size, alignment, loc)
+ case .Alloc_Non_Zeroed:
+ return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc)
case .Free:
- return nil, .Mode_Not_Implemented
+ return nil, small_stack_free(s, old_memory, loc)
case .Free_All:
- dynamic_pool_free_all(pool)
- return nil, nil
- case .Resize, .Resize_Non_Zeroed:
- if old_size >= size {
- return byte_slice(old_memory, size), nil
- }
- data, err := dynamic_pool_alloc_bytes(pool, size)
- if err == nil {
- runtime.copy(data, byte_slice(old_memory, old_size))
- }
- return data, err
-
+ small_stack_free_all(s)
+ case .Resize:
+ return small_stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc)
+ case .Resize_Non_Zeroed:
+ return small_stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
- set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
+ set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
}
return nil, nil
-
case .Query_Info:
- info := (^Allocator_Query_Info)(old_memory)
- if info != nil && info.pointer != nil {
- info.size = pool.block_size
- info.alignment = pool.alignment
- return byte_slice(info, size_of(info^)), nil
- }
- return nil, nil
+ return nil, .Mode_Not_Implemented
}
return nil, nil
}
-@(require_results)
-dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator {
- return Allocator{
- procedure = dynamic_pool_allocator_proc,
- data = pool,
- }
+/* Preserved for compatibility */
+Dynamic_Pool :: Dynamic_Arena
+DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT
+DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT
+dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc
+dynamic_pool_free_all :: dynamic_arena_free_all
+dynamic_pool_reset :: dynamic_arena_reset
+dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes
+dynamic_pool_alloc :: dynamic_arena_alloc
+dynamic_pool_init :: dynamic_arena_init
+dynamic_pool_allocator :: dynamic_arena_allocator
+dynamic_pool_destroy :: dynamic_arena_destroy
+
+/*
+Default block size for dynamic arena.
+*/
+DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536
+
+/*
+Default out-of-band size threshold of the dynamic arena.
+*/
+DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554
+
+/*
+Dynamic arena allocator data.
+*/
+Dynamic_Arena :: struct {
+ block_size: int,
+ out_band_size: int,
+ alignment: int,
+ unused_blocks: [dynamic]rawptr,
+ used_blocks: [dynamic]rawptr,
+ out_band_allocations: [dynamic]rawptr,
+ current_block: rawptr,
+ current_pos: rawptr,
+ bytes_left: int,
+ block_allocator: Allocator,
}
-dynamic_pool_init :: proc(pool: ^Dynamic_Pool,
- block_allocator := context.allocator,
- array_allocator := context.allocator,
- block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT,
- out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT,
- alignment := 8) {
- pool.block_size = block_size
- pool.out_band_size = out_band_size
- pool.alignment = alignment
+/*
+Initialize a dynamic arena.
+
+This procedure initializes a dynamic arena. The specified `block_allocator`
+will be used to allocate arena blocks, and `array_allocator` to allocate the
+arrays tracking the blocks and out-of-band allocations. The blocks have a size
+of `block_size`, and the out-of-band threshold is `out_band_size`. All
+allocations will be aligned to a boundary specified by `alignment`.
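+
+A minimal sketch (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		arena: mem.Dynamic_Arena
+		mem.dynamic_arena_init(&arena, block_size = 4*mem.Kilobyte)
+		defer mem.dynamic_arena_destroy(&arena)
+		nums := make([]int, 64, mem.dynamic_arena_allocator(&arena))
+		assert(len(nums) == 64)
+	}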
+*/
+dynamic_arena_init :: proc(
+ pool: ^Dynamic_Arena,
+ block_allocator := context.allocator,
+ array_allocator := context.allocator,
+ block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT,
+ out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT,
+ alignment := DEFAULT_ALIGNMENT,
+) {
+ pool.block_size = block_size
+ pool.out_band_size = out_band_size
+ pool.alignment = alignment
pool.block_allocator = block_allocator
pool.out_band_allocations.allocator = array_allocator
- pool. unused_blocks.allocator = array_allocator
- pool. used_blocks.allocator = array_allocator
+ pool.unused_blocks.allocator = array_allocator
+ pool.used_blocks.allocator = array_allocator
}
-dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) {
- dynamic_pool_free_all(pool)
- delete(pool.unused_blocks)
- delete(pool.used_blocks)
- delete(pool.out_band_allocations)
+/*
+Dynamic arena allocator.
- zero(pool, size_of(pool^))
-}
+The dynamic arena allocator uses blocks of a specific size, allocated on-demand
+using the block allocator. This allocator acts similarly to the arena
+allocator. All allocations in a block happen contiguously, from start to end.
+If an allocation does not fit into the remaining space of the current block,
+and its size is smaller than the specified out-of-band size, a new block is
+allocated using the `block_allocator` and the allocation is served from that
+new block. If an allocation is bigger than the specified out-of-band size, a
+dedicated block is allocated just for it. This is referred to as an
+*out-of-band allocation*. The out-of-band blocks are kept separately from
+normal blocks.
+
+Just like the arena allocator, the dynamic arena does not support freeing
+individual allocations.
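+
+A minimal sketch of typical use (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		arena: mem.Dynamic_Arena
+		mem.dynamic_arena_init(&arena)
+		defer mem.dynamic_arena_destroy(&arena)
+		ptr, _ := mem.dynamic_arena_alloc(&arena, 128)
+		_ = ptr
+		// Reset retains blocks for reuse; destroy frees everything.
+		mem.dynamic_arena_reset(&arena)
+	}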
+*/
@(require_results)
-dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) {
- data, err := dynamic_pool_alloc_bytes(pool, bytes)
+dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator {
+ return Allocator{
+ procedure = dynamic_arena_allocator_proc,
+ data = a,
+ }
+}
+
+/*
+Destroy a dynamic arena.
+
+This procedure frees all allocations made on a dynamic arena, including the
+unused blocks, as well as the arrays for storing blocks.
+*/
+dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) {
+ dynamic_arena_free_all(a)
+ delete(a.unused_blocks)
+ delete(a.used_blocks)
+ delete(a.out_band_allocations)
+ zero(a, size_of(a^))
+}
+
+@(private="file")
+_dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) {
+ if a.block_allocator.procedure == nil {
+		panic("You must call dynamic_arena_init on a Dynamic_Arena before using it", loc)
+ }
+ if a.current_block != nil {
+ append(&a.used_blocks, a.current_block, loc=loc)
+ }
+ new_block: rawptr
+ if len(a.unused_blocks) > 0 {
+ new_block = pop(&a.unused_blocks)
+ } else {
+ data: []byte
+ data, err = a.block_allocator.procedure(
+ a.block_allocator.data,
+ Allocator_Mode.Alloc,
+ a.block_size,
+ a.alignment,
+ nil,
+ 0,
+ )
+ new_block = raw_data(data)
+ }
+ a.bytes_left = a.block_size
+ a.current_pos = new_block
+ a.current_block = new_block
+ return
+}
+
+/*
+Allocate memory from a dynamic arena.
+
+This procedure allocates `size` bytes of memory from a dynamic arena `a`,
+aligned on a boundary specified by the arena's `alignment`. The allocated
+memory is zero-initialized. This procedure returns a pointer to the newly
+allocated memory region.
+*/
+@(require_results)
+dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
+ data, err := dynamic_arena_alloc_bytes(a, size, loc)
return raw_data(data), err
}
+/*
+Allocate memory from a dynamic arena.
+
+This procedure allocates `size` bytes of memory from a dynamic arena `a`,
+aligned on a boundary specified by the arena's `alignment`. The allocated
+memory is zero-initialized. This procedure returns a slice of the newly
+allocated memory region.
+*/
@(require_results)
-dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) {
- cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) {
- if p.block_allocator.procedure == nil {
- panic("You must call pool_init on a Pool before using it")
- }
+dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+ bytes, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
+ if bytes != nil {
+ zero_slice(bytes)
+ }
+ return bytes, err
+}
- if p.current_block != nil {
- append(&p.used_blocks, p.current_block)
- }
+/*
+Allocate non-initialized memory from a dynamic arena.
- new_block: rawptr
- if len(p.unused_blocks) > 0 {
- new_block = pop(&p.unused_blocks)
- } else {
- data: []byte
- data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
- p.block_size, p.alignment,
- nil, 0)
- new_block = raw_data(data)
- }
+This procedure allocates `size` bytes of memory from a dynamic arena `a`,
+aligned on a boundary specified by the arena's `alignment`. The allocated
+memory is not explicitly zero-initialized. This procedure returns a pointer to
+the newly allocated memory region.
+*/
+@(require_results)
+dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) {
+ data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
+ return raw_data(data), err
+}
- p.bytes_left = p.block_size
- p.current_pos = new_block
- p.current_block = new_block
- return
- }
+/*
+Allocate non-initialized memory from a dynamic arena.
- n := align_formula(bytes, p.alignment)
- if n > p.block_size {
+This procedure allocates `size` bytes of memory from a dynamic arena `a`,
+aligned on a boundary specified by the arena's `alignment`. The allocated
+memory is not explicitly zero-initialized. This procedure returns a slice of
+the newly allocated memory region.
+*/
+@(require_results)
+dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+ n := align_formula(size, a.alignment)
+ if n > a.block_size {
return nil, .Invalid_Argument
}
- if n >= p.out_band_size {
- assert(p.block_allocator.procedure != nil)
- memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc,
- p.block_size, p.alignment,
- nil, 0)
+ if n >= a.out_band_size {
+ assert(a.block_allocator.procedure != nil, "Backing block allocator must be initialized", loc=loc)
+ memory, err := alloc_bytes_non_zeroed(a.block_size, a.alignment, a.block_allocator, loc)
if memory != nil {
- append(&p.out_band_allocations, raw_data(memory))
+ append(&a.out_band_allocations, raw_data(memory), loc = loc)
}
return memory, err
}
-
- if p.bytes_left < n {
- err := cycle_new_block(p)
+ if a.bytes_left < n {
+ err := _dynamic_arena_cycle_new_block(a, loc)
if err != nil {
return nil, err
}
- if p.current_block == nil {
+ if a.current_block == nil {
return nil, .Out_Of_Memory
}
}
-
- memory := p.current_pos
- p.current_pos = ([^]byte)(p.current_pos)[n:]
- p.bytes_left -= n
- return ([^]byte)(memory)[:bytes], nil
+ memory := a.current_pos
+ a.current_pos = ([^]byte)(a.current_pos)[n:]
+ a.bytes_left -= n
+ return ([^]byte)(memory)[:size], nil
}
+/*
+Reset the dynamic arena.
-dynamic_pool_reset :: proc(p: ^Dynamic_Pool) {
- if p.current_block != nil {
- append(&p.unused_blocks, p.current_block)
- p.current_block = nil
+This procedure frees all the allocations owned by the dynamic arena, but
+retains the blocks themselves for reuse.
+*/
+dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
+ if a.current_block != nil {
+ append(&a.unused_blocks, a.current_block, loc=loc)
+ a.current_block = nil
}
-
- for block in p.used_blocks {
- append(&p.unused_blocks, block)
+ for block in a.used_blocks {
+ append(&a.unused_blocks, block, loc=loc)
}
- clear(&p.used_blocks)
+ clear(&a.used_blocks)
+ for allocation in a.out_band_allocations {
+ free(allocation, a.block_allocator, loc=loc)
+ }
+ clear(&a.out_band_allocations)
+ a.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again.
+}
+
+/*
+Free all memory from a dynamic arena.
- for a in p.out_band_allocations {
- free(a, p.block_allocator)
+This procedure frees all the allocations owned by the dynamic arena, including
+the unused blocks.
+*/
+dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) {
+	dynamic_arena_reset(a, loc)
+ for block in a.unused_blocks {
+ free(block, a.block_allocator, loc)
}
- clear(&p.out_band_allocations)
+ clear(&a.unused_blocks)
+}
+
+/*
+Resize an allocation.
+
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size`, aligned on the arena's
+`alignment` boundary. The newly allocated memory, if any, is zero-initialized.
- p.bytes_left = 0 // Make new allocations call `cycle_new_block` again.
+If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
+allocating a memory region `size` bytes in size, aligned on the arena's
+`alignment` boundary.
+
+If `size` is not greater than `old_size`, the existing region is returned
+truncated to `size` bytes; the dynamic arena does not free individual
+allocations.
+
+This procedure returns the pointer to the resized memory region.
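+
+A minimal sketch (illustrative only):
+
+	package example
+
+	import "core:mem"
+
+	main :: proc() {
+		arena: mem.Dynamic_Arena
+		mem.dynamic_arena_init(&arena)
+		defer mem.dynamic_arena_destroy(&arena)
+		ptr, _ := mem.dynamic_arena_alloc(&arena, 64)
+		// Growing allocates a new region and copies the old bytes.
+		ptr, _ = mem.dynamic_arena_resize(&arena, ptr, 64, 128)
+	}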
+*/
+@(require_results)
+dynamic_arena_resize :: proc(
+ a: ^Dynamic_Arena,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := dynamic_arena_resize_bytes(a, byte_slice(old_memory, old_size), size, loc)
+ return raw_data(bytes), err
}
-dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) {
- dynamic_pool_reset(p)
+/*
+Resize an allocation.
+
+This procedure resizes a memory region, specified by `old_data`, to have a size
+`size`, aligned on the arena's `alignment` boundary. The newly allocated
+memory, if any, is zero-initialized.
+
+If `old_data` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
+allocating a memory region `size` bytes in size, aligned on the arena's
+`alignment` boundary.
- for block in p.unused_blocks {
- free(block, p.block_allocator)
+If `size` is not greater than `len(old_data)`, the existing region is returned
+truncated to `size` bytes; the dynamic arena does not free individual
+allocations.
+
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+dynamic_arena_resize_bytes :: proc(
+ a: ^Dynamic_Arena,
+ old_data: []byte,
+ size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_data, size, loc)
+ if bytes != nil {
+ if old_data == nil {
+ zero_slice(bytes)
+ } else if size > len(old_data) {
+ zero_slice(bytes[len(old_data):])
+ }
}
- clear(&p.unused_blocks)
+ return bytes, err
}
+/*
+Resize an allocation without zero-initialization.
-panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int,loc := #caller_location) -> ([]byte, Allocator_Error) {
+This procedure resizes a memory region, defined by its location, `old_memory`,
+and its size, `old_size`, to have a size `size`, aligned on the arena's
+`alignment` boundary. The newly allocated memory, if any, is not explicitly
+zero-initialized.
+
+If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
+allocating a memory region `size` bytes in size, aligned on the arena's
+`alignment` boundary.
+
+If `size` is not greater than `old_size`, the existing region is returned
+truncated to `size` bytes; the dynamic arena does not free individual
+allocations.
+
+This procedure returns the pointer to the resized memory region.
+*/
+@(require_results)
+dynamic_arena_resize_non_zeroed :: proc(
+ a: ^Dynamic_Arena,
+ old_memory: rawptr,
+ old_size: int,
+ size: int,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, byte_slice(old_memory, old_size), size, loc)
+ return raw_data(bytes), err
+}
+
+/*
+Resize an allocation without zero-initialization.
+
+This procedure resizes a memory region, specified by `old_data`, to have the
+size `size`, aligned to the arena's `alignment`. The newly allocated memory,
+if any, is not explicitly zero-initialized.
+
+If `old_data` is `nil`, this procedure acts just like `dynamic_arena_alloc()`,
+allocating a memory region `size` bytes in size, aligned on the arena's
+`alignment` boundary.
+
+If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing
+the memory region specified by `old_data`.
+
+This procedure returns the slice of the resized memory region.
+*/
+@(require_results)
+dynamic_arena_resize_bytes_non_zeroed :: proc(
+ a: ^Dynamic_Arena,
+ old_data: []byte,
+ size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ old_memory := raw_data(old_data)
+ old_size := len(old_data)
+ if old_size >= size {
+ return byte_slice(old_memory, size), nil
+ }
+ data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc)
+ if err == nil {
+ runtime.copy(data, byte_slice(old_memory, old_size))
+ }
+ return data, err
+}
+
+dynamic_arena_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size: int,
+ alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ arena := (^Dynamic_Arena)(allocator_data)
switch mode {
case .Alloc:
- if size > 0 {
- panic("mem: panic allocator, .Alloc called", loc=loc)
- }
+ return dynamic_arena_alloc_bytes(arena, size, loc)
case .Alloc_Non_Zeroed:
- if size > 0 {
- panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc)
- }
- case .Resize:
- if size > 0 {
- panic("mem: panic allocator, .Resize called", loc=loc)
- }
- case .Resize_Non_Zeroed:
- if size > 0 {
- panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc)
- }
+ return dynamic_arena_alloc_bytes_non_zeroed(arena, size, loc)
case .Free:
- if old_memory != nil {
- panic("mem: panic allocator, .Free called", loc=loc)
- }
+ return nil, .Mode_Not_Implemented
case .Free_All:
- panic("mem: panic allocator, .Free_All called", loc=loc)
-
+ dynamic_arena_free_all(arena, loc)
+ case .Resize:
+ return dynamic_arena_resize_bytes(arena, byte_slice(old_memory, old_size), size, loc)
+ case .Resize_Non_Zeroed:
+ return dynamic_arena_resize_bytes_non_zeroed(arena, byte_slice(old_memory, old_size), size, loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
- set^ = {.Query_Features}
+ set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info}
}
return nil, nil
-
case .Query_Info:
- panic("mem: panic allocator, .Query_Info called", loc=loc)
+ info := (^Allocator_Query_Info)(old_memory)
+ if info != nil && info.pointer != nil {
+ info.size = arena.block_size
+ info.alignment = arena.alignment
+ return byte_slice(info, size_of(info^)), nil
+ }
+ return nil, nil
}
-
return nil, nil
}
-@(require_results)
-panic_allocator :: proc() -> Allocator {
- return Allocator{
- procedure = panic_allocator_proc,
- data = nil,
- }
-}
-
-
-
-
-
+/*
+Header of the buddy block.
+*/
Buddy_Block :: struct #align(align_of(uint)) {
size: uint,
is_free: bool,
}
+/*
+Obtain the next buddy block.
+*/
@(require_results)
buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block {
return (^Buddy_Block)(([^]byte)(block)[block.size:])
}
+/*
+Split the block into two by truncating the given block to the given size.
+*/
@(require_results)
buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
block := block
@@ -894,12 +1971,14 @@ buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block {
return nil
}
+/*
+Coalesce contiguous blocks in a range of blocks into one.
+*/
buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
for {
// Keep looping until there are no more buddies to coalesce
block := head
buddy := buddy_block_next(block)
-
no_coalescence := true
for block < tail && buddy < tail { // make sure the buddies are within the range
if block.is_free && buddy.is_free && block.size == buddy.size {
@@ -922,28 +2001,26 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) {
}
}
}
-
if no_coalescence {
return
}
}
}
-
+/*
+Find the best block for storing a given size in a range of blocks.
+*/
@(require_results)
buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block {
assert(size != 0)
-
best_block: ^Buddy_Block
block := head // left
buddy := buddy_block_next(block) // right
-
// The entire memory section between head and tail is free,
// just call 'buddy_block_split' to get the allocation
if buddy == tail && block.is_free {
return buddy_block_split(block, size)
}
-
// Find the block which is the 'best_block' to requested allocation sized
for block < tail && buddy < tail { // make sure the buddies are within the range
// If both buddies are free, coalesce them together
@@ -954,7 +2031,6 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl
if size <= block.size && (best_block == nil || block.size <= best_block.size) {
best_block = block
}
-
block = buddy_block_next(buddy)
if block < tail {
// Delay the buddy block for the next iteration
@@ -962,20 +2038,16 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl
}
continue
}
-
-
if block.is_free && size <= block.size &&
(best_block == nil || block.size <= best_block.size) {
best_block = block
}
-
if buddy.is_free && size <= buddy.size &&
(best_block == nil || buddy.size < best_block.size) {
// If each buddy are the same size, then it makes more sense
// to pick the buddy as it "bounces around" less
best_block = buddy
}
-
if (block.size <= buddy.size) {
block = buddy_block_next(buddy)
if (block < tail) {
@@ -988,23 +2060,33 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl
buddy = buddy_block_next(buddy)
}
}
-
if best_block != nil {
// This will handle the case if the 'best_block' is also the perfect fit
return buddy_block_split(best_block, size)
}
-
// Maybe out of memory
return nil
}
-
+/*
+The buddy allocator data.
+*/
Buddy_Allocator :: struct {
head: ^Buddy_Block,
tail: ^Buddy_Block,
alignment: uint,
}
+/*
+Buddy allocator.
+
+The buddy allocator is a type of allocator that splits the backing buffer into
+multiple regions called buddy blocks. Initially, the allocator only has one
+block with the size of the backing buffer. Upon each allocation, the allocator
+finds the smallest block that can fit the size of the requested memory region, and
+splits the block according to the allocation size. If no block can be found,
+the contiguous free blocks are coalesced and the search is performed again.
+*/
@(require_results)
buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
return Allocator{
@@ -1013,48 +2095,97 @@ buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator {
}
}
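+
+// A minimal usage sketch of the buddy allocator described above; the backing
+// buffer length and the block alignment must both be powers of two:
+//
+//	backing := make([]byte, 64 * Kilobyte)
+//	defer delete(backing)
+//
+//	b: Buddy_Allocator
+//	buddy_allocator_init(&b, backing, 16)
+//
+//	ptr, err := buddy_allocator_alloc(&b, 100)
+//	buddy_allocator_free(&b, ptr)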
-buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint) {
- assert(data != nil)
- assert(is_power_of_two(uintptr(len(data))))
- assert(is_power_of_two(uintptr(alignment)))
+/*
+Initialize the buddy allocator.
+
+This procedure initializes the buddy allocator `b` with a backing buffer `data`
+and block alignment specified by `alignment`. The length of `data` and the
+alignment must each be a power of two.
+*/
+buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, loc := #caller_location) {
+ assert(data != nil)
+	assert(is_power_of_two(uintptr(len(data))), "Size of the backing buffer must be a power of two", loc)
+ assert(is_power_of_two(uintptr(alignment)), "Alignment must be a power of two", loc)
alignment := alignment
if alignment < size_of(Buddy_Block) {
alignment = size_of(Buddy_Block)
}
-
ptr := raw_data(data)
- assert(uintptr(ptr) % uintptr(alignment) == 0, "data is not aligned to minimum alignment")
-
+ assert(uintptr(ptr) % uintptr(alignment) == 0, "data is not aligned to minimum alignment", loc)
b.head = (^Buddy_Block)(ptr)
-
b.head.size = len(data)
b.head.is_free = true
-
b.tail = buddy_block_next(b.head)
-
b.alignment = alignment
}
+/*
+Get the block size required to fit the allocation, including the alignment padding.
+*/
@(require_results)
buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint {
size := size
actual_size := b.alignment
size += size_of(Buddy_Block)
size = align_forward_uint(size, b.alignment)
-
for size > actual_size {
actual_size <<= 1
}
-
return actual_size
}
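+
+// For instance, with `alignment = 16` and `size_of(Buddy_Block) == 16`, a
+// request for `size = 100` grows to 116 bytes with the header, aligns forward
+// to 128, and the doubling loop then settles on a required block size of 128.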
+/*
+Allocate memory from a buddy allocator.
+
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by the allocator's `alignment`. The allocated memory region is zero-initialized.
+This procedure returns a pointer to the allocated memory region.
+*/
@(require_results)
-buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint, zeroed: bool) -> ([]byte, Allocator_Error) {
+buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
+ bytes, err := buddy_allocator_alloc_bytes(b, size)
+ return raw_data(bytes), err
+}
+
+/*
+Allocate memory from a buddy allocator.
+
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by the allocator's `alignment`. The allocated memory region is zero-initialized.
+This procedure returns a slice of the allocated memory region.
+*/
+@(require_results)
+buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
+ bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
+ if bytes != nil {
+ zero_slice(bytes)
+ }
+ return bytes, err
+}
+
+/*
+Allocate non-initialized memory from a buddy allocator.
+
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by the allocator's `alignment`. The allocated memory region is not explicitly
+zero-initialized. This procedure returns a pointer to the allocated memory region.
+*/
+@(require_results)
+buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) {
+ bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size)
+ return raw_data(bytes), err
+}
+
+/*
+Allocate non-initialized memory from a buddy allocator.
+
+This procedure allocates `size` bytes of memory aligned on a boundary specified
+by the allocator's `alignment`. The allocated memory region is not explicitly
+zero-initialized. This procedure returns a slice of the allocated memory region.
+*/
+@(require_results)
+buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) {
if size != 0 {
actual_size := buddy_block_size_required(b, size)
-
found := buddy_block_find_best(b.head, b.tail, actual_size)
if found != nil {
// Try to coalesce all the free buddy blocks and then search again
@@ -1065,60 +2196,71 @@ buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint, zeroed: bool) ->
return nil, .Out_Of_Memory
}
found.is_free = false
-
data := ([^]byte)(found)[b.alignment:][:size]
- if zeroed {
- zero_slice(data)
- }
return data, nil
}
return nil, nil
}
+/*
+Free memory to the buddy allocator.
+
+This procedure frees the memory region allocated at pointer `ptr` and attempts
+to coalesce neighboring free blocks.
+
+If `ptr` is `nil`, this operation is a no-op. If `ptr` does not lie within the
+allocator's backing buffer, `.Invalid_Pointer` is returned.
+*/
buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error {
if ptr != nil {
if !(b.head <= ptr && ptr <= b.tail) {
return .Invalid_Pointer
}
-
block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
block.is_free = true
-
buddy_block_coalescence(b.head, b.tail)
}
return nil
}
-buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int,loc := #caller_location) -> ([]byte, Allocator_Error) {
+/*
+Free all memory to the buddy allocator.
+*/
+buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) {
+ alignment := b.alignment
+ head := ([^]byte)(b.head)
+ tail := ([^]byte)(b.tail)
+ data := head[:ptr_sub(tail, head)]
+ buddy_allocator_init(b, data, alignment)
+}
+buddy_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
b := (^Buddy_Allocator)(allocator_data)
-
switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- return buddy_allocator_alloc(b, uint(size), mode == .Alloc)
+ case .Alloc:
+ return buddy_allocator_alloc_bytes(b, uint(size))
+ case .Alloc_Non_Zeroed:
+ return buddy_allocator_alloc_bytes_non_zeroed(b, uint(size))
case .Resize:
- return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b))
+ return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
case .Resize_Non_Zeroed:
- return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b))
+ return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc)
case .Free:
return nil, buddy_allocator_free(b, old_memory)
case .Free_All:
-
- alignment := b.alignment
- head := ([^]byte)(b.head)
- tail := ([^]byte)(b.tail)
- data := head[:ptr_sub(tail, head)]
- buddy_allocator_init(b, data, alignment)
-
+ buddy_allocator_free_all(b)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info}
}
return nil, nil
-
case .Query_Info:
info := (^Allocator_Query_Info)(old_memory)
if info != nil && info.pointer != nil {
@@ -1126,7 +2268,6 @@ buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
if !(b.head <= ptr && ptr <= b.tail) {
return nil, .Invalid_Pointer
}
-
block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:])
info.size = int(block.size)
info.alignment = int(b.alignment)
@@ -1134,6 +2275,83 @@ buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
}
return nil, nil
}
-
return nil, nil
}
+
+// An allocator that keeps track of allocation sizes and passes them along to resizes.
+// This is useful if you are using a library that needs an equivalent of `realloc` but
+// want to use the Odin allocator interface.
+//
+// Wrap your allocator in this one if you are using any allocator that relies on the
+// old size being correct.
+//
+// The overhead of this allocator is an extra max(alignment, size_of(Header)) bytes per
+// allocation; these bytes are used to store the size and the original pointer.
+Compat_Allocator :: struct {
+ parent: Allocator,
+}
+
+compat_allocator_init :: proc(rra: ^Compat_Allocator, allocator := context.allocator) {
+ rra.parent = allocator
+}
+
+compat_allocator :: proc(rra: ^Compat_Allocator) -> Allocator {
+ return Allocator{
+ data = rra,
+ procedure = compat_allocator_proc,
+ }
+}
+
+compat_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr, old_size: int,
+ location := #caller_location) -> (data: []byte, err: Allocator_Error) {
+ size, old_size := size, old_size
+
+ Header :: struct {
+ size: int,
+ ptr: rawptr,
+ }
+
+ rra := (^Compat_Allocator)(allocator_data)
+ switch mode {
+ case .Alloc, .Alloc_Non_Zeroed:
+ a := max(alignment, size_of(Header))
+ size += a
+ assert(size >= 0, "overflow")
+
+ allocation := rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location) or_return
+ #no_bounds_check data = allocation[a:]
+
+ ([^]Header)(raw_data(data))[-1] = {
+ size = size,
+ ptr = raw_data(allocation),
+ }
+ return
+
+ case .Free:
+ header := ([^]Header)(old_memory)[-1]
+ return rra.parent.procedure(rra.parent.data, mode, size, alignment, header.ptr, header.size, location)
+
+ case .Resize, .Resize_Non_Zeroed:
+ header := ([^]Header)(old_memory)[-1]
+
+		a := max(alignment, size_of(Header))
+ size += a
+ assert(size >= 0, "overflow")
+
+ allocation := rra.parent.procedure(rra.parent.data, mode, size, alignment, header.ptr, header.size, location) or_return
+ #no_bounds_check data = allocation[a:]
+
+ ([^]Header)(raw_data(data))[-1] = {
+ size = size,
+ ptr = raw_data(allocation),
+ }
+ return
+
+ case .Free_All, .Query_Info, .Query_Features:
+ return rra.parent.procedure(rra.parent.data, mode, size, alignment, old_memory, old_size, location)
+
+ case: unreachable()
+ }
+}
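+
+// A minimal usage sketch: wrap a backing allocator so that resizes and frees
+// receive a correct old size from the hidden header, even when the caller
+// passes `old_size = 0`:
+//
+//	ca: Compat_Allocator
+//	compat_allocator_init(&ca, context.allocator)
+//	context.allocator = compat_allocator(&ca)
+//
+//	ptr, _ := alloc(100)
+//	ptr, _  = resize(ptr, 0, 200) // the stored size is used instead of 0
+//	free(ptr)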
diff --git a/core/mem/doc.odin b/core/mem/doc.odin
index 44c93f798..98755d797 100644
--- a/core/mem/doc.odin
+++ b/core/mem/doc.odin
@@ -1,34 +1,114 @@
/*
-package mem implements various types of allocators.
+The `mem` package implements various allocators and provides utility procedures
+for dealing with memory, pointers and slices.
+The documentation below describes the basic concepts applicable to the `mem`
+package.
-An example of how to use the `Tracking_Allocator` to track subsequent allocations
-in your program and report leaks and bad frees:
+## Pointers, multipointers, and slices
-Example:
- package foo
+A *pointer* is an abstraction of an *address*, a numeric value representing the
+location of an object in memory. That object is said to be *pointed to* by the
+pointer. To obtain the address stored in a pointer, cast it to `uintptr`.
- import "core:mem"
- import "core:fmt"
+A multipointer is a pointer that points to multiple objects. Unlike a plain
+pointer, a multipointer can be indexed, but it does not have a definite length.
+A slice is a pointer to multiple objects equipped with a length, specifying the
+number of objects the slice points to.
- _main :: proc() {
- // do stuff
- }
+When an object's value is read through a pointer, that operation is called a
+*load* operation. When memory is written through a pointer, that operation is
+called a *store* operation. Both of these operations can be called *memory
+access operations*.
- main :: proc() {
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- defer mem.tracking_allocator_destroy(&track)
- context.allocator = mem.tracking_allocator(&track)
+## Allocators
- _main()
+In C and C++ memory models, allocations of objects in memory are typically
+treated individually with a generic allocator (the `malloc` procedure), which
+in some scenarios can lead to poor cache utilization, slower memory management
+of individual objects, and growing complexity of the code that needs to keep
+track of the pointers and their lifetimes.
- for _, leak in track.allocation_map {
- fmt.printf("%v leaked %m\n", leak.location, leak.size)
- }
- for bad_free in track.bad_free_array {
- fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
- }
- }
+Using different kinds of *allocators* for different purposes can solve these
+problems. The allocators are typically optimized for specific use-cases and
+can potentially simplify the memory management code.
+
+For example, in the context of making a game, an arena allocator could simplify
+allocations of temporary memory: the programmer doesn't have to keep track of
+which objects need to be freed, because at the end of every frame the whole
+allocator is reset to its initial state and all objects are freed at once.
+
+Allocators differ in their restrictions on object lifetimes, sizes and
+alignment, and can be a significant gain if used properly. Odin supports
+allocators at the language level.
+
+Operations such as `new`, `free` and `delete` will, by default, use
+`context.allocator`, which can be overridden by the user. When an override
+happens, all called procedures will inherit the new context and use the same
+allocator.
+
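+For example, a minimal sketch that routes all allocations in a scope through
+a tracking allocator (using this package's `Tracking_Allocator`):
+
+	track: mem.Tracking_Allocator
+	mem.tracking_allocator_init(&track, context.allocator)
+	context.allocator = mem.tracking_allocator(&track)
+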
+To simplify the description of some allocator-related procedures, we will
+define one concept: ownership. If memory was allocated via a specific
+allocator, that allocator is said to be the *owner* of that memory region.
+Note that, unlike Rust, Odin's memory ownership model is not strict.
+
+## Alignment
+
+An address is said to be *aligned to `N` bytes* if the address's numeric
+value is divisible by `N`. The number `N` in this case can be referred to as
+the *alignment boundary*. Typically an alignment is a power-of-two integer
+value.
+
+The *natural alignment* of an object is typically equal to its size. For
+example, a 16-bit integer has a natural alignment of 2 bytes. When an object is
+not located on its natural alignment boundary, accesses to that object are
+considered *unaligned*.
+
+Some machines issue a hardware **exception**, or experience **slowdowns** when a
+memory access operation occurs from an unaligned address. Examples of such
+operations are:
+
+- Some SIMD instructions on x86 require all memory accesses to be on an
+  address that is aligned to 16 bytes.
+- On ARM, unaligned loads can incur an extra cycle penalty.
+
+As such, many operations that allocate memory in this package allow the caller
+to explicitly specify the alignment of allocated pointers/slices. The default
+alignment for all operations is specified in the constant `mem.DEFAULT_ALIGNMENT`.
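+
+For example, a sketch of requesting a 64-byte-aligned allocation:
+
+	ptr, err := mem.alloc(size = 256, alignment = 64)
+	assert(uintptr(ptr) % 64 == 0)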
+
+## Zero by default
+
+Whenever new memory is allocated, via an allocator or on the stack, by default
+Odin will zero-initialize that memory, even if it wasn't explicitly
+initialized. This brings convenience in certain scenarios and eases debugging,
+which will not be described in detail here.
+
+However, zero-initialization can be a cause of slowdowns when allocating large
+buffers. For this reason, allocators have `*_non_zeroed` modes of allocation
+that allow the user to request uninitialized memory, avoiding the relatively
+expensive zero-filling of the buffer.
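+
+For example, a sketch that requests uninitialized memory for a large buffer
+that is about to be overwritten anyway:
+
+	buf, err := mem.alloc_bytes_non_zeroed(4 * mem.Megabyte)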
+
+## Naming conventions
+
+The word `size` is used to denote the **size in bytes**. The word `length` is
+used to denote the count of objects.
+
+The allocation procedures use the following conventions:
+
+- If the name contains `alloc_bytes` or `resize_bytes`, then the procedure takes
+ in slice parameters and returns slices.
+- If the procedure name contains `alloc` or `resize`, then the procedure takes
+ in a raw pointer and returns raw pointers.
+- If the procedure name contains `free_bytes`, then the procedure takes in a
+ slice.
+- If the procedure name contains `free`, then the procedure takes in a pointer.
+
+Higher-level allocation procedures use the following naming scheme (see the
+sketch after this list):
+
+- `new`: Allocate a single object
+- `free`: Free a single object (opposite of `new`)
+- `make`: Allocate a group of objects
+- `delete`: Free a group of objects (opposite of `make`)
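+
+For example, the pairings in a short sketch:
+
+	ptr := new(int)           // single object
+	arr := make([dynamic]int) // group of objects
+	free(ptr)
+	delete(arr)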
*/
package mem
diff --git a/core/mem/mem.odin b/core/mem/mem.odin
index d423cc1eb..67ed56c39 100644
--- a/core/mem/mem.odin
+++ b/core/mem/mem.odin
@@ -3,49 +3,185 @@ package mem
import "base:runtime"
import "base:intrinsics"
-Byte :: runtime.Byte
+/*
+The size, in bytes, of a single byte.
+
+This constant is equal to the value of `1`.
+*/
+Byte :: runtime.Byte
+
+/*
+The size, in bytes, of one kilobyte.
+
+This constant is equal to the amount of bytes in one kilobyte (also known as
+kibibyte), which is equal to 1024 bytes.
+*/
Kilobyte :: runtime.Kilobyte
+
+/*
+The size, in bytes, of one megabyte.
+
+This constant is equal to the amount of bytes in one megabyte (also known as
+mebibyte), which is equal to 1024 kilobytes.
+*/
Megabyte :: runtime.Megabyte
+
+/*
+The size, in bytes, of one gigabyte.
+
+This constant is equal to the amount of bytes in one gigabyte (also known as
+gibibyte), which is equal to 1024 megabytes.
+*/
Gigabyte :: runtime.Gigabyte
+
+/*
+The size, in bytes, of one terabyte.
+
+This constant is equal to the amount of bytes in one terabyte (also known as
+tebibyte), which is equal to 1024 gigabytes.
+*/
Terabyte :: runtime.Terabyte
+
+/*
+The size, in bytes, of one petabyte.
+
+This constant is equal to the amount of bytes in one petabyte (also known as
+pebibyte), which is equal to 1024 terabytes.
+*/
Petabyte :: runtime.Petabyte
-Exabyte :: runtime.Exabyte
+/*
+The size, in bytes, of one exabyte.
+
+This constant is equal to the amount of bytes in one exabyte (also known as
+exbibyte), which is equal to 1024 petabytes.
+*/
+Exabyte :: runtime.Exabyte
+
+/*
+Set each byte of a memory range to a specific value.
+
+This procedure copies the value specified by the `value` parameter into each of the
+`len` bytes of a memory range, located at address `data`.
+
+This procedure returns the pointer to `data`.
+*/
set :: proc "contextless" (data: rawptr, value: byte, len: int) -> rawptr {
return runtime.memset(data, i32(value), len)
}
+
+/*
+Set each byte of a memory range to zero.
+
+This procedure copies the value `0` into the `len` bytes of a memory range,
+starting at address `data`.
+
+This procedure returns the pointer to `data`.
+*/
zero :: proc "contextless" (data: rawptr, len: int) -> rawptr {
intrinsics.mem_zero(data, len)
return data
}
+
+/*
+Set each byte of a memory range to zero.
+
+This procedure copies the value `0` into the `len` bytes of a memory range,
+starting at address `data`.
+
+This procedure returns the pointer to `data`.
+
+Unlike the `zero()` procedure, which can be optimized away or reordered by the
+compiler under certain circumstances, `zero_explicit()` procedure can not be
+optimized away or reordered with other memory access operations, and the
+compiler assumes volatile semantics of the memory.
+*/
zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
// This routine tries to avoid the compiler optimizing away the call,
- // so that it is always executed. It is intended to provided
+ // so that it is always executed. It is intended to provide
// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
// memset_s call.
intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
return data
}
+
+/*
+Zero-fill the memory of an object.
+
+This procedure sets each byte of the object pointed to by the pointer `item`
+to zero, and returns the pointer to `item`.
+*/
zero_item :: proc "contextless" (item: $P/^$T) -> P {
intrinsics.mem_zero(item, size_of(T))
return item
}
+
+/*
+Zero-fill the memory of the slice.
+
+This procedure sets each byte of the slice pointed to by the slice `data`
+to zero, and returns the slice `data`.
+*/
zero_slice :: proc "contextless" (data: $T/[]$E) -> T {
zero(raw_data(data), size_of(E)*len(data))
return data
}
+/*
+Copy bytes from one memory range to another.
+
+This procedure copies `len` bytes of data from the memory range pointed to by
+the `src` pointer into the memory range pointed to by the `dst` pointer, and
+returns the `dst` pointer. The memory ranges may overlap.
+*/
copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
intrinsics.mem_copy(dst, src, len)
return dst
}
+
+/*
+Copy bytes between two non-overlapping memory ranges.
+
+This procedure copies `len` bytes of data from the memory range pointed to by
+the `src` pointer into the memory range pointed to by the `dst` pointer, and
+returns the `dst` pointer.
+
+This is a slightly more optimized version of the `copy` procedure that requires
+the memory ranges specified by the parameters to be non-overlapping. If the
+memory ranges specified by the `dst` and `src` pointers overlap, the behavior
+of this procedure may be unpredictable.
+*/
copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
intrinsics.mem_copy_non_overlapping(dst, src, len)
return dst
}
+/*
+Compare two memory ranges defined by slices.
+
+This procedure performs a byte-by-byte comparison between memory ranges
+specified by slices `a` and `b`, and returns a value, specifying their relative
+ordering.
+
+If the return value is:
+- Equal to `-1`, then `a` is "smaller" than `b`.
+- Equal to `+1`, then `a` is "bigger" than `b`.
+- Equal to `0`, then `a` and `b` are equal.
+
+The comparison is performed as follows:
+1. Each byte, up to `min(len(a), len(b))` bytes, is compared between `a` and `b`.
+ - If the byte in slice `a` is smaller than a byte in slice `b`, then comparison
+ stops and this procedure returns `-1`.
+ - If the byte in slice `a` is bigger than a byte in slice `b`, then comparison
+ stops and this procedure returns `+1`.
+ - Otherwise the comparison continues until `min(len(a), len(b))` are compared.
+2. If all the bytes in the range are equal, then the lengths of the slices are
+ compared.
+ - If the length of slice `a` is smaller than the length of slice `b`, then `-1` is returned.
+   - If the length of slice `b` is smaller than the length of slice `a`, then `+1` is returned.
+ - Otherwise `0` is returned.
+*/
@(require_results)
compare :: proc "contextless" (a, b: []byte) -> int {
res := compare_byte_ptrs(raw_data(a), raw_data(b), min(len(a), len(b)))
@@ -57,16 +193,89 @@ compare :: proc "contextless" (a, b: []byte) -> int {
return res
}
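+
+// For instance:
+//
+//	compare([]byte{1, 2, 3}, []byte{1, 2, 4}) == -1 // differing byte decides
+//	compare([]byte{1, 2}, []byte{1, 2, 0})    == -1 // shorter slice orders first
+//	compare([]byte{5}, []byte{5})             ==  0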
+/*
+Compare two memory ranges defined by byte pointers.
+
+This procedure performs a byte-by-byte comparison between memory ranges of size
+`n` located at addresses `a` and `b`, and returns a value, specifying their relative
+ordering.
+
+If the return value is:
+- Equal to `-1`, then `a` is "smaller" than `b`.
+- Equal to `+1`, then `a` is "bigger" than `b`.
+- Equal to `0`, then `a` and `b` are equal.
+
+The comparison is performed as follows:
+1. Each byte, up to `n` bytes, is compared between `a` and `b`.
+ - If the byte in `a` is smaller than a byte in `b`, then comparison stops
+ and this procedure returns `-1`.
+ - If the byte in `a` is bigger than a byte in `b`, then comparison stops
+ and this procedure returns `+1`.
+ - Otherwise the comparison continues until `n` bytes are compared.
+2. If all the bytes in the range are equal, this procedure returns `0`.
+*/
@(require_results)
compare_byte_ptrs :: proc "contextless" (a, b: ^byte, n: int) -> int #no_bounds_check {
return runtime.memory_compare(a, b, n)
}
+/*
+Compare two memory ranges defined by pointers.
+
+This procedure performs a byte-by-byte comparison between memory ranges of size
+`n` located at addresses `a` and `b`, and returns a value, specifying their relative
+ordering.
+
+If the return value is:
+- Equal to `-1`, then `a` is "smaller" than `b`.
+- Equal to `+1`, then `a` is "bigger" than `b`.
+- Equal to `0`, then `a` and `b` are equal.
+
+The comparison is performed as follows:
+1. Each byte, up to `n` bytes, is compared between `a` and `b`.
+ - If the byte in `a` is smaller than a byte in `b`, then comparison stops
+ and this procedure returns `-1`.
+ - If the byte in `a` is bigger than a byte in `b`, then comparison stops
+ and this procedure returns `+1`.
+ - Otherwise the comparison continues until `n` bytes are compared.
+2. If all the bytes in the range are equal, this procedure returns `0`.
+*/
+@(require_results)
+compare_ptrs :: proc "contextless" (a, b: rawptr, n: int) -> int {
+ return compare_byte_ptrs((^byte)(a), (^byte)(b), n)
+}
+
+/*
+Check whether two objects are equal on the binary level.
+
+This procedure checks whether the memory ranges occupied by objects `a` and
+`b` are equal. See `compare_byte_ptrs()` for how this comparison is done.
+*/
+@(require_results)
+simple_equal :: proc "contextless" (a, b: $T) -> bool where intrinsics.type_is_simple_compare(T) {
+ a, b := a, b
+ return compare_byte_ptrs((^byte)(&a), (^byte)(&b), size_of(T)) == 0
+}
+
+/*
+Check if the memory range defined by a slice is zero-filled.
+
+This procedure checks whether every byte of the slice specified by the
+parameter `data` is zero. If all bytes of the slice are zero, this procedure
+returns `true`. Otherwise this procedure returns `false`.
+*/
@(require_results)
check_zero :: proc(data: []byte) -> bool {
return check_zero_ptr(raw_data(data), len(data))
}
+/*
+Check if the memory range defined by a pointer is zero-filled.
+
+This procedure checks whether each of the `len` bytes, starting at address
+`ptr` is zero. If all bytes of this range are zero, this procedure returns
+`true`. Otherwise this procedure returns `false`.
+*/
@(require_results)
check_zero_ptr :: proc(ptr: rawptr, len: int) -> bool {
switch {
@@ -81,57 +290,99 @@ check_zero_ptr :: proc(ptr: rawptr, len: int) -> bool {
case 4: return intrinsics.unaligned_load((^u32)(ptr)) == 0
case 8: return intrinsics.unaligned_load((^u64)(ptr)) == 0
}
-
start := uintptr(ptr)
start_aligned := align_forward_uintptr(start, align_of(uintptr))
end := start + uintptr(len)
end_aligned := align_backward_uintptr(end, align_of(uintptr))
-
for b in start..<start_aligned {
if (^byte)(b)^ != 0 {
return false
}
}
-
for b := start_aligned; b < end_aligned; b += size_of(uintptr) {
if (^uintptr)(b)^ != 0 {
return false
}
}
-
for b in end_aligned..<end {
if (^byte)(b)^ != 0 {
return false
}
}
-
return true
}
-@(require_results)
-simple_equal :: proc "contextless" (a, b: $T) -> bool where intrinsics.type_is_simple_compare(T) {
- a, b := a, b
- return compare_byte_ptrs((^byte)(&a), (^byte)(&b), size_of(T)) == 0
-}
+/*
+Offset a given pointer by a given amount.
-@(require_results)
-compare_ptrs :: proc "contextless" (a, b: rawptr, n: int) -> int {
- return compare_byte_ptrs((^byte)(a), (^byte)(b), n)
-}
+This procedure offsets the pointer `ptr` to an object of type `T` by the number
+of bytes specified by `offset*size_of(T)`, and returns the resulting pointer.
+
+**Note**: Prefer to use multipointer types, if possible.
+*/
ptr_offset :: intrinsics.ptr_offset
+
+/*
+Compute the difference between two pointers.
+
+This procedure returns the difference between two pointers to objects of type
+`T`, measured in objects of type `T` (the byte difference divided by
+`size_of(T)`).
+*/
ptr_sub :: intrinsics.ptr_sub
+/*
+Construct a slice from pointer and length.
+
+This procedure creates a slice that points to `len` objects located
+at the address specified by `ptr`.
+*/
@(require_results)
slice_ptr :: proc "contextless" (ptr: ^$T, len: int) -> []T {
return ([^]T)(ptr)[:len]
}
+/*
+Construct a byte slice from raw pointer and length.
+
+This procedure creates a byte slice that points to `len` bytes
+located at the address specified by `data`.
+*/
@(require_results)
byte_slice :: #force_inline proc "contextless" (data: rawptr, #any_int len: int) -> []byte {
return ([^]u8)(data)[:max(len, 0)]
}
+/*
+Create a byte slice from pointer and length.
+
+This procedure creates a byte slice covering `len` objects of type `T`
+(`len*size_of(T)` bytes), starting at the address specified by `ptr`.
+*/
+@(require_results)
+ptr_to_bytes :: proc "contextless" (ptr: ^$T, len := 1) -> []byte {
+ return transmute([]byte)Raw_Slice{ptr, len*size_of(T)}
+}
+
+/*
+Obtain a byte slice pointing to the contents of `any`.
+
+This procedure returns a byte slice pointing to the contents of the specified
+value of the `any` type.
+*/
+@(require_results)
+any_to_bytes :: proc "contextless" (val: any) -> []byte {
+ ti := type_info_of(val.id)
+ size := ti != nil ? ti.size : 0
+ return transmute([]byte)Raw_Slice{val.data, size}
+}
+
+/*
+Obtain a byte slice from any slice.
+
+This procedure returns a byte slice that points to the same bytes as the
+slice specified by `slice`.
+*/
@(require_results)
slice_to_bytes :: proc "contextless" (slice: $E/[]$T) -> []byte {
s := transmute(Raw_Slice)slice
@@ -139,6 +390,15 @@ slice_to_bytes :: proc "contextless" (slice: $E/[]$T) -> []byte {
return transmute([]byte)s
}
+/*
+Transmute slice to a different type.
+
+This procedure performs an operation similar to transmute, returning a slice of
+type `T` that points to the same bytes as the slice specified by the `slice`
+parameter. Unlike a plain transmute operation, this procedure adjusts the length
+of the resulting slice, such that the resulting slice points to the correct
+number of objects to cover the memory region pointed to by `slice`.
+*/
@(require_results)
slice_data_cast :: proc "contextless" ($T: typeid/[]$A, slice: $S/[]$B) -> T {
when size_of(A) == 0 || size_of(B) == 0 {
@@ -150,12 +410,25 @@ slice_data_cast :: proc "contextless" ($T: typeid/[]$A, slice: $S/[]$B) -> T {
}
}
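+
+// For instance, a sketch of reinterpreting bytes as 16-bit integers; the
+// length is adjusted so the result covers only whole objects:
+//
+//	bytes := []byte{1, 2, 3, 4, 5}
+//	words := slice_data_cast([]u16, bytes) // len(words) == 2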
+/*
+Obtain data and length of a slice.
+
+This procedure returns the pointer to the start of the memory region pointed to
+by slice `slice` and the length of the slice.
+*/
@(require_results)
slice_to_components :: proc "contextless" (slice: $E/[]$T) -> (data: ^T, len: int) {
s := transmute(Raw_Slice)slice
return (^T)(s.data), s.len
}
+/*
+Create a dynamic array from slice.
+
+This procedure creates a dynamic array, using the slice `backing` as the
+backing buffer for the dynamic array. The resulting dynamic array cannot grow
+beyond the size of the specified slice.
+*/
@(require_results)
buffer_from_slice :: proc "contextless" (backing: $T/[]$E) -> [dynamic]E {
return transmute([dynamic]E)Raw_Dynamic_Array{
@@ -169,19 +442,12 @@ buffer_from_slice :: proc "contextless" (backing: $T/[]$E) -> [dynamic]E {
}
}
-@(require_results)
-ptr_to_bytes :: proc "contextless" (ptr: ^$T, len := 1) -> []byte {
- return transmute([]byte)Raw_Slice{ptr, len*size_of(T)}
-}
-
-@(require_results)
-any_to_bytes :: proc "contextless" (val: any) -> []byte {
- ti := type_info_of(val.id)
- size := ti != nil ? ti.size : 0
- return transmute([]byte)Raw_Slice{val.data, size}
-}
-
+/*
+Check whether a number is a power of two.
+
+This procedure checks whether a given pointer-sized unsigned integer contains
+a power-of-two value.
+*/
@(require_results)
is_power_of_two :: proc "contextless" (x: uintptr) -> bool {
if x <= 0 {
@@ -190,66 +456,167 @@ is_power_of_two :: proc "contextless" (x: uintptr) -> bool {
return (x & (x-1)) == 0
}
-@(require_results)
-align_forward :: proc(ptr: rawptr, align: uintptr) -> rawptr {
- return rawptr(align_forward_uintptr(uintptr(ptr), align))
+/*
+Check if a pointer is aligned.
+
+This procedure checks whether a pointer `x` is aligned to a boundary specified
+by `align`, and returns `true` if the pointer is aligned, and `false` otherwise.
+The alignment must be a power of two.
+*/
+is_aligned :: proc "contextless" (x: rawptr, align: int) -> bool {
+ p := uintptr(x)
+	return (p & uintptr(align - 1)) == 0
}
+/*
+Align uintptr forward.
+
+This procedure returns the next address after `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
align_forward_uintptr :: proc(ptr, align: uintptr) -> uintptr {
assert(is_power_of_two(align))
+ return (ptr + align-1) & ~(align-1)
+}
- p := ptr
- modulo := p & (align-1)
- if modulo != 0 {
- p += align - modulo
- }
- return p
+/*
+Align pointer forward.
+
+This procedure returns the next address after `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
+@(require_results)
+align_forward :: proc(ptr: rawptr, align: uintptr) -> rawptr {
+ return rawptr(align_forward_uintptr(uintptr(ptr), align))
}
+/*
+Align int forward.
+
+This procedure returns the next address after `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
align_forward_int :: proc(ptr, align: int) -> int {
return int(align_forward_uintptr(uintptr(ptr), uintptr(align)))
}
+
+/*
+Align uint forward.
+
+This procedure returns the next address after `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
align_forward_uint :: proc(ptr, align: uint) -> uint {
return uint(align_forward_uintptr(uintptr(ptr), uintptr(align)))
}
+/*
+Align uintptr backwards.
+
+This procedure returns the previous address before `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
-align_backward :: proc(ptr: rawptr, align: uintptr) -> rawptr {
- return rawptr(align_backward_uintptr(uintptr(ptr), align))
+align_backward_uintptr :: proc(ptr, align: uintptr) -> uintptr {
+ assert(is_power_of_two(align))
+ return ptr & ~(align-1)
}
+/*
+Align rawptr backwards.
+
+This procedure returns the previous address before `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
-align_backward_uintptr :: proc(ptr, align: uintptr) -> uintptr {
- return align_forward_uintptr(ptr - align + 1, align)
+align_backward :: proc(ptr: rawptr, align: uintptr) -> rawptr {
+ return rawptr(align_backward_uintptr(uintptr(ptr), align))
}
+/*
+Align int backwards.
+
+This procedure returns the previous address before `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
align_backward_int :: proc(ptr, align: int) -> int {
return int(align_backward_uintptr(uintptr(ptr), uintptr(align)))
}
+
+/*
+Align uint backwards.
+
+This procedure returns the previous address before `ptr` that is located on the
+alignment boundary specified by `align`. If `ptr` is already aligned to `align`
+bytes, `ptr` is returned.
+
+The specified alignment must be a power of 2.
+*/
@(require_results)
align_backward_uint :: proc(ptr, align: uint) -> uint {
return uint(align_backward_uintptr(uintptr(ptr), uintptr(align)))
}
+/*
+Create a context with a given allocator.
+
+This procedure returns a copy of the current context with the allocator replaced
+by the allocator `a`.
+*/
@(require_results)
context_from_allocator :: proc(a: Allocator) -> type_of(context) {
context.allocator = a
return context
}
+/*
+Copy the object behind a pointer into a value.
+
+This procedure copies the object of type `T` pointed to by the pointer `ptr`
+into a new stack-allocated value and returns that value.
+*/
@(require_results)
reinterpret_copy :: proc "contextless" ($T: typeid, ptr: rawptr) -> (value: T) {
copy(&value, ptr, size_of(T))
return
}
+/*
+Dynamic array with a fixed-capacity buffer.
+
+This type represents dynamic arrays with a fixed-size backing buffer. Upon
+allocating memory beyond the maximum capacity, allocations from fixed
+byte buffers return `nil` and no error.
+*/
Fixed_Byte_Buffer :: distinct [dynamic]byte
+/*
+Create a fixed byte buffer from a slice.
+*/
@(require_results)
make_fixed_byte_buffer :: proc "contextless" (backing: []byte) -> Fixed_Byte_Buffer {
s := transmute(Raw_Slice)backing
@@ -264,40 +631,60 @@ make_fixed_byte_buffer :: proc "contextless" (backing: []byte) -> Fixed_Byte_Buf
return transmute(Fixed_Byte_Buffer)d
}
+/*
+General-purpose align formula.
-
+This procedure is equivalent to `align_forward`, but it does not require the
+alignment to be a power of two.
+*/
@(require_results)
align_formula :: proc "contextless" (size, align: int) -> int {
result := size + align-1
return result - result%align
}
+/*
+Calculate the padding for header preceding aligned data.
+
+This procedure returns the padding following the specified pointer `ptr` that
+is able to fit a header of size `header_size` immediately preceding the
+memory region aligned on a boundary specified by `align`. See the following
+diagram for a visual representation.
+
+              header size
+              |<-------->|
+   +----------+----------+------------- - - -
+   |          |  HEADER  | DATA...
+   +----------+----------+------------- - - -
+   ^                     ^
+   |<------------------->|
+   |       padding       |
+  ptr                aligned ptr
+
+The procedure takes in `ptr` and `header_size`, as well as the required
+alignment for `DATA`. The return value is the padding between `ptr` and the
+aligned pointer that is able to fit the header.
+*/
@(require_results)
calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, header_size: int) -> int {
p, a := ptr, align
modulo := p & (a-1)
-
padding := uintptr(0)
if modulo != 0 {
padding = a - modulo
}
-
needed_space := uintptr(header_size)
if padding < needed_space {
needed_space -= padding
-
if needed_space & (a-1) > 0 {
padding += align * (1+(needed_space/align))
} else {
padding += align * (needed_space/align)
}
}
-
return int(padding)
}
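+
+// A worked example: with `ptr = 13`, `align = 8` and `header_size = 8`, the
+// padding to the next boundary is 3 bytes, too small for the header, so one
+// more alignment step is added: the procedure returns 11, placing the aligned
+// data at address 24 and the header at addresses 16..<24.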
-
-
@(require_results, deprecated="prefer 'slice.clone'")
clone_slice :: proc(slice: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> (new_slice: T) {
new_slice, _ = make(T, len(slice), allocator, loc)
diff --git a/core/mem/mutex_allocator.odin b/core/mem/mutex_allocator.odin
index 6ede219ac..b8062bca1 100644
--- a/core/mem/mutex_allocator.odin
+++ b/core/mem/mutex_allocator.odin
@@ -3,17 +3,31 @@ package mem
import "core:sync"
+/*
+The data for mutex allocator.
+*/
Mutex_Allocator :: struct {
backing: Allocator,
mutex: sync.Mutex,
}
+/*
+Initialize the mutex allocator.
+
+This procedure initializes the mutex allocator using `backin_allocator` as the
+allocator that will be used to pass all allocation requests through.
+*/
mutex_allocator_init :: proc(m: ^Mutex_Allocator, backing_allocator: Allocator) {
m.backing = backing_allocator
m.mutex = {}
}
+/*
+Mutex allocator.
+
+The mutex allocator is a wrapper around another allocator that is used to
+serialize all allocation requests across multiple threads.
+*/
@(require_results)
mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator {
return Allocator{
@@ -22,11 +36,16 @@ mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator {
}
}
-mutex_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) {
+mutex_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size: int,
+ alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> (result: []byte, err: Allocator_Error) {
m := (^Mutex_Allocator)(allocator_data)
-
sync.mutex_guard(&m.mutex)
return m.backing.procedure(m.backing.data, mode, size, alignment, old_memory, old_size, loc)
}
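+
+// A minimal usage sketch: wrap a non-thread-safe backing allocator before
+// sharing it between threads:
+//
+//	ma: Mutex_Allocator
+//	mutex_allocator_init(&ma, context.allocator)
+//	context.allocator = mutex_allocator(&ma)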
diff --git a/core/mem/raw.odin b/core/mem/raw.odin
index f56206957..41c91555e 100644
--- a/core/mem/raw.odin
+++ b/core/mem/raw.odin
@@ -3,26 +3,100 @@ package mem
import "base:builtin"
import "base:runtime"
-Raw_Any :: runtime.Raw_Any
-Raw_String :: runtime.Raw_String
-Raw_Cstring :: runtime.Raw_Cstring
-Raw_Slice :: runtime.Raw_Slice
+/*
+Memory layout of the `any` type.
+*/
+Raw_Any :: runtime.Raw_Any
+
+/*
+Memory layout of the `string` type.
+*/
+Raw_String :: runtime.Raw_String
+
+/*
+Memory layout of the `cstring` type.
+*/
+Raw_Cstring :: runtime.Raw_Cstring
+
+/*
+Memory layout of `[]T` types.
+*/
+Raw_Slice :: runtime.Raw_Slice
+
+/*
+Memory layout of `[dynamic]T` types.
+*/
Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array
-Raw_Map :: runtime.Raw_Map
-Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
-Raw_Complex32 :: runtime.Raw_Complex32
-Raw_Complex64 :: runtime.Raw_Complex64
-Raw_Complex128 :: runtime.Raw_Complex128
-Raw_Quaternion64 :: runtime.Raw_Quaternion64
+/*
+Memory layout of `map[K]V` types.
+*/
+Raw_Map :: runtime.Raw_Map
+
+/*
+Memory layout of `#soa []T` types.
+*/
+Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
+
+/*
+Memory layout of the `complex32` type.
+*/
+Raw_Complex32 :: runtime.Raw_Complex32
+
+/*
+Memory layout of the `complex64` type.
+*/
+Raw_Complex64 :: runtime.Raw_Complex64
+
+/*
+Memory layout of the `complex128` type.
+*/
+Raw_Complex128 :: runtime.Raw_Complex128
+
+/*
+Memory layout of the `quaternion64` type.
+*/
+Raw_Quaternion64 :: runtime.Raw_Quaternion64
+
+/*
+Memory layout of the `quaternion128` type.
+*/
Raw_Quaternion128 :: runtime.Raw_Quaternion128
+
+/*
+Memory layout of the `quaternion256` type.
+*/
Raw_Quaternion256 :: runtime.Raw_Quaternion256
-Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar
+
+/*
+Memory layout of the `quaternion64` type.
+*/
+Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar
+
+/*
+Memory layout of the `quaternion128` type.
+*/
Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar
+
+/*
+Memory layout of the `quaternion256` type.
+*/
Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar
+/*
+Create a value of the any type.
+
+This procedure creates a value with type `any` that points to an object with
+typeid `id` located at an address specified by `data`.
+*/
make_any :: proc "contextless" (data: rawptr, id: typeid) -> any {
return transmute(any)Raw_Any{data, id}
}
+/*
+Obtain pointer to the data.
+
+This procedure returns the pointer to the data of a slice, string, or a dynamic
+array.
+*/
raw_data :: builtin.raw_data
diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin
index f5e428d87..43ef10fe9 100644
--- a/core/mem/rollback_stack_allocator.odin
+++ b/core/mem/rollback_stack_allocator.odin
@@ -1,52 +1,36 @@
package mem
-// The Rollback Stack Allocator was designed for the test runner to be fast,
-// able to grow, and respect the Tracking Allocator's requirement for
-// individual frees. It is not overly concerned with fragmentation, however.
-//
-// It has support for expansion when configured with a block allocator and
-// limited support for out-of-order frees.
-//
-// Allocation has constant-time best and usual case performance.
-// At worst, it is linear according to the number of memory blocks.
-//
-// Allocation follows a first-fit strategy when there are multiple memory
-// blocks.
-//
-// Freeing has constant-time best and usual case performance.
-// At worst, it is linear according to the number of memory blocks and number
-// of freed items preceding the last item in a block.
-//
-// Resizing has constant-time performance, if it's the last item in a block, or
-// the new size is smaller. Naturally, this becomes linear-time if there are
-// multiple blocks to search for the pointer's owning block. Otherwise, the
-// allocator defaults to a combined alloc & free operation internally.
-//
-// Out-of-order freeing is accomplished by collapsing a run of freed items
-// from the last allocation backwards.
-//
-// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
-// the requested alignment.
-
import "base:runtime"
+/*
+Rollback stack default block size.
+*/
ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
-// This limitation is due to the size of `prev_ptr`, but it is only for the
-// head block; any allocation in excess of the allocator's `block_size` is
-// valid, so long as the block allocator can handle it.
-//
-// This is because allocations over the block size are not split up if the item
-// within is freed; they are immediately returned to the block allocator.
-ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
+/*
+Rollback stack max head block size.
+
+This limitation is due to the size of `prev_ptr`, but it is only for the
+head block; any allocation in excess of the allocator's `block_size` is
+valid, so long as the block allocator can handle it.
+
+This is because allocations over the block size are not split up if the item
+within is freed; they are immediately returned to the block allocator.
+*/
+ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
+/*
+Allocation header of the rollback stack allocator.
+*/
Rollback_Stack_Header :: bit_field u64 {
prev_offset: uintptr | 32,
is_free: bool | 1,
prev_ptr: uintptr | 31,
}
+/*
+Block header of the rollback stack allocator.
+*/
Rollback_Stack_Block :: struct {
next_block: ^Rollback_Stack_Block,
last_alloc: rawptr,
@@ -54,13 +38,15 @@ Rollback_Stack_Block :: struct {
buffer: []byte,
}
+/*
+Rollback stack allocator data.
+*/
Rollback_Stack :: struct {
head: ^Rollback_Stack_Block,
block_size: int,
block_allocator: Allocator,
}
-
@(private="file", require_results)
rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
start := raw_data(block.buffer)
@@ -110,6 +96,9 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_
}
}
+/*
+Free memory to a rollback stack allocator.
+*/
@(private="file", require_results)
rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
parent, block, header := rb_find_ptr(stack, ptr) or_return
@@ -128,6 +117,9 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
return nil
}
+/*
+Free all memory owned by the rollback stack allocator.
+*/
@(private="file")
rb_free_all :: proc(stack: ^Rollback_Stack) {
for block := stack.head.next_block; block != nil; /**/ {
@@ -141,45 +133,75 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
stack.head.offset = 0
}
-@(private="file", require_results)
-rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
- if ptr != nil {
- if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
- // `block.offset` should never underflow because it is contingent
- // on `old_size` in the first place, assuming sane arguments.
- assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
-
- if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
- // Prevent singleton allocations from fragmenting by forbidding
- // them to shrink, removing the possibility of overflow bugs.
- if len(block.buffer) <= stack.block_size {
- block.offset += cast(uintptr)size - cast(uintptr)old_size
- }
- #no_bounds_check return (cast([^]byte)ptr)[:size], nil
- }
- }
+/*
+Allocate memory using the rollback stack allocator.
+*/
+@(require_results)
+rb_alloc :: proc(
+ stack: ^Rollback_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
+ if bytes != nil {
+ zero_slice(bytes)
}
+ return raw_data(bytes), err
+}
- result = rb_alloc(stack, size, alignment) or_return
- runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
- err = rb_free(stack, ptr)
+/*
+Allocate memory using the rollback stack allocator.
+*/
+@(require_results)
+rb_alloc_bytes :: proc(
+ stack: ^Rollback_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
+ if bytes != nil {
+ zero_slice(bytes)
+ }
+ return bytes, err
+}
- return
+/*
+Allocate non-initialized memory using the rollback stack allocator.
+*/
+@(require_results)
+rb_alloc_non_zeroed :: proc(
+ stack: ^Rollback_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
+ return raw_data(bytes), err
}
-@(private="file", require_results)
-rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+/*
+Allocate non-initialized memory using the rollback stack allocator.
+*/
+@(require_results)
+rb_alloc_bytes_non_zeroed :: proc(
+ stack: ^Rollback_Stack,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (result: []byte, err: Allocator_Error) {
+ assert(size >= 0, "Size must be positive or zero.", loc)
+ assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc)
parent: ^Rollback_Stack_Block
for block := stack.head; /**/; block = block.next_block {
when !ODIN_DISABLE_ASSERT {
allocated_new_block: bool
}
-
if block == nil {
if stack.block_allocator.procedure == nil {
return nil, .Out_Of_Memory
}
-
minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
new_block_size := max(minimum_size_required, stack.block_size)
block = rb_make_block(new_block_size, stack.block_allocator) or_return
@@ -188,10 +210,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
allocated_new_block = true
}
}
-
start := raw_data(block.buffer)[block.offset:]
padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
-
if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
when !ODIN_DISABLE_ASSERT {
if allocated_new_block {
@@ -201,54 +221,150 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
parent = block
continue
}
-
header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
ptr := start[padding:]
-
header^ = {
prev_offset = block.offset,
prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
is_free = false,
}
-
block.last_alloc = ptr
block.offset += padding + cast(uintptr)size
-
if len(block.buffer) > stack.block_size {
// This block exceeds the allocator's standard block size and is considered a singleton.
// Prevent any further allocations on it.
block.offset = cast(uintptr)len(block.buffer)
}
-
#no_bounds_check return ptr[:size], nil
}
-
return nil, .Out_Of_Memory
}
+/*
+Resize an allocation owned by the rollback stack allocator.
+*/
+@(require_results)
+rb_resize :: proc(
+ stack: ^Rollback_Stack,
+ old_ptr: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := rb_resize_bytes_non_zeroed(stack, byte_slice(old_ptr, old_size), size, alignment, loc)
+ if bytes != nil {
+ if old_ptr == nil {
+ zero_slice(bytes)
+ } else if size > old_size {
+ zero_slice(bytes[old_size:])
+ }
+ }
+ return raw_data(bytes), err
+}
+
+/*
+Resize an allocation owned by the rollback stack allocator.
+*/
+@(require_results)
+rb_resize_bytes :: proc(
+ stack: ^Rollback_Stack,
+ old_memory: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
+ bytes, err := rb_resize_bytes_non_zeroed(stack, old_memory, size, alignment, loc)
+ if bytes != nil {
+ if old_memory == nil {
+ zero_slice(bytes)
+ } else if size > len(old_memory) {
+ zero_slice(bytes[len(old_memory):])
+ }
+ }
+ return bytes, err
+}
+
+/*
+Resize an allocation owned by the rollback stack allocator without explicit
+zero-initialization.
+*/
+@(require_results)
+rb_resize_non_zeroed :: proc(
+ stack: ^Rollback_Stack,
+ old_ptr: rawptr,
+ old_size: int,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (rawptr, Allocator_Error) {
+ bytes, err := rb_resize_bytes_non_zeroed(stack, byte_slice(old_ptr, old_size), size, alignment, loc)
+ return raw_data(bytes), err
+}
+
+/*
+Resize an allocation owned by the rollback stack allocator without explicit
+zero-initialization.
+*/
+@(require_results)
+rb_resize_bytes_non_zeroed :: proc(
+ stack: ^Rollback_Stack,
+ old_memory: []byte,
+ size: int,
+ alignment := DEFAULT_ALIGNMENT,
+ loc := #caller_location,
+) -> (result: []byte, err: Allocator_Error) {
+ old_size := len(old_memory)
+ ptr := raw_data(old_memory)
+ assert(size >= 0, "Size must be positive or zero.", loc)
+ assert(old_size >= 0, "Old size must be positive or zero.", loc)
+ assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc)
+ if ptr != nil {
+ if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
+ // `block.offset` should never underflow because it is contingent
+ // on `old_size` in the first place, assuming sane arguments.
+ assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
+ if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
+ // Prevent singleton allocations from fragmenting by forbidding
+ // them to shrink, removing the possibility of overflow bugs.
+ if len(block.buffer) <= stack.block_size {
+ block.offset += cast(uintptr)size - cast(uintptr)old_size
+ }
+				#no_bounds_check return ptr[:size], nil
+ }
+ }
+ }
+	result = rb_alloc_bytes_non_zeroed(stack, size, alignment, loc) or_return
+ runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
+ err = rb_free(stack, ptr)
+ return
+}
+
@(private="file", require_results)
rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
-
block = cast(^Rollback_Stack_Block)raw_data(buffer)
#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
return
}
-
+/*
+Initialize the rollback stack allocator using a fixed backing buffer.
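+
+A minimal sketch of buffered initialization (the 4 KiB buffer size here is an
+arbitrary, illustrative choice):
+
+Example:
+
+	buffer: [4096]byte
+	stack: mem.Rollback_Stack
+	mem.rollback_stack_init_buffered(&stack, buffer[:])
+	defer mem.rollback_stack_destroy(&stack)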
+*/
rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
-
block := cast(^Rollback_Stack_Block)raw_data(buffer)
block^ = {}
#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
-
stack^ = {}
stack.head = block
stack.block_size = len(block.buffer)
}
+/*
+Initialize the rollback stack allocator using a backing block allocator.
+*/
rollback_stack_init_dynamic :: proc(
stack: ^Rollback_Stack,
block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -261,22 +377,25 @@ rollback_stack_init_dynamic :: proc(
// size is insufficient; check only on platforms with big enough ints.
assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
}
-
block := rb_make_block(block_size, block_allocator) or_return
-
stack^ = {}
stack.head = block
stack.block_size = block_size
stack.block_allocator = block_allocator
-
return nil
}
+/*
+Initialize the rollback stack allocator.
+
+This procedure group resolves to either the buffered or the dynamic
+initializer, depending on the arguments given.
+*/
rollback_stack_init :: proc {
rollback_stack_init_buffered,
rollback_stack_init_dynamic,
}
+/*
+Destroy a rollback stack.
+*/
rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
if stack.block_allocator.procedure != nil {
rb_free_all(stack)
@@ -285,6 +404,37 @@ rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
stack^ = {}
}
+/*
+Rollback stack allocator.
+
+The Rollback Stack Allocator was designed for the test runner: it is fast,
+able to grow, and respects the Tracking Allocator's requirement for
+individual frees. It is not overly concerned with fragmentation, however.
+
+It has support for expansion when configured with a block allocator and
+limited support for out-of-order frees.
+
+Allocation has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks.
+
+Allocation follows a first-fit strategy when there are multiple memory
+blocks.
+
+Freeing has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks and number
+of freed items preceding the last item in a block.
+
+Resizing has constant-time performance if the allocation is the last item in
+a block or the new size is smaller. This becomes linear-time when multiple
+blocks must be searched for the pointer's owning block. Otherwise, the
+allocator falls back to a combined alloc & free operation internally.
+
+Out-of-order freeing is accomplished by collapsing a run of freed items
+from the last allocation backwards.
+
+Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
+the requested alignment.
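+
+A minimal usage sketch (illustrative; sizes are arbitrary, and the
+`block_allocator` parameter of the dynamic initializer is passed explicitly):
+
+Example:
+
+	stack: mem.Rollback_Stack
+	mem.rollback_stack_init_dynamic(&stack, block_allocator = context.allocator)
+	defer mem.rollback_stack_destroy(&stack)
+	context.allocator = mem.rollback_stack_allocator(&stack)
+	// Allocations in this scope now go through the rollback stack.
+	data := make([]int, 16)
+	defer delete(data)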
+*/
@(require_results)
rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
return Allocator {
@@ -294,48 +444,37 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
}
@(require_results)
-rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, location := #caller_location,
+rollback_stack_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
stack := cast(^Rollback_Stack)allocator_data
-
switch mode {
- case .Alloc, .Alloc_Non_Zeroed:
- assert(size >= 0, "Size must be positive or zero.", location)
- assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
- result = rb_alloc(stack, size, alignment) or_return
-
- if mode == .Alloc {
- zero_slice(result)
- }
-
+ case .Alloc:
+ return rb_alloc_bytes(stack, size, alignment, loc)
+ case .Alloc_Non_Zeroed:
+ return rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
case .Free:
- err = rb_free(stack, old_memory)
-
+ return nil, rb_free(stack, old_memory)
case .Free_All:
rb_free_all(stack)
-
- case .Resize, .Resize_Non_Zeroed:
- assert(size >= 0, "Size must be positive or zero.", location)
- assert(old_size >= 0, "Old size must be positive or zero.", location)
- assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
- result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
-
- #no_bounds_check if mode == .Resize && size > old_size {
- zero_slice(result[old_size:])
- }
-
+ return nil, nil
+ case .Resize:
+ return rb_resize_bytes(stack, byte_slice(old_memory, old_size), size, alignment, loc)
+ case .Resize_Non_Zeroed:
+ return rb_resize_bytes_non_zeroed(stack, byte_slice(old_memory, old_size), size, alignment, loc)
case .Query_Features:
set := (^Allocator_Mode_Set)(old_memory)
if set != nil {
set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
}
return nil, nil
-
case .Query_Info:
return nil, .Mode_Not_Implemented
}
-
return
}
diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin
index 7a95888dc..5da38e62f 100644
--- a/core/mem/tracking_allocator.odin
+++ b/core/mem/tracking_allocator.odin
@@ -4,50 +4,85 @@ package mem
import "base:runtime"
import "core:sync"
+/*
+Allocation entry for the tracking allocator.
+
+This structure stores the metadata related to a single allocation.
+*/
Tracking_Allocator_Entry :: struct {
- memory: rawptr,
- size: int,
+ // Pointer to an allocated region.
+ memory: rawptr,
+ // Size of the allocated memory region.
+ size: int,
+ // Requested alignment.
alignment: int,
- mode: Allocator_Mode,
- err: Allocator_Error,
+ // Mode of the operation.
+ mode: Allocator_Mode,
+	// Error returned by the operation.
+ err: Allocator_Error,
+ // Location of the allocation.
location: runtime.Source_Code_Location,
}
+
+/*
+Bad free entry for a tracking allocator.
+*/
Tracking_Allocator_Bad_Free_Entry :: struct {
- memory: rawptr,
+	// Pointer on which the free operation was called.
+ memory: rawptr,
+ // The source location of where the operation was called.
location: runtime.Source_Code_Location,
}
+
+/*
+Tracking allocator data.
+*/
Tracking_Allocator :: struct {
- backing: Allocator,
- allocation_map: map[rawptr]Tracking_Allocator_Entry,
- bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry,
- mutex: sync.Mutex,
+ backing: Allocator,
+ allocation_map: map[rawptr]Tracking_Allocator_Entry,
+ bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry,
+ mutex: sync.Mutex,
clear_on_free_all: bool,
-
- total_memory_allocated: i64,
- total_allocation_count: i64,
- total_memory_freed: i64,
- total_free_count: i64,
- peak_memory_allocated: i64,
+ total_memory_allocated: i64,
+ total_allocation_count: i64,
+ total_memory_freed: i64,
+ total_free_count: i64,
+ peak_memory_allocated: i64,
current_memory_allocated: i64,
}
+/*
+Initialize the tracking allocator.
+
+This procedure initializes the tracking allocator `t` with a backing allocator
+specified with `backing_allocator`. The `internals_allocator` will be used to
+allocate the tracked data.
+*/
tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
t.backing = backing_allocator
t.allocation_map.allocator = internals_allocator
t.bad_free_array.allocator = internals_allocator
-
if .Free_All in query_features(t.backing) {
t.clear_on_free_all = true
}
}
+/*
+Destroy the tracking allocator.
+*/
tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
delete(t.allocation_map)
delete(t.bad_free_array)
}
-// Clear only the current allocation data while keeping the totals intact.
+/*
+Clear the tracking allocator.
+
+This procedure clears the tracked data from a tracking allocator.
+
+**Note**: This procedure clears only the current allocation data while keeping
+the totals intact.
+*/
tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -56,7 +91,11 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_unlock(&t.mutex)
}
-// Reset all of a Tracking Allocator's allocation data back to zero.
+/*
+Reset the tracking allocator.
+
+Reset all of a Tracking Allocator's allocation data back to zero.
+*/
tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -70,6 +109,39 @@ tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
sync.mutex_unlock(&t.mutex)
}
+/*
+Tracking allocator.
+
+The tracking allocator is an allocator wrapper that tracks memory allocations.
+This allocator stores all the allocations in a map. Whenever a pointer that is
+not present in the map is freed, an entry is added to the `bad_free_array`.
+
+An example of how to use the `Tracking_Allocator` to track subsequent allocations
+in your program and report leaks and bad frees:
+
+Example:
+
+ package foo
+
+ import "core:mem"
+ import "core:fmt"
+
+ main :: proc() {
+ track: mem.Tracking_Allocator
+ mem.tracking_allocator_init(&track, context.allocator)
+ defer mem.tracking_allocator_destroy(&track)
+ context.allocator = mem.tracking_allocator(&track)
+
+ do_stuff()
+
+ for _, leak in track.allocation_map {
+ fmt.printf("%v leaked %m\n", leak.location, leak.size)
+ }
+ for bad_free in track.bad_free_array {
+ fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
+ }
+ }
+*/
@(require_results)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
return Allocator{
@@ -78,9 +150,14 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
}
}
-tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
- size, alignment: int,
- old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) {
+tracking_allocator_proc :: proc(
+ allocator_data: rawptr,
+ mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr,
+ old_size: int,
+ loc := #caller_location,
+) -> (result: []byte, err: Allocator_Error) {
track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
data.total_memory_allocated += i64(entry.size)
data.total_allocation_count += 1
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index 9dfca40b5..d42766bde 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -2302,6 +2302,16 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
bd.name = name.text
return bd
+ case "caller_expression":
+ bd := ast.new(ast.Basic_Directive, tok.pos, end_pos(name))
+ bd.tok = tok
+ bd.name = name.text
+
+ if peek_token_kind(p, .Open_Paren) {
+ return parse_call_expr(p, bd)
+ }
+ return bd
+
case "location", "exists", "load", "load_directory", "load_hash", "hash", "assert", "panic", "defined", "config":
bd := ast.new(ast.Basic_Directive, tok.pos, end_pos(name))
bd.tok = tok
diff --git a/core/os/os_freebsd.odin b/core/os/os_freebsd.odin
index c05a06129..241f42c0b 100644
--- a/core/os/os_freebsd.odin
+++ b/core/os/os_freebsd.odin
@@ -920,7 +920,7 @@ get_page_size :: proc() -> int {
_processor_core_count :: proc() -> int {
count : int = 0
count_size := size_of(count)
- if _sysctlbyname("hw.logicalcpu", &count, &count_size, nil, 0) == 0 {
+ if _sysctlbyname("hw.ncpu", &count, &count_size, nil, 0) == 0 {
if count > 0 {
return count
}
diff --git a/core/os/os_js.odin b/core/os/os_js.odin
index 5a544c3f7..8bf1d988b 100644
--- a/core/os/os_js.odin
+++ b/core/os/os_js.odin
@@ -3,33 +3,38 @@ package os
import "base:runtime"
+foreign import "odin_env"
+
@(require_results)
is_path_separator :: proc(c: byte) -> bool {
return c == '/' || c == '\\'
}
+Handle :: distinct u32
+
+stdout: Handle = 1
+stderr: Handle = 2
+
@(require_results)
open :: proc(path: string, mode: int = O_RDONLY, perm: int = 0) -> (Handle, Error) {
unimplemented("core:os procedure not supported on JS target")
}
close :: proc(fd: Handle) -> Error {
- unimplemented("core:os procedure not supported on JS target")
+ return nil
}
flush :: proc(fd: Handle) -> (err: Error) {
- unimplemented("core:os procedure not supported on JS target")
+ return nil
}
-
-
write :: proc(fd: Handle, data: []byte) -> (int, Error) {
- unimplemented("core:os procedure not supported on JS target")
-}
-
-@(private="file")
-read_console :: proc(handle: Handle, b: []byte) -> (n: int, err: Error) {
- unimplemented("core:os procedure not supported on JS target")
+ foreign odin_env {
+ @(link_name="write")
+ _write :: proc "contextless" (fd: Handle, p: []byte) ---
+ }
+ _write(fd, data)
+ return len(data), nil
}
read :: proc(fd: Handle, data: []byte) -> (int, Error) {
@@ -45,19 +50,6 @@ file_size :: proc(fd: Handle) -> (i64, Error) {
unimplemented("core:os procedure not supported on JS target")
}
-
-@(private)
-MAX_RW :: 1<<30
-
-@(private)
-pread :: proc(fd: Handle, data: []byte, offset: i64) -> (int, Error) {
- unimplemented("core:os procedure not supported on JS target")
-}
-@(private)
-pwrite :: proc(fd: Handle, data: []byte, offset: i64) -> (int, Error) {
- unimplemented("core:os procedure not supported on JS target")
-}
-
read_at :: proc(fd: Handle, data: []byte, offset: i64) -> (n: int, err: Error) {
unimplemented("core:os procedure not supported on JS target")
}
@@ -65,16 +57,6 @@ write_at :: proc(fd: Handle, data: []byte, offset: i64) -> (n: int, err: Error)
unimplemented("core:os procedure not supported on JS target")
}
-stdout: Handle = 1
-stderr: Handle = 2
-
-@(require_results)
-get_std_handle :: proc "contextless" (h: uint) -> Handle {
- context = runtime.default_context()
- unimplemented("core:os procedure not supported on JS target")
-}
-
-
@(require_results)
exists :: proc(path: string) -> bool {
unimplemented("core:os procedure not supported on JS target")
@@ -90,9 +72,6 @@ is_dir :: proc(path: string) -> bool {
unimplemented("core:os procedure not supported on JS target")
}
-// NOTE(tetra): GetCurrentDirectory is not thread safe with SetCurrentDirectory and GetFullPathName
-//@private cwd_lock := win32.SRWLOCK{} // zero is initialized
-
@(require_results)
get_current_directory :: proc(allocator := context.allocator) -> string {
unimplemented("core:os procedure not supported on JS target")
@@ -118,18 +97,6 @@ remove_directory :: proc(path: string) -> (err: Error) {
}
-
-@(private, require_results)
-is_abs :: proc(path: string) -> bool {
- unimplemented("core:os procedure not supported on JS target")
-}
-
-@(private, require_results)
-fix_long_path :: proc(path: string) -> string {
- unimplemented("core:os procedure not supported on JS target")
-}
-
-
link :: proc(old_name, new_name: string) -> (err: Error) {
unimplemented("core:os procedure not supported on JS target")
}
@@ -169,7 +136,6 @@ read_dir :: proc(fd: Handle, n: int, allocator := context.allocator) -> (fi: []F
unimplemented("core:os procedure not supported on JS target")
}
-Handle :: distinct uintptr
File_Time :: distinct u64
_Platform_Error :: enum i32 {
@@ -254,12 +220,7 @@ WSAECONNRESET :: Platform_Error.WSAECONNRESET
ERROR_FILE_IS_PIPE :: General_Error.File_Is_Pipe
ERROR_FILE_IS_NOT_DIR :: General_Error.Not_Dir
-// "Argv" arguments converted to Odin strings
-args := _alloc_command_line_arguments()
-
-
-
-
+args: []string
@(require_results)
last_write_time :: proc(fd: Handle) -> (File_Time, Error) {
@@ -279,26 +240,14 @@ get_page_size :: proc() -> int {
@(private, require_results)
_processor_core_count :: proc() -> int {
- unimplemented("core:os procedure not supported on JS target")
+ return 1
}
exit :: proc "contextless" (code: int) -> ! {
- context = runtime.default_context()
- unimplemented("core:os procedure not supported on JS target")
+ unimplemented_contextless("core:os procedure not supported on JS target")
}
-
-
@(require_results)
current_thread_id :: proc "contextless" () -> int {
- context = runtime.default_context()
- unimplemented("core:os procedure not supported on JS target")
+ return 0
}
-
-
-
-@(require_results)
-_alloc_command_line_arguments :: proc() -> []string {
- return nil
-}
-
diff --git a/core/os/os_netbsd.odin b/core/os/os_netbsd.odin
index a56c0b784..ba9b40fc3 100644
--- a/core/os/os_netbsd.odin
+++ b/core/os/os_netbsd.odin
@@ -978,7 +978,7 @@ get_page_size :: proc() -> int {
_processor_core_count :: proc() -> int {
count : int = 0
count_size := size_of(count)
- if _sysctlbyname("hw.logicalcpu", &count, &count_size, nil, 0) == 0 {
+ if _sysctlbyname("hw.ncpu", &count, &count_size, nil, 0) == 0 {
if count > 0 {
return count
}
diff --git a/core/strings/strings.odin b/core/strings/strings.odin
index b69c4a0e0..dbc84f8b7 100644
--- a/core/strings/strings.odin
+++ b/core/strings/strings.odin
@@ -93,7 +93,7 @@ Inputs:
Returns:
- res: A string created from the null-terminated byte pointer and length
*/
-string_from_null_terminated_ptr :: proc(ptr: [^]byte, len: int) -> (res: string) {
+string_from_null_terminated_ptr :: proc "contextless" (ptr: [^]byte, len: int) -> (res: string) {
s := string(ptr[:len])
s = truncate_to_byte(s, 0)
return s
@@ -139,7 +139,7 @@ NOTE: Failure to find the byte results in returning the entire string.
Returns:
- res: The truncated string
*/
-truncate_to_byte :: proc(str: string, b: byte) -> (res: string) {
+truncate_to_byte :: proc "contextless" (str: string, b: byte) -> (res: string) {
n := index_byte(str, b)
if n < 0 {
n = len(str)
@@ -261,7 +261,7 @@ Inputs:
Returns:
- result: `-1` if `lhs` comes first, `1` if `rhs` comes first, or `0` if they are equal
*/
-compare :: proc(lhs, rhs: string) -> (result: int) {
+compare :: proc "contextless" (lhs, rhs: string) -> (result: int) {
return mem.compare(transmute([]byte)lhs, transmute([]byte)rhs)
}
/*
@@ -1447,7 +1447,7 @@ Output:
-1
*/
-index_byte :: proc(s: string, c: byte) -> (res: int) {
+index_byte :: proc "contextless" (s: string, c: byte) -> (res: int) {
return #force_inline bytes.index_byte(transmute([]u8)s, c)
}
/*
@@ -1482,7 +1482,7 @@ Output:
-1
*/
-last_index_byte :: proc(s: string, c: byte) -> (res: int) {
+last_index_byte :: proc "contextless" (s: string, c: byte) -> (res: int) {
return #force_inline bytes.last_index_byte(transmute([]u8)s, c)
}
/*
@@ -1576,8 +1576,8 @@ Output:
-1
*/
-index :: proc(s, substr: string) -> (res: int) {
- hash_str_rabin_karp :: proc(s: string) -> (hash: u32 = 0, pow: u32 = 1) {
+index :: proc "contextless" (s, substr: string) -> (res: int) {
+ hash_str_rabin_karp :: proc "contextless" (s: string) -> (hash: u32 = 0, pow: u32 = 1) {
for i := 0; i < len(s); i += 1 {
hash = hash*PRIME_RABIN_KARP + u32(s[i])
}
diff --git a/core/sync/chan/chan.odin b/core/sync/chan/chan.odin
index 53a3bff4b..c470d15f3 100644
--- a/core/sync/chan/chan.odin
+++ b/core/sync/chan/chan.odin
@@ -22,19 +22,17 @@ Raw_Chan :: struct {
allocator: runtime.Allocator,
allocation_size: int,
msg_size: u16,
- closed: b16, // atomic
+ closed: b16, // guarded by `mutex`
mutex: sync.Mutex,
r_cond: sync.Cond,
w_cond: sync.Cond,
- r_waiting: int, // atomic
- w_waiting: int, // atomic
+ r_waiting: int, // guarded by `mutex`
+ w_waiting: int, // guarded by `mutex`
// Buffered
queue: ^Raw_Queue,
// Unbuffered
- r_mutex: sync.Mutex,
- w_mutex: sync.Mutex,
unbuffered_data: rawptr,
}
@@ -164,27 +162,30 @@ send_raw :: proc "contextless" (c: ^Raw_Chan, msg_in: rawptr) -> (ok: bool) {
}
if c.queue != nil { // buffered
sync.guard(&c.mutex)
- for c.queue.len == c.queue.cap {
- sync.atomic_add(&c.w_waiting, 1)
+ for !c.closed && c.queue.len == c.queue.cap {
+ c.w_waiting += 1
sync.wait(&c.w_cond, &c.mutex)
- sync.atomic_sub(&c.w_waiting, 1)
+ c.w_waiting -= 1
+ }
+
+ if c.closed {
+ return false
}
ok = raw_queue_push(c.queue, msg_in)
- if sync.atomic_load(&c.r_waiting) > 0 {
+ if c.r_waiting > 0 {
sync.signal(&c.r_cond)
}
} else if c.unbuffered_data != nil { // unbuffered
- sync.guard(&c.w_mutex)
sync.guard(&c.mutex)
- if sync.atomic_load(&c.closed) {
+ if c.closed {
return false
}
mem.copy(c.unbuffered_data, msg_in, int(c.msg_size))
- sync.atomic_add(&c.w_waiting, 1)
- if sync.atomic_load(&c.r_waiting) > 0 {
+ c.w_waiting += 1
+ if c.r_waiting > 0 {
sync.signal(&c.r_cond)
}
sync.wait(&c.w_cond, &c.mutex)
@@ -201,13 +202,13 @@ recv_raw :: proc "contextless" (c: ^Raw_Chan, msg_out: rawptr) -> (ok: bool) {
if c.queue != nil { // buffered
sync.guard(&c.mutex)
for c.queue.len == 0 {
- if sync.atomic_load(&c.closed) {
+ if c.closed {
return
}
- sync.atomic_add(&c.r_waiting, 1)
+ c.r_waiting += 1
sync.wait(&c.r_cond, &c.mutex)
- sync.atomic_sub(&c.r_waiting, 1)
+ c.r_waiting -= 1
}
msg := raw_queue_pop(c.queue)
@@ -215,27 +216,26 @@ recv_raw :: proc "contextless" (c: ^Raw_Chan, msg_out: rawptr) -> (ok: bool) {
mem.copy(msg_out, msg, int(c.msg_size))
}
- if sync.atomic_load(&c.w_waiting) > 0 {
+ if c.w_waiting > 0 {
sync.signal(&c.w_cond)
}
ok = true
} else if c.unbuffered_data != nil { // unbuffered
- sync.guard(&c.r_mutex)
sync.guard(&c.mutex)
- for !sync.atomic_load(&c.closed) &&
- sync.atomic_load(&c.w_waiting) == 0 {
- sync.atomic_add(&c.r_waiting, 1)
+ for !c.closed &&
+ c.w_waiting == 0 {
+ c.r_waiting += 1
sync.wait(&c.r_cond, &c.mutex)
- sync.atomic_sub(&c.r_waiting, 1)
+ c.r_waiting -= 1
}
- if sync.atomic_load(&c.closed) {
+ if c.closed {
return
}
mem.copy(msg_out, c.unbuffered_data, int(c.msg_size))
- sync.atomic_sub(&c.w_waiting, 1)
+ c.w_waiting -= 1
sync.signal(&c.w_cond)
ok = true
@@ -255,21 +255,24 @@ try_send_raw :: proc "contextless" (c: ^Raw_Chan, msg_in: rawptr) -> (ok: bool)
return false
}
+ if c.closed {
+ return false
+ }
+
ok = raw_queue_push(c.queue, msg_in)
- if sync.atomic_load(&c.r_waiting) > 0 {
+ if c.r_waiting > 0 {
sync.signal(&c.r_cond)
}
} else if c.unbuffered_data != nil { // unbuffered
- sync.guard(&c.w_mutex)
sync.guard(&c.mutex)
- if sync.atomic_load(&c.closed) {
+ if c.closed {
return false
}
mem.copy(c.unbuffered_data, msg_in, int(c.msg_size))
- sync.atomic_add(&c.w_waiting, 1)
- if sync.atomic_load(&c.r_waiting) > 0 {
+ c.w_waiting += 1
+ if c.r_waiting > 0 {
sync.signal(&c.r_cond)
}
sync.wait(&c.w_cond, &c.mutex)
@@ -294,21 +297,19 @@ try_recv_raw :: proc "contextless" (c: ^Raw_Chan, msg_out: rawptr) -> bool {
mem.copy(msg_out, msg, int(c.msg_size))
}
- if sync.atomic_load(&c.w_waiting) > 0 {
+ if c.w_waiting > 0 {
sync.signal(&c.w_cond)
}
return true
} else if c.unbuffered_data != nil { // unbuffered
- sync.guard(&c.r_mutex)
sync.guard(&c.mutex)
- if sync.atomic_load(&c.closed) ||
- sync.atomic_load(&c.w_waiting) == 0 {
+ if c.closed || c.w_waiting == 0 {
return false
}
mem.copy(msg_out, c.unbuffered_data, int(c.msg_size))
- sync.atomic_sub(&c.w_waiting, 1)
+ c.w_waiting -= 1
sync.signal(&c.w_cond)
return true
@@ -351,10 +352,10 @@ close :: proc "contextless" (c: ^Raw_Chan) -> bool {
return false
}
sync.guard(&c.mutex)
- if sync.atomic_load(&c.closed) {
+ if c.closed {
return false
}
- sync.atomic_store(&c.closed, true)
+ c.closed = true
sync.broadcast(&c.r_cond)
sync.broadcast(&c.w_cond)
return true
@@ -366,7 +367,7 @@ is_closed :: proc "contextless" (c: ^Raw_Chan) -> bool {
return true
}
sync.guard(&c.mutex)
- return bool(sync.atomic_load(&c.closed))
+ return bool(c.closed)
}
@@ -423,9 +424,9 @@ raw_queue_pop :: proc "contextless" (q: ^Raw_Queue) -> (data: rawptr) {
can_recv :: proc "contextless" (c: ^Raw_Chan) -> bool {
sync.guard(&c.mutex)
if is_buffered(c) {
- return len(c) > 0
+ return c.queue.len > 0
}
- return sync.atomic_load(&c.w_waiting) > 0
+ return c.w_waiting > 0
}
@@ -435,7 +436,7 @@ can_send :: proc "contextless" (c: ^Raw_Chan) -> bool {
if is_buffered(c) {
return c.queue.len < c.queue.cap
}
- return sync.atomic_load(&c.r_waiting) > 0
+ return c.w_waiting == 0
}
@@ -484,4 +485,4 @@ select_raw :: proc "odin" (recvs: []^Raw_Chan, sends: []^Raw_Chan, send_msgs: []
ok = send_raw(sends[sel.idx], send_msgs[sel.idx])
}
return
-} \ No newline at end of file
+}
diff --git a/core/sync/extended.odin b/core/sync/extended.odin
index b446fefa0..30b1b2770 100644
--- a/core/sync/extended.odin
+++ b/core/sync/extended.odin
@@ -8,7 +8,7 @@ _ :: vg
Wait group.
Wait group is a synchronization primitive used by the waiting thread to wait,
-until a all working threads finish work.
+until all working threads finish work.
The waiting thread first sets the number of working threads it will expect to
wait for using `wait_group_add` call, and start waiting using `wait_group_wait`
@@ -35,7 +35,7 @@ Wait_Group :: struct #no_copy {
/*
Increment an internal counter of a wait group.
-This procedure atomicaly increments a number to the specified wait group's
+This procedure atomically increments a number to the specified wait group's
internal counter by a specified amount. This operation can be done on any
thread.
*/
@@ -48,12 +48,12 @@ wait_group_add :: proc "contextless" (wg: ^Wait_Group, delta: int) {
atomic_add(&wg.counter, delta)
if wg.counter < 0 {
- _panic("sync.Wait_Group negative counter")
+ panic_contextless("sync.Wait_Group negative counter")
}
if wg.counter == 0 {
cond_broadcast(&wg.cond)
if wg.counter != 0 {
- _panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
+ panic_contextless("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
}
}
}
@@ -81,7 +81,7 @@ wait_group_wait :: proc "contextless" (wg: ^Wait_Group) {
if wg.counter != 0 {
cond_wait(&wg.cond, &wg.mutex)
if wg.counter != 0 {
- _panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
+ panic_contextless("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
}
}
}
@@ -105,7 +105,7 @@ wait_group_wait_with_timeout :: proc "contextless" (wg: ^Wait_Group, duration: t
return false
}
if wg.counter != 0 {
- _panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
+ panic_contextless("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
}
}
return true
@@ -121,7 +121,7 @@ When `barrier_wait` procedure is called by any thread, that thread will block
the execution, until all threads associated with the barrier reach the same
point of execution and also call `barrier_wait`.
-when barrier is initialized, a `thread_count` parameter is passed, signifying
+When a barrier is initialized, a `thread_count` parameter is passed, signifying
the amount of participant threads of the barrier. The barrier also keeps track
of an internal atomic counter. When a thread calls `barrier_wait`, the internal
counter is incremented. When the internal counter reaches `thread_count`, it is
@@ -208,7 +208,7 @@ Represents a thread synchronization primitive that, when signalled, releases one
single waiting thread and then resets automatically to a state where it can be
signalled again.
-When a thread calls `auto_reset_event_wait`, it's execution will be blocked,
+When a thread calls `auto_reset_event_wait`, its execution will be blocked,
until the event is signalled by another thread. The call to
`auto_reset_event_signal` wakes up exactly one thread waiting for the event.
*/
@@ -228,15 +228,15 @@ thread.
*/
auto_reset_event_signal :: proc "contextless" (e: ^Auto_Reset_Event) {
old_status := atomic_load_explicit(&e.status, .Relaxed)
+ new_status := old_status + 1 if old_status < 1 else 1
for {
- new_status := old_status + 1 if old_status < 1 else 1
if _, ok := atomic_compare_exchange_weak_explicit(&e.status, old_status, new_status, .Release, .Relaxed); ok {
break
}
-
- if old_status < 0 {
- sema_post(&e.sema)
- }
+ cpu_relax()
+ }
+ if old_status < 0 {
+ sema_post(&e.sema)
}
}
@@ -297,7 +297,7 @@ waiting to acquire the lock, exactly one of those threads is unblocked and
allowed into the critical section.
*/
ticket_mutex_unlock :: #force_inline proc "contextless" (m: ^Ticket_Mutex) {
- atomic_add_explicit(&m.serving, 1, .Relaxed)
+ atomic_add_explicit(&m.serving, 1, .Release)
}
/*
@@ -331,8 +331,8 @@ Benaphore.
A benaphore is a combination of an atomic variable and a semaphore that can
improve locking efficiency in a no-contention system. Acquiring a benaphore
-lock doesn't call into an internal semaphore, if no other thread in a middle of
-a critical section.
+lock doesn't call into an internal semaphore, if no other thread is in the
+middle of a critical section.
Once a lock on a benaphore is acquired by a thread, no other thread is allowed
into any critical sections associated with the same benaphore, until the lock
@@ -355,7 +355,7 @@ from entering any critical sections associated with the same benaphore, until
until the lock is released.
*/
benaphore_lock :: proc "contextless" (b: ^Benaphore) {
- if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
+ if atomic_add_explicit(&b.counter, 1, .Acquire) > 0 {
sema_wait(&b.sema)
}
}
@@ -381,10 +381,10 @@ Release a lock on a benaphore.
This procedure releases a lock on the specified benaphore. If any of the threads
are waiting on the lock, exactly one thread is allowed into a critical section
-associated with the same banaphore.
+associated with the same benaphore.
*/
benaphore_unlock :: proc "contextless" (b: ^Benaphore) {
- if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
+ if atomic_sub_explicit(&b.counter, 1, .Release) > 1 {
sema_post(&b.sema)
}
}
@@ -418,8 +418,8 @@ benaphore_guard :: proc "contextless" (m: ^Benaphore) -> bool {
/*
Recursive benaphore.
-Recurisve benaphore is just like a plain benaphore, except it allows reentrancy
-into the critical section.
+A recursive benaphore is just like a plain benaphore, except it allows
+reentrancy into the critical section.
When a lock is acquired on a benaphore, all other threads attempting to
acquire a lock on the same benaphore will be blocked from any critical sections,
@@ -449,13 +449,15 @@ recursive benaphore, until the lock is released.
*/
recursive_benaphore_lock :: proc "contextless" (b: ^Recursive_Benaphore) {
tid := current_thread_id()
- if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
- if tid != b.owner {
- sema_wait(&b.sema)
+ check_owner: if tid != atomic_load_explicit(&b.owner, .Acquire) {
+ atomic_add_explicit(&b.counter, 1, .Relaxed)
+ if _, ok := atomic_compare_exchange_strong_explicit(&b.owner, 0, tid, .Release, .Relaxed); ok {
+ break check_owner
}
+ sema_wait(&b.sema)
+ atomic_store_explicit(&b.owner, tid, .Release)
}
// inside the lock
- b.owner = tid
b.recursion += 1
}
@@ -472,15 +474,14 @@ benaphore, until the lock is released.
*/
recursive_benaphore_try_lock :: proc "contextless" (b: ^Recursive_Benaphore) -> bool {
tid := current_thread_id()
- if b.owner == tid {
- atomic_add_explicit(&b.counter, 1, .Acquire)
- }
-
- if v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 0, 1, .Acquire, .Acquire); v != 0 {
+ check_owner: if tid != atomic_load_explicit(&b.owner, .Acquire) {
+ if _, ok := atomic_compare_exchange_strong_explicit(&b.owner, 0, tid, .Release, .Relaxed); ok {
+ atomic_add_explicit(&b.counter, 1, .Relaxed)
+ break check_owner
+ }
return false
}
// inside the lock
- b.owner = tid
b.recursion += 1
return true
}
@@ -494,14 +495,14 @@ for other threads for entering.
*/
recursive_benaphore_unlock :: proc "contextless" (b: ^Recursive_Benaphore) {
tid := current_thread_id()
- _assert(tid == b.owner, "tid != b.owner")
+ assert_contextless(tid == atomic_load_explicit(&b.owner, .Relaxed), "tid != b.owner")
b.recursion -= 1
recursion := b.recursion
+
if recursion == 0 {
- b.owner = 0
- }
- if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
- if recursion == 0 {
+ if atomic_sub_explicit(&b.counter, 1, .Relaxed) == 1 {
+ atomic_store_explicit(&b.owner, 0, .Release)
+ } else {
sema_post(&b.sema)
}
}
@@ -740,4 +741,4 @@ Make event available.
one_shot_event_signal :: proc "contextless" (e: ^One_Shot_Event) {
atomic_store_explicit(&e.state, 1, .Release)
futex_broadcast(&e.state)
-} \ No newline at end of file
+}
diff --git a/core/sync/futex_darwin.odin b/core/sync/futex_darwin.odin
index 5b5f53c20..10ff7bfbb 100644
--- a/core/sync/futex_darwin.odin
+++ b/core/sync/futex_darwin.odin
@@ -12,6 +12,8 @@ foreign System {
// __ulock_wait is not available on 10.15
// See https://github.com/odin-lang/Odin/issues/1959
__ulock_wait :: proc "c" (operation: u32, addr: rawptr, value: u64, timeout_us: u32) -> c.int ---
+	// >= macOS 11.
+ __ulock_wait2 :: proc "c" (operation: u32, addr: rawptr, value: u64, timeout_ns: u64, value2: u64) -> c.int ---
__ulock_wake :: proc "c" (operation: u32, addr: rawptr, wake_value: u64) -> c.int ---
}
@@ -48,22 +50,29 @@ _futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, durati
case -ETIMEDOUT:
return false
case:
- _panic("darwin.os_sync_wait_on_address_with_timeout failure")
+ panic_contextless("darwin.os_sync_wait_on_address_with_timeout failure")
}
} else {
- timeout_ns := u32(duration)
- s := __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, u64(expected), timeout_ns)
+ when darwin.ULOCK_WAIT_2_AVAILABLE {
+ timeout_ns := u64(duration)
+ s := __ulock_wait2(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, u64(expected), timeout_ns, 0)
+ } else {
+ timeout_us := u32(duration / time.Microsecond)
+ s := __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, u64(expected), timeout_us)
+ }
+
if s >= 0 {
return true
}
+
switch s {
case EINTR, EFAULT:
return true
case ETIMEDOUT:
return false
case:
- _panic("futex_wait failure")
+ panic_contextless("futex_wait failure")
}
return true
@@ -83,7 +92,7 @@ _futex_signal :: proc "contextless" (f: ^Futex) {
case -ENOENT:
return
case:
- _panic("darwin.os_sync_wake_by_address_any failure")
+ panic_contextless("darwin.os_sync_wake_by_address_any failure")
}
}
} else {
@@ -99,7 +108,7 @@ _futex_signal :: proc "contextless" (f: ^Futex) {
case ENOENT:
return
case:
- _panic("futex_wake_single failure")
+ panic_contextless("futex_wake_single failure")
}
}
@@ -119,7 +128,7 @@ _futex_broadcast :: proc "contextless" (f: ^Futex) {
case -ENOENT:
return
case:
- _panic("darwin.os_sync_wake_by_address_all failure")
+ panic_contextless("darwin.os_sync_wake_by_address_all failure")
}
}
} else {
@@ -135,7 +144,7 @@ _futex_broadcast :: proc "contextless" (f: ^Futex) {
case ENOENT:
return
case:
- _panic("futex_wake_all failure")
+ panic_contextless("futex_wake_all failure")
}
}
diff --git a/core/sync/futex_freebsd.odin b/core/sync/futex_freebsd.odin
index 4fbb33ee0..e3f95b146 100644
--- a/core/sync/futex_freebsd.odin
+++ b/core/sync/futex_freebsd.odin
@@ -21,7 +21,7 @@ _futex_wait :: proc "contextless" (f: ^Futex, expected: u32) -> bool {
continue
}
- _panic("_futex_wait failure")
+ panic_contextless("_futex_wait failure")
}
unreachable()
@@ -44,14 +44,14 @@ _futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, durati
return false
}
- _panic("_futex_wait_with_timeout failure")
+ panic_contextless("_futex_wait_with_timeout failure")
}
_futex_signal :: proc "contextless" (f: ^Futex) {
errno := freebsd._umtx_op(f, .WAKE, 1, nil, nil)
if errno != nil {
- _panic("_futex_signal failure")
+ panic_contextless("_futex_signal failure")
}
}
@@ -59,6 +59,6 @@ _futex_broadcast :: proc "contextless" (f: ^Futex) {
errno := freebsd._umtx_op(f, .WAKE, cast(c.ulong)max(i32), nil, nil)
if errno != nil {
- _panic("_futex_broadcast failure")
+ panic_contextless("_futex_broadcast failure")
}
}
diff --git a/core/sync/futex_linux.odin b/core/sync/futex_linux.odin
index 846a82486..52143880b 100644
--- a/core/sync/futex_linux.odin
+++ b/core/sync/futex_linux.odin
@@ -15,7 +15,7 @@ _futex_wait :: proc "contextless" (futex: ^Futex, expected: u32) -> bool {
return true
case:
		// TODO(flysand): More descriptive panic messages based on the value of `errno`
- _panic("futex_wait failure")
+ panic_contextless("futex_wait failure")
}
}
@@ -34,7 +34,7 @@ _futex_wait_with_timeout :: proc "contextless" (futex: ^Futex, expected: u32, du
case .NONE, .EINTR, .EAGAIN:
return true
case:
- _panic("futex_wait_with_timeout failure")
+ panic_contextless("futex_wait_with_timeout failure")
}
}
@@ -44,7 +44,7 @@ _futex_signal :: proc "contextless" (futex: ^Futex) {
case .NONE:
return
case:
- _panic("futex_wake_single failure")
+ panic_contextless("futex_wake_single failure")
}
}
@@ -57,6 +57,6 @@ _futex_broadcast :: proc "contextless" (futex: ^Futex) {
case .NONE:
return
case:
- _panic("_futex_wake_all failure")
+ panic_contextless("_futex_wake_all failure")
}
}
diff --git a/core/sync/futex_netbsd.odin b/core/sync/futex_netbsd.odin
index b98346434..e49b25b02 100644
--- a/core/sync/futex_netbsd.odin
+++ b/core/sync/futex_netbsd.odin
@@ -35,7 +35,7 @@ _futex_wait :: proc "contextless" (futex: ^Futex, expected: u32) -> bool {
case EINTR, EAGAIN:
return true
case:
- _panic("futex_wait failure")
+ panic_contextless("futex_wait failure")
}
}
return true
@@ -55,7 +55,7 @@ _futex_wait_with_timeout :: proc "contextless" (futex: ^Futex, expected: u32, du
case ETIMEDOUT:
return false
case:
- _panic("futex_wait_with_timeout failure")
+ panic_contextless("futex_wait_with_timeout failure")
}
}
return true
@@ -63,12 +63,12 @@ _futex_wait_with_timeout :: proc "contextless" (futex: ^Futex, expected: u32, du
_futex_signal :: proc "contextless" (futex: ^Futex) {
if _, ok := intrinsics.syscall_bsd(unix.SYS___futex, uintptr(futex), FUTEX_WAKE_PRIVATE, 1, 0, 0, 0); !ok {
- _panic("futex_wake_single failure")
+ panic_contextless("futex_wake_single failure")
}
}
_futex_broadcast :: proc "contextless" (futex: ^Futex) {
if _, ok := intrinsics.syscall_bsd(unix.SYS___futex, uintptr(futex), FUTEX_WAKE_PRIVATE, uintptr(max(i32)), 0, 0, 0); !ok {
- _panic("_futex_wake_all failure")
+ panic_contextless("_futex_wake_all failure")
}
}
diff --git a/core/sync/futex_openbsd.odin b/core/sync/futex_openbsd.odin
index 83055b950..7d3cc8578 100644
--- a/core/sync/futex_openbsd.odin
+++ b/core/sync/futex_openbsd.odin
@@ -36,7 +36,7 @@ _futex_wait :: proc "contextless" (f: ^Futex, expected: u32) -> bool {
return false
}
- _panic("futex_wait failure")
+ panic_contextless("futex_wait failure")
}
_futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, duration: time.Duration) -> bool {
@@ -62,14 +62,14 @@ _futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, durati
return false
}
- _panic("futex_wait_with_timeout failure")
+ panic_contextless("futex_wait_with_timeout failure")
}
_futex_signal :: proc "contextless" (f: ^Futex) {
res := _unix_futex(f, FUTEX_WAKE_PRIVATE, 1, nil)
if res == -1 {
- _panic("futex_wake_single failure")
+ panic_contextless("futex_wake_single failure")
}
}
@@ -77,6 +77,6 @@ _futex_broadcast :: proc "contextless" (f: ^Futex) {
res := _unix_futex(f, FUTEX_WAKE_PRIVATE, u32(max(i32)), nil)
if res == -1 {
- _panic("_futex_wake_all failure")
+ panic_contextless("_futex_wake_all failure")
}
}
diff --git a/core/sync/futex_wasm.odin b/core/sync/futex_wasm.odin
index 013c425e8..0f9659a02 100644
--- a/core/sync/futex_wasm.odin
+++ b/core/sync/futex_wasm.odin
@@ -10,7 +10,7 @@ import "core:time"
_futex_wait :: proc "contextless" (f: ^Futex, expected: u32) -> bool {
when !intrinsics.has_target_feature("atomics") {
- _panic("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
+ panic_contextless("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
} else {
s := intrinsics.wasm_memory_atomic_wait32((^u32)(f), expected, -1)
return s != 0
@@ -19,7 +19,7 @@ _futex_wait :: proc "contextless" (f: ^Futex, expected: u32) -> bool {
_futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, duration: time.Duration) -> bool {
when !intrinsics.has_target_feature("atomics") {
- _panic("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
+ panic_contextless("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
} else {
s := intrinsics.wasm_memory_atomic_wait32((^u32)(f), expected, i64(duration))
return s != 0
@@ -28,7 +28,7 @@ _futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, durati
_futex_signal :: proc "contextless" (f: ^Futex) {
when !intrinsics.has_target_feature("atomics") {
- _panic("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
+ panic_contextless("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
} else {
loop: for {
s := intrinsics.wasm_memory_atomic_notify32((^u32)(f), 1)
@@ -41,7 +41,7 @@ _futex_signal :: proc "contextless" (f: ^Futex) {
_futex_broadcast :: proc "contextless" (f: ^Futex) {
when !intrinsics.has_target_feature("atomics") {
- _panic("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
+ panic_contextless("usage of `core:sync` requires the `-target-feature:\"atomics\"` or a `-microarch` that supports it")
} else {
loop: for {
s := intrinsics.wasm_memory_atomic_notify32((^u32)(f), ~u32(0))
diff --git a/core/sync/primitives.odin b/core/sync/primitives.odin
index a22824481..f091de045 100644
--- a/core/sync/primitives.odin
+++ b/core/sync/primitives.odin
@@ -1,6 +1,5 @@
package sync
-import "base:runtime"
import "core:time"
/*
@@ -390,7 +389,7 @@ recursive_mutex_guard :: proc "contextless" (m: ^Recursive_Mutex) -> bool {
A condition variable.
`Cond` implements a condition variable, a rendezvous point for threads waiting
-for signalling the occurence of an event. Condition variables are used on
+for signalling the occurrence of an event. Condition variables are used in
conjunction with mutexes to provide shared access to one or more shared
variables.
@@ -560,7 +559,7 @@ futex_wait :: proc "contextless" (f: ^Futex, expected: u32) {
return
}
ok := _futex_wait(f, expected)
- _assert(ok, "futex_wait failure")
+ assert_contextless(ok, "futex_wait failure")
}
/*
@@ -597,18 +596,3 @@ Wake up multiple threads waiting on a futex.
futex_broadcast :: proc "contextless" (f: ^Futex) {
_futex_broadcast(f)
}
-
-
-@(private)
-_assert :: proc "contextless" (cond: bool, msg: string) {
- if !cond {
- _panic(msg)
- }
-}
-
-@(private)
-_panic :: proc "contextless" (msg: string) -> ! {
- runtime.print_string(msg)
- runtime.print_byte('\n')
- runtime.trap()
-}
diff --git a/core/sync/primitives_atomic.odin b/core/sync/primitives_atomic.odin
index 1d8e423db..3c4324eb7 100644
--- a/core/sync/primitives_atomic.odin
+++ b/core/sync/primitives_atomic.odin
@@ -240,7 +240,7 @@ atomic_recursive_mutex_lock :: proc "contextless" (m: ^Atomic_Recursive_Mutex) {
atomic_recursive_mutex_unlock :: proc "contextless" (m: ^Atomic_Recursive_Mutex) {
tid := current_thread_id()
- _assert(tid == m.owner, "tid != m.owner")
+ assert_contextless(tid == m.owner, "tid != m.owner")
m.recursion -= 1
recursion := m.recursion
if recursion == 0 {
@@ -361,7 +361,7 @@ atomic_sema_wait_with_timeout :: proc "contextless" (s: ^Atomic_Sema, duration:
if !futex_wait_with_timeout(&s.count, u32(original_count), remaining) {
return false
}
- original_count = s.count
+ original_count = atomic_load_explicit(&s.count, .Relaxed)
}
if original_count == atomic_compare_exchange_strong_explicit(&s.count, original_count, original_count-1, .Acquire, .Acquire) {
return true
diff --git a/core/sys/darwin/sync.odin b/core/sys/darwin/sync.odin
index 121d3edef..58fc7c9e4 100644
--- a/core/sys/darwin/sync.odin
+++ b/core/sys/darwin/sync.odin
@@ -5,6 +5,7 @@ foreign import system "system:System.framework"
// #define OS_WAIT_ON_ADDR_AVAILABILITY \
// __API_AVAILABLE(macos(14.4), ios(17.4), tvos(17.4), watchos(10.4))
when ODIN_OS == .Darwin {
+
when ODIN_PLATFORM_SUBTARGET == .iOS && ODIN_MINIMUM_OS_VERSION >= 17_04_00 {
WAIT_ON_ADDRESS_AVAILABLE :: true
} else when ODIN_MINIMUM_OS_VERSION >= 14_04_00 {
@@ -12,8 +13,18 @@ when ODIN_OS == .Darwin {
} else {
WAIT_ON_ADDRESS_AVAILABLE :: false
}
+
+ when ODIN_PLATFORM_SUBTARGET == .iOS && ODIN_MINIMUM_OS_VERSION >= 14_00_00 {
+ ULOCK_WAIT_2_AVAILABLE :: true
+ } else when ODIN_MINIMUM_OS_VERSION >= 11_00_00 {
+ ULOCK_WAIT_2_AVAILABLE :: true
+ } else {
+ ULOCK_WAIT_2_AVAILABLE :: false
+ }
+
} else {
WAIT_ON_ADDRESS_AVAILABLE :: false
+ ULOCK_WAIT_2_AVAILABLE :: false
}
os_sync_wait_on_address_flag :: enum u32 {
diff --git a/core/sys/info/platform_darwin.odin b/core/sys/info/platform_darwin.odin
index 3fd857bfe..493f038f0 100644
--- a/core/sys/info/platform_darwin.odin
+++ b/core/sys/info/platform_darwin.odin
@@ -530,6 +530,10 @@ macos_release_map: map[string]Darwin_To_Release = {
"23F79" = {{23, 5, 0}, "macOS", {"Sonoma", {14, 5, 0}}},
"23G80" = {{23, 6, 0}, "macOS", {"Sonoma", {14, 6, 0}}},
"23G93" = {{23, 6, 0}, "macOS", {"Sonoma", {14, 6, 1}}},
+ "23H124" = {{23, 6, 0}, "macOS", {"Sonoma", {14, 7, 0}}},
+
+	// macOS Sequoia
+ "24A335" = {{24, 0, 0}, "macOS", {"Sequoia", {15, 0, 0}}},
}
@(private)
diff --git a/core/testing/runner.odin b/core/testing/runner.odin
index 683db9d15..6b9d610ed 100644
--- a/core/testing/runner.odin
+++ b/core/testing/runner.odin
@@ -204,6 +204,10 @@ runner :: proc(internal_tests: []Internal_Test) -> bool {
}
}
+ when ODIN_OS == .Windows {
+ console_ansi_init()
+ }
+
stdout := io.to_writer(os.stream_from_handle(os.stdout))
stderr := io.to_writer(os.stream_from_handle(os.stderr))
diff --git a/core/testing/runner_windows.odin b/core/testing/runner_windows.odin
new file mode 100644
index 000000000..fa233ff84
--- /dev/null
+++ b/core/testing/runner_windows.odin
@@ -0,0 +1,22 @@
+//+private
+package testing
+
+import win32 "core:sys/windows"
+
+console_ansi_init :: proc() {
+ stdout := win32.GetStdHandle(win32.STD_OUTPUT_HANDLE)
+ if stdout != win32.INVALID_HANDLE && stdout != nil {
+ old_console_mode: u32
+ if win32.GetConsoleMode(stdout, &old_console_mode) {
+ win32.SetConsoleMode(stdout, old_console_mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ }
+ }
+
+ stderr := win32.GetStdHandle(win32.STD_ERROR_HANDLE)
+ if stderr != win32.INVALID_HANDLE && stderr != nil {
+ old_console_mode: u32
+ if win32.GetConsoleMode(stderr, &old_console_mode) {
+ win32.SetConsoleMode(stderr, old_console_mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ }
+ }
+}
diff --git a/core/testing/signal_handler_libc.odin b/core/testing/signal_handler_libc.odin
index b665b047c..7442c100c 100644
--- a/core/testing/signal_handler_libc.odin
+++ b/core/testing/signal_handler_libc.odin
@@ -26,6 +26,8 @@ import "core:os"
@(private="file", thread_local)
local_test_index: libc.sig_atomic_t
+@(private="file", thread_local)
+local_test_index_set: bool
// Windows does not appear to have a SIGTRAP, so this is defined here, instead
// of in the libc package, just so there's no confusion about it being
@@ -45,6 +47,13 @@ stop_runner_callback :: proc "c" (sig: libc.int) {
@(private="file")
stop_test_callback :: proc "c" (sig: libc.int) {
+ if !local_test_index_set {
+ // We're a thread created by a test thread.
+ //
+ // There's nothing we can do to inform the test runner about who
+	// signalled, so hopefully the test will handle its own sub-threads.
+ return
+ }
if local_test_index == -1 {
// We're the test runner, and we ourselves have caught a signal from
// which there is no recovery.
@@ -114,6 +123,7 @@ This is a dire bug and should be reported to the Odin developers.
_setup_signal_handler :: proc() {
local_test_index = -1
+ local_test_index_set = true
// Catch user interrupt / CTRL-C.
libc.signal(libc.SIGINT, stop_runner_callback)
@@ -135,6 +145,7 @@ _setup_signal_handler :: proc() {
_setup_task_signal_handler :: proc(test_index: int) {
local_test_index = cast(libc.sig_atomic_t)test_index
+ local_test_index_set = true
}
_should_stop_runner :: proc() -> bool {
diff --git a/core/testing/testing.odin b/core/testing/testing.odin
index d5e7c6830..09bf6dc0e 100644
--- a/core/testing/testing.odin
+++ b/core/testing/testing.odin
@@ -105,9 +105,13 @@ cleanup :: proc(t: ^T, procedure: proc(rawptr), user_data: rawptr) {
append(&t.cleanups, Internal_Cleanup{procedure, user_data, context})
}
-expect :: proc(t: ^T, ok: bool, msg: string = "", loc := #caller_location) -> bool {
+expect :: proc(t: ^T, ok: bool, msg := "", expr := #caller_expression(ok), loc := #caller_location) -> bool {
if !ok {
- log.error(msg, location=loc)
+ if msg == "" {
+ log.errorf("expected %v to be true", expr, location=loc)
+ } else {
+ log.error(msg, location=loc)
+ }
}
return ok
}
@@ -119,10 +123,10 @@ expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_loc
return ok
}
-expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
+expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location, value_expr := #caller_expression(value)) -> bool where intrinsics.type_is_comparable(T) {
ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
if !ok {
- log.errorf("expected %v, got %v", expected, value, location=loc)
+ log.errorf("expected %v to be %v, got %v", value_expr, expected, value, location=loc)
}
return ok
}
diff --git a/core/thread/thread.odin b/core/thread/thread.odin
index 17ba1a0a2..c1cbceb42 100644
--- a/core/thread/thread.odin
+++ b/core/thread/thread.odin
@@ -272,7 +272,7 @@ create_and_start :: proc(fn: proc(), init_context: Maybe(runtime.Context) = nil,
t := create(thread_proc, priority)
t.data = rawptr(fn)
if self_cleanup {
- t.flags += {.Self_Cleanup}
+ intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
}
t.init_context = init_context
start(t)
@@ -307,7 +307,7 @@ create_and_start_with_data :: proc(data: rawptr, fn: proc(data: rawptr), init_co
t.user_index = 1
t.user_args[0] = data
if self_cleanup {
- t.flags += {.Self_Cleanup}
+ intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
}
t.init_context = init_context
start(t)
@@ -347,7 +347,7 @@ create_and_start_with_poly_data :: proc(data: $T, fn: proc(data: T), init_contex
mem.copy(&t.user_args[0], &data, size_of(T))
if self_cleanup {
- t.flags += {.Self_Cleanup}
+ intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
}
t.init_context = init_context
@@ -394,7 +394,7 @@ create_and_start_with_poly_data2 :: proc(arg1: $T1, arg2: $T2, fn: proc(T1, T2),
_ = copy(user_args[n:], mem.ptr_to_bytes(&arg2))
if self_cleanup {
- t.flags += {.Self_Cleanup}
+ intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
}
t.init_context = init_context
@@ -443,7 +443,7 @@ create_and_start_with_poly_data3 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, fn: pr
_ = copy(user_args[n:], mem.ptr_to_bytes(&arg3))
if self_cleanup {
- t.flags += {.Self_Cleanup}
+ intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
}
t.init_context = init_context
@@ -494,7 +494,7 @@ create_and_start_with_poly_data4 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, arg4:
_ = copy(user_args[n:], mem.ptr_to_bytes(&arg4))
if self_cleanup {
- t.flags += {.Self_Cleanup}
+ intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
}
t.init_context = init_context
diff --git a/core/thread/thread_pool.odin b/core/thread/thread_pool.odin
index 9bcc42968..d9166b450 100644
--- a/core/thread/thread_pool.odin
+++ b/core/thread/thread_pool.odin
@@ -60,6 +60,7 @@ pool_thread_runner :: proc(t: ^Thread) {
if task, ok := pool_pop_waiting(pool); ok {
data.task = task
pool_do_work(pool, task)
+ sync.guard(&pool.mutex)
data.task = {}
}
}
diff --git a/core/thread/thread_unix.odin b/core/thread/thread_unix.odin
index 2e196c3e6..9576a3040 100644
--- a/core/thread/thread_unix.odin
+++ b/core/thread/thread_unix.odin
@@ -5,18 +5,14 @@ package thread
import "base:runtime"
import "core:sync"
import "core:sys/unix"
-import "core:time"
_IS_SUPPORTED :: true
-CAS :: sync.atomic_compare_exchange_strong
-
// NOTE(tetra): Aligned here because of core/unix/pthread_linux.odin/pthread_t.
// Also see core/sys/darwin/mach_darwin.odin/semaphore_t.
Thread_Os_Specific :: struct #align(16) {
unix_thread: unix.pthread_t, // NOTE: very large on Darwin, small on Linux.
- cond: sync.Cond,
- mutex: sync.Mutex,
+ start_ok: sync.Sema,
}
//
// Creates a thread which will run the given procedure.
@@ -29,14 +25,10 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
// We need to give the thread a moment to start up before we enable cancellation.
can_set_thread_cancel_state := unix.pthread_setcancelstate(unix.PTHREAD_CANCEL_ENABLE, nil) == 0
- sync.lock(&t.mutex)
-
t.id = sync.current_thread_id()
- for (.Started not_in sync.atomic_load(&t.flags)) {
- // HACK: use a timeout so in the event that the condition is signalled at THIS comment's exact point
- // (after checking flags, before starting the wait) it gets itself out of that deadlock after a ms.
- sync.wait_with_timeout(&t.cond, &t.mutex, time.Millisecond)
+ if .Started not_in sync.atomic_load(&t.flags) {
+ sync.wait(&t.start_ok)
}
if .Joined in sync.atomic_load(&t.flags) {
@@ -66,8 +58,6 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
sync.atomic_or(&t.flags, { .Done })
- sync.unlock(&t.mutex)
-
if .Self_Cleanup in sync.atomic_load(&t.flags) {
res := unix.pthread_detach(t.unix_thread)
assert_contextless(res == 0)
@@ -132,7 +122,7 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
_start :: proc(t: ^Thread) {
sync.atomic_or(&t.flags, { .Started })
- sync.signal(&t.cond)
+ sync.post(&t.start_ok)
}
_is_done :: proc(t: ^Thread) -> bool {
@@ -140,24 +130,18 @@ _is_done :: proc(t: ^Thread) -> bool {
}
_join :: proc(t: ^Thread) {
- // sync.guard(&t.mutex)
-
if unix.pthread_equal(unix.pthread_self(), t.unix_thread) {
return
}
- // Preserve other flags besides `.Joined`, like `.Started`.
- unjoined := sync.atomic_load(&t.flags) - {.Joined}
- joined := unjoined + {.Joined}
-
- // Try to set `t.flags` from unjoined to joined. If it returns joined,
- // it means the previous value had that flag set and we can return.
- if res, ok := CAS(&t.flags, unjoined, joined); res == joined && !ok {
+ // If the previous value was already `Joined`, then we can return.
+ if .Joined in sync.atomic_or(&t.flags, {.Joined}) {
return
}
+
// Prevent non-started threads from blocking main thread with initial wait
// condition.
- if .Started not_in unjoined {
+ if .Started not_in sync.atomic_load(&t.flags) {
_start(t)
}
unix.pthread_join(t.unix_thread, nil)
diff --git a/core/thread/thread_windows.odin b/core/thread/thread_windows.odin
index 300673191..cc73a2d6a 100644
--- a/core/thread/thread_windows.odin
+++ b/core/thread/thread_windows.odin
@@ -27,7 +27,7 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
__windows_thread_entry_proc :: proc "system" (t_: rawptr) -> win32.DWORD {
t := (^Thread)(t_)
- if .Joined in t.flags {
+ if .Joined in sync.atomic_load(&t.flags) {
return 0
}
@@ -48,9 +48,9 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
t.procedure(t)
}
- intrinsics.atomic_store(&t.flags, t.flags + {.Done})
+ intrinsics.atomic_or(&t.flags, {.Done})
- if .Self_Cleanup in t.flags {
+ if .Self_Cleanup in sync.atomic_load(&t.flags) {
win32.CloseHandle(t.win32_thread)
t.win32_thread = win32.INVALID_HANDLE
// NOTE(ftphikari): It doesn't matter which context 'free' received, right?