aboutsummaryrefslogtreecommitdiff
path: root/core
diff options
context:
space:
mode:
authorkorvahkh <92224397+korvahkh@users.noreply.github.com>2024-06-13 01:27:44 +0000
committerGitHub <noreply@github.com>2024-06-13 01:27:44 +0000
commit104ca2ce22c269b71df08edb00cb26bee4daf59d (patch)
treeee0a3275d3b42ae9aa85d09bf01f278d3965cc31 /core
parenta7a6ff8c693be92929327660fd446dfc0af62e01 (diff)
parenta67df0739245d85e7aa773e7271a64121ca534c5 (diff)
Merge branch 'odin-lang:master' into fix-omitempty-comma
Diffstat (limited to 'core')
-rw-r--r--core/bufio/reader.odin4
-rw-r--r--core/bytes/buffer.odin70
-rw-r--r--core/c/libc/signal.odin2
-rw-r--r--core/c/libc/stdio.odin14
-rw-r--r--core/crypto/_aes/aes.odin28
-rw-r--r--core/crypto/_aes/ct64/api.odin96
-rw-r--r--core/crypto/_aes/ct64/ct64.odin265
-rw-r--r--core/crypto/_aes/ct64/ct64_dec.odin135
-rw-r--r--core/crypto/_aes/ct64/ct64_enc.odin95
-rw-r--r--core/crypto/_aes/ct64/ct64_keysched.odin179
-rw-r--r--core/crypto/_aes/ct64/ghash.odin136
-rw-r--r--core/crypto/_aes/ct64/helpers.odin75
-rw-r--r--core/crypto/aes/aes.odin22
-rw-r--r--core/crypto/aes/aes_ctr.odin199
-rw-r--r--core/crypto/aes/aes_ecb.odin57
-rw-r--r--core/crypto/aes/aes_gcm.odin253
-rw-r--r--core/crypto/aes/aes_impl.odin41
-rw-r--r--core/crypto/aes/aes_impl_hw_gen.odin43
-rw-r--r--core/crypto/rand_darwin.odin4
-rw-r--r--core/crypto/rand_linux.odin2
-rw-r--r--core/crypto/rand_windows.odin20
-rw-r--r--core/encoding/ansi/ansi.odin137
-rw-r--r--core/encoding/ansi/doc.odin20
-rw-r--r--core/encoding/cbor/cbor.odin4
-rw-r--r--core/encoding/cbor/coding.odin107
-rw-r--r--core/encoding/cbor/marshal.odin16
-rw-r--r--core/encoding/cbor/unmarshal.odin102
-rw-r--r--core/encoding/entity/entity.odin123
-rw-r--r--core/encoding/hex/hex.odin11
-rw-r--r--core/encoding/hxa/hxa.odin31
-rw-r--r--core/encoding/hxa/read.odin42
-rw-r--r--core/encoding/ini/ini.odin189
-rw-r--r--core/encoding/json/marshal.odin4
-rw-r--r--core/encoding/json/parser.odin65
-rw-r--r--core/encoding/json/types.odin14
-rw-r--r--core/encoding/xml/tokenizer.odin43
-rw-r--r--core/encoding/xml/xml_reader.odin24
-rw-r--r--core/fmt/fmt.odin26
-rw-r--r--core/fmt/fmt_os.odin1
-rw-r--r--core/image/bmp/bmp.odin746
-rw-r--r--core/image/bmp/bmp_js.odin4
-rw-r--r--core/image/bmp/bmp_os.odin34
-rw-r--r--core/image/common.odin162
-rw-r--r--core/image/png/png.odin66
-rw-r--r--core/log/file_console_logger.odin41
-rw-r--r--core/log/multi_logger.odin5
-rw-r--r--core/math/big/combinatorics.odin60
-rw-r--r--core/math/big/prime.odin3
-rw-r--r--core/math/big/radix.odin1
-rw-r--r--core/math/cmplx/cmplx_trig.odin2
-rw-r--r--core/math/linalg/general.odin25
-rw-r--r--core/math/linalg/specific.odin6
-rw-r--r--core/math/linalg/specific_euler_angles_f16.odin2
-rw-r--r--core/math/linalg/specific_euler_angles_f32.odin2
-rw-r--r--core/math/linalg/specific_euler_angles_f64.odin2
-rw-r--r--core/math/math.odin18
-rw-r--r--core/math/math_gamma.odin6
-rw-r--r--core/math/math_lgamma.odin14
-rw-r--r--core/math/math_sincos.odin2
-rw-r--r--core/math/rand/exp.odin6
-rw-r--r--core/math/rand/normal.odin6
-rw-r--r--core/mem/raw.odin15
-rw-r--r--core/mem/rollback_stack_allocator.odin341
-rw-r--r--core/mem/tlsf/LICENSE36
-rw-r--r--core/mem/tlsf/tlsf.odin156
-rw-r--r--core/mem/tlsf/tlsf_internal.odin738
-rw-r--r--core/mem/tracking_allocator.odin14
-rw-r--r--core/odin/ast/ast.odin4
-rw-r--r--core/odin/ast/clone.odin2
-rw-r--r--core/odin/ast/walk.odin1
-rw-r--r--core/odin/parser/parser.odin47
-rw-r--r--core/os/dir_windows.odin6
-rw-r--r--core/os/os2/internal_util.odin2
-rw-r--r--core/os/os_darwin.odin2
-rw-r--r--core/os/os_freebsd.odin61
-rw-r--r--core/os/os_netbsd.odin8
-rw-r--r--core/path/filepath/path_unix.odin2
-rw-r--r--core/simd/x86/aes.odin49
-rw-r--r--core/slice/permute.odin105
-rw-r--r--core/strconv/generic_float.odin2
-rw-r--r--core/strconv/strconv.odin292
-rw-r--r--core/strings/builder.odin8
-rw-r--r--core/sync/futex_darwin.odin2
-rw-r--r--core/sync/primitives_netbsd.odin8
-rw-r--r--core/sys/info/platform_darwin.odin1
-rw-r--r--core/sys/linux/sys.odin5
-rw-r--r--core/sys/linux/types.odin9
-rw-r--r--core/sys/unix/pthread_freebsd.odin4
-rw-r--r--core/sys/unix/pthread_openbsd.odin4
-rw-r--r--core/sys/unix/pthread_unix.odin1
-rwxr-xr-x[-rw-r--r--]core/sys/windows/kernel32.odin17
-rw-r--r--core/testing/events.odin48
-rw-r--r--core/testing/logging.odin71
-rw-r--r--core/testing/reporting.odin329
-rw-r--r--core/testing/runner.odin822
-rw-r--r--core/testing/runner_other.odin14
-rw-r--r--core/testing/runner_windows.odin235
-rw-r--r--core/testing/signal_handler.odin33
-rw-r--r--core/testing/signal_handler_libc.odin149
-rw-r--r--core/testing/signal_handler_other.odin19
-rw-r--r--core/testing/testing.odin69
-rw-r--r--core/text/i18n/qt_linguist.odin2
-rw-r--r--core/thread/thread_pool.odin133
-rw-r--r--core/thread/thread_unix.odin23
-rw-r--r--core/time/datetime/datetime.odin4
-rw-r--r--core/time/iso8601.odin (renamed from core/time/iso8061.odin)0
-rw-r--r--core/time/time.odin1
-rw-r--r--core/time/time_orca.odin24
-rw-r--r--core/unicode/tables.odin10
109 files changed, 6967 insertions, 873 deletions
diff --git a/core/bufio/reader.odin b/core/bufio/reader.odin
index 8ec736a66..a875c732d 100644
--- a/core/bufio/reader.odin
+++ b/core/bufio/reader.odin
@@ -29,12 +29,12 @@ MIN_READ_BUFFER_SIZE :: 16
@(private)
DEFAULT_MAX_CONSECUTIVE_EMPTY_READS :: 128
-reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
+reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator, loc := #caller_location) {
size := size
size = max(size, MIN_READ_BUFFER_SIZE)
reader_reset(b, rd)
b.buf_allocator = allocator
- b.buf = make([]byte, size, allocator)
+ b.buf = make([]byte, size, allocator, loc)
}
reader_init_with_buf :: proc(b: ^Reader, rd: io.Reader, buf: []byte) {
diff --git a/core/bytes/buffer.odin b/core/bytes/buffer.odin
index cb2ef9c62..a7e9b1c64 100644
--- a/core/bytes/buffer.odin
+++ b/core/bytes/buffer.odin
@@ -27,19 +27,19 @@ Read_Op :: enum i8 {
}
-buffer_init :: proc(b: ^Buffer, buf: []byte) {
- resize(&b.buf, len(buf))
+buffer_init :: proc(b: ^Buffer, buf: []byte, loc := #caller_location) {
+ resize(&b.buf, len(buf), loc=loc)
copy(b.buf[:], buf)
}
-buffer_init_string :: proc(b: ^Buffer, s: string) {
- resize(&b.buf, len(s))
+buffer_init_string :: proc(b: ^Buffer, s: string, loc := #caller_location) {
+ resize(&b.buf, len(s), loc=loc)
copy(b.buf[:], s)
}
-buffer_init_allocator :: proc(b: ^Buffer, len, cap: int, allocator := context.allocator) {
+buffer_init_allocator :: proc(b: ^Buffer, len, cap: int, allocator := context.allocator, loc := #caller_location) {
if b.buf == nil {
- b.buf = make([dynamic]byte, len, cap, allocator)
+ b.buf = make([dynamic]byte, len, cap, allocator, loc)
return
}
@@ -96,28 +96,28 @@ buffer_truncate :: proc(b: ^Buffer, n: int) {
}
@(private)
-_buffer_try_grow :: proc(b: ^Buffer, n: int) -> (int, bool) {
+_buffer_try_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) -> (int, bool) {
if l := len(b.buf); n <= cap(b.buf)-l {
- resize(&b.buf, l+n)
+ resize(&b.buf, l+n, loc=loc)
return l, true
}
return 0, false
}
@(private)
-_buffer_grow :: proc(b: ^Buffer, n: int) -> int {
+_buffer_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) -> int {
m := buffer_length(b)
if m == 0 && b.off != 0 {
buffer_reset(b)
}
- if i, ok := _buffer_try_grow(b, n); ok {
+ if i, ok := _buffer_try_grow(b, n, loc=loc); ok {
return i
}
if b.buf == nil && n <= SMALL_BUFFER_SIZE {
// Fixes #2756 by preserving allocator if already set on Buffer via init_buffer_allocator
- reserve(&b.buf, SMALL_BUFFER_SIZE)
- resize(&b.buf, n)
+ reserve(&b.buf, SMALL_BUFFER_SIZE, loc=loc)
+ resize(&b.buf, n, loc=loc)
return 0
}
@@ -127,31 +127,31 @@ _buffer_grow :: proc(b: ^Buffer, n: int) -> int {
} else if c > max(int) - c - n {
panic("bytes.Buffer: too large")
} else {
- resize(&b.buf, 2*c + n)
+ resize(&b.buf, 2*c + n, loc=loc)
copy(b.buf[:], b.buf[b.off:])
}
b.off = 0
- resize(&b.buf, m+n)
+ resize(&b.buf, m+n, loc=loc)
return m
}
-buffer_grow :: proc(b: ^Buffer, n: int) {
+buffer_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) {
if n < 0 {
panic("bytes.buffer_grow: negative count")
}
- m := _buffer_grow(b, n)
- resize(&b.buf, m)
+ m := _buffer_grow(b, n, loc=loc)
+ resize(&b.buf, m, loc=loc)
}
-buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.Error) {
+buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
if offset < 0 {
err = .Invalid_Offset
return
}
- _, ok := _buffer_try_grow(b, offset+len(p))
+ _, ok := _buffer_try_grow(b, offset+len(p), loc=loc)
if !ok {
- _ = _buffer_grow(b, offset+len(p))
+ _ = _buffer_grow(b, offset+len(p), loc=loc)
}
if len(b.buf) <= offset {
return 0, .Short_Write
@@ -160,47 +160,47 @@ buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.
}
-buffer_write :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
+buffer_write :: proc(b: ^Buffer, p: []byte, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, len(p))
+ m, ok := _buffer_try_grow(b, len(p), loc=loc)
if !ok {
- m = _buffer_grow(b, len(p))
+ m = _buffer_grow(b, len(p), loc=loc)
}
return copy(b.buf[m:], p), nil
}
-buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int) -> (n: int, err: io.Error) {
- return buffer_write(b, ([^]byte)(ptr)[:size])
+buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int, loc := #caller_location) -> (n: int, err: io.Error) {
+ return buffer_write(b, ([^]byte)(ptr)[:size], loc=loc)
}
-buffer_write_string :: proc(b: ^Buffer, s: string) -> (n: int, err: io.Error) {
+buffer_write_string :: proc(b: ^Buffer, s: string, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, len(s))
+ m, ok := _buffer_try_grow(b, len(s), loc=loc)
if !ok {
- m = _buffer_grow(b, len(s))
+ m = _buffer_grow(b, len(s), loc=loc)
}
return copy(b.buf[m:], s), nil
}
-buffer_write_byte :: proc(b: ^Buffer, c: byte) -> io.Error {
+buffer_write_byte :: proc(b: ^Buffer, c: byte, loc := #caller_location) -> io.Error {
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, 1)
+ m, ok := _buffer_try_grow(b, 1, loc=loc)
if !ok {
- m = _buffer_grow(b, 1)
+ m = _buffer_grow(b, 1, loc=loc)
}
b.buf[m] = c
return nil
}
-buffer_write_rune :: proc(b: ^Buffer, r: rune) -> (n: int, err: io.Error) {
+buffer_write_rune :: proc(b: ^Buffer, r: rune, loc := #caller_location) -> (n: int, err: io.Error) {
if r < utf8.RUNE_SELF {
- buffer_write_byte(b, byte(r))
+ buffer_write_byte(b, byte(r), loc=loc)
return 1, nil
}
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, utf8.UTF_MAX)
+ m, ok := _buffer_try_grow(b, utf8.UTF_MAX, loc=loc)
if !ok {
- m = _buffer_grow(b, utf8.UTF_MAX)
+ m = _buffer_grow(b, utf8.UTF_MAX, loc=loc)
}
res: [4]byte
res, n = utf8.encode_rune(r)
diff --git a/core/c/libc/signal.odin b/core/c/libc/signal.odin
index 186b74d8c..1489779fe 100644
--- a/core/c/libc/signal.odin
+++ b/core/c/libc/signal.odin
@@ -34,7 +34,7 @@ when ODIN_OS == .Windows {
SIGTERM :: 15
}
-when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD || ODIN_OS == .Haiku || ODIN_OS == .OpenBSD || ODIN_OS == .NetBSD {
SIG_ERR :: rawptr(~uintptr(0))
SIG_DFL :: rawptr(uintptr(0))
SIG_IGN :: rawptr(uintptr(1))
diff --git a/core/c/libc/stdio.odin b/core/c/libc/stdio.odin
index f17d3bd06..3e1d0f5a2 100644
--- a/core/c/libc/stdio.odin
+++ b/core/c/libc/stdio.odin
@@ -102,10 +102,12 @@ when ODIN_OS == .OpenBSD || ODIN_OS == .NetBSD {
SEEK_END :: 2
foreign libc {
- stderr: ^FILE
- stdin: ^FILE
- stdout: ^FILE
+ __sF: [3]FILE
}
+
+ stdin: ^FILE = &__sF[0]
+ stdout: ^FILE = &__sF[1]
+ stderr: ^FILE = &__sF[2]
}
when ODIN_OS == .FreeBSD {
@@ -127,9 +129,9 @@ when ODIN_OS == .FreeBSD {
SEEK_END :: 2
foreign libc {
- stderr: ^FILE
- stdin: ^FILE
- stdout: ^FILE
+ @(link_name="__stderrp") stderr: ^FILE
+ @(link_name="__stdinp") stdin: ^FILE
+ @(link_name="__stdoutp") stdout: ^FILE
}
}
diff --git a/core/crypto/_aes/aes.odin b/core/crypto/_aes/aes.odin
new file mode 100644
index 000000000..4f52485d2
--- /dev/null
+++ b/core/crypto/_aes/aes.odin
@@ -0,0 +1,28 @@
+package _aes
+
+// KEY_SIZE_128 is the AES-128 key size in bytes.
+KEY_SIZE_128 :: 16
+// KEY_SIZE_192 is the AES-192 key size in bytes.
+KEY_SIZE_192 :: 24
+// KEY_SIZE_256 is the AES-256 key size in bytes.
+KEY_SIZE_256 :: 32
+
+// BLOCK_SIZE is the AES block size in bytes.
+BLOCK_SIZE :: 16
+
+// ROUNDS_128 is the number of rounds for AES-128.
+ROUNDS_128 :: 10
+// ROUNDS_192 is the number of rounds for AES-192.
+ROUNDS_192 :: 12
+// ROUNDS_256 is the number of rounds for AES-256.
+ROUNDS_256 :: 14
+
+// GHASH_KEY_SIZE is the GHASH key size in bytes.
+GHASH_KEY_SIZE :: 16
+// GHASH_BLOCK_SIZE is the GHASH block size in bytes.
+GHASH_BLOCK_SIZE :: 16
+// GHASH_TAG_SIZE is the GHASH tag size in bytes.
+GHASH_TAG_SIZE :: 16
+
+// RCON is the AES keyschedule round constants.
+RCON := [10]byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36}
diff --git a/core/crypto/_aes/ct64/api.odin b/core/crypto/_aes/ct64/api.odin
new file mode 100644
index 000000000..ae624971c
--- /dev/null
+++ b/core/crypto/_aes/ct64/api.odin
@@ -0,0 +1,96 @@
+package aes_ct64
+
+import "base:intrinsics"
+import "core:mem"
+
+STRIDE :: 4
+
+// Context is a keyed AES (ECB) instance.
+Context :: struct {
+ _sk_exp: [120]u64,
+ _num_rounds: int,
+ _is_initialized: bool,
+}
+
+// init initializes a context for AES with the provided key.
+init :: proc(ctx: ^Context, key: []byte) {
+ skey: [30]u64 = ---
+
+ ctx._num_rounds = keysched(skey[:], key)
+ skey_expand(ctx._sk_exp[:], skey[:], ctx._num_rounds)
+ ctx._is_initialized = true
+}
+
+// encrypt_block sets `dst` to `AES-ECB-Encrypt(src)`.
+encrypt_block :: proc(ctx: ^Context, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64
+ load_blockx1(&q, src)
+ _encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blockx1(dst, &q)
+}
+
+// encrypt_block sets `dst` to `AES-ECB-Decrypt(src)`.
+decrypt_block :: proc(ctx: ^Context, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64
+ load_blockx1(&q, src)
+ _decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blockx1(dst, &q)
+}
+
+// encrypt_blocks sets `dst` to `AES-ECB-Encrypt(src[0], .. src[n])`.
+encrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64 = ---
+ src, dst := src, dst
+
+ n := len(src)
+ for n > 4 {
+ load_blocks(&q, src[0:4])
+ _encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst[0:4], &q)
+
+ src = src[4:]
+ dst = dst[4:]
+ n -= 4
+ }
+ if n > 0 {
+ load_blocks(&q, src)
+ _encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst, &q)
+ }
+}
+
+// decrypt_blocks sets dst to `AES-ECB-Decrypt(src[0], .. src[n])`.
+decrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64 = ---
+ src, dst := src, dst
+
+ n := len(src)
+ for n > 4 {
+ load_blocks(&q, src[0:4])
+ _decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst[0:4], &q)
+
+ src = src[4:]
+ dst = dst[4:]
+ n -= 4
+ }
+ if n > 0 {
+ load_blocks(&q, src)
+ _decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst, &q)
+ }
+}
+
+// reset sanitizes the Context. The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+ mem.zero_explicit(ctx, size_of(ctx))
+}
diff --git a/core/crypto/_aes/ct64/ct64.odin b/core/crypto/_aes/ct64/ct64.odin
new file mode 100644
index 000000000..f198cab81
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64.odin
@@ -0,0 +1,265 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+
+// Bitsliced AES for 64-bit general purpose (integer) registers. Each
+// invocation will process up to 4 blocks at a time. This implementation
+// is derived from the BearSSL ct64 code, and distributed under a 1-clause
+// BSD license with permission from the original author.
+//
+// WARNING: "hic sunt dracones"
+//
+// This package also deliberately exposes enough internals to be able to
+// function as a replacement for `AESENC` and `AESDEC` from AES-NI, to
+// allow the implementation of non-AES primitives that use the AES round
+// function such as AEGIS and Deoxys-II. This should ONLY be done when
+// implementing something other than AES itself.
+
+sub_bytes :: proc "contextless" (q: ^[8]u64) {
+ // This S-box implementation is a straightforward translation of
+ // the circuit described by Boyar and Peralta in "A new
+ // combinational logic minimization technique with applications
+ // to cryptology" (https://eprint.iacr.org/2009/191.pdf).
+ //
+ // Note that variables x* (input) and s* (output) are numbered
+ // in "reverse" order (x0 is the high bit, x7 is the low bit).
+
+ x0 := q[7]
+ x1 := q[6]
+ x2 := q[5]
+ x3 := q[4]
+ x4 := q[3]
+ x5 := q[2]
+ x6 := q[1]
+ x7 := q[0]
+
+ // Top linear transformation.
+ y14 := x3 ~ x5
+ y13 := x0 ~ x6
+ y9 := x0 ~ x3
+ y8 := x0 ~ x5
+ t0 := x1 ~ x2
+ y1 := t0 ~ x7
+ y4 := y1 ~ x3
+ y12 := y13 ~ y14
+ y2 := y1 ~ x0
+ y5 := y1 ~ x6
+ y3 := y5 ~ y8
+ t1 := x4 ~ y12
+ y15 := t1 ~ x5
+ y20 := t1 ~ x1
+ y6 := y15 ~ x7
+ y10 := y15 ~ t0
+ y11 := y20 ~ y9
+ y7 := x7 ~ y11
+ y17 := y10 ~ y11
+ y19 := y10 ~ y8
+ y16 := t0 ~ y11
+ y21 := y13 ~ y16
+ y18 := x0 ~ y16
+
+ // Non-linear section.
+ t2 := y12 & y15
+ t3 := y3 & y6
+ t4 := t3 ~ t2
+ t5 := y4 & x7
+ t6 := t5 ~ t2
+ t7 := y13 & y16
+ t8 := y5 & y1
+ t9 := t8 ~ t7
+ t10 := y2 & y7
+ t11 := t10 ~ t7
+ t12 := y9 & y11
+ t13 := y14 & y17
+ t14 := t13 ~ t12
+ t15 := y8 & y10
+ t16 := t15 ~ t12
+ t17 := t4 ~ t14
+ t18 := t6 ~ t16
+ t19 := t9 ~ t14
+ t20 := t11 ~ t16
+ t21 := t17 ~ y20
+ t22 := t18 ~ y19
+ t23 := t19 ~ y21
+ t24 := t20 ~ y18
+
+ t25 := t21 ~ t22
+ t26 := t21 & t23
+ t27 := t24 ~ t26
+ t28 := t25 & t27
+ t29 := t28 ~ t22
+ t30 := t23 ~ t24
+ t31 := t22 ~ t26
+ t32 := t31 & t30
+ t33 := t32 ~ t24
+ t34 := t23 ~ t33
+ t35 := t27 ~ t33
+ t36 := t24 & t35
+ t37 := t36 ~ t34
+ t38 := t27 ~ t36
+ t39 := t29 & t38
+ t40 := t25 ~ t39
+
+ t41 := t40 ~ t37
+ t42 := t29 ~ t33
+ t43 := t29 ~ t40
+ t44 := t33 ~ t37
+ t45 := t42 ~ t41
+ z0 := t44 & y15
+ z1 := t37 & y6
+ z2 := t33 & x7
+ z3 := t43 & y16
+ z4 := t40 & y1
+ z5 := t29 & y7
+ z6 := t42 & y11
+ z7 := t45 & y17
+ z8 := t41 & y10
+ z9 := t44 & y12
+ z10 := t37 & y3
+ z11 := t33 & y4
+ z12 := t43 & y13
+ z13 := t40 & y5
+ z14 := t29 & y2
+ z15 := t42 & y9
+ z16 := t45 & y14
+ z17 := t41 & y8
+
+ // Bottom linear transformation.
+ t46 := z15 ~ z16
+ t47 := z10 ~ z11
+ t48 := z5 ~ z13
+ t49 := z9 ~ z10
+ t50 := z2 ~ z12
+ t51 := z2 ~ z5
+ t52 := z7 ~ z8
+ t53 := z0 ~ z3
+ t54 := z6 ~ z7
+ t55 := z16 ~ z17
+ t56 := z12 ~ t48
+ t57 := t50 ~ t53
+ t58 := z4 ~ t46
+ t59 := z3 ~ t54
+ t60 := t46 ~ t57
+ t61 := z14 ~ t57
+ t62 := t52 ~ t58
+ t63 := t49 ~ t58
+ t64 := z4 ~ t59
+ t65 := t61 ~ t62
+ t66 := z1 ~ t63
+ s0 := t59 ~ t63
+ s6 := t56 ~ ~t62
+ s7 := t48 ~ ~t60
+ t67 := t64 ~ t65
+ s3 := t53 ~ t66
+ s4 := t51 ~ t66
+ s5 := t47 ~ t65
+ s1 := t64 ~ ~s3
+ s2 := t55 ~ ~t67
+
+ q[7] = s0
+ q[6] = s1
+ q[5] = s2
+ q[4] = s3
+ q[3] = s4
+ q[2] = s5
+ q[1] = s6
+ q[0] = s7
+}
+
+orthogonalize :: proc "contextless" (q: ^[8]u64) {
+ CL2 :: 0x5555555555555555
+ CH2 :: 0xAAAAAAAAAAAAAAAA
+ q[0], q[1] = (q[0] & CL2) | ((q[1] & CL2) << 1), ((q[0] & CH2) >> 1) | (q[1] & CH2)
+ q[2], q[3] = (q[2] & CL2) | ((q[3] & CL2) << 1), ((q[2] & CH2) >> 1) | (q[3] & CH2)
+ q[4], q[5] = (q[4] & CL2) | ((q[5] & CL2) << 1), ((q[4] & CH2) >> 1) | (q[5] & CH2)
+ q[6], q[7] = (q[6] & CL2) | ((q[7] & CL2) << 1), ((q[6] & CH2) >> 1) | (q[7] & CH2)
+
+ CL4 :: 0x3333333333333333
+ CH4 :: 0xCCCCCCCCCCCCCCCC
+ q[0], q[2] = (q[0] & CL4) | ((q[2] & CL4) << 2), ((q[0] & CH4) >> 2) | (q[2] & CH4)
+ q[1], q[3] = (q[1] & CL4) | ((q[3] & CL4) << 2), ((q[1] & CH4) >> 2) | (q[3] & CH4)
+ q[4], q[6] = (q[4] & CL4) | ((q[6] & CL4) << 2), ((q[4] & CH4) >> 2) | (q[6] & CH4)
+ q[5], q[7] = (q[5] & CL4) | ((q[7] & CL4) << 2), ((q[5] & CH4) >> 2) | (q[7] & CH4)
+
+ CL8 :: 0x0F0F0F0F0F0F0F0F
+ CH8 :: 0xF0F0F0F0F0F0F0F0
+ q[0], q[4] = (q[0] & CL8) | ((q[4] & CL8) << 4), ((q[0] & CH8) >> 4) | (q[4] & CH8)
+ q[1], q[5] = (q[1] & CL8) | ((q[5] & CL8) << 4), ((q[1] & CH8) >> 4) | (q[5] & CH8)
+ q[2], q[6] = (q[2] & CL8) | ((q[6] & CL8) << 4), ((q[2] & CH8) >> 4) | (q[6] & CH8)
+ q[3], q[7] = (q[3] & CL8) | ((q[7] & CL8) << 4), ((q[3] & CH8) >> 4) | (q[7] & CH8)
+}
+
+@(require_results)
+interleave_in :: proc "contextless" (w: []u32) -> (q0, q1: u64) #no_bounds_check {
+ if len(w) < 4 {
+ intrinsics.trap()
+ }
+ x0, x1, x2, x3 := u64(w[0]), u64(w[1]), u64(w[2]), u64(w[3])
+ x0 |= (x0 << 16)
+ x1 |= (x1 << 16)
+ x2 |= (x2 << 16)
+ x3 |= (x3 << 16)
+ x0 &= 0x0000FFFF0000FFFF
+ x1 &= 0x0000FFFF0000FFFF
+ x2 &= 0x0000FFFF0000FFFF
+ x3 &= 0x0000FFFF0000FFFF
+ x0 |= (x0 << 8)
+ x1 |= (x1 << 8)
+ x2 |= (x2 << 8)
+ x3 |= (x3 << 8)
+ x0 &= 0x00FF00FF00FF00FF
+ x1 &= 0x00FF00FF00FF00FF
+ x2 &= 0x00FF00FF00FF00FF
+ x3 &= 0x00FF00FF00FF00FF
+ q0 = x0 | (x2 << 8)
+ q1 = x1 | (x3 << 8)
+ return
+}
+
+@(require_results)
+interleave_out :: proc "contextless" (q0, q1: u64) -> (w0, w1, w2, w3: u32) {
+ x0 := q0 & 0x00FF00FF00FF00FF
+ x1 := q1 & 0x00FF00FF00FF00FF
+ x2 := (q0 >> 8) & 0x00FF00FF00FF00FF
+ x3 := (q1 >> 8) & 0x00FF00FF00FF00FF
+ x0 |= (x0 >> 8)
+ x1 |= (x1 >> 8)
+ x2 |= (x2 >> 8)
+ x3 |= (x3 >> 8)
+ x0 &= 0x0000FFFF0000FFFF
+ x1 &= 0x0000FFFF0000FFFF
+ x2 &= 0x0000FFFF0000FFFF
+ x3 &= 0x0000FFFF0000FFFF
+ w0 = u32(x0) | u32(x0 >> 16)
+ w1 = u32(x1) | u32(x1 >> 16)
+ w2 = u32(x2) | u32(x2 >> 16)
+ w3 = u32(x3) | u32(x3 >> 16)
+ return
+}
+
+@(private)
+rotr32 :: #force_inline proc "contextless" (x: u64) -> u64 {
+ return (x << 32) | (x >> 32)
+}
diff --git a/core/crypto/_aes/ct64/ct64_dec.odin b/core/crypto/_aes/ct64/ct64_dec.odin
new file mode 100644
index 000000000..408ee6002
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64_dec.odin
@@ -0,0 +1,135 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+
+inv_sub_bytes :: proc "contextless" (q: ^[8]u64) {
+ // AES S-box is:
+ // S(x) = A(I(x)) ^ 0x63
+ // where I() is inversion in GF(256), and A() is a linear
+ // transform (0 is formally defined to be its own inverse).
+ // Since inversion is an involution, the inverse S-box can be
+ // computed from the S-box as:
+ // iS(x) = B(S(B(x ^ 0x63)) ^ 0x63)
+ // where B() is the inverse of A(). Indeed, for any y in GF(256):
+ // iS(S(y)) = B(A(I(B(A(I(y)) ^ 0x63 ^ 0x63))) ^ 0x63 ^ 0x63) = y
+ //
+ // Note: we reuse the implementation of the forward S-box,
+ // instead of duplicating it here, so that total code size is
+ // lower. By merging the B() transforms into the S-box circuit
+ // we could make faster CBC decryption, but CBC decryption is
+ // already quite faster than CBC encryption because we can
+ // process four blocks in parallel.
+
+ q0 := ~q[0]
+ q1 := ~q[1]
+ q2 := q[2]
+ q3 := q[3]
+ q4 := q[4]
+ q5 := ~q[5]
+ q6 := ~q[6]
+ q7 := q[7]
+ q[7] = q1 ~ q4 ~ q6
+ q[6] = q0 ~ q3 ~ q5
+ q[5] = q7 ~ q2 ~ q4
+ q[4] = q6 ~ q1 ~ q3
+ q[3] = q5 ~ q0 ~ q2
+ q[2] = q4 ~ q7 ~ q1
+ q[1] = q3 ~ q6 ~ q0
+ q[0] = q2 ~ q5 ~ q7
+
+ sub_bytes(q)
+
+ q0 = ~q[0]
+ q1 = ~q[1]
+ q2 = q[2]
+ q3 = q[3]
+ q4 = q[4]
+ q5 = ~q[5]
+ q6 = ~q[6]
+ q7 = q[7]
+ q[7] = q1 ~ q4 ~ q6
+ q[6] = q0 ~ q3 ~ q5
+ q[5] = q7 ~ q2 ~ q4
+ q[4] = q6 ~ q1 ~ q3
+ q[3] = q5 ~ q0 ~ q2
+ q[2] = q4 ~ q7 ~ q1
+ q[1] = q3 ~ q6 ~ q0
+ q[0] = q2 ~ q5 ~ q7
+}
+
+inv_shift_rows :: proc "contextless" (q: ^[8]u64) {
+ for x, i in q {
+ q[i] =
+ (x & 0x000000000000FFFF) |
+ ((x & 0x000000000FFF0000) << 4) |
+ ((x & 0x00000000F0000000) >> 12) |
+ ((x & 0x000000FF00000000) << 8) |
+ ((x & 0x0000FF0000000000) >> 8) |
+ ((x & 0x000F000000000000) << 12) |
+ ((x & 0xFFF0000000000000) >> 4)
+ }
+}
+
+inv_mix_columns :: proc "contextless" (q: ^[8]u64) {
+ q0 := q[0]
+ q1 := q[1]
+ q2 := q[2]
+ q3 := q[3]
+ q4 := q[4]
+ q5 := q[5]
+ q6 := q[6]
+ q7 := q[7]
+ r0 := (q0 >> 16) | (q0 << 48)
+ r1 := (q1 >> 16) | (q1 << 48)
+ r2 := (q2 >> 16) | (q2 << 48)
+ r3 := (q3 >> 16) | (q3 << 48)
+ r4 := (q4 >> 16) | (q4 << 48)
+ r5 := (q5 >> 16) | (q5 << 48)
+ r6 := (q6 >> 16) | (q6 << 48)
+ r7 := (q7 >> 16) | (q7 << 48)
+
+ q[0] = q5 ~ q6 ~ q7 ~ r0 ~ r5 ~ r7 ~ rotr32(q0 ~ q5 ~ q6 ~ r0 ~ r5)
+ q[1] = q0 ~ q5 ~ r0 ~ r1 ~ r5 ~ r6 ~ r7 ~ rotr32(q1 ~ q5 ~ q7 ~ r1 ~ r5 ~ r6)
+ q[2] = q0 ~ q1 ~ q6 ~ r1 ~ r2 ~ r6 ~ r7 ~ rotr32(q0 ~ q2 ~ q6 ~ r2 ~ r6 ~ r7)
+ q[3] = q0 ~ q1 ~ q2 ~ q5 ~ q6 ~ r0 ~ r2 ~ r3 ~ r5 ~ rotr32(q0 ~ q1 ~ q3 ~ q5 ~ q6 ~ q7 ~ r0 ~ r3 ~ r5 ~ r7)
+ q[4] = q1 ~ q2 ~ q3 ~ q5 ~ r1 ~ r3 ~ r4 ~ r5 ~ r6 ~ r7 ~ rotr32(q1 ~ q2 ~ q4 ~ q5 ~ q7 ~ r1 ~ r4 ~ r5 ~ r6)
+ q[5] = q2 ~ q3 ~ q4 ~ q6 ~ r2 ~ r4 ~ r5 ~ r6 ~ r7 ~ rotr32(q2 ~ q3 ~ q5 ~ q6 ~ r2 ~ r5 ~ r6 ~ r7)
+ q[6] = q3 ~ q4 ~ q5 ~ q7 ~ r3 ~ r5 ~ r6 ~ r7 ~ rotr32(q3 ~ q4 ~ q6 ~ q7 ~ r3 ~ r6 ~ r7)
+ q[7] = q4 ~ q5 ~ q6 ~ r4 ~ r6 ~ r7 ~ rotr32(q4 ~ q5 ~ q7 ~ r4 ~ r7)
+}
+
+@(private)
+_decrypt :: proc "contextless" (q: ^[8]u64, skey: []u64, num_rounds: int) {
+ add_round_key(q, skey[num_rounds << 3:])
+ for u := num_rounds - 1; u > 0; u -= 1 {
+ inv_shift_rows(q)
+ inv_sub_bytes(q)
+ add_round_key(q, skey[u << 3:])
+ inv_mix_columns(q)
+ }
+ inv_shift_rows(q)
+ inv_sub_bytes(q)
+ add_round_key(q, skey)
+}
diff --git a/core/crypto/_aes/ct64/ct64_enc.odin b/core/crypto/_aes/ct64/ct64_enc.odin
new file mode 100644
index 000000000..36d4aebc8
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64_enc.odin
@@ -0,0 +1,95 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+
+add_round_key :: proc "contextless" (q: ^[8]u64, sk: []u64) #no_bounds_check {
+ if len(sk) < 8 {
+ intrinsics.trap()
+ }
+
+ q[0] ~= sk[0]
+ q[1] ~= sk[1]
+ q[2] ~= sk[2]
+ q[3] ~= sk[3]
+ q[4] ~= sk[4]
+ q[5] ~= sk[5]
+ q[6] ~= sk[6]
+ q[7] ~= sk[7]
+}
+
+shift_rows :: proc "contextless" (q: ^[8]u64) {
+ for x, i in q {
+ q[i] =
+ (x & 0x000000000000FFFF) |
+ ((x & 0x00000000FFF00000) >> 4) |
+ ((x & 0x00000000000F0000) << 12) |
+ ((x & 0x0000FF0000000000) >> 8) |
+ ((x & 0x000000FF00000000) << 8) |
+ ((x & 0xF000000000000000) >> 12) |
+ ((x & 0x0FFF000000000000) << 4)
+ }
+}
+
+mix_columns :: proc "contextless" (q: ^[8]u64) {
+ q0 := q[0]
+ q1 := q[1]
+ q2 := q[2]
+ q3 := q[3]
+ q4 := q[4]
+ q5 := q[5]
+ q6 := q[6]
+ q7 := q[7]
+ r0 := (q0 >> 16) | (q0 << 48)
+ r1 := (q1 >> 16) | (q1 << 48)
+ r2 := (q2 >> 16) | (q2 << 48)
+ r3 := (q3 >> 16) | (q3 << 48)
+ r4 := (q4 >> 16) | (q4 << 48)
+ r5 := (q5 >> 16) | (q5 << 48)
+ r6 := (q6 >> 16) | (q6 << 48)
+ r7 := (q7 >> 16) | (q7 << 48)
+
+ q[0] = q7 ~ r7 ~ r0 ~ rotr32(q0 ~ r0)
+ q[1] = q0 ~ r0 ~ q7 ~ r7 ~ r1 ~ rotr32(q1 ~ r1)
+ q[2] = q1 ~ r1 ~ r2 ~ rotr32(q2 ~ r2)
+ q[3] = q2 ~ r2 ~ q7 ~ r7 ~ r3 ~ rotr32(q3 ~ r3)
+ q[4] = q3 ~ r3 ~ q7 ~ r7 ~ r4 ~ rotr32(q4 ~ r4)
+ q[5] = q4 ~ r4 ~ r5 ~ rotr32(q5 ~ r5)
+ q[6] = q5 ~ r5 ~ r6 ~ rotr32(q6 ~ r6)
+ q[7] = q6 ~ r6 ~ r7 ~ rotr32(q7 ~ r7)
+}
+
+@(private)
+_encrypt :: proc "contextless" (q: ^[8]u64, skey: []u64, num_rounds: int) {
+ add_round_key(q, skey)
+ for u in 1 ..< num_rounds {
+ sub_bytes(q)
+ shift_rows(q)
+ mix_columns(q)
+ add_round_key(q, skey[u << 3:])
+ }
+ sub_bytes(q)
+ shift_rows(q)
+ add_round_key(q, skey[num_rounds << 3:])
+}
diff --git a/core/crypto/_aes/ct64/ct64_keysched.odin b/core/crypto/_aes/ct64/ct64_keysched.odin
new file mode 100644
index 000000000..060a2c03e
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64_keysched.odin
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+import "core:crypto/_aes"
+import "core:encoding/endian"
+import "core:mem"
+
+// sub_word applies the AES S-box to each byte of the 32-bit word x via
+// the bitsliced sub_bytes, so no table lookups occur (constant time).
+// The input occupies q[0] alone, with the other planes zeroed.
+// NOTE(review): only q[0] is wiped afterwards; after the second
+// orthogonalize the other seven words may still hold key-derived
+// bits — confirm that is acceptable for the key schedule's use.
+@(private, require_results)
+sub_word :: proc "contextless" (x: u32) -> u32 {
+	q := [8]u64{u64(x), 0, 0, 0, 0, 0, 0, 0}
+
+	orthogonalize(&q)
+	sub_bytes(&q)
+	orthogonalize(&q)
+	ret := u32(q[0])
+
+	mem.zero_explicit(&q[0], size_of(u64))
+
+	return ret
+}
+
+// keysched expands `key` (16/24/32 bytes) into the compressed
+// bitsliced key schedule comp_skey and returns the round count
+// (10/12/14).  The expansion is the standard FIPS-197 algorithm on
+// 32-bit words (loaded little-endian); each group of four schedule
+// words is then orthogonalized and compressed to two u64s by keeping
+// every fourth bit of each plane.  Panics on any other key length.
+// All intermediates are wiped before returning.
+@(private, require_results)
+keysched :: proc(comp_skey: []u64, key: []byte) -> int {
+	num_rounds, key_len := 0, len(key)
+	switch key_len {
+	case _aes.KEY_SIZE_128:
+		num_rounds = _aes.ROUNDS_128
+	case _aes.KEY_SIZE_192:
+		num_rounds = _aes.ROUNDS_192
+	case _aes.KEY_SIZE_256:
+		num_rounds = _aes.ROUNDS_256
+	case:
+		panic("crypto/aes: invalid AES key size")
+	}
+
+	skey: [60]u32 = ---
+	nk, nkf := key_len >> 2, (num_rounds + 1) << 2
+	for i in 0 ..< nk {
+		skey[i] = endian.unchecked_get_u32le(key[i << 2:])
+	}
+	// RotWord/SubWord/Rcon schedule; `j` tracks i mod nk, `k` indexes
+	// the round-constant table.
+	tmp := skey[(key_len >> 2) - 1]
+	for i, j, k := nk, 0, 0; i < nkf; i += 1 {
+		if j == 0 {
+			tmp = (tmp << 24) | (tmp >> 8)
+			tmp = sub_word(tmp) ~ u32(_aes.RCON[k])
+		} else if nk > 6 && j == 4 {
+			tmp = sub_word(tmp)
+		}
+		tmp ~= skey[i - nk]
+		skey[i] = tmp
+		if j += 1; j == nk {
+			j = 0
+			k += 1
+		}
+	}
+
+	// Bitslice and compress: 4 schedule words -> 2 u64s per round key.
+	q: [8]u64 = ---
+	for i, j := 0, 0; i < nkf; i, j = i + 4, j + 2 {
+		q[0], q[4] = interleave_in(skey[i:])
+		q[1] = q[0]
+		q[2] = q[0]
+		q[3] = q[0]
+		q[5] = q[4]
+		q[6] = q[4]
+		q[7] = q[4]
+		orthogonalize(&q)
+		comp_skey[j + 0] =
+			(q[0] & 0x1111111111111111) |
+			(q[1] & 0x2222222222222222) |
+			(q[2] & 0x4444444444444444) |
+			(q[3] & 0x8888888888888888)
+		comp_skey[j + 1] =
+			(q[4] & 0x1111111111111111) |
+			(q[5] & 0x2222222222222222) |
+			(q[6] & 0x4444444444444444) |
+			(q[7] & 0x8888888888888888)
+	}
+
+	mem.zero_explicit(&skey, size_of(skey))
+	mem.zero_explicit(&q, size_of(q))
+
+	return num_rounds
+}
+
+// skey_expand decompresses comp_skey (2 u64s per round key, as built
+// by keysched) into the full bitsliced schedule skey (4 u64s per
+// round key).  After masking, the kept bits sit 4 positions apart, so
+// `(x << 4) - x` (i.e. x * 15) replicates each bit into a run of four.
+@(private)
+skey_expand :: proc "contextless" (skey, comp_skey: []u64, num_rounds: int) {
+	n := (num_rounds + 1) << 1
+	for u, v := 0, 0; u < n; u, v = u + 1, v + 4 {
+		x0 := comp_skey[u]
+		x1, x2, x3 := x0, x0, x0
+		x0 &= 0x1111111111111111
+		x1 &= 0x2222222222222222
+		x2 &= 0x4444444444444444
+		x3 &= 0x8888888888888888
+		x1 >>= 1
+		x2 >>= 2
+		x3 >>= 3
+		skey[v + 0] = (x0 << 4) - x0
+		skey[v + 1] = (x1 << 4) - x1
+		skey[v + 2] = (x2 << 4) - x2
+		skey[v + 3] = (x3 << 4) - x3
+	}
+}
+
+// orthogonalize_roundkey expands a single raw 128-bit round key into
+// the full bitsliced form (8 u64s written to qq) — the same
+// compress-then-expand transform as keysched/skey_expand, applied to
+// one key.  Traps if qq holds fewer than 8 words or key is not 16
+// bytes.  All intermediates are wiped before returning.
+orthogonalize_roundkey :: proc "contextless" (qq: []u64, key: []byte) {
+	if len(qq) < 8 || len(key) != 16 {
+		intrinsics.trap()
+	}
+
+	skey: [4]u32 = ---
+	skey[0] = endian.unchecked_get_u32le(key[0:])
+	skey[1] = endian.unchecked_get_u32le(key[4:])
+	skey[2] = endian.unchecked_get_u32le(key[8:])
+	skey[3] = endian.unchecked_get_u32le(key[12:])
+
+	q: [8]u64 = ---
+	q[0], q[4] = interleave_in(skey[:])
+	q[1] = q[0]
+	q[2] = q[0]
+	q[3] = q[0]
+	q[5] = q[4]
+	q[6] = q[4]
+	q[7] = q[4]
+	orthogonalize(&q)
+
+	// Compress: keep every fourth bit of each plane.
+	comp_skey: [2]u64 = ---
+	comp_skey[0] =
+		(q[0] & 0x1111111111111111) |
+		(q[1] & 0x2222222222222222) |
+		(q[2] & 0x4444444444444444) |
+		(q[3] & 0x8888888888888888)
+	comp_skey[1] =
+		(q[4] & 0x1111111111111111) |
+		(q[5] & 0x2222222222222222) |
+		(q[6] & 0x4444444444444444) |
+		(q[7] & 0x8888888888888888)
+
+	// Expand: `(x << 4) - x` == x * 15 replicates each kept bit into a
+	// run of four (the masked bits are 4 positions apart).
+	for x, u in comp_skey {
+		x0 := x
+		x1, x2, x3 := x0, x0, x0
+		x0 &= 0x1111111111111111
+		x1 &= 0x2222222222222222
+		x2 &= 0x4444444444444444
+		x3 &= 0x8888888888888888
+		x1 >>= 1
+		x2 >>= 2
+		x3 >>= 3
+		qq[u * 4 + 0] = (x0 << 4) - x0
+		qq[u * 4 + 1] = (x1 << 4) - x1
+		qq[u * 4 + 2] = (x2 << 4) - x2
+		qq[u * 4 + 3] = (x3 << 4) - x3
+	}
+
+	mem.zero_explicit(&skey, size_of(skey))
+	mem.zero_explicit(&q, size_of(q))
+	mem.zero_explicit(&comp_skey, size_of(comp_skey))
+}
diff --git a/core/crypto/_aes/ct64/ghash.odin b/core/crypto/_aes/ct64/ghash.odin
new file mode 100644
index 000000000..21ac2ca97
--- /dev/null
+++ b/core/crypto/_aes/ct64/ghash.odin
@@ -0,0 +1,136 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+import "core:crypto/_aes"
+import "core:encoding/endian"
+
+// bmul64 returns the low 64 bits of the carry-less (GF(2)[X])
+// product of x and y.  Each operand is split into four masked words
+// keeping every fourth bit, so the integer multiplies cannot produce
+// carries that collide between polynomial coefficients; the partial
+// products are re-masked and recombined.  Constant time.
+@(private = "file")
+bmul64 :: proc "contextless" (x, y: u64) -> u64 {
+	x0 := x & 0x1111111111111111
+	x1 := x & 0x2222222222222222
+	x2 := x & 0x4444444444444444
+	x3 := x & 0x8888888888888888
+	y0 := y & 0x1111111111111111
+	y1 := y & 0x2222222222222222
+	y2 := y & 0x4444444444444444
+	y3 := y & 0x8888888888888888
+	z0 := (x0 * y0) ~ (x1 * y3) ~ (x2 * y2) ~ (x3 * y1)
+	z1 := (x0 * y1) ~ (x1 * y0) ~ (x2 * y3) ~ (x3 * y2)
+	z2 := (x0 * y2) ~ (x1 * y1) ~ (x2 * y0) ~ (x3 * y3)
+	z3 := (x0 * y3) ~ (x1 * y2) ~ (x2 * y1) ~ (x3 * y0)
+	z0 &= 0x1111111111111111
+	z1 &= 0x2222222222222222
+	z2 &= 0x4444444444444444
+	z3 &= 0x8888888888888888
+	return z0 | z1 | z2 | z3
+}
+
+// rev64 reverses the order of the 64 bits in x, via log2(64) swap
+// passes: adjacent bits, bit pairs, nibbles, bytes, 16-bit halves,
+// and finally the two 32-bit halves.
+@(private = "file")
+rev64 :: proc "contextless" (x: u64) -> u64 {
+	x := x
+	x = ((x & 0x5555555555555555) << 1) | ((x >> 1) & 0x5555555555555555)
+	x = ((x & 0x3333333333333333) << 2) | ((x >> 2) & 0x3333333333333333)
+	x = ((x & 0x0F0F0F0F0F0F0F0F) << 4) | ((x >> 4) & 0x0F0F0F0F0F0F0F0F)
+	x = ((x & 0x00FF00FF00FF00FF) << 8) | ((x >> 8) & 0x00FF00FF00FF00FF)
+	x = ((x & 0x0000FFFF0000FFFF) << 16) | ((x >> 16) & 0x0000FFFF0000FFFF)
+	return (x << 32) | (x >> 32)
+}
+
+// ghash calculates the GHASH of data, with the key `key`, and input `dst`
+// and `data`, and stores the resulting digest in `dst`.
+//
+// Note: `dst` is both an input and an output (the running 128-bit
+// accumulator), to support easy implementation of GCM.  Both `dst` and
+// `key` must be exactly GHASH_BLOCK_SIZE bytes or the procedure traps;
+// a short final block of `data` is implicitly zero-padded.
+ghash :: proc "contextless" (dst, key, data: []byte) {
+	if len(dst) != _aes.GHASH_BLOCK_SIZE || len(key) != _aes.GHASH_BLOCK_SIZE {
+		intrinsics.trap()
+	}
+
+	buf := data
+	l := len(buf)
+
+	// Accumulator (y) and key (h), plus bit-reversed copies: bmul64
+	// only returns the low half of each carry-less product, so high
+	// halves are obtained by multiplying reversed operands.
+	y1 := endian.unchecked_get_u64be(dst[0:])
+	y0 := endian.unchecked_get_u64be(dst[8:])
+	h1 := endian.unchecked_get_u64be(key[0:])
+	h0 := endian.unchecked_get_u64be(key[8:])
+	h0r := rev64(h0)
+	h1r := rev64(h1)
+	h2 := h0 ~ h1
+	h2r := h0r ~ h1r
+
+	src: []byte
+	for l > 0 {
+		if l >= _aes.GHASH_BLOCK_SIZE {
+			src = buf
+			buf = buf[_aes.GHASH_BLOCK_SIZE:]
+			l -= _aes.GHASH_BLOCK_SIZE
+		} else {
+			// Zero-pad the final partial block.
+			// NOTE(review): `src` aliases this block-scoped temporary
+			// and is only read below, within this same iteration.
+			tmp: [_aes.GHASH_BLOCK_SIZE]byte
+			copy(tmp[:], buf)
+			src = tmp[:]
+			l = 0
+		}
+		y1 ~= endian.unchecked_get_u64be(src)
+		y0 ~= endian.unchecked_get_u64be(src[8:])
+
+		y0r := rev64(y0)
+		y1r := rev64(y1)
+		y2 := y0 ~ y1
+		y2r := y0r ~ y1r
+
+		// Karatsuba: 3 carry-less multiplies (plus 3 reversed ones for
+		// the high halves) instead of 4.
+		z0 := bmul64(y0, h0)
+		z1 := bmul64(y1, h1)
+		z2 := bmul64(y2, h2)
+		z0h := bmul64(y0r, h0r)
+		z1h := bmul64(y1r, h1r)
+		z2h := bmul64(y2r, h2r)
+		z2 ~= z0 ~ z1
+		z2h ~= z0h ~ z1h
+		z0h = rev64(z0h) >> 1
+		z1h = rev64(z1h) >> 1
+		z2h = rev64(z2h) >> 1
+
+		v0 := z0
+		v1 := z0h ~ z2
+		v2 := z1 ~ z2h
+		v3 := z1h
+
+		// Shift the 256-bit product left by one (GHASH's bit-reflected
+		// convention), then reduce modulo X^128 + X^7 + X^2 + X + 1.
+		v3 = (v3 << 1) | (v2 >> 63)
+		v2 = (v2 << 1) | (v1 >> 63)
+		v1 = (v1 << 1) | (v0 >> 63)
+		v0 = (v0 << 1)
+
+		v2 ~= v0 ~ (v0 >> 1) ~ (v0 >> 2) ~ (v0 >> 7)
+		v1 ~= (v0 << 63) ~ (v0 << 62) ~ (v0 << 57)
+		v3 ~= v1 ~ (v1 >> 1) ~ (v1 >> 2) ~ (v1 >> 7)
+		v2 ~= (v1 << 63) ~ (v1 << 62) ~ (v1 << 57)
+
+		y0 = v2
+		y1 = v3
+	}
+
+	endian.unchecked_put_u64be(dst[0:], y1)
+	endian.unchecked_put_u64be(dst[8:], y0)
+}
diff --git a/core/crypto/_aes/ct64/helpers.odin b/core/crypto/_aes/ct64/helpers.odin
new file mode 100644
index 000000000..169271f6d
--- /dev/null
+++ b/core/crypto/_aes/ct64/helpers.odin
@@ -0,0 +1,75 @@
+package aes_ct64
+
+import "base:intrinsics"
+import "core:crypto/_aes"
+import "core:encoding/endian"
+
+// load_blockx1 loads one 16-byte block from src into the bitsliced
+// state q (word pair q[0]/q[4]) and orthogonalizes.  Traps if src is
+// not exactly one block.
+load_blockx1 :: proc "contextless" (q: ^[8]u64, src: []byte) {
+	if len(src) != _aes.BLOCK_SIZE {
+		intrinsics.trap()
+	}
+
+	w: [4]u32 = ---
+	w[0] = endian.unchecked_get_u32le(src[0:])
+	w[1] = endian.unchecked_get_u32le(src[4:])
+	w[2] = endian.unchecked_get_u32le(src[8:])
+	w[3] = endian.unchecked_get_u32le(src[12:])
+	q[0], q[4] = interleave_in(w[:])
+	orthogonalize(q)
+}
+
+// store_blockx1 de-bitslices q and writes one 16-byte block to dst.
+// Traps on a size mismatch.  Note that q is mutated in place by the
+// orthogonalize call.
+store_blockx1 :: proc "contextless" (dst: []byte, q: ^[8]u64) {
+	if len(dst) != _aes.BLOCK_SIZE {
+		intrinsics.trap()
+	}
+
+	orthogonalize(q)
+	w0, w1, w2, w3 := interleave_out(q[0], q[4])
+	endian.unchecked_put_u32le(dst[0:], w0)
+	endian.unchecked_put_u32le(dst[4:], w1)
+	endian.unchecked_put_u32le(dst[8:], w2)
+	endian.unchecked_put_u32le(dst[12:], w3)
+}
+
+// load_blocks loads 1..STRIDE 16-byte blocks into the bitsliced state
+// q, one block per (q[i], q[i+4]) word pair, then orthogonalizes.
+// Traps on an empty batch, an oversized batch, or a mis-sized block.
+load_blocks :: proc "contextless" (q: ^[8]u64, src: [][]byte) {
+	if n := len(src); n > STRIDE || n == 0 {
+		intrinsics.trap()
+	}
+
+	w: [4]u32 = ---
+	for s, i in src {
+		if len(s) != _aes.BLOCK_SIZE {
+			intrinsics.trap()
+		}
+
+		w[0] = endian.unchecked_get_u32le(s[0:])
+		w[1] = endian.unchecked_get_u32le(s[4:])
+		w[2] = endian.unchecked_get_u32le(s[8:])
+		w[3] = endian.unchecked_get_u32le(s[12:])
+		q[i], q[i + 4] = interleave_in(w[:])
+	}
+	orthogonalize(q)
+}
+
+// store_blocks de-bitslices q and writes the blocks out, one per
+// destination slice.  A nil destination terminates the walk early, so
+// fewer blocks than were loaded may be stored; a mis-sized non-nil
+// destination traps.  q is mutated in place by orthogonalize.
+store_blocks :: proc "contextless" (dst: [][]byte, q: ^[8]u64) {
+	if n := len(dst); n > STRIDE || n == 0 {
+		intrinsics.trap()
+	}
+
+	orthogonalize(q)
+	for d, i in dst {
+		// Allow storing [0,4] blocks.
+		if d == nil {
+			break
+		}
+		if len(d) != _aes.BLOCK_SIZE {
+			intrinsics.trap()
+		}
+
+		w0, w1, w2, w3 := interleave_out(q[i], q[i + 4])
+		endian.unchecked_put_u32le(d[0:], w0)
+		endian.unchecked_put_u32le(d[4:], w1)
+		endian.unchecked_put_u32le(d[8:], w2)
+		endian.unchecked_put_u32le(d[12:], w3)
+	}
+}
diff --git a/core/crypto/aes/aes.odin b/core/crypto/aes/aes.odin
new file mode 100644
index 000000000..e895c5fe0
--- /dev/null
+++ b/core/crypto/aes/aes.odin
@@ -0,0 +1,22 @@
+/*
+package aes implements the AES block cipher and some common modes.
+
+See:
+- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197-upd1.pdf
+- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
+- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
+*/
+
+package aes
+
+import "core:crypto/_aes"
+
+// KEY_SIZE_128 is the AES-128 key size in bytes.
+KEY_SIZE_128 :: _aes.KEY_SIZE_128
+// KEY_SIZE_192 is the AES-192 key size in bytes.
+KEY_SIZE_192 :: _aes.KEY_SIZE_192
+// KEY_SIZE_256 is the AES-256 key size in bytes.
+KEY_SIZE_256 :: _aes.KEY_SIZE_256
+
+// BLOCK_SIZE is the AES block size in bytes.
+BLOCK_SIZE :: _aes.BLOCK_SIZE
diff --git a/core/crypto/aes/aes_ctr.odin b/core/crypto/aes/aes_ctr.odin
new file mode 100644
index 000000000..1821a7bdf
--- /dev/null
+++ b/core/crypto/aes/aes_ctr.odin
@@ -0,0 +1,199 @@
+package aes
+
+import "core:crypto/_aes/ct64"
+import "core:encoding/endian"
+import "core:math/bits"
+import "core:mem"
+
+// CTR_IV_SIZE is the size of the CTR mode IV in bytes.
+CTR_IV_SIZE :: 16
+
+// Context_CTR is a keyed AES-CTR instance.
+Context_CTR :: struct {
+ _impl: Context_Impl,
+ _buffer: [BLOCK_SIZE]byte,
+ _off: int,
+ _ctr_hi: u64,
+ _ctr_lo: u64,
+ _is_initialized: bool,
+}
+
+// init_ctr initializes a Context_CTR with the provided key and IV.
+// The 128-bit counter is kept as two big-endian u64 halves, and
+// `_off == BLOCK_SIZE` marks the keystream buffer as empty.  Panics if
+// the IV is not CTR_IV_SIZE bytes or the key size is invalid.
+init_ctr :: proc(ctx: ^Context_CTR, key, iv: []byte, impl := Implementation.Hardware) {
+	if len(iv) != CTR_IV_SIZE {
+		panic("crypto/aes: invalid CTR IV size")
+	}
+
+	init_impl(&ctx._impl, key, impl)
+	ctx._off = BLOCK_SIZE
+	ctx._ctr_hi = endian.unchecked_get_u64be(iv[0:])
+	ctx._ctr_lo = endian.unchecked_get_u64be(iv[8:])
+	ctx._is_initialized = true
+}
+
+// xor_bytes_ctr XORs each byte in src with bytes taken from the AES-CTR
+// keystream, and writes the resulting output to dst. dst and src MUST
+// alias exactly or not at all.
+//
+// Only min(len(dst), len(src)) bytes are processed; leftover keystream
+// from a partial block is buffered and consumed by the next call.
+xor_bytes_ctr :: proc(ctx: ^Context_CTR, dst, src: []byte) {
+	assert(ctx._is_initialized)
+
+	// TODO: Enforcing that dst and src alias exactly or not at all
+	// is a good idea, though odd aliasing should be extremely uncommon.
+
+	src, dst := src, dst
+	if dst_len := len(dst); dst_len < len(src) {
+		src = src[:dst_len]
+	}
+
+	for remaining := len(src); remaining > 0; {
+		// Process multiple blocks at once
+		if ctx._off == BLOCK_SIZE {
+			if nr_blocks := remaining / BLOCK_SIZE; nr_blocks > 0 {
+				direct_bytes := nr_blocks * BLOCK_SIZE
+				ctr_blocks(ctx, dst, src, nr_blocks)
+				remaining -= direct_bytes
+				if remaining == 0 {
+					return
+				}
+				dst = dst[direct_bytes:]
+				src = src[direct_bytes:]
+			}
+
+			// If there is a partial block, generate and buffer 1 block
+			// worth of keystream.
+			ctr_blocks(ctx, ctx._buffer[:], nil, 1)
+			ctx._off = 0
+		}
+
+		// Process partial blocks from the buffered keystream.
+		to_xor := min(BLOCK_SIZE - ctx._off, remaining)
+		buffered_keystream := ctx._buffer[ctx._off:]
+		for i := 0; i < to_xor; i = i + 1 {
+			dst[i] = buffered_keystream[i] ~ src[i]
+		}
+		ctx._off += to_xor
+		dst = dst[to_xor:]
+		src = src[to_xor:]
+		remaining -= to_xor
+	}
+}
+
+// keystream_bytes_ctr fills dst with the raw AES-CTR keystream output.
+// Partial-block keystream is buffered in the context and consumed by
+// subsequent calls, so mixed-size calls stay consistent.
+keystream_bytes_ctr :: proc(ctx: ^Context_CTR, dst: []byte) {
+	assert(ctx._is_initialized)
+
+	dst := dst
+	for remaining := len(dst); remaining > 0; {
+		// Process multiple blocks at once
+		if ctx._off == BLOCK_SIZE {
+			if nr_blocks := remaining / BLOCK_SIZE; nr_blocks > 0 {
+				direct_bytes := nr_blocks * BLOCK_SIZE
+				ctr_blocks(ctx, dst, nil, nr_blocks)
+				remaining -= direct_bytes
+				if remaining == 0 {
+					return
+				}
+				dst = dst[direct_bytes:]
+			}
+
+			// If there is a partial block, generate and buffer 1 block
+			// worth of keystream.
+			ctr_blocks(ctx, ctx._buffer[:], nil, 1)
+			ctx._off = 0
+		}
+
+		// Process partial blocks from the buffered keystream.
+		to_copy := min(BLOCK_SIZE - ctx._off, remaining)
+		buffered_keystream := ctx._buffer[ctx._off:]
+		copy(dst[:to_copy], buffered_keystream[:to_copy])
+		ctx._off += to_copy
+		dst = dst[to_copy:]
+		remaining -= to_copy
+	}
+}
+
+// reset_ctr sanitizes the Context_CTR. The Context_CTR must be
+// re-initialized to be used again.  The key schedule, counter, and any
+// buffered keystream are all zeroed.
+reset_ctr :: proc "contextless" (ctx: ^Context_CTR) {
+	reset_impl(&ctx._impl)
+	ctx._off = 0
+	ctx._ctr_hi = 0
+	ctx._ctr_lo = 0
+	mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
+	ctx._is_initialized = false
+}
+
+// ctr_blocks generates nr_blocks blocks of keystream, XORs them with
+// src, and writes the result to dst; a nil src stores the raw
+// keystream instead.  The full 128-bit counter is incremented with a
+// carry between the two u64 halves.  dst must hold at least
+// nr_blocks * BLOCK_SIZE bytes.
+@(private)
+ctr_blocks :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
+	// Use the optimized hardware implementation if available.
+	if _, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
+		ctr_blocks_hw(ctx, dst, src, nr_blocks)
+		return
+	}
+
+	// Portable implementation.
+	ct64_inc_ctr := #force_inline proc "contextless" (dst: []byte, hi, lo: u64) -> (u64, u64) {
+		endian.unchecked_put_u64be(dst[0:], hi)
+		endian.unchecked_put_u64be(dst[8:], lo)
+
+		hi, lo := hi, lo
+		carry: u64
+		lo, carry = bits.add_u64(lo, 1, 0)
+		hi, _ = bits.add_u64(hi, 0, carry)
+		return hi, lo
+	}
+
+	impl := &ctx._impl.(ct64.Context)
+	src, dst := src, dst
+	nr_blocks := nr_blocks
+	ctr_hi, ctr_lo := ctx._ctr_hi, ctx._ctr_lo
+
+	// Feed up to ct64.STRIDE counter blocks at a time into the
+	// bitsliced implementation.
+	tmp: [ct64.STRIDE][BLOCK_SIZE]byte = ---
+	ctrs: [ct64.STRIDE][]byte = ---
+	for i in 0 ..< ct64.STRIDE {
+		ctrs[i] = tmp[i][:]
+	}
+	for nr_blocks > 0 {
+		n := min(ct64.STRIDE, nr_blocks)
+		blocks := ctrs[:n]
+
+		for i in 0 ..< n {
+			ctr_hi, ctr_lo = ct64_inc_ctr(blocks[i], ctr_hi, ctr_lo)
+		}
+		ct64.encrypt_blocks(impl, blocks, blocks)
+
+		xor_blocks(dst, src, blocks)
+
+		if src != nil {
+			src = src[n * BLOCK_SIZE:]
+		}
+		dst = dst[n * BLOCK_SIZE:]
+		nr_blocks -= n
+	}
+
+	// Write back the counter.
+	ctx._ctr_hi, ctx._ctr_lo = ctr_hi, ctr_lo
+
+	mem.zero_explicit(&tmp, size_of(tmp))
+}
+
+// xor_blocks XORs src (when non-nil) into `blocks` in place, then
+// copies each block into dst at BLOCK_SIZE strides.
+@(private)
+xor_blocks :: #force_inline proc "contextless" (dst, src: []byte, blocks: [][]byte) {
+	// Note: This would be faster if `core:simd` was used, however if
+	// performance of this implementation matters to where that
+	// optimization would be worth it, use chacha20poly1305, or a
+	// CPU that isn't e-waste.
+	if src != nil {
+		#no_bounds_check {
+			for i in 0 ..< len(blocks) {
+				off := i * BLOCK_SIZE
+				for j in 0 ..< BLOCK_SIZE {
+					blocks[i][j] ~= src[off + j]
+				}
+			}
+		}
+	}
+	for i in 0 ..< len(blocks) {
+		copy(dst[i * BLOCK_SIZE:], blocks[i])
+	}
+}
diff --git a/core/crypto/aes/aes_ecb.odin b/core/crypto/aes/aes_ecb.odin
new file mode 100644
index 000000000..498429e29
--- /dev/null
+++ b/core/crypto/aes/aes_ecb.odin
@@ -0,0 +1,57 @@
+package aes
+
+import "core:crypto/_aes/ct64"
+
+// Context_ECB is a keyed AES-ECB instance.
+//
+// WARNING: Using ECB mode is strongly discouraged unless it is being
+// used to implement higher level constructs.
+Context_ECB :: struct {
+ _impl: Context_Impl,
+ _is_initialized: bool,
+}
+
+// init_ecb initializes a Context_ECB with the provided key.  The
+// requested implementation silently falls back to the portable one
+// when hardware acceleration is unavailable (see init_impl).
+init_ecb :: proc(ctx: ^Context_ECB, key: []byte, impl := Implementation.Hardware) {
+	init_impl(&ctx._impl, key, impl)
+	ctx._is_initialized = true
+}
+
+// encrypt_ecb encrypts the BLOCK_SIZE buffer src, and writes the result to dst.
+// Exactly one block is processed per call; both slices must be exactly
+// BLOCK_SIZE bytes or the procedure panics.
+encrypt_ecb :: proc(ctx: ^Context_ECB, dst, src: []byte) {
+	assert(ctx._is_initialized)
+
+	if len(dst) != BLOCK_SIZE || len(src) != BLOCK_SIZE {
+		panic("crypto/aes: invalid buffer size(s)")
+	}
+
+	// Dispatch on the keyed implementation variant.
+	switch &impl in ctx._impl {
+	case ct64.Context:
+		ct64.encrypt_block(&impl, dst, src)
+	case Context_Impl_Hardware:
+		encrypt_block_hw(&impl, dst, src)
+	}
+}
+
+// decrypt_ecb decrypts the BLOCK_SIZE buffer src, and writes the result to dst.
+// Exactly one block is processed per call; both slices must be exactly
+// BLOCK_SIZE bytes or the procedure panics.
+decrypt_ecb :: proc(ctx: ^Context_ECB, dst, src: []byte) {
+	assert(ctx._is_initialized)
+
+	if len(dst) != BLOCK_SIZE || len(src) != BLOCK_SIZE {
+		panic("crypto/aes: invalid buffer size(s)")
+	}
+
+	// Dispatch on the keyed implementation variant.
+	switch &impl in ctx._impl {
+	case ct64.Context:
+		ct64.decrypt_block(&impl, dst, src)
+	case Context_Impl_Hardware:
+		decrypt_block_hw(&impl, dst, src)
+	}
+}
+
+// reset_ecb sanitizes the Context_ECB. The Context_ECB must be
+// re-initialized to be used again.  The underlying key schedule is
+// zeroed via reset_impl.
+reset_ecb :: proc "contextless" (ctx: ^Context_ECB) {
+	reset_impl(&ctx._impl)
+	ctx._is_initialized = false
+}
diff --git a/core/crypto/aes/aes_gcm.odin b/core/crypto/aes/aes_gcm.odin
new file mode 100644
index 000000000..66ef48db2
--- /dev/null
+++ b/core/crypto/aes/aes_gcm.odin
@@ -0,0 +1,253 @@
+package aes
+
+import "core:crypto"
+import "core:crypto/_aes"
+import "core:crypto/_aes/ct64"
+import "core:encoding/endian"
+import "core:mem"
+
+// GCM_NONCE_SIZE is the size of the GCM nonce in bytes.
+GCM_NONCE_SIZE :: 12
+// GCM_TAG_SIZE is the size of a GCM tag in bytes.
+GCM_TAG_SIZE :: _aes.GHASH_TAG_SIZE
+
+@(private)
+GCM_A_MAX :: max(u64) / 8 // 2^64 - 1 bits -> bytes
+@(private)
+GCM_P_MAX :: 0xfffffffe0 // 2^39 - 256 bits -> bytes
+
+// Context_GCM is a keyed AES-GCM instance.
+Context_GCM :: struct {
+ _impl: Context_Impl,
+ _is_initialized: bool,
+}
+
+// init_gcm initializes a Context_GCM with the provided key.
+init_gcm :: proc(ctx: ^Context_GCM, key: []byte, impl := Implementation.Hardware) {
+ init_impl(&ctx._impl, key, impl)
+ ctx._is_initialized = true
+}
+
+// seal_gcm encrypts the plaintext and authenticates the aad and ciphertext,
+// with the provided Context_GCM and nonce, stores the output in dst and tag.
+//
+// dst and plaintext MUST alias exactly or not at all.  Panics on
+// invalid tag/nonce/dst sizes or oversized inputs.
+seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
+	assert(ctx._is_initialized)
+
+	gcm_validate_common_slice_sizes(tag, nonce, aad, plaintext)
+	if len(dst) != len(plaintext) {
+		panic("crypto/aes: invalid destination ciphertext size")
+	}
+
+	if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
+		gcm_seal_hw(&impl, dst, tag, nonce, aad, plaintext)
+		return
+	}
+
+	// h is zero-valued here, so init_ghash_ct64 derives the hash key
+	// H = CIPH_K(0^128) by encrypting it in place.
+	h: [_aes.GHASH_KEY_SIZE]byte
+	j0: [_aes.GHASH_BLOCK_SIZE]byte
+	s: [_aes.GHASH_TAG_SIZE]byte
+	init_ghash_ct64(ctx, &h, &j0, nonce)
+
+	// Note: Our GHASH implementation handles appending padding.
+	ct64.ghash(s[:], h[:], aad)
+	gctr_ct64(ctx, dst, &s, plaintext, &h, nonce, true)
+	final_ghash_ct64(&s, &h, &j0, len(aad), len(plaintext))
+	copy(tag, s[:])
+
+	mem.zero_explicit(&h, len(h))
+	mem.zero_explicit(&j0, len(j0))
+}
+
+// open_gcm authenticates the aad and ciphertext, and decrypts the ciphertext,
+// with the provided Context_GCM, nonce, and tag, and stores the output in dst,
+// returning true iff the authentication was successful. If authentication
+// fails, the destination buffer will be zeroed.
+//
+// dst and ciphertext MUST alias exactly or not at all.
+open_gcm :: proc(ctx: ^Context_GCM, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+	assert(ctx._is_initialized)
+
+	gcm_validate_common_slice_sizes(tag, nonce, aad, ciphertext)
+	if len(dst) != len(ciphertext) {
+		panic("crypto/aes: invalid destination plaintext size")
+	}
+
+	if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
+		return gcm_open_hw(&impl, dst, nonce, aad, ciphertext, tag)
+	}
+
+	// h is zero-valued here, so init_ghash_ct64 derives H = CIPH_K(0^128).
+	h: [_aes.GHASH_KEY_SIZE]byte
+	j0: [_aes.GHASH_BLOCK_SIZE]byte
+	s: [_aes.GHASH_TAG_SIZE]byte
+	init_ghash_ct64(ctx, &h, &j0, nonce)
+
+	ct64.ghash(s[:], h[:], aad)
+	gctr_ct64(ctx, dst, &s, ciphertext, &h, nonce, false)
+	final_ghash_ct64(&s, &h, &j0, len(aad), len(ciphertext))
+
+	// Constant-time tag comparison; on mismatch the (already decrypted)
+	// plaintext is destroyed before returning.
+	ok := crypto.compare_constant_time(s[:], tag) == 1
+	if !ok {
+		mem.zero_explicit(raw_data(dst), len(dst))
+	}
+
+	mem.zero_explicit(&h, len(h))
+	mem.zero_explicit(&j0, len(j0))
+	mem.zero_explicit(&s, len(s))
+
+	return ok
+}
+
+// reset_gcm sanitizes the Context_GCM. The Context_GCM must be
+// re-initialized to be used again.
+reset_gcm :: proc "contextless" (ctx: ^Context_GCM) {
+	reset_impl(&ctx._impl)
+	ctx._is_initialized = false
+}
+
+// gcm_validate_common_slice_sizes panics unless the tag and nonce are
+// exactly GCM_TAG_SIZE / GCM_NONCE_SIZE bytes and the aad/text lengths
+// are within the GCM limits (GCM_A_MAX / GCM_P_MAX).
+@(private)
+gcm_validate_common_slice_sizes :: proc(tag, nonce, aad, text: []byte) {
+	if len(tag) != GCM_TAG_SIZE {
+		panic("crypto/aes: invalid GCM tag size")
+	}
+
+	// The specification supports nonces in the range [1, 2^64) bits
+	// however per NIST SP 800-38D 5.2.1.1:
+	//
+	// > For IVs, it is recommended that implementations restrict support
+	// > to the length of 96 bits, to promote interoperability, efficiency,
+	// > and simplicity of design.
+	if len(nonce) != GCM_NONCE_SIZE {
+		panic("crypto/aes: invalid GCM nonce size")
+	}
+
+	if aad_len := u64(len(aad)); aad_len > GCM_A_MAX {
+		panic("crypto/aes: oversized GCM aad")
+	}
+	if text_len := u64(len(text)); text_len > GCM_P_MAX {
+		panic("crypto/aes: oversized GCM src data")
+	}
+}
+
+// init_ghash_ct64 derives the GHASH hash key and the pre-encrypted
+// first counter block.  h MUST be all-zero on entry (both callers
+// zero-initialize it), so the first encrypt yields H = CIPH(k, 0^128).
+@(private = "file")
+init_ghash_ct64 :: proc(
+	ctx: ^Context_GCM,
+	h: ^[_aes.GHASH_KEY_SIZE]byte,
+	j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
+	nonce: []byte,
+) {
+	impl := &ctx._impl.(ct64.Context)
+
+	// 1. Let H = CIPH(k, 0^128)
+	ct64.encrypt_block(impl, h[:], h[:])
+
+	// ECB encrypt j0, so that we can just XOR with the tag. In theory
+	// this could be processed along with the final GCTR block, to
+	// potentially save a call to AES-ECB, but... just use AES-NI.
+	copy(j0[:], nonce)
+	j0[_aes.GHASH_BLOCK_SIZE - 1] = 1
+	ct64.encrypt_block(impl, j0[:], j0[:])
+}
+
+// final_ghash_ct64 folds the GHASH length block (aad and text lengths
+// in BITS, big-endian) into the accumulator s, then XORs in the
+// pre-encrypted J_0 to produce the final tag.
+@(private = "file")
+final_ghash_ct64 :: proc(
+	s: ^[_aes.GHASH_BLOCK_SIZE]byte,
+	h: ^[_aes.GHASH_KEY_SIZE]byte,
+	j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
+	a_len: int,
+	t_len: int,
+) {
+	blk: [_aes.GHASH_BLOCK_SIZE]byte
+	endian.unchecked_put_u64be(blk[0:], u64(a_len) * 8)
+	endian.unchecked_put_u64be(blk[8:], u64(t_len) * 8)
+
+	ct64.ghash(s[:], h[:], blk[:])
+	for i in 0 ..< len(s) {
+		s[i] ~= j0[i]
+	}
+}
+
+// gctr_ct64 is the stitched GCTR + GHASH loop: it encrypts (is_seal)
+// or decrypts src into dst in CTR mode while folding the CIPHERTEXT
+// (dst when sealing, src when opening) into the GHASH accumulator s
+// in a single pass.  The 32-bit block counter starts at 2 (J_0 itself
+// uses counter value 1) and is written big-endian into the last 4
+// bytes of each counter block, leaving the 96-bit nonce prefix
+// untouched — GCM's inc32, with no carry into the nonce.
+@(private = "file")
+gctr_ct64 :: proc(
+	ctx: ^Context_GCM,
+	dst: []byte,
+	s: ^[_aes.GHASH_BLOCK_SIZE]byte,
+	src: []byte,
+	h: ^[_aes.GHASH_KEY_SIZE]byte,
+	nonce: []byte,
+	is_seal: bool,
+) {
+	ct64_inc_ctr32 := #force_inline proc "contextless" (dst: []byte, ctr: u32) -> u32 {
+		endian.unchecked_put_u32be(dst[12:], ctr)
+		return ctr + 1
+	}
+
+	// 2. Define a block J_0 as follows:
+	// if len(IV) = 96, then let J0 = IV || 0^31 || 1
+	//
+	// Note: We only support 96 bit IVs.
+	tmp, tmp2: [ct64.STRIDE][BLOCK_SIZE]byte = ---, ---
+	ctrs, blks: [ct64.STRIDE][]byte = ---, ---
+	ctr: u32 = 2
+	for i in 0 ..< ct64.STRIDE {
+		// Setup scratch space for the keystream.
+		blks[i] = tmp2[i][:]
+
+		// Pre-copy the IV to all the counter blocks.
+		ctrs[i] = tmp[i][:]
+		copy(ctrs[i], nonce)
+	}
+
+	// We stitch the GCTR and GHASH operations together, so that only
+	// one pass over the ciphertext is required.
+
+	impl := &ctx._impl.(ct64.Context)
+	src, dst := src, dst
+
+	nr_blocks := len(src) / BLOCK_SIZE
+	for nr_blocks > 0 {
+		n := min(ct64.STRIDE, nr_blocks)
+		l := n * BLOCK_SIZE
+
+		if !is_seal {
+			ct64.ghash(s[:], h[:], src[:l])
+		}
+
+		// The keystream is written to a separate buffer, as we will
+		// reuse the first 96-bits of each counter.
+		for i in 0 ..< n {
+			ctr = ct64_inc_ctr32(ctrs[i], ctr)
+		}
+		ct64.encrypt_blocks(impl, blks[:n], ctrs[:n])
+
+		xor_blocks(dst, src, blks[:n])
+
+		if is_seal {
+			ct64.ghash(s[:], h[:], dst[:l])
+		}
+
+		src = src[l:]
+		dst = dst[l:]
+		nr_blocks -= n
+	}
+	// Handle the final partial block, if any.
+	if l := len(src); l > 0 {
+		if !is_seal {
+			ct64.ghash(s[:], h[:], src[:l])
+		}
+
+		ct64_inc_ctr32(ctrs[0], ctr)
+		ct64.encrypt_block(impl, ctrs[0], ctrs[0])
+
+		for i in 0 ..< l {
+			dst[i] = src[i] ~ ctrs[0][i]
+		}
+
+		if is_seal {
+			ct64.ghash(s[:], h[:], dst[:l])
+		}
+	}
+
+	mem.zero_explicit(&tmp, size_of(tmp))
+	mem.zero_explicit(&tmp2, size_of(tmp2))
+}
diff --git a/core/crypto/aes/aes_impl.odin b/core/crypto/aes/aes_impl.odin
new file mode 100644
index 000000000..03747f1fb
--- /dev/null
+++ b/core/crypto/aes/aes_impl.odin
@@ -0,0 +1,41 @@
+package aes
+
+import "core:crypto/_aes/ct64"
+import "core:mem"
+import "core:reflect"
+
+@(private)
+Context_Impl :: union {
+ ct64.Context,
+ Context_Impl_Hardware,
+}
+
+// Implementation is an AES implementation. Most callers will not need
+// to use this as the package will automatically select the most performant
+// implementation available (See `is_hardware_accelerated()`).
+Implementation :: enum {
+ Portable,
+ Hardware,
+}
+
+// init_impl keys the selected implementation in place.  A request for
+// .Hardware silently falls back to .Portable when no accelerated
+// implementation is available, so callers may always pass the default.
+// reflect.set_union_variant_typeid selects the union variant first so
+// the per-implementation init can write through the pointer directly.
+@(private)
+init_impl :: proc(ctx: ^Context_Impl, key: []byte, impl: Implementation) {
+	impl := impl
+	if !is_hardware_accelerated() {
+		impl = .Portable
+	}
+
+	switch impl {
+	case .Portable:
+		reflect.set_union_variant_typeid(ctx^, typeid_of(ct64.Context))
+		ct64.init(&ctx.(ct64.Context), key)
+	case .Hardware:
+		reflect.set_union_variant_typeid(ctx^, typeid_of(Context_Impl_Hardware))
+		init_impl_hw(&ctx.(Context_Impl_Hardware), key)
+	}
+}
+
+// reset_impl sanitizes an implementation context by explicitly zeroing
+// the entire union, key schedule included.
+@(private)
+reset_impl :: proc "contextless" (ctx: ^Context_Impl) {
+	mem.zero_explicit(ctx, size_of(Context_Impl))
+}
diff --git a/core/crypto/aes/aes_impl_hw_gen.odin b/core/crypto/aes/aes_impl_hw_gen.odin
new file mode 100644
index 000000000..94815f61c
--- /dev/null
+++ b/core/crypto/aes/aes_impl_hw_gen.odin
@@ -0,0 +1,43 @@
+package aes
+
+@(private = "file")
+ERR_HW_NOT_SUPPORTED :: "crypto/aes: hardware implementation unsupported"
+
+// is_hardware_accelerated returns true iff hardware accelerated AES
+// is supported.
+is_hardware_accelerated :: proc "contextless" () -> bool {
+ return false
+}
+
+// The stubs below exist so the generic (non-accelerated) build links;
+// init_impl never selects .Hardware here because
+// is_hardware_accelerated returns false, so these panic only if
+// invoked directly.
+@(private)
+init_impl_hw :: proc(ctx: ^Context_Impl_Hardware, key: []byte) {
+	panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+encrypt_block_hw :: proc(ctx: ^Context_Impl_Hardware, dst, src: []byte) {
+	panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+decrypt_block_hw :: proc(ctx: ^Context_Impl_Hardware, dst, src: []byte) {
+	panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+ctr_blocks_hw :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
+	panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, nonce, aad, plaintext: []byte) {
+	panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+	panic(ERR_HW_NOT_SUPPORTED)
+}
diff --git a/core/crypto/rand_darwin.odin b/core/crypto/rand_darwin.odin
index 5355f31c5..56acb5d22 100644
--- a/core/crypto/rand_darwin.odin
+++ b/core/crypto/rand_darwin.odin
@@ -11,7 +11,7 @@ HAS_RAND_BYTES :: true
_rand_bytes :: proc(dst: []byte) {
err := Sec.RandomCopyBytes(count=len(dst), bytes=raw_data(dst))
if err != .Success {
- msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
- panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg))
+ msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
+ fmt.panicf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg)
}
}
diff --git a/core/crypto/rand_linux.odin b/core/crypto/rand_linux.odin
index 43b3b3075..7e0edbb7e 100644
--- a/core/crypto/rand_linux.odin
+++ b/core/crypto/rand_linux.odin
@@ -32,7 +32,7 @@ _rand_bytes :: proc (dst: []byte) {
// All other failures are things that should NEVER happen
// unless the kernel interface changes (ie: the Linux
// developers break userland).
- panic(fmt.tprintf("crypto: getrandom failed: %v", errno))
+ fmt.panicf("crypto: getrandom failed: %v", errno)
}
l -= n_read
dst = dst[n_read:]
diff --git a/core/crypto/rand_windows.odin b/core/crypto/rand_windows.odin
index a92d376cb..9cd647cc1 100644
--- a/core/crypto/rand_windows.odin
+++ b/core/crypto/rand_windows.odin
@@ -11,16 +11,16 @@ _rand_bytes :: proc(dst: []byte) {
ret := (os.Errno)(win32.BCryptGenRandom(nil, raw_data(dst), u32(len(dst)), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG))
if ret != os.ERROR_NONE {
switch ret {
- case os.ERROR_INVALID_HANDLE:
- // The handle to the first parameter is invalid.
- // This should not happen here, since we explicitly pass nil to it
- panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
- case os.ERROR_INVALID_PARAMETER:
- // One of the parameters was invalid
- panic("crypto: BCryptGenRandom Invalid parameter")
- case:
- // Unknown error
- panic(fmt.tprintf("crypto: BCryptGenRandom failed: %d\n", ret))
+ case os.ERROR_INVALID_HANDLE:
+ // The handle to the first parameter is invalid.
+ // This should not happen here, since we explicitly pass nil to it
+ panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
+ case os.ERROR_INVALID_PARAMETER:
+ // One of the parameters was invalid
+ panic("crypto: BCryptGenRandom Invalid parameter")
+ case:
+ // Unknown error
+ fmt.panicf("crypto: BCryptGenRandom failed: %d\n", ret)
}
}
}
diff --git a/core/encoding/ansi/ansi.odin b/core/encoding/ansi/ansi.odin
new file mode 100644
index 000000000..5550a1671
--- /dev/null
+++ b/core/encoding/ansi/ansi.odin
@@ -0,0 +1,137 @@
+package ansi
+
+BEL :: "\a" // Bell
+BS :: "\b" // Backspace
+ESC :: "\e" // Escape
+
+// Fe Escape sequences
+
+CSI :: ESC + "[" // Control Sequence Introducer
+OSC :: ESC + "]" // Operating System Command
+ST :: ESC + "\\" // String Terminator
+
+// CSI sequences
+
+CUU :: "A" // Cursor Up
+CUD :: "B" // Cursor Down
+CUF :: "C" // Cursor Forward
+CUB :: "D" // Cursor Back
+CNL :: "E" // Cursor Next Line
+CPL :: "F" // Cursor Previous Line
+CHA :: "G" // Cursor Horizontal Absolute
+CUP :: "H" // Cursor Position
+ED :: "J" // Erase in Display
+EL :: "K" // Erase in Line
+SU :: "S" // Scroll Up
+SD :: "T" // Scroll Down
+HVP :: "f" // Horizontal Vertical Position
+SGR :: "m" // Select Graphic Rendition
+AUX_ON :: "5i" // AUX Port On
+AUX_OFF :: "4i" // AUX Port Off
+DSR :: "6n" // Device Status Report
+
+// CSI: private sequences
+
+SCP :: "s" // Save Current Cursor Position
+RCP :: "u" // Restore Saved Cursor Position
+DECAWM_ON :: "?7h" // Auto Wrap Mode (Enabled)
+DECAWM_OFF :: "?7l" // Auto Wrap Mode (Disabled)
+DECTCEM_SHOW :: "?25h" // Text Cursor Enable Mode (Visible)
+DECTCEM_HIDE :: "?25l" // Text Cursor Enable Mode (Invisible)
+
+// SGR sequences
+
+RESET :: "0"
+BOLD :: "1"
+FAINT :: "2"
+ITALIC :: "3" // Not widely supported.
+UNDERLINE :: "4"
+BLINK_SLOW :: "5"
+BLINK_RAPID :: "6" // Not widely supported.
+INVERT :: "7" // Also known as reverse video.
+HIDE :: "8" // Not widely supported.
+STRIKE :: "9"
+FONT_PRIMARY :: "10"
+FONT_ALT1 :: "11"
+FONT_ALT2 :: "12"
+FONT_ALT3 :: "13"
+FONT_ALT4 :: "14"
+FONT_ALT5 :: "15"
+FONT_ALT6 :: "16"
+FONT_ALT7 :: "17"
+FONT_ALT8 :: "18"
+FONT_ALT9 :: "19"
+FONT_FRAKTUR :: "20" // Rarely supported.
+UNDERLINE_DOUBLE :: "21" // May be interpreted as "disable bold."
+NO_BOLD_FAINT :: "22"
+NO_ITALIC_BLACKLETTER :: "23"
+NO_UNDERLINE :: "24"
+NO_BLINK :: "25"
+PROPORTIONAL_SPACING :: "26"
+NO_REVERSE :: "27"
+NO_HIDE :: "28"
+NO_STRIKE :: "29"
+
+FG_BLACK :: "30"
+FG_RED :: "31"
+FG_GREEN :: "32"
+FG_YELLOW :: "33"
+FG_BLUE :: "34"
+FG_MAGENTA :: "35"
+FG_CYAN :: "36"
+FG_WHITE :: "37"
+FG_COLOR :: "38"
+FG_COLOR_8_BIT :: "38;5" // Followed by ";n" where n is in 0..=255
+FG_COLOR_24_BIT :: "38;2" // Followed by ";r;g;b" where r,g,b are in 0..=255
+FG_DEFAULT :: "39"
+
+BG_BLACK :: "40"
+BG_RED :: "41"
+BG_GREEN :: "42"
+BG_YELLOW :: "43"
+BG_BLUE :: "44"
+BG_MAGENTA :: "45"
+BG_CYAN :: "46"
+BG_WHITE :: "47"
+BG_COLOR :: "48"
+BG_COLOR_8_BIT :: "48;5" // Followed by ";n" where n is in 0..=255
+BG_COLOR_24_BIT :: "48;2" // Followed by ";r;g;b" where r,g,b are in 0..=255
+BG_DEFAULT :: "49"
+
+NO_PROPORTIONAL_SPACING :: "50"
+FRAMED :: "51"
+ENCIRCLED :: "52"
+OVERLINED :: "53"
+NO_FRAME_ENCIRCLE :: "54"
+NO_OVERLINE :: "55"
+
+// SGR: non-standard bright colors
+
+FG_BRIGHT_BLACK :: "90" // Also known as grey.
+FG_BRIGHT_RED :: "91"
+FG_BRIGHT_GREEN :: "92"
+FG_BRIGHT_YELLOW :: "93"
+FG_BRIGHT_BLUE :: "94"
+FG_BRIGHT_MAGENTA :: "95"
+FG_BRIGHT_CYAN :: "96"
+FG_BRIGHT_WHITE :: "97"
+
+BG_BRIGHT_BLACK :: "100" // Also known as grey.
+BG_BRIGHT_RED :: "101"
+BG_BRIGHT_GREEN :: "102"
+BG_BRIGHT_YELLOW :: "103"
+BG_BRIGHT_BLUE :: "104"
+BG_BRIGHT_MAGENTA :: "105"
+BG_BRIGHT_CYAN :: "106"
+BG_BRIGHT_WHITE :: "107"
+
+// Fp Escape sequences
+
+DECSC :: ESC + "7" // DEC Save Cursor
+DECRC :: ESC + "8" // DEC Restore Cursor
+
+// OSC sequences
+
+WINDOW_TITLE :: "2" // Followed by ";<text>" ST.
+HYPERLINK :: "8" // Followed by ";[params];<URI>" ST. Closed by OSC HYPERLINK ";;" ST.
+CLIPBOARD :: "52" // Followed by ";c;<Base64-encoded string>" ST.
diff --git a/core/encoding/ansi/doc.odin b/core/encoding/ansi/doc.odin
new file mode 100644
index 000000000..a0945c581
--- /dev/null
+++ b/core/encoding/ansi/doc.odin
@@ -0,0 +1,20 @@
+/*
+package ansi implements constant references to many widely-supported ANSI
+escape codes, primarily used in terminal emulators for enhanced graphics, such
+as colors, text styling, and animated displays.
+
+For example, you can print out a line of cyan text like this:
+ fmt.println(ansi.CSI + ansi.FG_CYAN + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
+
+Multiple SGR (Select Graphic Rendition) codes can be joined by semicolons:
+ fmt.println(ansi.CSI + ansi.BOLD + ";" + ansi.FG_BLUE + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
+
+If your terminal supports 24-bit true color mode, you can also do this:
+ fmt.println(ansi.CSI + ansi.FG_COLOR_24_BIT + ";0;255;255" + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
+
+For more information, see:
+ 1. https://en.wikipedia.org/wiki/ANSI_escape_code
+ 2. https://www.vt100.net/docs/vt102-ug/chapter5.html
+ 3. https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+*/
+package ansi
diff --git a/core/encoding/cbor/cbor.odin b/core/encoding/cbor/cbor.odin
index d0e406ab1..7897b2a37 100644
--- a/core/encoding/cbor/cbor.odin
+++ b/core/encoding/cbor/cbor.odin
@@ -320,8 +320,8 @@ to_diagnostic_format :: proc {
// Turns the given CBOR value into a human-readable string.
// See docs on the proc group `diagnose` for more info.
-to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator) -> (string, mem.Allocator_Error) #optional_allocator_error {
- b := strings.builder_make(allocator)
+to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator, loc := #caller_location) -> (string, mem.Allocator_Error) #optional_allocator_error {
+ b := strings.builder_make(allocator, loc)
w := strings.to_stream(&b)
err := to_diagnostic_format_writer(w, val, padding)
if err == .EOF {
diff --git a/core/encoding/cbor/coding.odin b/core/encoding/cbor/coding.odin
index 0d276a7a1..07f0637a6 100644
--- a/core/encoding/cbor/coding.odin
+++ b/core/encoding/cbor/coding.odin
@@ -95,24 +95,25 @@ decode :: decode_from
// Decodes the given string as CBOR.
// See docs on the proc group `decode` for more information.
-decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
r: strings.Reader
strings.reader_init(&r, s)
- return decode_from_reader(strings.reader_to_stream(&r), flags, allocator)
+ return decode_from_reader(strings.reader_to_stream(&r), flags, allocator, loc)
}
// Reads a CBOR value from the given reader.
// See docs on the proc group `decode` for more information.
-decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
return decode_from_decoder(
Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r },
allocator=allocator,
+ loc = loc,
)
}
// Reads a CBOR value from the given decoder.
// See docs on the proc group `decode` for more information.
-decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+decode_from_decoder :: proc(d: Decoder, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
context.allocator = allocator
d := d
@@ -121,13 +122,13 @@ decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: V
d.max_pre_alloc = DEFAULT_MAX_PRE_ALLOC
}
- v, err = _decode_from_decoder(d)
+ v, err = _decode_from_decoder(d, {}, allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
return
}
-_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value, err: Decode_Error) {
+_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0), allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
hdr := hdr
r := d.reader
if hdr == Header(0) { hdr = _decode_header(r) or_return }
@@ -161,11 +162,11 @@ _decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value,
switch maj {
case .Unsigned: return _decode_tiny_u8(add)
case .Negative: return Negative_U8(_decode_tiny_u8(add) or_return), nil
- case .Bytes: return _decode_bytes_ptr(d, add)
- case .Text: return _decode_text_ptr(d, add)
- case .Array: return _decode_array_ptr(d, add)
- case .Map: return _decode_map_ptr(d, add)
- case .Tag: return _decode_tag_ptr(d, add)
+ case .Bytes: return _decode_bytes_ptr(d, add, .Bytes, allocator, loc)
+ case .Text: return _decode_text_ptr(d, add, allocator, loc)
+ case .Array: return _decode_array_ptr(d, add, allocator, loc)
+ case .Map: return _decode_map_ptr(d, add, allocator, loc)
+ case .Tag: return _decode_tag_ptr(d, add, allocator, loc)
case .Other: return _decode_tiny_simple(add)
case: return nil, .Bad_Major
}
@@ -203,27 +204,27 @@ encode :: encode_into
// Encodes the CBOR value into binary CBOR allocated on the given allocator.
// See the docs on the proc group `encode_into` for more info.
-encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (data: []byte, err: Encode_Error) {
- b := strings.builder_make(allocator) or_return
+encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (data: []byte, err: Encode_Error) {
+ b := strings.builder_make(allocator, loc) or_return
encode_into_builder(&b, v, flags, temp_allocator) or_return
return b.buf[:], nil
}
// Encodes the CBOR value into binary CBOR written to the given builder.
// See the docs on the proc group `encode_into` for more info.
-encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
- return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator)
+encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Encode_Error {
+ return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator, loc=loc)
}
// Encodes the CBOR value into binary CBOR written to the given writer.
// See the docs on the proc group `encode_into` for more info.
-encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
- return encode_into_encoder(Encoder{flags, w, temp_allocator}, v)
+encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Encode_Error {
+ return encode_into_encoder(Encoder{flags, w, temp_allocator}, v, loc=loc)
}
// Encodes the CBOR value into binary CBOR written to the given encoder.
// See the docs on the proc group `encode_into` for more info.
-encode_into_encoder :: proc(e: Encoder, v: Value) -> Encode_Error {
+encode_into_encoder :: proc(e: Encoder, v: Value, loc := #caller_location) -> Encode_Error {
e := e
if e.temp_allocator.procedure == nil {
@@ -366,21 +367,21 @@ _encode_u64_exact :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> (er
return
}
-_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes) -> (v: ^Bytes, err: Decode_Error) {
- v = new(Bytes) or_return
- defer if err != nil { free(v) }
+_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator, loc := #caller_location) -> (v: ^Bytes, err: Decode_Error) {
+ v = new(Bytes, allocator, loc) or_return
+ defer if err != nil { free(v, allocator, loc) }
- v^ = _decode_bytes(d, add, type) or_return
+ v^ = _decode_bytes(d, add, type, allocator, loc) or_return
return
}
-_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator) -> (v: Bytes, err: Decode_Error) {
+_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator, loc := #caller_location) -> (v: Bytes, err: Decode_Error) {
context.allocator = allocator
add := add
n, scap := _decode_len_str(d, add) or_return
- buf := strings.builder_make(0, scap) or_return
+ buf := strings.builder_make(0, scap, allocator, loc) or_return
defer if err != nil { strings.builder_destroy(&buf) }
buf_stream := strings.to_stream(&buf)
@@ -426,40 +427,40 @@ _encode_bytes :: proc(e: Encoder, val: Bytes, major: Major = .Bytes) -> (err: En
return
}
-_decode_text_ptr :: proc(d: Decoder, add: Add) -> (v: ^Text, err: Decode_Error) {
- v = new(Text) or_return
+_decode_text_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Text, err: Decode_Error) {
+ v = new(Text, allocator, loc) or_return
defer if err != nil { free(v) }
- v^ = _decode_text(d, add) or_return
+ v^ = _decode_text(d, add, allocator, loc) or_return
return
}
-_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator) -> (v: Text, err: Decode_Error) {
- return (Text)(_decode_bytes(d, add, .Text, allocator) or_return), nil
+_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Text, err: Decode_Error) {
+ return (Text)(_decode_bytes(d, add, .Text, allocator, loc) or_return), nil
}
_encode_text :: proc(e: Encoder, val: Text) -> Encode_Error {
return _encode_bytes(e, transmute([]byte)val, .Text)
}
-_decode_array_ptr :: proc(d: Decoder, add: Add) -> (v: ^Array, err: Decode_Error) {
- v = new(Array) or_return
+_decode_array_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Array, err: Decode_Error) {
+ v = new(Array, allocator, loc) or_return
defer if err != nil { free(v) }
- v^ = _decode_array(d, add) or_return
+ v^ = _decode_array(d, add, allocator, loc) or_return
return
}
-_decode_array :: proc(d: Decoder, add: Add) -> (v: Array, err: Decode_Error) {
+_decode_array :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Array, err: Decode_Error) {
n, scap := _decode_len_container(d, add) or_return
- array := make([dynamic]Value, 0, scap) or_return
+ array := make([dynamic]Value, 0, scap, allocator, loc) or_return
defer if err != nil {
- for entry in array { destroy(entry) }
- delete(array)
+ for entry in array { destroy(entry, allocator) }
+ delete(array, loc)
}
for i := 0; n == -1 || i < n; i += 1 {
- val, verr := _decode_from_decoder(d)
+ val, verr := _decode_from_decoder(d, {}, allocator, loc)
if n == -1 && verr == .Break {
break
} else if verr != nil {
@@ -485,39 +486,39 @@ _encode_array :: proc(e: Encoder, arr: Array) -> Encode_Error {
return nil
}
-_decode_map_ptr :: proc(d: Decoder, add: Add) -> (v: ^Map, err: Decode_Error) {
- v = new(Map) or_return
+_decode_map_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Map, err: Decode_Error) {
+ v = new(Map, allocator, loc) or_return
defer if err != nil { free(v) }
- v^ = _decode_map(d, add) or_return
+ v^ = _decode_map(d, add, allocator, loc) or_return
return
}
-_decode_map :: proc(d: Decoder, add: Add) -> (v: Map, err: Decode_Error) {
+_decode_map :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Map, err: Decode_Error) {
n, scap := _decode_len_container(d, add) or_return
- items := make([dynamic]Map_Entry, 0, scap) or_return
+ items := make([dynamic]Map_Entry, 0, scap, allocator, loc) or_return
defer if err != nil {
for entry in items {
destroy(entry.key)
destroy(entry.value)
}
- delete(items)
+ delete(items, loc)
}
for i := 0; n == -1 || i < n; i += 1 {
- key, kerr := _decode_from_decoder(d)
+ key, kerr := _decode_from_decoder(d, {}, allocator, loc)
if n == -1 && kerr == .Break {
break
} else if kerr != nil {
return nil, kerr
}
- value := _decode_from_decoder(d) or_return
+ value := _decode_from_decoder(d, {}, allocator, loc) or_return
append(&items, Map_Entry{
key = key,
value = value,
- }) or_return
+ }, loc) or_return
}
if .Shrink_Excess in d.flags { shrink(&items) }
@@ -578,20 +579,20 @@ _encode_map :: proc(e: Encoder, m: Map) -> (err: Encode_Error) {
return nil
}
-_decode_tag_ptr :: proc(d: Decoder, add: Add) -> (v: Value, err: Decode_Error) {
- tag := _decode_tag(d, add) or_return
+_decode_tag_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
+ tag := _decode_tag(d, add, allocator, loc) or_return
if t, ok := tag.?; ok {
defer if err != nil { destroy(t.value) }
- tp := new(Tag) or_return
+ tp := new(Tag, allocator, loc) or_return
tp^ = t
return tp, nil
}
// no error, no tag, this was the self described CBOR tag, skip it.
- return _decode_from_decoder(d)
+ return _decode_from_decoder(d, {}, allocator, loc)
}
-_decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error) {
+_decode_tag :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Maybe(Tag), err: Decode_Error) {
num := _decode_uint_as_u64(d.reader, add) or_return
// CBOR can be wrapped in a tag that decoders can use to see/check if the binary data is CBOR.
@@ -602,7 +603,7 @@ _decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error)
t := Tag{
number = num,
- value = _decode_from_decoder(d) or_return,
+ value = _decode_from_decoder(d, {}, allocator, loc) or_return,
}
if nested, ok := t.value.(^Tag); ok {
@@ -883,4 +884,4 @@ _encode_deterministic_f64 :: proc(w: io.Writer, v: f64) -> io.Error {
}
return _encode_f64_exact(w, v)
-}
+} \ No newline at end of file
diff --git a/core/encoding/cbor/marshal.odin b/core/encoding/cbor/marshal.odin
index 37c9dd180..775eafd9c 100644
--- a/core/encoding/cbor/marshal.odin
+++ b/core/encoding/cbor/marshal.odin
@@ -45,8 +45,8 @@ marshal :: marshal_into
// Marshals the given value into a CBOR byte stream (allocated using the given allocator).
// See docs on the `marshal_into` proc group for more info.
-marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (bytes: []byte, err: Marshal_Error) {
- b, alloc_err := strings.builder_make(allocator)
+marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (bytes: []byte, err: Marshal_Error) {
+ b, alloc_err := strings.builder_make(allocator, loc=loc)
// The builder as a stream also returns .EOF if it ran out of memory so this is consistent.
if alloc_err != nil {
return nil, .EOF
@@ -54,7 +54,7 @@ marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.a
defer if err != nil { strings.builder_destroy(&b) }
- if err = marshal_into_builder(&b, v, flags, temp_allocator); err != nil {
+ if err = marshal_into_builder(&b, v, flags, temp_allocator, loc=loc); err != nil {
return
}
@@ -63,20 +63,20 @@ marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.a
// Marshals the given value into a CBOR byte stream written to the given builder.
// See docs on the `marshal_into` proc group for more info.
-marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
- return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator)
+marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Marshal_Error {
+ return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator, loc=loc)
}
// Marshals the given value into a CBOR byte stream written to the given writer.
// See docs on the `marshal_into` proc group for more info.
-marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
+marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Marshal_Error {
encoder := Encoder{flags, w, temp_allocator}
- return marshal_into_encoder(encoder, v)
+ return marshal_into_encoder(encoder, v, loc=loc)
}
// Marshals the given value into a CBOR byte stream written to the given encoder.
// See docs on the `marshal_into` proc group for more info.
-marshal_into_encoder :: proc(e: Encoder, v: any) -> (err: Marshal_Error) {
+marshal_into_encoder :: proc(e: Encoder, v: any, loc := #caller_location) -> (err: Marshal_Error) {
e := e
if e.temp_allocator.procedure == nil {
diff --git a/core/encoding/cbor/unmarshal.odin b/core/encoding/cbor/unmarshal.odin
index a1524d9f4..c31ba1d92 100644
--- a/core/encoding/cbor/unmarshal.odin
+++ b/core/encoding/cbor/unmarshal.odin
@@ -31,8 +31,8 @@ unmarshal :: proc {
unmarshal_from_string,
}
-unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
- err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator)
+unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
+ err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
@@ -40,21 +40,21 @@ unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{},
}
// Unmarshals from a string, see docs on the proc group `Unmarshal` for more info.
-unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
sr: strings.Reader
r := strings.to_reader(&sr, s)
- err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator)
+ err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
return
}
-unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
d := d
- err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator)
+ err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
@@ -62,7 +62,7 @@ unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.alloca
}
-_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator) -> Unmarshal_Error {
+_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> Unmarshal_Error {
context.allocator = allocator
context.temp_allocator = temp_allocator
v := v
@@ -78,10 +78,10 @@ _unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocat
}
data := any{(^rawptr)(v.data)^, ti.variant.(reflect.Type_Info_Pointer).elem.id}
- return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return))
+ return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return), allocator, temp_allocator, loc)
}
-_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Error) {
+_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
v := v
ti := reflect.type_info_base(type_info_of(v.id))
r := d.reader
@@ -104,7 +104,7 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
// Allow generic unmarshal by doing it into a `Value`.
switch &dst in v {
case Value:
- dst = err_conv(_decode_from_decoder(d, hdr)) or_return
+ dst = err_conv(_decode_from_decoder(d, hdr, allocator, loc)) or_return
return
}
@@ -308,7 +308,7 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
if impl, ok := _tag_implementations_nr[nr]; ok {
return impl->unmarshal(d, nr, v)
} else if nr == TAG_OBJECT_TYPE {
- return _unmarshal_union(d, v, ti, hdr)
+ return _unmarshal_union(d, v, ti, hdr, loc=loc)
} else {
// Discard the tag info and unmarshal as its value.
return _unmarshal_value(d, v, _decode_header(r) or_return)
@@ -316,19 +316,19 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
return _unsupported(v, hdr, add)
- case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add)
- case .Text: return _unmarshal_string(d, v, ti, hdr, add)
- case .Array: return _unmarshal_array(d, v, ti, hdr, add)
- case .Map: return _unmarshal_map(d, v, ti, hdr, add)
+ case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add, allocator=allocator, loc=loc)
+ case .Text: return _unmarshal_string(d, v, ti, hdr, add, allocator=allocator, loc=loc)
+ case .Array: return _unmarshal_array(d, v, ti, hdr, add, allocator=allocator, loc=loc)
+ case .Map: return _unmarshal_map(d, v, ti, hdr, add, allocator=allocator, loc=loc)
case: return .Bad_Major
}
}
-_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
#partial switch t in ti.variant {
case reflect.Type_Info_String:
- bytes := err_conv(_decode_bytes(d, add)) or_return
+ bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
if t.is_cstring {
raw := (^cstring)(v.data)
@@ -347,7 +347,7 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if elem_base.id != byte { return _unsupported(v, hdr) }
- bytes := err_conv(_decode_bytes(d, add)) or_return
+ bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
raw := (^mem.Raw_Slice)(v.data)
raw^ = transmute(mem.Raw_Slice)bytes
return
@@ -357,12 +357,12 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if elem_base.id != byte { return _unsupported(v, hdr) }
- bytes := err_conv(_decode_bytes(d, add)) or_return
+ bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
raw := (^mem.Raw_Dynamic_Array)(v.data)
raw.data = raw_data(bytes)
raw.len = len(bytes)
raw.cap = len(bytes)
- raw.allocator = context.allocator
+ raw.allocator = allocator
return
case reflect.Type_Info_Array:
@@ -385,10 +385,10 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
-_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
#partial switch t in ti.variant {
case reflect.Type_Info_String:
- text := err_conv(_decode_text(d, add)) or_return
+ text := err_conv(_decode_text(d, add, allocator, loc)) or_return
if t.is_cstring {
raw := (^cstring)(v.data)
@@ -403,8 +403,8 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
// Enum by its variant name.
case reflect.Type_Info_Enum:
- text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
- defer delete(text, context.temp_allocator)
+ text := err_conv(_decode_text(d, add, allocator=temp_allocator, loc=loc)) or_return
+ defer delete(text, temp_allocator, loc)
for name, i in t.names {
if name == text {
@@ -414,8 +414,8 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
}
case reflect.Type_Info_Rune:
- text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
- defer delete(text, context.temp_allocator)
+ text := err_conv(_decode_text(d, add, allocator=temp_allocator, loc=loc)) or_return
+ defer delete(text, temp_allocator, loc)
r := (^rune)(v.data)
dr, n := utf8.decode_rune(text)
@@ -430,13 +430,15 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
return _unsupported(v, hdr)
}
-_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
assign_array :: proc(
d: Decoder,
da: ^mem.Raw_Dynamic_Array,
elemt: ^reflect.Type_Info,
length: int,
growable := true,
+ allocator := context.allocator,
+ loc := #caller_location,
) -> (out_of_space: bool, err: Unmarshal_Error) {
for idx: uintptr = 0; length == -1 || idx < uintptr(length); idx += 1 {
elem_ptr := rawptr(uintptr(da.data) + idx*uintptr(elemt.size))
@@ -450,13 +452,13 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if !growable { return true, .Out_Of_Memory }
cap := 2 * da.cap
- ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap)
+ ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap, loc)
// NOTE: Might be lying here, but it is at least an allocator error.
if !ok { return false, .Out_Of_Memory }
}
- err = _unmarshal_value(d, elem, hdr)
+ err = _unmarshal_value(d, elem, hdr, allocator=allocator, loc=loc)
if length == -1 && err == .Break { break }
if err != nil { return }
@@ -469,10 +471,10 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
// Allow generically storing the values array.
switch &dst in v {
case ^Array:
- dst = err_conv(_decode_array_ptr(d, add)) or_return
+ dst = err_conv(_decode_array_ptr(d, add, allocator=allocator, loc=loc)) or_return
return
case Array:
- dst = err_conv(_decode_array(d, add)) or_return
+ dst = err_conv(_decode_array(d, add, allocator=allocator, loc=loc)) or_return
return
}
@@ -480,8 +482,8 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Slice:
length, scap := err_conv(_decode_len_container(d, add)) or_return
- data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
- defer if err != nil { mem.free_bytes(data) }
+ data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align, allocator=allocator, loc=loc) or_return
+ defer if err != nil { mem.free_bytes(data, allocator=allocator, loc=loc) }
da := mem.Raw_Dynamic_Array{raw_data(data), 0, length, context.allocator }
@@ -489,7 +491,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if .Shrink_Excess in d.flags {
// Ignoring an error here, but this is not critical to succeed.
- _ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len)
+ _ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len, loc=loc)
}
raw := (^mem.Raw_Slice)(v.data)
@@ -500,8 +502,8 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Dynamic_Array:
length, scap := err_conv(_decode_len_container(d, add)) or_return
- data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
- defer if err != nil { mem.free_bytes(data) }
+ data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align, loc=loc) or_return
+ defer if err != nil { mem.free_bytes(data, allocator=allocator, loc=loc) }
raw := (^mem.Raw_Dynamic_Array)(v.data)
raw.data = raw_data(data)
@@ -513,7 +515,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if .Shrink_Excess in d.flags {
// Ignoring an error here, but this is not critical to succeed.
- _ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len)
+ _ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len, loc=loc)
}
return
@@ -525,7 +527,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, allocator }
out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
if out_of_space { return _unsupported(v, hdr) }
@@ -539,7 +541,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, allocator }
out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
if out_of_space { return _unsupported(v, hdr) }
@@ -553,7 +555,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, allocator }
info: ^runtime.Type_Info
switch ti.id {
@@ -575,7 +577,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, allocator }
info: ^runtime.Type_Info
switch ti.id {
@@ -593,17 +595,17 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
}
}
-_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
r := d.reader
- decode_key :: proc(d: Decoder, v: any, allocator := context.allocator) -> (k: string, err: Unmarshal_Error) {
+ decode_key :: proc(d: Decoder, v: any, allocator := context.allocator, loc := #caller_location) -> (k: string, err: Unmarshal_Error) {
entry_hdr := _decode_header(d.reader) or_return
entry_maj, entry_add := _header_split(entry_hdr)
#partial switch entry_maj {
case .Text:
- k = err_conv(_decode_text(d, entry_add, allocator)) or_return
+ k = err_conv(_decode_text(d, entry_add, allocator=allocator, loc=loc)) or_return
return
case .Bytes:
- bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator)) or_return
+ bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator, loc=loc)) or_return
k = string(bytes)
return
case:
@@ -615,10 +617,10 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
// Allow generically storing the map array.
switch &dst in v {
case ^Map:
- dst = err_conv(_decode_map_ptr(d, add)) or_return
+ dst = err_conv(_decode_map_ptr(d, add, allocator=allocator, loc=loc)) or_return
return
case Map:
- dst = err_conv(_decode_map(d, add)) or_return
+ dst = err_conv(_decode_map(d, add, allocator=allocator, loc=loc)) or_return
return
}
@@ -754,7 +756,7 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
// Unmarshal into a union, based on the `TAG_OBJECT_TYPE` tag of the spec, it denotes a tag which
// contains an array of exactly two elements, the first is a textual representation of the following
// CBOR value's type.
-_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header) -> (err: Unmarshal_Error) {
+_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, loc := #caller_location) -> (err: Unmarshal_Error) {
r := d.reader
#partial switch t in ti.variant {
case reflect.Type_Info_Union:
@@ -792,7 +794,7 @@ _unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Named:
if vti.name == target_name {
reflect.set_union_variant_raw_tag(v, tag)
- return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+ return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return, loc=loc)
}
case:
@@ -804,7 +806,7 @@ _unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if variant_name == target_name {
reflect.set_union_variant_raw_tag(v, tag)
- return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+ return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return, loc=loc)
}
}
}
diff --git a/core/encoding/entity/entity.odin b/core/encoding/entity/entity.odin
index cee6230ef..f5208ad6f 100644
--- a/core/encoding/entity/entity.odin
+++ b/core/encoding/entity/entity.odin
@@ -56,38 +56,27 @@ CDATA_END :: "]]>"
COMMENT_START :: "<!--"
COMMENT_END :: "-->"
-/*
- Default: CDATA and comments are passed through unchanged.
-*/
+// Default: CDATA and comments are passed through unchanged.
XML_Decode_Option :: enum u8 {
- /*
- Do not decode & entities. It decodes by default.
- If given, overrides `Decode_CDATA`.
- */
+ // Do not decode & entities. It decodes by default. If given, overrides `Decode_CDATA`.
No_Entity_Decode,
- /*
- CDATA is unboxed.
- */
+ // CDATA is unboxed.
Unbox_CDATA,
- /*
- Unboxed CDATA is decoded as well.
- Ignored if `.Unbox_CDATA` is not given.
- */
+ // Unboxed CDATA is decoded as well. Ignored if `.Unbox_CDATA` is not given.
Decode_CDATA,
- /*
- Comments are stripped.
- */
+ // Comments are stripped.
Comment_Strip,
+
+ // Normalize whitespace
+ Normalize_Whitespace,
}
XML_Decode_Options :: bit_set[XML_Decode_Option; u8]
-/*
- Decode a string that may include SGML/XML/HTML entities.
- The caller has to free the result.
-*/
+// Decode a string that may include SGML/XML/HTML entities.
+// The caller has to free the result.
decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator := context.allocator) -> (decoded: string, err: Error) {
context.allocator = allocator
@@ -100,14 +89,14 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
t := Tokenizer{src=input}
in_data := false
+ prev: rune = ' '
+
loop: for {
advance(&t) or_return
if t.r < 0 { break loop }
- /*
- Below here we're never inside a CDATA tag.
- At most we'll see the start of one, but that doesn't affect the logic.
- */
+ // Below here we're never inside a CDATA tag. At most we'll see the start of one,
+ // but that doesn't affect the logic.
switch t.r {
case '<':
/*
@@ -126,9 +115,7 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
in_data = _handle_xml_special(&t, &builder, options) or_return
case ']':
- /*
- If we're unboxing _and_ decoding CDATA, we'll have to check for the end tag.
- */
+ // If we're unboxing _and_ decoding CDATA, we'll have to check for the end tag.
if in_data {
if t.read_offset + len(CDATA_END) < len(t.src) {
if string(t.src[t.offset:][:len(CDATA_END)]) == CDATA_END {
@@ -143,22 +130,16 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
case:
if in_data && .Decode_CDATA not_in options {
- /*
- Unboxed, but undecoded.
- */
+ // Unboxed, but undecoded.
write_rune(&builder, t.r)
continue
}
if t.r == '&' {
if entity, entity_err := _extract_xml_entity(&t); entity_err != .None {
- /*
- We read to the end of the string without closing the entity.
- Pass through as-is.
- */
+ // We read to the end of the string without closing the entity. Pass through as-is.
write_string(&builder, entity)
} else {
-
if .No_Entity_Decode not_in options {
if decoded, ok := xml_decode_entity(entity); ok {
write_rune(&builder, decoded)
@@ -166,19 +147,41 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
}
}
- /*
- Literal passthrough because the decode failed or we want entities not decoded.
- */
+ // Literal passthrough because the decode failed or we want entities not decoded.
write_string(&builder, "&")
write_string(&builder, entity)
write_string(&builder, ";")
}
} else {
- write_rune(&builder, t.r)
+ // Handle AV Normalization: https://www.w3.org/TR/2006/REC-xml11-20060816/#AVNormalize
+ if .Normalize_Whitespace in options {
+ switch t.r {
+ case ' ', '\r', '\n', '\t':
+ if prev != ' ' {
+ write_rune(&builder, ' ')
+ prev = ' '
+ }
+ case:
+ write_rune(&builder, t.r)
+ prev = t.r
+ }
+ } else {
+ // https://www.w3.org/TR/2006/REC-xml11-20060816/#sec-line-ends
+ switch t.r {
+ case '\n', 0x85, 0x2028:
+ write_rune(&builder, '\n')
+ case '\r': // Do nothing until next character
+ case:
+ if prev == '\r' { // Turn a single carriage return into a \n
+ write_rune(&builder, '\n')
+ }
+ write_rune(&builder, t.r)
+ }
+ prev = t.r
+ }
}
}
}
-
return strings.clone(strings.to_string(builder), allocator), err
}
@@ -253,24 +256,18 @@ xml_decode_entity :: proc(entity: string) -> (decoded: rune, ok: bool) {
return rune(val), true
case:
- /*
- Named entity.
- */
+ // Named entity.
return named_xml_entity_to_rune(entity)
}
}
-/*
- Private XML helper to extract `&<stuff>;` entity.
-*/
+// Private XML helper to extract `&<stuff>;` entity.
@(private="file")
_extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
assert(t != nil && t.r == '&')
- /*
- All of these would be in the ASCII range.
- Even if one is not, it doesn't matter. All characters we need to compare to extract are.
- */
+ // All of these would be in the ASCII range.
+ // Even if one is not, it doesn't matter. All characters we need to compare to extract are.
length := len(t.src)
found := false
@@ -292,9 +289,7 @@ _extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
return string(t.src[t.offset : t.read_offset]), .Invalid_Entity_Encoding
}
-/*
- Private XML helper for CDATA and comments.
-*/
+// Private XML helper for CDATA and comments.
@(private="file")
_handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: XML_Decode_Options) -> (in_data: bool, err: Error) {
assert(t != nil && t.r == '<')
@@ -304,20 +299,14 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
t.read_offset += len(CDATA_START) - 1
if .Unbox_CDATA in options && .Decode_CDATA in options {
- /*
- We're unboxing _and_ decoding CDATA
- */
+ // We're unboxing _and_ decoding CDATA
return true, .None
}
- /*
- CDATA is passed through.
- */
+ // CDATA is passed through.
offset := t.offset
- /*
- Scan until end of CDATA.
- */
+ // Scan until end of CDATA.
for {
advance(t) or_return
if t.r < 0 { return true, .CDATA_Not_Terminated }
@@ -341,14 +330,10 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
} else if string(t.src[t.offset:][:len(COMMENT_START)]) == COMMENT_START {
t.read_offset += len(COMMENT_START)
- /*
- Comment is passed through by default.
- */
+ // Comment is passed through by default.
offset := t.offset
- /*
- Scan until end of Comment.
- */
+ // Scan until end of Comment.
for {
advance(t) or_return
if t.r < 0 { return true, .Comment_Not_Terminated }
diff --git a/core/encoding/hex/hex.odin b/core/encoding/hex/hex.odin
index dbffe216b..c2cd89c5b 100644
--- a/core/encoding/hex/hex.odin
+++ b/core/encoding/hex/hex.odin
@@ -2,8 +2,8 @@ package encoding_hex
import "core:strings"
-encode :: proc(src: []byte, allocator := context.allocator) -> []byte #no_bounds_check {
- dst := make([]byte, len(src) * 2, allocator)
+encode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> []byte #no_bounds_check {
+ dst := make([]byte, len(src) * 2, allocator, loc)
for i, j := 0, 0; i < len(src); i += 1 {
v := src[i]
dst[j] = HEXTABLE[v>>4]
@@ -15,12 +15,12 @@ encode :: proc(src: []byte, allocator := context.allocator) -> []byte #no_bounds
}
-decode :: proc(src: []byte, allocator := context.allocator) -> (dst: []byte, ok: bool) #no_bounds_check {
+decode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (dst: []byte, ok: bool) #no_bounds_check {
if len(src) % 2 == 1 {
return
}
- dst = make([]byte, len(src) / 2, allocator)
+ dst = make([]byte, len(src) / 2, allocator, loc)
for i, j := 0, 1; j < len(src); j += 2 {
p := src[j-1]
q := src[j]
@@ -69,5 +69,4 @@ hex_digit :: proc(char: byte) -> (u8, bool) {
case 'A' ..= 'F': return char - 'A' + 10, true
case: return 0, false
}
-}
-
+} \ No newline at end of file
diff --git a/core/encoding/hxa/hxa.odin b/core/encoding/hxa/hxa.odin
index 9b24ede9c..9d0c58196 100644
--- a/core/encoding/hxa/hxa.odin
+++ b/core/encoding/hxa/hxa.odin
@@ -160,34 +160,35 @@ CONVENTION_SOFT_TRANSFORM :: "transform"
/* destroy procedures */
-meta_destroy :: proc(meta: Meta, allocator := context.allocator) {
+meta_destroy :: proc(meta: Meta, allocator := context.allocator, loc := #caller_location) {
if nested, ok := meta.value.([]Meta); ok {
for m in nested {
- meta_destroy(m)
+ meta_destroy(m, allocator, loc=loc) // recurse with the same allocator so nested metas are freed consistently
}
- delete(nested, allocator)
+ delete(nested, allocator, loc=loc)
}
}
-nodes_destroy :: proc(nodes: []Node, allocator := context.allocator) {
+nodes_destroy :: proc(nodes: []Node, allocator := context.allocator, loc := #caller_location) {
for node in nodes {
for meta in node.meta_data {
- meta_destroy(meta)
+ meta_destroy(meta, allocator, loc=loc) // pass allocator through; meta strings/arrays were allocated with it
}
- delete(node.meta_data, allocator)
+ delete(node.meta_data, allocator, loc=loc)
switch n in node.content {
case Node_Geometry:
- delete(n.corner_stack, allocator)
- delete(n.edge_stack, allocator)
- delete(n.face_stack, allocator)
+ delete(n.corner_stack, allocator, loc=loc)
+ delete(n.vertex_stack, allocator, loc=loc)
+ delete(n.edge_stack, allocator, loc=loc)
+ delete(n.face_stack, allocator, loc=loc)
case Node_Image:
- delete(n.image_stack, allocator)
+ delete(n.image_stack, allocator, loc=loc)
}
}
- delete(nodes, allocator)
+ delete(nodes, allocator, loc=loc)
}
-file_destroy :: proc(file: File) {
- nodes_destroy(file.nodes, file.allocator)
- delete(file.backing, file.allocator)
-}
+file_destroy :: proc(file: File, loc := #caller_location) {
+ nodes_destroy(file.nodes, file.allocator, loc=loc)
+ delete(file.backing, file.allocator, loc=loc)
+} \ No newline at end of file
diff --git a/core/encoding/hxa/read.odin b/core/encoding/hxa/read.odin
index f37dc3193..5c8503229 100644
--- a/core/encoding/hxa/read.odin
+++ b/core/encoding/hxa/read.odin
@@ -11,24 +11,21 @@ Read_Error :: enum {
Unable_To_Read_File,
}
-read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
+read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
context.allocator = allocator
- data, ok := os.read_entire_file(filename)
+ data, ok := os.read_entire_file(filename, allocator, loc)
if !ok {
err = .Unable_To_Read_File
+ delete(data, allocator, loc)
return
}
- defer if !ok {
- delete(data)
- } else {
- file.backing = data
- }
- file, err = read(data, filename, print_error, allocator)
+ file, err = read(data, filename, print_error, allocator, loc)
+ file.backing = data
return
}
-read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
+read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
Reader :: struct {
filename: string,
data: []byte,
@@ -79,8 +76,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
return string(data[:len]), nil
}
- read_meta :: proc(r: ^Reader, capacity: u32le) -> (meta_data: []Meta, err: Read_Error) {
- meta_data = make([]Meta, int(capacity))
+ read_meta :: proc(r: ^Reader, capacity: u32le, allocator := context.allocator, loc := #caller_location) -> (meta_data: []Meta, err: Read_Error) {
+ meta_data = make([]Meta, int(capacity), allocator=allocator)
count := 0
defer meta_data = meta_data[:count]
for &m in meta_data {
@@ -111,10 +108,10 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
return
}
- read_layer_stack :: proc(r: ^Reader, capacity: u32le) -> (layers: Layer_Stack, err: Read_Error) {
+ read_layer_stack :: proc(r: ^Reader, capacity: u32le, allocator := context.allocator, loc := #caller_location) -> (layers: Layer_Stack, err: Read_Error) {
stack_count := read_value(r, u32le) or_return
layer_count := 0
- layers = make(Layer_Stack, stack_count)
+ layers = make(Layer_Stack, stack_count, allocator=allocator, loc=loc)
defer layers = layers[:layer_count]
for &layer in layers {
layer.name = read_name(r) or_return
@@ -170,7 +167,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
node_count := 0
file.header = header^
- file.nodes = make([]Node, header.internal_node_count)
+ file.nodes = make([]Node, header.internal_node_count, allocator=allocator, loc=loc)
+ file.allocator = allocator
defer if err != nil {
nodes_destroy(file.nodes)
file.nodes = nil
@@ -198,15 +196,15 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
case .Geometry:
g: Node_Geometry
- g.vertex_count = read_value(r, u32le) or_return
- g.vertex_stack = read_layer_stack(r, g.vertex_count) or_return
- g.edge_corner_count = read_value(r, u32le) or_return
- g.corner_stack = read_layer_stack(r, g.edge_corner_count) or_return
+ g.vertex_count = read_value(r, u32le) or_return
+ g.vertex_stack = read_layer_stack(r, g.vertex_count, loc=loc) or_return
+ g.edge_corner_count = read_value(r, u32le) or_return
+ g.corner_stack = read_layer_stack(r, g.edge_corner_count, loc=loc) or_return
if header.version > 2 {
- g.edge_stack = read_layer_stack(r, g.edge_corner_count) or_return
+ g.edge_stack = read_layer_stack(r, g.edge_corner_count, loc=loc) or_return
}
- g.face_count = read_value(r, u32le) or_return
- g.face_stack = read_layer_stack(r, g.face_count) or_return
+ g.face_count = read_value(r, u32le) or_return
+ g.face_stack = read_layer_stack(r, g.face_count, loc=loc) or_return
node.content = g
@@ -233,4 +231,4 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
}
return
-}
+} \ No newline at end of file
diff --git a/core/encoding/ini/ini.odin b/core/encoding/ini/ini.odin
new file mode 100644
index 000000000..eb0ad9e7c
--- /dev/null
+++ b/core/encoding/ini/ini.odin
@@ -0,0 +1,189 @@
+package encoding_ini
+
+import "base:runtime"
+import "base:intrinsics"
+import "core:strings"
+import "core:strconv"
+import "core:io"
+import "core:os"
+import "core:fmt"
+_ :: fmt
+
+Options :: struct {
+ comment: string,
+ key_lower_case: bool,
+}
+
+DEFAULT_OPTIONS :: Options {
+ comment = ";",
+ key_lower_case = false,
+}
+
+Iterator :: struct {
+ section: string,
+ _src: string,
+ options: Options,
+}
+
+iterator_from_string :: proc(src: string, options := DEFAULT_OPTIONS) -> Iterator {
+ return {
+ section = "",
+ options = options,
+ _src = src,
+ }
+}
+
+
+// Returns the raw `key` and `value`. `ok` will be false once no more key=value pairs can be found.
+// The key and value may be quoted, which may require the use of `strconv.unquote_string`.
+iterate :: proc(it: ^Iterator) -> (key, value: string, ok: bool) {
+ for line_ in strings.split_lines_iterator(&it._src) {
+ line := strings.trim_space(line_)
+
+ if len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '[' {
+ end_idx := strings.index_byte(line, ']')
+ if end_idx < 0 {
+ end_idx = len(line)
+ }
+ it.section = line[1:end_idx]
+ continue
+ }
+
+ if it.options.comment != "" && strings.has_prefix(line, it.options.comment) {
+ continue
+ }
+
+ equal := strings.index(line, " =") // handle keys that themselves contain '=', e.g. `ctrl+= = zoom_in`
+ quote := strings.index_byte(line, '"')
+ if equal < 0 || quote > 0 && quote < equal {
+ equal = strings.index_byte(line, '=')
+ if equal < 0 {
+ continue
+ }
+ } else {
+ equal += 1 // skip past the space so `equal` points at the '=' itself
+ }
+
+ key = strings.trim_space(line[:equal])
+ value = strings.trim_space(line[equal+1:])
+ ok = true
+ return
+ }
+
+ it.section = ""
+ return
+}
+
+Map :: distinct map[string]map[string]string
+
+load_map_from_string :: proc(src: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error) {
+ unquote :: proc(val: string) -> (string, runtime.Allocator_Error) { // returns an owned (cloned or newly allocated) unquoted copy of `val`
+ v, allocated, ok := strconv.unquote_string(val)
+ if !ok {
+ return strings.clone(val)
+ }
+ if allocated {
+ return v, nil
+ }
+ return strings.clone(v)
+
+ }
+
+ context.allocator = allocator
+
+ it := iterator_from_string(src, options)
+
+ for key, value in iterate(&it) {
+ section := it.section
+ if section not_in m {
+ section = strings.clone(section) or_return
+ m[section] = {}
+ }
+
+ // store key-value pair
+ pairs := &m[section]
+ new_key := unquote(key) or_return
+ if options.key_lower_case {
+ old_key := new_key
+ new_key = strings.to_lower(old_key) or_return // lower-case the unquoted key, not the raw (possibly quoted) `key`
+ delete(old_key) or_return
+ }
+ pairs[new_key] = unquote(value) or_return
+ }
+ return
+}
+
+load_map_from_path :: proc(path: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error, ok: bool) {
+ data := os.read_entire_file(path, allocator) or_return
+ defer delete(data, allocator)
+ m, err = load_map_from_string(string(data), allocator, options)
+ ok = err == nil // success only when parsing produced no allocator error
+ defer if !ok { // free the partially-built map on failure
+ delete_map(m)
+ }
+ return
+}
+
+save_map_to_string :: proc(m: Map, allocator: runtime.Allocator) -> (data: string) {
+ b := strings.builder_make(allocator)
+ _, _ = write_map(strings.to_writer(&b), m)
+ return strings.to_string(b)
+}
+
+delete_map :: proc(m: Map) {
+ allocator := m.allocator
+ for section, pairs in m {
+ for key, value in pairs {
+ delete(key, allocator)
+ delete(value, allocator)
+ }
+ delete(pairs); delete(section, allocator) // free the inner map itself and the cloned section key (both allocated with `allocator`)
+ }
+ delete(m)
+}
+
+write_section :: proc(w: io.Writer, name: string, n_written: ^int = nil) -> (n: int, err: io.Error) {
+ defer if n_written != nil { n_written^ += n }
+ io.write_byte (w, '[', &n) or_return
+ io.write_string(w, name, &n) or_return
+ io.write_string(w, "]\n", &n) or_return // terminate the header line so following pairs start on their own line
+ return
+}
+
+write_pair :: proc(w: io.Writer, key: string, value: $T, n_written: ^int = nil) -> (n: int, err: io.Error) {
+ defer if n_written != nil { n_written^ += n }
+ io.write_string(w, key, &n) or_return
+ io.write_string(w, " = ", &n) or_return
+ when intrinsics.type_is_string(T) {
+ val := string(value)
+ if len(val) > 0 && (val[0] == ' ' || val[len(val)-1] == ' ') {
+ io.write_quoted_string(w, val, n_written=&n) or_return
+ } else {
+ io.write_string(w, val, &n) or_return
+ }
+ } else {
+ n += fmt.wprint(w, value)
+ }
+ io.write_byte(w, '\n', &n) or_return
+ return
+}
+
+write_map :: proc(w: io.Writer, m: Map) -> (n: int, err: io.Error) {
+ section_index := 0
+ for section, pairs in m {
+ if section_index == 0 && section == "" {
+ // ignore section
+ } else {
+ write_section(w, section, &n) or_return
+ }
+ for key, value in pairs {
+ write_pair(w, key, value, &n) or_return
+ }
+ section_index += 1
+ }
+ return
+}
diff --git a/core/encoding/json/marshal.odin b/core/encoding/json/marshal.odin
index b41a76856..dfca8b9db 100644
--- a/core/encoding/json/marshal.odin
+++ b/core/encoding/json/marshal.odin
@@ -62,8 +62,8 @@ Marshal_Options :: struct {
mjson_skipped_first_braces_end: bool,
}
-marshal :: proc(v: any, opt: Marshal_Options = {}, allocator := context.allocator) -> (data: []byte, err: Marshal_Error) {
- b := strings.builder_make(allocator)
+marshal :: proc(v: any, opt: Marshal_Options = {}, allocator := context.allocator, loc := #caller_location) -> (data: []byte, err: Marshal_Error) {
+ b := strings.builder_make(allocator, loc)
defer if err != nil {
strings.builder_destroy(&b)
}
diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin
index 3973725dc..38f71edf6 100644
--- a/core/encoding/json/parser.odin
+++ b/core/encoding/json/parser.odin
@@ -28,27 +28,27 @@ make_parser_from_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, par
}
-parse :: proc(data: []byte, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
- return parse_string(string(data), spec, parse_integers, allocator)
+parse :: proc(data: []byte, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator, loc := #caller_location) -> (Value, Error) {
+ return parse_string(string(data), spec, parse_integers, allocator, loc)
}
-parse_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
+parse_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator, loc := #caller_location) -> (Value, Error) {
context.allocator = allocator
p := make_parser_from_string(data, spec, parse_integers, allocator)
switch p.spec {
case .JSON:
- return parse_object(&p)
+ return parse_object(&p, loc)
case .JSON5:
- return parse_value(&p)
+ return parse_value(&p, loc)
case .SJSON:
#partial switch p.curr_token.kind {
case .Ident, .String:
- return parse_object_body(&p, .EOF)
+ return parse_object_body(&p, .EOF, loc)
}
- return parse_value(&p)
+ return parse_value(&p, loc)
}
- return parse_object(&p)
+ return parse_object(&p, loc)
}
token_end_pos :: proc(tok: Token) -> Pos {
@@ -106,7 +106,7 @@ parse_comma :: proc(p: ^Parser) -> (do_break: bool) {
return false
}
-parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
+parse_value :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
err = .None
token := p.curr_token
#partial switch token.kind {
@@ -142,13 +142,13 @@ parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
case .String:
advance_token(p)
- return unquote_string(token, p.spec, p.allocator)
+ return unquote_string(token, p.spec, p.allocator, loc)
case .Open_Brace:
- return parse_object(p)
+ return parse_object(p, loc)
case .Open_Bracket:
- return parse_array(p)
+ return parse_array(p, loc)
case:
if p.spec != .JSON {
@@ -176,7 +176,7 @@ parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
return
}
-parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) {
+parse_array :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
err = .None
expect_token(p, .Open_Bracket) or_return
@@ -184,14 +184,14 @@ parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) {
array.allocator = p.allocator
defer if err != nil {
for elem in array {
- destroy_value(elem)
+ destroy_value(elem, loc=loc)
}
- delete(array)
+ delete(array, loc)
}
for p.curr_token.kind != .Close_Bracket {
- elem := parse_value(p) or_return
- append(&array, elem)
+ elem := parse_value(p, loc) or_return
+ append(&array, elem, loc)
if parse_comma(p) {
break
@@ -228,38 +228,39 @@ clone_string :: proc(s: string, allocator: mem.Allocator, loc := #caller_locatio
return
}
-parse_object_key :: proc(p: ^Parser, key_allocator: mem.Allocator) -> (key: string, err: Error) {
+parse_object_key :: proc(p: ^Parser, key_allocator: mem.Allocator, loc := #caller_location) -> (key: string, err: Error) {
tok := p.curr_token
if p.spec != .JSON {
if allow_token(p, .Ident) {
- return clone_string(tok.text, key_allocator)
+ return clone_string(tok.text, key_allocator, loc)
}
}
if tok_err := expect_token(p, .String); tok_err != nil {
err = .Expected_String_For_Object_Key
return
}
- return unquote_string(tok, p.spec, key_allocator)
+ return unquote_string(tok, p.spec, key_allocator, loc)
}
-parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, err: Error) {
- obj.allocator = p.allocator
+parse_object_body :: proc(p: ^Parser, end_token: Token_Kind, loc := #caller_location) -> (obj: Object, err: Error) {
+ obj = make(Object, allocator=p.allocator, loc=loc)
+
defer if err != nil {
for key, elem in obj {
- delete(key, p.allocator)
- destroy_value(elem)
+ delete(key, p.allocator, loc)
+ destroy_value(elem, loc=loc)
}
- delete(obj)
+ delete(obj, loc)
}
for p.curr_token.kind != end_token {
- key := parse_object_key(p, p.allocator) or_return
+ key := parse_object_key(p, p.allocator, loc) or_return
parse_colon(p) or_return
- elem := parse_value(p) or_return
+ elem := parse_value(p, loc) or_return
if key in obj {
err = .Duplicate_Object_Key
- delete(key, p.allocator)
+ delete(key, p.allocator, loc)
return
}
@@ -267,7 +268,7 @@ parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, er
// inserting empty key/values into the object and for those we do not
// want to allocate anything
if key != "" {
- reserve_error := reserve(&obj, len(obj) + 1)
+ reserve_error := reserve(&obj, len(obj) + 1, loc)
if reserve_error == mem.Allocator_Error.Out_Of_Memory {
return nil, .Out_Of_Memory
}
@@ -281,9 +282,9 @@ parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, er
return obj, .None
}
-parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) {
+parse_object :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
expect_token(p, .Open_Brace) or_return
- obj := parse_object_body(p, .Close_Brace) or_return
+ obj := parse_object_body(p, .Close_Brace, loc) or_return
expect_token(p, .Close_Brace) or_return
return obj, .None
}
@@ -480,4 +481,4 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
}
return string(b[:w]), nil
-}
+} \ No newline at end of file
diff --git a/core/encoding/json/types.odin b/core/encoding/json/types.odin
index 73e183615..41eb21377 100644
--- a/core/encoding/json/types.odin
+++ b/core/encoding/json/types.odin
@@ -89,22 +89,22 @@ Error :: enum {
-destroy_value :: proc(value: Value, allocator := context.allocator) {
+destroy_value :: proc(value: Value, allocator := context.allocator, loc := #caller_location) {
context.allocator = allocator
#partial switch v in value {
case Object:
for key, elem in v {
- delete(key)
- destroy_value(elem)
+ delete(key, loc=loc)
+ destroy_value(elem, loc=loc)
}
- delete(v)
+ delete(v, loc=loc)
case Array:
for elem in v {
- destroy_value(elem)
+ destroy_value(elem, loc=loc)
}
- delete(v)
+ delete(v, loc=loc)
case String:
- delete(v)
+ delete(v, loc=loc)
}
}
diff --git a/core/encoding/xml/tokenizer.odin b/core/encoding/xml/tokenizer.odin
index 0f87c366b..2d06038b7 100644
--- a/core/encoding/xml/tokenizer.odin
+++ b/core/encoding/xml/tokenizer.odin
@@ -218,9 +218,7 @@ scan_identifier :: proc(t: ^Tokenizer) -> string {
for is_valid_identifier_rune(t.ch) {
advance_rune(t)
if t.ch == ':' {
- /*
- A namespaced attr can have at most two parts, `namespace:ident`.
- */
+ // A namespaced attr can have at most two parts, `namespace:ident`.
if namespaced {
break
}
@@ -268,14 +266,10 @@ scan_comment :: proc(t: ^Tokenizer) -> (comment: string, err: Error) {
return string(t.src[offset : t.offset - 1]), .None
}
-/*
- Skip CDATA
-*/
+// Skip CDATA
skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
if t.read_offset + len(CDATA_START) >= len(t.src) {
- /*
- Can't be the start of a CDATA tag.
- */
+ // Can't be the start of a CDATA tag.
return .None
}
@@ -290,9 +284,7 @@ skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
return .Premature_EOF
}
- /*
- Scan until the end of a CDATA tag.
- */
+ // Scan until the end of a CDATA tag.
if t.read_offset + len(CDATA_END) < len(t.src) {
if string(t.src[t.offset:][:len(CDATA_END)]) == CDATA_END {
t.read_offset += len(CDATA_END)
@@ -319,14 +311,10 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
case '<':
if peek_byte(t) == '!' {
if peek_byte(t, 1) == '[' {
- /*
- Might be the start of a CDATA tag.
- */
+ // Might be the start of a CDATA tag.
skip_cdata(t) or_return
} else if peek_byte(t, 1) == '-' && peek_byte(t, 2) == '-' {
- /*
- Comment start. Eat comment.
- */
+ // Comment start. Eat comment.
t.read_offset += 3
_ = scan_comment(t) or_return
}
@@ -342,17 +330,13 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
}
if t.ch == close {
- /*
- If it's not a CDATA or comment, it's the end of this body.
- */
+ // If it's not a CDATA or comment, it's the end of this body.
break loop
}
advance_rune(t)
}
- /*
- Strip trailing whitespace.
- */
+ // Strip trailing whitespace.
lit := string(t.src[offset : t.offset])
end := len(lit)
@@ -369,11 +353,6 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
if consume_close {
advance_rune(t)
}
-
- /*
- TODO: Handle decoding escape characters and unboxing CDATA.
- */
-
return lit, err
}
@@ -384,7 +363,7 @@ peek :: proc(t: ^Tokenizer) -> (token: Token) {
return token
}
-scan :: proc(t: ^Tokenizer) -> Token {
+scan :: proc(t: ^Tokenizer, multiline_string := false) -> Token {
skip_whitespace(t)
offset := t.offset
@@ -418,7 +397,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
case '"', '\'':
kind = .Invalid
- lit, err = scan_string(t, t.offset, ch, true, false)
+ lit, err = scan_string(t, t.offset, ch, true, multiline_string)
if err == .None {
kind = .String
}
@@ -435,4 +414,4 @@ scan :: proc(t: ^Tokenizer) -> Token {
lit = string(t.src[offset : t.offset])
}
return Token{kind, lit, pos}
-}
+} \ No newline at end of file
diff --git a/core/encoding/xml/xml_reader.odin b/core/encoding/xml/xml_reader.odin
index 5b4b12948..b9656900f 100644
--- a/core/encoding/xml/xml_reader.odin
+++ b/core/encoding/xml/xml_reader.odin
@@ -203,9 +203,7 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
doc.elements = make([dynamic]Element, 1024, 1024, allocator)
- // strings.intern_init(&doc.intern, allocator, allocator)
-
- err = .Unexpected_Token
+ err = .Unexpected_Token
element, parent: Element_ID
open: Token
@@ -259,8 +257,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
case .Slash:
// Empty tag. Close it.
expect(t, .Gt) or_return
- parent = doc.elements[element].parent
- element = parent
+ parent = doc.elements[element].parent
+ element = parent
case:
error(t, t.offset, "Expected close tag, got: %#v\n", end_token)
@@ -276,8 +274,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
error(t, t.offset, "Mismatched Closing Tag. Expected %v, got %v\n", doc.elements[element].ident, ident.text)
return doc, .Mismatched_Closing_Tag
}
- parent = doc.elements[element].parent
- element = parent
+ parent = doc.elements[element].parent
+ element = parent
} else if open.kind == .Exclaim {
// <!
@@ -463,8 +461,8 @@ validate_options :: proc(options: Options) -> (validated: Options, err: Error) {
return validated, .None
}
-expect :: proc(t: ^Tokenizer, kind: Token_Kind) -> (tok: Token, err: Error) {
- tok = scan(t)
+expect :: proc(t: ^Tokenizer, kind: Token_Kind, multiline_string := false) -> (tok: Token, err: Error) {
+ tok = scan(t, multiline_string=multiline_string)
if tok.kind == kind { return tok, .None }
error(t, t.offset, "Expected \"%v\", got \"%v\".", kind, tok.kind)
@@ -480,7 +478,13 @@ parse_attribute :: proc(doc: ^Document) -> (attr: Attribute, offset: int, err: E
offset = t.offset - len(key.text)
_ = expect(t, .Eq) or_return
- value := expect(t, .String) or_return
+ value := expect(t, .String, multiline_string=true) or_return
+
+ normalized, normalize_err := entity.decode_xml(value.text, {.Normalize_Whitespace}, doc.allocator)
+ if normalize_err == .None {
+ append(&doc.strings_to_free, normalized)
+ value.text = normalized
+ }
attr.key = key.text
attr.val = value.text
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index 62cd95968..f9113a7a7 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -2,6 +2,7 @@ package fmt
import "base:intrinsics"
import "base:runtime"
+import "core:math"
import "core:math/bits"
import "core:mem"
import "core:io"
@@ -1494,7 +1495,7 @@ fmt_pointer :: proc(fi: ^Info, p: rawptr, verb: rune) {
u := u64(uintptr(p))
switch verb {
case 'p', 'v', 'w':
- if !fi.hash && verb == 'v' {
+ if !fi.hash {
io.write_string(fi.writer, "0x", &fi.n)
}
_fmt_int(fi, u, 16, false, 8*size_of(rawptr), __DIGITS_UPPER)
@@ -2968,6 +2969,21 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
fmt_bit_field(fi, v, verb, info, "")
}
}
+// This proc helps keep some of the code around whether or not to print an
+// intermediate plus sign in complexes and quaternions more readable.
+@(private)
+_cq_should_print_intermediate_plus :: proc "contextless" (fi: ^Info, f: f64) -> bool {
+ if !fi.plus && f >= 0 {
+ #partial switch math.classify(f) {
+ case .Neg_Zero, .Inf:
+ // These two classes print their own signs.
+ return false
+ case:
+ return true
+ }
+ }
+ return false
+}
// Formats a complex number based on the given formatting verb
//
// Inputs:
@@ -2981,7 +2997,7 @@ fmt_complex :: proc(fi: ^Info, c: complex128, bits: int, verb: rune) {
case 'f', 'F', 'v', 'h', 'H', 'w':
r, i := real(c), imag(c)
fmt_float(fi, r, bits/2, verb)
- if !fi.plus && i >= 0 {
+ if _cq_should_print_intermediate_plus(fi, i) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, i, bits/2, verb)
@@ -3007,19 +3023,19 @@ fmt_quaternion :: proc(fi: ^Info, q: quaternion256, bits: int, verb: rune) {
fmt_float(fi, r, bits/4, verb)
- if !fi.plus && i >= 0 {
+ if _cq_should_print_intermediate_plus(fi, i) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, i, bits/4, verb)
io.write_rune(fi.writer, 'i', &fi.n)
- if !fi.plus && j >= 0 {
+ if _cq_should_print_intermediate_plus(fi, j) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, j, bits/4, verb)
io.write_rune(fi.writer, 'j', &fi.n)
- if !fi.plus && k >= 0 {
+ if _cq_should_print_intermediate_plus(fi, k) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, k, bits/4, verb)
diff --git a/core/fmt/fmt_os.odin b/core/fmt/fmt_os.odin
index a403dcd65..9de0d43be 100644
--- a/core/fmt/fmt_os.odin
+++ b/core/fmt/fmt_os.odin
@@ -1,5 +1,6 @@
//+build !freestanding
//+build !js
+//+build !orca
package fmt
import "base:runtime"
diff --git a/core/image/bmp/bmp.odin b/core/image/bmp/bmp.odin
new file mode 100644
index 000000000..64fc1d5a8
--- /dev/null
+++ b/core/image/bmp/bmp.odin
@@ -0,0 +1,746 @@
+// package bmp implements a Microsoft BMP image reader
+package core_image_bmp
+
+import "core:image"
+import "core:bytes"
+import "core:compress"
+import "core:mem"
+import "base:intrinsics"
+import "base:runtime"
+
+Error :: image.Error
+Image :: image.Image
+Options :: image.Options
+
+RGB_Pixel :: image.RGB_Pixel
+RGBA_Pixel :: image.RGBA_Pixel
+
+FILE_HEADER_SIZE :: 14
+INFO_STUB_SIZE :: FILE_HEADER_SIZE + size_of(image.BMP_Version)
+
+save_to_buffer :: proc(output: ^bytes.Buffer, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+ context.allocator = allocator
+
+ if img == nil {
+ return .Invalid_Input_Image
+ }
+
+ if output == nil {
+ return .Invalid_Output
+ }
+
+ pixels := img.width * img.height
+ if pixels == 0 || pixels > image.MAX_DIMENSIONS {
+ return .Invalid_Input_Image
+ }
+
+ // While the BMP spec (and our loader) support more fanciful image types,
+ // `bmp.save` supports only 3 and 4 channel images with a bit depth of 8.
+ if img.depth != 8 || img.channels < 3 || img.channels > 4 {
+ return .Invalid_Input_Image
+ }
+
+ if img.channels * pixels != len(img.pixels.buf) {
+ return .Invalid_Input_Image
+ }
+
+ // Calculate and allocate size.
+ header_size := u32le(image.BMP_Version.V3)
+ total_header_size := header_size + 14 // file header = 14
+ pixel_count_bytes := u32le(align4(img.width * img.channels) * img.height)
+
+ header := image.BMP_Header{
+ // File header
+ magic = .Bitmap,
+ size = total_header_size + pixel_count_bytes,
+ _res1 = 0,
+ _res2 = 0,
+ pixel_offset = total_header_size,
+ // V3
+ info_size = .V3,
+ width = i32le(img.width),
+ height = i32le(img.height),
+ planes = 1,
+ bpp = u16le(8 * img.channels),
+ compression = .RGB,
+ image_size = pixel_count_bytes,
+ pels_per_meter = {2835, 2835}, // 72 DPI
+ colors_used = 0,
+ colors_important = 0,
+ }
+ written := 0
+
+ if resize(&output.buf, int(header.size)) != nil {
+ return .Unable_To_Allocate_Or_Resize
+ }
+
+ header_bytes := transmute([size_of(image.BMP_Header)]u8)header
+ written += int(total_header_size)
+ copy(output.buf[:], header_bytes[:written])
+
+ switch img.channels {
+ case 3:
+ row_bytes := img.width * img.channels
+ row_padded := align4(row_bytes)
+ pixels := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ for y in 0..<img.height {
+ row_offset := row_padded * (img.height - y - 1) + written
+ for x in 0..<img.width {
+ pix_offset := 3 * x
+ output.buf[row_offset + pix_offset + 0] = pixels[0].b
+ output.buf[row_offset + pix_offset + 1] = pixels[0].g
+ output.buf[row_offset + pix_offset + 2] = pixels[0].r
+ pixels = pixels[1:]
+ }
+ }
+
+ case 4:
+ row_bytes := img.width * img.channels
+ pixels := mem.slice_data_cast([]RGBA_Pixel, img.pixels.buf[:])
+ for y in 0..<img.height {
+ row_offset := row_bytes * (img.height - y - 1) + written
+ for x in 0..<img.width {
+ pix_offset := 4 * x
+ output.buf[row_offset + pix_offset + 0] = pixels[0].b
+ output.buf[row_offset + pix_offset + 1] = pixels[0].g
+ output.buf[row_offset + pix_offset + 2] = pixels[0].r
+ output.buf[row_offset + pix_offset + 3] = pixels[0].a
+ pixels = pixels[1:]
+ }
+ }
+ }
+ return
+}
+
+
+load_from_bytes :: proc(data: []byte, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ ctx := &compress.Context_Memory_Input{
+ input_data = data,
+ }
+
+ img, err = load_from_context(ctx, options, allocator)
+ return img, err
+}
+
+@(optimization_mode="speed")
+load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ context.allocator = allocator
+ options := options
+
+ // For compress.read_slice(), until that's rewritten to not use temp allocator
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+
+ if .info in options {
+ options |= {.return_metadata, .do_not_decompress_image}
+ options -= {.info}
+ }
+
+ if .return_header in options && .return_metadata in options {
+ options -= {.return_header}
+ }
+
+ info_buf: [size_of(image.BMP_Header)]u8
+
+ // Read file header (14) + info size (4)
+ stub_data := compress.read_slice(ctx, INFO_STUB_SIZE) or_return
+ copy(info_buf[:], stub_data[:])
+ stub_info := transmute(image.BMP_Header)info_buf
+
+ if stub_info.magic != .Bitmap {
+ for v in image.BMP_Magic {
+ if stub_info.magic == v {
+ return img, .Unsupported_OS2_File
+ }
+ }
+ return img, .Invalid_Signature
+ }
+
+ info: image.BMP_Header
+ switch stub_info.info_size {
+ case .OS2_v1:
+ // Read the remainder of the header
+ os2_data := compress.read_data(ctx, image.OS2_Header) or_return
+
+ info = transmute(image.BMP_Header)info_buf
+ info.width = i32le(os2_data.width)
+ info.height = i32le(os2_data.height)
+ info.planes = os2_data.planes
+ info.bpp = os2_data.bpp
+
+ switch info.bpp {
+ case 1, 4, 8, 24:
+ case:
+ return img, .Unsupported_BPP
+ }
+
+ case .ABBR_16 ..= .V5:
+ // Sizes include V3, V4, V5 and OS2v2 outright, but can also handle truncated headers.
+ // Sometimes called BITMAPV2INFOHEADER or BITMAPV3INFOHEADER.
+ // Let's just try to process it.
+
+ to_read := int(stub_info.info_size) - size_of(image.BMP_Version)
+ info_data := compress.read_slice(ctx, to_read) or_return
+ copy(info_buf[INFO_STUB_SIZE:], info_data[:])
+
+ // Update info struct with the rest of the data we read
+ info = transmute(image.BMP_Header)info_buf
+
+ case:
+ return img, .Unsupported_BMP_Version
+ }
+
+ /* TODO(Jeroen): Add a "strict" option to catch these non-issues that violate spec?
+ if info.planes != 1 {
+ return img, .Invalid_Planes_Value
+ }
+ */
+
+ if img == nil {
+ img = new(Image)
+ }
+ img.which = .BMP
+
+ img.metadata = new_clone(image.BMP_Info{
+ info = info,
+ })
+
+ img.width = abs(int(info.width))
+ img.height = abs(int(info.height))
+ img.channels = 3
+ img.depth = 8
+
+ if img.width == 0 || img.height == 0 {
+ return img, .Invalid_Image_Dimensions
+ }
+
+ total_pixels := abs(img.width * img.height)
+ if total_pixels > image.MAX_DIMENSIONS {
+ return img, .Image_Dimensions_Too_Large
+ }
+
+ // TODO(Jeroen): Handle RGBA.
+ switch info.compression {
+ case .Bit_Fields, .Alpha_Bit_Fields:
+ switch info.bpp {
+ case 16, 32:
+ make_output(img, allocator) or_return
+ decode_rgb(ctx, img, info, allocator) or_return
+ case:
+ if is_os2(info.info_size) {
+ return img, .Unsupported_Compression
+ }
+ return img, .Unsupported_BPP
+ }
+ case .RGB:
+ make_output(img, allocator) or_return
+ decode_rgb(ctx, img, info, allocator) or_return
+ case .RLE4, .RLE8:
+ make_output(img, allocator) or_return
+ decode_rle(ctx, img, info, allocator) or_return
+ case .CMYK, .CMYK_RLE4, .CMYK_RLE8: fallthrough
+ case .PNG, .JPEG: fallthrough
+ case: return img, .Unsupported_Compression
+ }
+
+ // Flipped vertically
+ if info.height < 0 {
+ pixels := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ for y in 0..<img.height / 2 {
+ for x in 0..<img.width {
+ top := y * img.width + x
+ bot := (img.height - y - 1) * img.width + x
+
+ pixels[top], pixels[bot] = pixels[bot], pixels[top]
+ }
+ }
+ }
+ return
+}
+
+is_os2 :: proc(version: image.BMP_Version) -> (res: bool) {
+ #partial switch version {
+ case .OS2_v1, .OS2_v2: return true
+ case: return false
+ }
+}
+
+make_output :: proc(img: ^Image, allocator := context.allocator) -> (err: Error) {
+ assert(img != nil)
+ bytes_needed := img.channels * img.height * img.width
+ img.pixels.buf = make([dynamic]u8, bytes_needed, allocator)
+ if len(img.pixels.buf) != bytes_needed {
+ return .Unable_To_Allocate_Or_Resize
+ }
+ return
+}
+
+write :: proc(img: ^Image, x, y: int, pix: RGB_Pixel) -> (err: Error) {
+ if y >= img.height || x >= img.width {
+ return .Corrupt
+ }
+ out := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ assert(img.height >= 1 && img.width >= 1)
+ out[(img.height - y - 1) * img.width + x] = pix
+ return
+}
+
+Bitmask :: struct {
+ mask: [4]u32le `fmt:"b"`,
+ shift: [4]u32le,
+ bits: [4]u32le,
+}
+
+read_or_make_bit_masks :: proc(ctx: ^$C, info: image.BMP_Header) -> (res: Bitmask, read: int, err: Error) {
+ ctz :: intrinsics.count_trailing_zeros
+ c1s :: intrinsics.count_ones
+
+ #partial switch info.compression {
+ case .RGB:
+ switch info.bpp {
+ case 16:
+ return {
+ mask = {31 << 10, 31 << 5, 31, 0},
+ shift = { 10, 5, 0, 0},
+ bits = { 5, 5, 5, 0},
+ }, int(4 * info.colors_used), nil
+
+ case 32:
+ return {
+ mask = {255 << 16, 255 << 8, 255, 255 << 24},
+ shift = { 16, 8, 0, 24},
+ bits = { 8, 8, 8, 8},
+ }, int(4 * info.colors_used), nil
+
+ case: return {}, 0, .Unsupported_BPP
+ }
+ case .Bit_Fields, .Alpha_Bit_Fields:
+ bf := info.masks
+ alpha_mask := false
+ bit_count: u32le
+
+ #partial switch info.info_size {
+ case .ABBR_52 ..= .V5:
+ // All possible BMP header sizes 52+ bytes long, includes V4 + V5
+ // Bit fields were read as part of the header
+ // V3 header is 40 bytes. We need 56 at a minimum for RGBA bit fields in the next section.
+ if info.info_size >= .ABBR_56 {
+ alpha_mask = true
+ }
+
+ case .V3:
+ // Version 3 doesn't have a bit field embedded, but can still have a 3 or 4 color bit field.
+ // Because it wasn't read as part of the header, we need to read it now.
+
+ if info.compression == .Alpha_Bit_Fields {
+ bf = compress.read_data(ctx, [4]u32le) or_return
+ alpha_mask = true
+ read = 16
+ } else {
+ bf.xyz = compress.read_data(ctx, [3]u32le) or_return
+ read = 12
+ }
+
+ case:
+ // Bit fields are unhandled for this BMP version
+ return {}, 0, .Bitfield_Version_Unhandled
+ }
+
+ if alpha_mask {
+ res = {
+ mask = {bf.r, bf.g, bf.b, bf.a},
+ shift = {ctz(bf.r), ctz(bf.g), ctz(bf.b), ctz(bf.a)},
+ bits = {c1s(bf.r), c1s(bf.g), c1s(bf.b), c1s(bf.a)},
+ }
+
+ bit_count = res.bits.r + res.bits.g + res.bits.b + res.bits.a
+ } else {
+ res = {
+ mask = {bf.r, bf.g, bf.b, 0},
+ shift = {ctz(bf.r), ctz(bf.g), ctz(bf.b), 0},
+ bits = {c1s(bf.r), c1s(bf.g), c1s(bf.b), 0},
+ }
+
+ bit_count = res.bits.r + res.bits.g + res.bits.b
+ }
+
+ if bit_count > u32le(info.bpp) {
+ err = .Bitfield_Sum_Exceeds_BPP
+ }
+
+ overlapped := res.mask.r | res.mask.g | res.mask.b | res.mask.a
+ if c1s(overlapped) < bit_count {
+ err = .Bitfield_Overlapped
+ }
+ return res, read, err
+
+ case:
+ return {}, 0, .Unsupported_Compression
+ }
+ return
+}
+
+scale :: proc(val: $T, mask, shift, bits: u32le) -> (res: u8) {
+ if bits == 0 { return 0 } // Guard against malformed bit fields
+ v := (u32le(val) & mask) >> shift
+ mask_in := u32le(1 << bits) - 1
+ return u8(v * 255 / mask_in)
+}
+
+decode_rgb :: proc(ctx: ^$C, img: ^Image, info: image.BMP_Header, allocator := context.allocator) -> (err: Error) {
+ pixel_offset := int(info.pixel_offset)
+ pixel_offset -= int(info.info_size) + FILE_HEADER_SIZE
+
+ palette: [256]RGBA_Pixel
+
+ // Palette size is info.colors_used if populated. If not it's min(1 << bpp, offset to the pixels / channel count)
+ colors_used := min(256, 1 << info.bpp if info.colors_used == 0 else info.colors_used)
+ max_colors := pixel_offset / 3 if info.info_size == .OS2_v1 else pixel_offset / 4
+ colors_used = min(colors_used, u32le(max_colors))
+
+ switch info.bpp {
+ case 1:
+ if info.info_size == .OS2_v1 {
+ // 2 x RGB palette of instead of variable RGBA palette
+ for i in 0..<colors_used {
+ palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
+ }
+ pixel_offset -= int(3 * colors_used)
+ } else {
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ }
+ skip_space(ctx, pixel_offset)
+
+ stride := (img.width + 7) / 8
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ shift := u8(7 - (x & 0x07))
+ p := (data[x / 8] >> shift) & 0x01
+ write(img, x, y, palette[p].bgr) or_return
+ }
+ }
+
+ case 2: // Non-standard on modern Windows, but was allowed on WinCE
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ skip_space(ctx, pixel_offset)
+
+ stride := (img.width + 3) / 4
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ shift := 6 - (x & 0x03) << 1
+ p := (data[x / 4] >> u8(shift)) & 0x03
+ write(img, x, y, palette[p].bgr) or_return
+ }
+ }
+
+ case 4:
+ if info.info_size == .OS2_v1 {
+ // 16 x RGB palette of instead of variable RGBA palette
+ for i in 0..<colors_used {
+ palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
+ }
+ pixel_offset -= int(3 * colors_used)
+ } else {
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ }
+ skip_space(ctx, pixel_offset)
+
+ stride := (img.width + 1) / 2
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ p := data[x / 2] >> 4 if x & 1 == 0 else data[x / 2]
+ write(img, x, y, palette[p & 0x0f].bgr) or_return
+ }
+ }
+
+ case 8:
+ if info.info_size == .OS2_v1 {
+ // 256 x RGB palette of instead of variable RGBA palette
+ for i in 0..<colors_used {
+ palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
+ }
+ pixel_offset -= int(3 * colors_used)
+ } else {
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ }
+ skip_space(ctx, pixel_offset)
+
+ stride := align4(img.width)
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ write(img, x, y, palette[data[x]].bgr) or_return
+ }
+ }
+
+ case 16:
+ bm, read := read_or_make_bit_masks(ctx, info) or_return
+ // Skip optional palette and other data
+ pixel_offset -= read
+ skip_space(ctx, pixel_offset)
+
+ stride := align4(img.width * 2)
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ pixels := mem.slice_data_cast([]u16le, data)
+ for x in 0..<img.width {
+ v := pixels[x]
+ r := scale(v, bm.mask.r, bm.shift.r, bm.bits.r)
+ g := scale(v, bm.mask.g, bm.shift.g, bm.bits.g)
+ b := scale(v, bm.mask.b, bm.shift.b, bm.bits.b)
+ write(img, x, y, RGB_Pixel{r, g, b}) or_return
+ }
+ }
+
+ case 24:
+ // Eat useless palette and other padding
+ skip_space(ctx, pixel_offset)
+
+ stride := align4(img.width * 3)
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ pixels := mem.slice_data_cast([]RGB_Pixel, data)
+ for x in 0..<img.width {
+ write(img, x, y, pixels[x].bgr) or_return
+ }
+ }
+
+ case 32:
+ bm, read := read_or_make_bit_masks(ctx, info) or_return
+ // Skip optional palette and other data
+ pixel_offset -= read
+ skip_space(ctx, pixel_offset)
+
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, img.width * size_of(RGBA_Pixel)) or_return
+ pixels := mem.slice_data_cast([]u32le, data)
+ for x in 0..<img.width {
+ v := pixels[x]
+ r := scale(v, bm.mask.r, bm.shift.r, bm.bits.r)
+ g := scale(v, bm.mask.g, bm.shift.g, bm.bits.g)
+ b := scale(v, bm.mask.b, bm.shift.b, bm.bits.b)
+ write(img, x, y, RGB_Pixel{r, g, b}) or_return
+ }
+ }
+
+ case:
+ return .Unsupported_BPP
+ }
+ return nil
+}
+
+decode_rle :: proc(ctx: ^$C, img: ^Image, info: image.BMP_Header, allocator := context.allocator) -> (err: Error) {
+ pixel_offset := int(info.pixel_offset)
+ pixel_offset -= int(info.info_size) + FILE_HEADER_SIZE
+
+ bytes_needed := size_of(RGB_Pixel) * img.height * img.width
+ if resize(&img.pixels.buf, bytes_needed) != nil {
+ return .Unable_To_Allocate_Or_Resize
+ }
+ out := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ assert(len(out) == img.height * img.width)
+
+ palette: [256]RGBA_Pixel
+
+ switch info.bpp {
+ case 4:
+ colors_used := info.colors_used if info.colors_used > 0 else 16
+ colors_used = min(colors_used, 16)
+
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ pixel_offset -= size_of(RGBA_Pixel)
+ }
+ skip_space(ctx, pixel_offset)
+
+ pixel_size := info.size - info.pixel_offset
+ remaining := compress.input_size(ctx) or_return
+ if remaining < i64(pixel_size) {
+ return .Corrupt
+ }
+
+ data := make([]u8, int(pixel_size) + 4)
+ defer delete(data)
+
+ for i in 0..<pixel_size {
+ data[i] = image.read_u8(ctx) or_return
+ }
+
+ y, x := 0, 0
+ index := 0
+ for {
+ if len(data[index:]) < 2 {
+ return .Corrupt
+ }
+
+ if data[index] > 0 {
+ for count in 0..<data[index] {
+ if count & 1 == 1 {
+ write(img, x, y, palette[(data[index + 1] >> 0) & 0x0f].bgr)
+ } else {
+ write(img, x, y, palette[(data[index + 1] >> 4) & 0x0f].bgr)
+ }
+ x += 1
+ }
+ index += 2
+ } else {
+ switch data[index + 1] {
+ case 0: // EOL
+ x = 0; y += 1
+ index += 2
+ case 1: // EOB
+ return
+ case 2: // MOVE
+ x += int(data[index + 2])
+ y += int(data[index + 3])
+ index += 4
+ case: // Literals
+ run_length := int(data[index + 1])
+ aligned := (align4(run_length) >> 1) + 2
+
+ if index + aligned >= len(data) {
+ return .Corrupt
+ }
+
+ for count in 0..<run_length {
+ val := data[index + 2 + count / 2]
+ if count & 1 == 1 {
+ val &= 0xf
+ } else {
+ val = val >> 4
+ }
+ write(img, x, y, palette[val].bgr)
+ x += 1
+ }
+ index += aligned
+ }
+ }
+ }
+
+ case 8:
+ colors_used := info.colors_used if info.colors_used > 0 else 256
+ colors_used = min(colors_used, 256)
+
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ pixel_offset -= size_of(RGBA_Pixel)
+ }
+ skip_space(ctx, pixel_offset)
+
+ pixel_size := info.size - info.pixel_offset
+ remaining := compress.input_size(ctx) or_return
+ if remaining < i64(pixel_size) {
+ return .Corrupt
+ }
+
+ data := make([]u8, int(pixel_size) + 4)
+ defer delete(data)
+
+ for i in 0..<pixel_size {
+ data[i] = image.read_u8(ctx) or_return
+ }
+
+ y, x := 0, 0
+ index := 0
+ for {
+ if len(data[index:]) < 2 {
+ return .Corrupt
+ }
+
+ if data[index] > 0 {
+ for _ in 0..<data[index] {
+ write(img, x, y, palette[data[index + 1]].bgr)
+ x += 1
+ }
+ index += 2
+ } else {
+ switch data[index + 1] {
+ case 0: // EOL
+ x = 0; y += 1
+ index += 2
+ case 1: // EOB
+ return
+ case 2: // MOVE
+ x += int(data[index + 2])
+ y += int(data[index + 3])
+ index += 4
+ case: // Literals
+ run_length := int(data[index + 1])
+ aligned := align2(run_length) + 2
+
+ if index + aligned >= len(data) {
+ return .Corrupt
+ }
+ for count in 0..<run_length {
+ write(img, x, y, palette[data[index + 2 + count]].bgr)
+ x += 1
+ }
+ index += aligned
+ }
+ }
+ }
+
+ case:
+ return .Unsupported_BPP
+ }
+ return nil
+}
+
+align2 :: proc(width: int) -> (stride: int) {
+ stride = width
+ if width & 1 != 0 {
+ stride += 2 - (width & 1)
+ }
+ return
+}
+
+align4 :: proc(width: int) -> (stride: int) {
+ stride = width
+ if width & 3 != 0 {
+ stride += 4 - (width & 3)
+ }
+ return
+}
+
+skip_space :: proc(ctx: ^$C, bytes_to_skip: int) -> (err: Error) {
+ if bytes_to_skip < 0 {
+ return .Corrupt
+ }
+ for _ in 0..<bytes_to_skip {
+ image.read_u8(ctx) or_return
+ }
+ return
+}
+
+// Cleanup of image-specific data.
+destroy :: proc(img: ^Image) {
+ if img == nil {
+ // Nothing to do. Load must've returned with an error.
+ return
+ }
+
+ bytes.buffer_destroy(&img.pixels)
+ if v, ok := img.metadata.(^image.BMP_Info); ok {
+ free(v)
+ }
+ free(img)
+}
+
+@(init, private)
+_register :: proc() {
+ image.register(.BMP, load_from_bytes, destroy)
+} \ No newline at end of file
diff --git a/core/image/bmp/bmp_js.odin b/core/image/bmp/bmp_js.odin
new file mode 100644
index 000000000..d87a7d2d5
--- /dev/null
+++ b/core/image/bmp/bmp_js.odin
@@ -0,0 +1,4 @@
+//+build js
+package core_image_bmp
+
+load :: proc{load_from_bytes, load_from_context}
diff --git a/core/image/bmp/bmp_os.odin b/core/image/bmp/bmp_os.odin
new file mode 100644
index 000000000..d20abc685
--- /dev/null
+++ b/core/image/bmp/bmp_os.odin
@@ -0,0 +1,34 @@
+//+build !js
+package core_image_bmp
+
+import "core:os"
+import "core:bytes"
+
+load :: proc{load_from_file, load_from_bytes, load_from_context}
+
+load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ context.allocator = allocator
+
+ data, ok := os.read_entire_file(filename)
+ defer delete(data)
+
+ if ok {
+ return load_from_bytes(data, options)
+ } else {
+ return nil, .Unable_To_Read_File
+ }
+}
+
+save :: proc{save_to_buffer, save_to_file}
+
+save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+ context.allocator = allocator
+
+ out := &bytes.Buffer{}
+ defer bytes.buffer_destroy(out)
+
+ save_to_buffer(out, img, options) or_return
+ write_ok := os.write_entire_file(output, out.buf[:])
+
+ return nil if write_ok else .Unable_To_Write_File
+} \ No newline at end of file
diff --git a/core/image/common.odin b/core/image/common.odin
index b576a9521..fed2c1470 100644
--- a/core/image/common.odin
+++ b/core/image/common.odin
@@ -12,6 +12,7 @@ package image
import "core:bytes"
import "core:mem"
+import "core:io"
import "core:compress"
import "base:runtime"
@@ -62,6 +63,7 @@ Image_Metadata :: union #shared_nil {
^PNG_Info,
^QOI_Info,
^TGA_Info,
+ ^BMP_Info,
}
@@ -159,11 +161,13 @@ Error :: union #shared_nil {
Netpbm_Error,
PNG_Error,
QOI_Error,
+ BMP_Error,
compress.Error,
compress.General_Error,
compress.Deflate_Error,
compress.ZLIB_Error,
+ io.Error,
runtime.Allocator_Error,
}
@@ -197,6 +201,128 @@ General_Image_Error :: enum {
}
/*
+ BMP-specific
+*/
+BMP_Error :: enum {
+ None = 0,
+ Invalid_File_Size,
+ Unsupported_BMP_Version,
+ Unsupported_OS2_File,
+ Unsupported_Compression,
+ Unsupported_BPP,
+ Invalid_Stride,
+ Invalid_Color_Count,
+ Implausible_File_Size,
+ Bitfield_Version_Unhandled, // We don't (yet) handle bit fields for this BMP version.
+ Bitfield_Sum_Exceeds_BPP, // Total mask bit count > bpp
+ Bitfield_Overlapped, // Channel masks overlap
+}
+
+// img.metadata is wrapped in a struct in case we need to add to it later
+// without putting it in BMP_Header
+BMP_Info :: struct {
+ info: BMP_Header,
+}
+
+BMP_Magic :: enum u16le {
+ Bitmap = 0x4d42, // 'BM'
+ OS2_Bitmap_Array = 0x4142, // 'BA'
+ OS2_Icon = 0x4349, // 'IC',
+ OS2_Color_Icon = 0x4943, // 'CI'
+ OS2_Pointer = 0x5450, // 'PT'
+ OS2_Color_Pointer = 0x5043, // 'CP'
+}
+
+// See: http://justsolve.archiveteam.org/wiki/BMP#Well-known_versions
+BMP_Version :: enum u32le {
+ OS2_v1 = 12, // BITMAPCOREHEADER (Windows V2 / OS/2 version 1.0)
+ OS2_v2 = 64, // BITMAPCOREHEADER2 (OS/2 version 2.x)
+ V3 = 40, // BITMAPINFOHEADER
+ V4 = 108, // BITMAPV4HEADER
+ V5 = 124, // BITMAPV5HEADER
+
+ ABBR_16 = 16, // Abbreviated
+ ABBR_24 = 24, // ..
+ ABBR_48 = 48, // ..
+ ABBR_52 = 52, // ..
+ ABBR_56 = 56, // ..
+}
+
+BMP_Header :: struct #packed {
+ // File header
+ magic: BMP_Magic,
+ size: u32le,
+ _res1: u16le, // Reserved; must be zero
+ _res2: u16le, // Reserved; must be zero
+ pixel_offset: u32le, // Offset in bytes, from the beginning of BMP_Header to the pixel data
+ // V3
+ info_size: BMP_Version,
+ width: i32le,
+ height: i32le,
+ planes: u16le,
+ bpp: u16le,
+ compression: BMP_Compression,
+ image_size: u32le,
+ pels_per_meter: [2]u32le,
+ colors_used: u32le,
+ colors_important: u32le, // OS2_v2 is equal up to here
+ // V4
+ masks: [4]u32le `fmt:"32b"`,
+ colorspace: BMP_Logical_Color_Space,
+ endpoints: BMP_CIEXYZTRIPLE,
+ gamma: [3]BMP_GAMMA16_16,
+ // V5
+ intent: BMP_Gamut_Mapping_Intent,
+ profile_data: u32le,
+ profile_size: u32le,
+ reserved: u32le,
+}
+#assert(size_of(BMP_Header) == 138)
+
+OS2_Header :: struct #packed {
+ // BITMAPCOREHEADER minus info_size field
+ width: i16le,
+ height: i16le,
+ planes: u16le,
+ bpp: u16le,
+}
+#assert(size_of(OS2_Header) == 8)
+
+BMP_Compression :: enum u32le {
+ RGB = 0x0000,
+ RLE8 = 0x0001,
+ RLE4 = 0x0002,
+ Bit_Fields = 0x0003, // If Windows
+ Huffman1D = 0x0003, // If OS2v2
+ JPEG = 0x0004, // If Windows
+ RLE24 = 0x0004, // If OS2v2
+ PNG = 0x0005,
+ Alpha_Bit_Fields = 0x0006,
+ CMYK = 0x000B,
+ CMYK_RLE8 = 0x000C,
+ CMYK_RLE4 = 0x000D,
+}
+
+BMP_Logical_Color_Space :: enum u32le {
+ CALIBRATED_RGB = 0x00000000,
+ sRGB = 0x73524742, // 'sRGB'
+ WINDOWS_COLOR_SPACE = 0x57696E20, // 'Win '
+}
+
+BMP_FXPT2DOT30 :: u32le
+BMP_CIEXYZ :: [3]BMP_FXPT2DOT30
+BMP_CIEXYZTRIPLE :: [3]BMP_CIEXYZ
+BMP_GAMMA16_16 :: [2]u16le
+
+BMP_Gamut_Mapping_Intent :: enum u32le {
+ INVALID = 0x00000000, // If not V5, this field will just be zero-initialized and not valid.
+ ABS_COLORIMETRIC = 0x00000008,
+ BUSINESS = 0x00000001,
+ GRAPHICS = 0x00000002,
+ IMAGES = 0x00000004,
+}
+
+/*
Netpbm-specific definitions
*/
Netpbm_Format :: enum {
@@ -1133,6 +1259,40 @@ apply_palette_rgba :: proc(img: ^Image, palette: [256]RGBA_Pixel, allocator := c
}
apply_palette :: proc{apply_palette_rgb, apply_palette_rgba}
+blend_single_channel :: #force_inline proc(fg, alpha, bg: $T) -> (res: T) where T == u8 || T == u16 {
+ MAX :: 256 when T == u8 else 65536
+
+ c := u32(fg) * (MAX - u32(alpha)) + u32(bg) * (1 + u32(alpha))
+ return T(c & (MAX - 1))
+}
+
+blend_pixel :: #force_inline proc(fg: [$N]$T, alpha: T, bg: [N]T) -> (res: [N]T) where (T == u8 || T == u16), N >= 1 && N <= 4 {
+ MAX :: 256 when T == u8 else 65536
+
+ when N == 1 {
+ r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
+ return {T(r & (MAX - 1))}
+ }
+ when N == 2 {
+ r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
+ g := u32(fg.g) * (MAX - u32(alpha)) + u32(bg.g) * (1 + u32(alpha))
+ return {T(r & (MAX - 1)), T(g & (MAX - 1))}
+ }
+ when N == 3 || N == 4 {
+ r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
+ g := u32(fg.g) * (MAX - u32(alpha)) + u32(bg.g) * (1 + u32(alpha))
+ b := u32(fg.b) * (MAX - u32(alpha)) + u32(bg.b) * (1 + u32(alpha))
+
+ when N == 3 {
+ return {T(r & (MAX - 1)), T(g & (MAX - 1)), T(b & (MAX - 1))}
+ } else {
+ return {T(r & (MAX - 1)), T(g & (MAX - 1)), T(b & (MAX - 1)), MAX - 1}
+ }
+ }
+ unreachable()
+}
+blend :: proc{blend_single_channel, blend_pixel}
+
// Replicates grayscale values into RGB(A) 8- or 16-bit images as appropriate.
// Returns early with `false` if already an RGB(A) image.
@@ -1245,4 +1405,4 @@ write_bytes :: proc(buf: ^bytes.Buffer, data: []u8) -> (err: compress.General_Er
return .Resize_Failed
}
return nil
-}
+} \ No newline at end of file
diff --git a/core/image/png/png.odin b/core/image/png/png.odin
index 4bb070da8..aa1c5f781 100644
--- a/core/image/png/png.odin
+++ b/core/image/png/png.odin
@@ -597,7 +597,7 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
dsc := depth_scale_table
scale := dsc[info.header.bit_depth]
if scale != 1 {
- key := mem.slice_data_cast([]u16be, c.data)[0] * u16be(scale)
+ key := (^u16be)(raw_data(c.data))^ * u16be(scale)
c.data = []u8{0, u8(key & 255)}
}
}
@@ -735,59 +735,48 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
return {}, .Unable_To_Allocate_Or_Resize
}
- i := 0; j := 0
-
// If we don't have transparency or drop it without applying it, we can do this:
if (!seen_trns || (seen_trns && .alpha_drop_if_present in options && .alpha_premultiply not_in options)) && .alpha_add_if_missing not_in options {
- for h := 0; h < int(img.height); h += 1 {
- for w := 0; w < int(img.width); w += 1 {
- c := _plte.entries[temp.buf[i]]
- t.buf[j ] = c.r
- t.buf[j+1] = c.g
- t.buf[j+2] = c.b
- i += 1; j += 3
- }
+ output := mem.slice_data_cast([]image.RGB_Pixel, t.buf[:])
+ for pal_idx, idx in temp.buf {
+ output[idx] = _plte.entries[pal_idx]
}
} else if add_alpha || .alpha_drop_if_present in options {
- bg := [3]f32{0, 0, 0}
+ bg := PLTE_Entry{0, 0, 0}
if premultiply && seen_bkgd {
c16 := img.background.([3]u16)
- bg = [3]f32{f32(c16.r), f32(c16.g), f32(c16.b)}
+ bg = {u8(c16.r), u8(c16.g), u8(c16.b)}
}
no_alpha := (.alpha_drop_if_present in options || premultiply) && .alpha_add_if_missing not_in options
blend_background := seen_bkgd && .blend_background in options
- for h := 0; h < int(img.height); h += 1 {
- for w := 0; w < int(img.width); w += 1 {
- index := temp.buf[i]
+ if no_alpha {
+ output := mem.slice_data_cast([]image.RGB_Pixel, t.buf[:])
+ for orig, idx in temp.buf {
+ c := _plte.entries[orig]
+ a := int(orig) < len(trns.data) ? trns.data[orig] : 255
- c := _plte.entries[index]
- a := int(index) < len(trns.data) ? trns.data[index] : 255
- alpha := f32(a) / 255.0
+ if blend_background {
+ output[idx] = image.blend(c, a, bg)
+ } else if premultiply {
+ output[idx] = image.blend(PLTE_Entry{}, a, c)
+ }
+ }
+ } else {
+ output := mem.slice_data_cast([]image.RGBA_Pixel, t.buf[:])
+ for orig, idx in temp.buf {
+ c := _plte.entries[orig]
+ a := int(orig) < len(trns.data) ? trns.data[orig] : 255
if blend_background {
- c.r = u8((1.0 - alpha) * bg[0] + f32(c.r) * alpha)
- c.g = u8((1.0 - alpha) * bg[1] + f32(c.g) * alpha)
- c.b = u8((1.0 - alpha) * bg[2] + f32(c.b) * alpha)
+ c = image.blend(c, a, bg)
a = 255
} else if premultiply {
- c.r = u8(f32(c.r) * alpha)
- c.g = u8(f32(c.g) * alpha)
- c.b = u8(f32(c.b) * alpha)
+ c = image.blend(PLTE_Entry{}, a, c)
}
- t.buf[j ] = c.r
- t.buf[j+1] = c.g
- t.buf[j+2] = c.b
- i += 1
-
- if no_alpha {
- j += 3
- } else {
- t.buf[j+3] = u8(a)
- j += 4
- }
+ output[idx] = {c.r, c.g, c.b, u8(a)}
}
}
} else {
@@ -1015,8 +1004,8 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
return {}, .Unable_To_Allocate_Or_Resize
}
- p := mem.slice_data_cast([]u8, temp.buf[:])
- o := mem.slice_data_cast([]u8, t.buf[:])
+ p := temp.buf[:]
+ o := t.buf[:]
switch raw_image_channels {
case 1:
@@ -1627,7 +1616,6 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^image.PNG_IH
return nil
}
-
@(init, private)
_register :: proc() {
image.register(.PNG, load_from_bytes, destroy)
diff --git a/core/log/file_console_logger.odin b/core/log/file_console_logger.odin
index bcce67578..fb968ccb6 100644
--- a/core/log/file_console_logger.odin
+++ b/core/log/file_console_logger.odin
@@ -1,6 +1,7 @@
//+build !freestanding
package log
+import "core:encoding/ansi"
import "core:fmt"
import "core:strings"
import "core:os"
@@ -70,18 +71,10 @@ file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string
backing: [1024]byte //NOTE(Hoej): 1024 might be too much for a header backing, unless somebody has really long paths.
buf := strings.builder_from_bytes(backing[:])
- do_level_header(options, level, &buf)
+ do_level_header(options, &buf, level)
when time.IS_SUPPORTED {
- if Full_Timestamp_Opts & options != nil {
- fmt.sbprint(&buf, "[")
- t := time.now()
- y, m, d := time.date(t)
- h, min, s := time.clock(t)
- if .Date in options { fmt.sbprintf(&buf, "%d-%02d-%02d ", y, m, d) }
- if .Time in options { fmt.sbprintf(&buf, "%02d:%02d:%02d", h, min, s) }
- fmt.sbprint(&buf, "] ")
- }
+ do_time_header(options, &buf, time.now())
}
do_location_header(options, &buf, location)
@@ -99,12 +92,12 @@ file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string
fmt.fprintf(h, "%s%s\n", strings.to_string(buf), text)
}
-do_level_header :: proc(opts: Options, level: Level, str: ^strings.Builder) {
+do_level_header :: proc(opts: Options, str: ^strings.Builder, level: Level) {
- RESET :: "\x1b[0m"
- RED :: "\x1b[31m"
- YELLOW :: "\x1b[33m"
- DARK_GREY :: "\x1b[90m"
+ RESET :: ansi.CSI + ansi.RESET + ansi.SGR
+ RED :: ansi.CSI + ansi.FG_RED + ansi.SGR
+ YELLOW :: ansi.CSI + ansi.FG_YELLOW + ansi.SGR
+ DARK_GREY :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR
col := RESET
switch level {
@@ -125,6 +118,24 @@ do_level_header :: proc(opts: Options, level: Level, str: ^strings.Builder) {
}
}
+do_time_header :: proc(opts: Options, buf: ^strings.Builder, t: time.Time) {
+ when time.IS_SUPPORTED {
+ if Full_Timestamp_Opts & opts != nil {
+ fmt.sbprint(buf, "[")
+ y, m, d := time.date(t)
+ h, min, s := time.clock(t)
+ if .Date in opts {
+ fmt.sbprintf(buf, "%d-%02d-%02d", y, m, d)
+ if .Time in opts {
+ fmt.sbprint(buf, " ")
+ }
+ }
+ if .Time in opts { fmt.sbprintf(buf, "%02d:%02d:%02d", h, min, s) }
+ fmt.sbprint(buf, "] ")
+ }
+ }
+}
+
do_location_header :: proc(opts: Options, buf: ^strings.Builder, location := #caller_location) {
if Location_Header_Opts & opts == nil {
return
diff --git a/core/log/multi_logger.odin b/core/log/multi_logger.odin
index 55c0f1436..96d0f3dbd 100644
--- a/core/log/multi_logger.odin
+++ b/core/log/multi_logger.odin
@@ -12,11 +12,10 @@ create_multi_logger :: proc(logs: ..Logger) -> Logger {
return Logger{multi_logger_proc, data, Level.Debug, nil}
}
-destroy_multi_logger :: proc(log : ^Logger) {
+destroy_multi_logger :: proc(log: Logger) {
data := (^Multi_Logger_Data)(log.data)
delete(data.loggers)
- free(log.data)
- log^ = nil_logger()
+ free(data)
}
multi_logger_proc :: proc(logger_data: rawptr, level: Level, text: string,
diff --git a/core/math/big/combinatorics.odin b/core/math/big/combinatorics.odin
new file mode 100644
index 000000000..87c76d830
--- /dev/null
+++ b/core/math/big/combinatorics.odin
@@ -0,0 +1,60 @@
+package math_big
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be ordered.
+*/
+permutations_with_repetition :: int_pow_int
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be ordered without any repeats.
+*/
+permutations_without_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
+ if n == r {
+ return factorial(dest, n)
+ }
+
+ tmp := &Int{}
+ defer internal_destroy(tmp)
+
+ // n!
+ // --------
+ // (n - r)!
+ factorial(dest, n) or_return
+ factorial(tmp, n - r) or_return
+ div(dest, dest, tmp) or_return
+
+ return
+}
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be chosen.
+
+ Also known as the multiset coefficient or (n multichoose k).
+*/
+combinations_with_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
+ // (n + r - 1)!
+ // ------------
+ // r! (n - 1)!
+ return combinations_without_repetition(dest, n + r - 1, r)
+}
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be chosen without any repeats.
+
+ Also known as the binomial coefficient or (n choose k).
+*/
+combinations_without_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
+ tmp_a, tmp_b := &Int{}, &Int{}
+ defer internal_destroy(tmp_a, tmp_b)
+
+ // n!
+ // ------------
+ // r! (n - r)!
+ factorial(dest, n) or_return
+ factorial(tmp_a, r) or_return
+ factorial(tmp_b, n - r) or_return
+ mul(tmp_a, tmp_a, tmp_b) or_return
+ div(dest, dest, tmp_a) or_return
+
+ return
+}
diff --git a/core/math/big/prime.odin b/core/math/big/prime.odin
index 5e7c02f37..7fc78c7e5 100644
--- a/core/math/big/prime.odin
+++ b/core/math/big/prime.odin
@@ -1188,9 +1188,6 @@ internal_random_prime :: proc(a: ^Int, size_in_bits: int, trials: int, flags :=
flags := flags
trials := trials
- t := &Int{}
- defer internal_destroy(t)
-
/*
Sanity check the input.
*/
diff --git a/core/math/big/radix.odin b/core/math/big/radix.odin
index f4eed879f..a5100e478 100644
--- a/core/math/big/radix.odin
+++ b/core/math/big/radix.odin
@@ -315,6 +315,7 @@ int_atoi :: proc(res: ^Int, input: string, radix := i8(10), allocator := context
atoi :: proc { int_atoi, }
+string_to_int :: int_atoi
/*
We size for `string` by default.
diff --git a/core/math/cmplx/cmplx_trig.odin b/core/math/cmplx/cmplx_trig.odin
index 7ca404fab..15e757506 100644
--- a/core/math/cmplx/cmplx_trig.odin
+++ b/core/math/cmplx/cmplx_trig.odin
@@ -350,7 +350,7 @@ _reduce_pi_f64 :: proc "contextless" (x: f64) -> f64 #no_bounds_check {
// that is, 1/PI = SUM bdpi[i]*2^(-64*i).
// 19 64-bit digits give 1216 bits of precision
// to handle the largest possible f64 exponent.
- @static bdpi := [?]u64{
+ @(static, rodata) bdpi := [?]u64{
0x0000000000000000,
0x517cc1b727220a94,
0xfe13abe8fa9a6ee0,
diff --git a/core/math/linalg/general.odin b/core/math/linalg/general.odin
index 51dfd2360..37c0447cb 100644
--- a/core/math/linalg/general.odin
+++ b/core/math/linalg/general.odin
@@ -3,6 +3,7 @@ package linalg
import "core:math"
import "base:builtin"
import "base:intrinsics"
+import "base:runtime"
// Generic
@@ -223,33 +224,27 @@ quaternion_mul_quaternion :: proc "contextless" (q1, q2: $Q) -> Q where IS_QUATE
@(require_results)
quaternion64_mul_vector3 :: proc "contextless" (q: $Q/quaternion64, v: $V/[3]$F/f16) -> V {
- Raw_Quaternion :: struct {xyz: [3]f16, r: f16}
-
- q := transmute(Raw_Quaternion)q
+ q := transmute(runtime.Raw_Quaternion64_Vector_Scalar)q
v := v
- t := cross(2*q.xyz, v)
- return V(v + q.r*t + cross(q.xyz, t))
+ t := cross(2*q.vector, v)
+ return V(v + q.scalar*t + cross(q.vector, t))
}
@(require_results)
quaternion128_mul_vector3 :: proc "contextless" (q: $Q/quaternion128, v: $V/[3]$F/f32) -> V {
- Raw_Quaternion :: struct {xyz: [3]f32, r: f32}
-
- q := transmute(Raw_Quaternion)q
+ q := transmute(runtime.Raw_Quaternion128_Vector_Scalar)q
v := v
- t := cross(2*q.xyz, v)
- return V(v + q.r*t + cross(q.xyz, t))
+ t := cross(2*q.vector, v)
+ return V(v + q.scalar*t + cross(q.vector, t))
}
@(require_results)
quaternion256_mul_vector3 :: proc "contextless" (q: $Q/quaternion256, v: $V/[3]$F/f64) -> V {
- Raw_Quaternion :: struct {xyz: [3]f64, r: f64}
-
- q := transmute(Raw_Quaternion)q
+ q := transmute(runtime.Raw_Quaternion256_Vector_Scalar)q
v := v
- t := cross(2*q.xyz, v)
- return V(v + q.r*t + cross(q.xyz, t))
+ t := cross(2*q.vector, v)
+ return V(v + q.scalar*t + cross(q.vector, t))
}
quaternion_mul_vector3 :: proc{quaternion64_mul_vector3, quaternion128_mul_vector3, quaternion256_mul_vector3}
diff --git a/core/math/linalg/specific.odin b/core/math/linalg/specific.odin
index 41d0e5344..b841f0610 100644
--- a/core/math/linalg/specific.odin
+++ b/core/math/linalg/specific.odin
@@ -527,7 +527,7 @@ angle_from_quaternion :: proc{
@(require_results)
axis_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> Vector3f16 {
t1 := 1 - q.w*q.w
- if t1 < 0 {
+ if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
@@ -536,7 +536,7 @@ axis_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> Vector3f16
@(require_results)
axis_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> Vector3f32 {
t1 := 1 - q.w*q.w
- if t1 < 0 {
+ if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
@@ -545,7 +545,7 @@ axis_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> Vector3f32
@(require_results)
axis_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> Vector3f64 {
t1 := 1 - q.w*q.w
- if t1 < 0 {
+ if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
diff --git a/core/math/linalg/specific_euler_angles_f16.odin b/core/math/linalg/specific_euler_angles_f16.odin
index bacda163e..1e9ded9ab 100644
--- a/core/math/linalg/specific_euler_angles_f16.odin
+++ b/core/math/linalg/specific_euler_angles_f16.odin
@@ -159,7 +159,7 @@ roll_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> f16 {
@(require_results)
pitch_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> f16 {
- y := 2 * (q.y*q.z + q.w*q.w)
+ y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F16_EPSILON && abs(y) <= F16_EPSILON {
diff --git a/core/math/linalg/specific_euler_angles_f32.odin b/core/math/linalg/specific_euler_angles_f32.odin
index b9957034f..e33b1f095 100644
--- a/core/math/linalg/specific_euler_angles_f32.odin
+++ b/core/math/linalg/specific_euler_angles_f32.odin
@@ -159,7 +159,7 @@ roll_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> f32 {
@(require_results)
pitch_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> f32 {
- y := 2 * (q.y*q.z + q.w*q.w)
+ y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F32_EPSILON && abs(y) <= F32_EPSILON {
diff --git a/core/math/linalg/specific_euler_angles_f64.odin b/core/math/linalg/specific_euler_angles_f64.odin
index 8001d080a..9b5cf4b56 100644
--- a/core/math/linalg/specific_euler_angles_f64.odin
+++ b/core/math/linalg/specific_euler_angles_f64.odin
@@ -159,7 +159,7 @@ roll_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> f64 {
@(require_results)
pitch_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> f64 {
- y := 2 * (q.y*q.z + q.w*q.w)
+ y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F64_EPSILON && abs(y) <= F64_EPSILON {
diff --git a/core/math/math.odin b/core/math/math.odin
index 8d85c2381..3d0ab3c4e 100644
--- a/core/math/math.odin
+++ b/core/math/math.odin
@@ -130,10 +130,10 @@ pow10 :: proc{
@(require_results)
pow10_f16 :: proc "contextless" (n: f16) -> f16 {
- @static pow10_pos_tab := [?]f16{
+ @(static, rodata) pow10_pos_tab := [?]f16{
1e00, 1e01, 1e02, 1e03, 1e04,
}
- @static pow10_neg_tab := [?]f16{
+ @(static, rodata) pow10_neg_tab := [?]f16{
1e-00, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 1e-06, 1e-07,
}
@@ -151,13 +151,13 @@ pow10_f16 :: proc "contextless" (n: f16) -> f16 {
@(require_results)
pow10_f32 :: proc "contextless" (n: f32) -> f32 {
- @static pow10_pos_tab := [?]f32{
+ @(static, rodata) pow10_pos_tab := [?]f32{
1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
1e30, 1e31, 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38,
}
- @static pow10_neg_tab := [?]f32{
+ @(static, rodata) pow10_neg_tab := [?]f32{
1e-00, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 1e-06, 1e-07, 1e-08, 1e-09,
1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 1e-15, 1e-16, 1e-17, 1e-18, 1e-19,
1e-20, 1e-21, 1e-22, 1e-23, 1e-24, 1e-25, 1e-26, 1e-27, 1e-28, 1e-29,
@@ -179,16 +179,16 @@ pow10_f32 :: proc "contextless" (n: f32) -> f32 {
@(require_results)
pow10_f64 :: proc "contextless" (n: f64) -> f64 {
- @static pow10_tab := [?]f64{
+ @(static, rodata) pow10_tab := [?]f64{
1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
1e30, 1e31,
}
- @static pow10_pos_tab32 := [?]f64{
+ @(static, rodata) pow10_pos_tab32 := [?]f64{
1e00, 1e32, 1e64, 1e96, 1e128, 1e160, 1e192, 1e224, 1e256, 1e288,
}
- @static pow10_neg_tab32 := [?]f64{
+ @(static, rodata) pow10_neg_tab32 := [?]f64{
1e-00, 1e-32, 1e-64, 1e-96, 1e-128, 1e-160, 1e-192, 1e-224, 1e-256, 1e-288, 1e-320,
}
@@ -1274,7 +1274,7 @@ binomial :: proc "contextless" (n, k: int) -> int {
@(require_results)
factorial :: proc "contextless" (n: int) -> int {
when size_of(int) == size_of(i64) {
- @static table := [21]int{
+ @(static, rodata) table := [21]int{
1,
1,
2,
@@ -1298,7 +1298,7 @@ factorial :: proc "contextless" (n: int) -> int {
2_432_902_008_176_640_000,
}
} else {
- @static table := [13]int{
+ @(static, rodata) table := [13]int{
1,
1,
2,
diff --git a/core/math/math_gamma.odin b/core/math/math_gamma.odin
index 00d4b7316..9f5a364d3 100644
--- a/core/math/math_gamma.odin
+++ b/core/math/math_gamma.odin
@@ -67,7 +67,7 @@ package math
// masks any imprecision in the polynomial.
@(private="file", require_results)
stirling :: proc "contextless" (x: f64) -> (f64, f64) {
- @(static) gamS := [?]f64{
+ @(static, rodata) gamS := [?]f64{
+7.87311395793093628397e-04,
-2.29549961613378126380e-04,
-2.68132617805781232825e-03,
@@ -103,7 +103,7 @@ gamma_f64 :: proc "contextless" (x: f64) -> f64 {
return false
}
- @(static) gamP := [?]f64{
+ @(static, rodata) gamP := [?]f64{
1.60119522476751861407e-04,
1.19135147006586384913e-03,
1.04213797561761569935e-02,
@@ -112,7 +112,7 @@ gamma_f64 :: proc "contextless" (x: f64) -> f64 {
4.94214826801497100753e-01,
9.99999999999999996796e-01,
}
- @(static) gamQ := [?]f64{
+ @(static, rodata) gamQ := [?]f64{
-2.31581873324120129819e-05,
+5.39605580493303397842e-04,
-4.45641913851797240494e-03,
diff --git a/core/math/math_lgamma.odin b/core/math/math_lgamma.odin
index 0705d8564..828f17178 100644
--- a/core/math/math_lgamma.odin
+++ b/core/math/math_lgamma.odin
@@ -123,7 +123,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
return -x
}
- @static lgamA := [?]f64{
+ @(static, rodata) lgamA := [?]f64{
0h3FB3C467E37DB0C8,
0h3FD4A34CC4A60FAD,
0h3FB13E001A5562A7,
@@ -137,7 +137,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3EFA7074428CFA52,
0h3F07858E90A45837,
}
- @static lgamR := [?]f64{
+ @(static, rodata) lgamR := [?]f64{
1.0,
0h3FF645A762C4AB74,
0h3FE71A1893D3DCDC,
@@ -146,7 +146,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3F497DDACA41A95B,
0h3EDEBAF7A5B38140,
}
- @static lgamS := [?]f64{
+ @(static, rodata) lgamS := [?]f64{
0hBFB3C467E37DB0C8,
0h3FCB848B36E20878,
0h3FD4D98F4F139F59,
@@ -155,7 +155,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3F5E26B67368F239,
0h3F00BFECDD17E945,
}
- @static lgamT := [?]f64{
+ @(static, rodata) lgamT := [?]f64{
0h3FDEF72BC8EE38A2,
0hBFC2E4278DC6C509,
0h3FB08B4294D5419B,
@@ -172,7 +172,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0hBF347F24ECC38C38,
0h3F35FD3EE8C2D3F4,
}
- @static lgamU := [?]f64{
+ @(static, rodata) lgamU := [?]f64{
0hBFB3C467E37DB0C8,
0h3FE4401E8B005DFF,
0h3FF7475CD119BD6F,
@@ -180,7 +180,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3FCD4EAEF6010924,
0h3F8B678BBF2BAB09,
}
- @static lgamV := [?]f64{
+ @(static, rodata) lgamV := [?]f64{
1.0,
0h4003A5D7C2BD619C,
0h40010725A42B18F5,
@@ -188,7 +188,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3FBAAE55D6537C88,
0h3F6A5ABB57D0CF61,
}
- @static lgamW := [?]f64{
+ @(static, rodata) lgamW := [?]f64{
0h3FDACFE390C97D69,
0h3FB555555555553B,
0hBF66C16C16B02E5C,
diff --git a/core/math/math_sincos.odin b/core/math/math_sincos.odin
index 578876ac5..b616f410d 100644
--- a/core/math/math_sincos.odin
+++ b/core/math/math_sincos.odin
@@ -234,7 +234,7 @@ _trig_reduce_f64 :: proc "contextless" (x: f64) -> (j: u64, z: f64) #no_bounds_c
// that is, 4/pi = Sum bd_pi4[i]*2^(-64*i)
// 19 64-bit digits and the leading one bit give 1217 bits
// of precision to handle the largest possible f64 exponent.
- @static bd_pi4 := [?]u64{
+ @(static, rodata) bd_pi4 := [?]u64{
0x0000000000000001,
0x45f306dc9c882a53,
0xf84eafa3ea69bb81,
diff --git a/core/math/rand/exp.odin b/core/math/rand/exp.odin
index 719debe75..ebc849b2f 100644
--- a/core/math/rand/exp.odin
+++ b/core/math/rand/exp.odin
@@ -19,7 +19,7 @@ import "core:math"
exp_float64 :: proc(r: ^Rand = nil) -> f64 {
re :: 7.69711747013104972
- @(static)
+ @(static, rodata)
ke := [256]u32{
0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
@@ -74,7 +74,7 @@ exp_float64 :: proc(r: ^Rand = nil) -> f64 {
0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
0xe6da6ecf,
}
- @(static)
+ @(static, rodata)
we := [256]f32{
2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
@@ -141,7 +141,7 @@ exp_float64 :: proc(r: ^Rand = nil) -> f64 {
1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
}
- @(static)
+ @(static, rodata)
fe := [256]f32{
1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
diff --git a/core/math/rand/normal.odin b/core/math/rand/normal.odin
index f96163fe9..c8681db80 100644
--- a/core/math/rand/normal.odin
+++ b/core/math/rand/normal.odin
@@ -21,7 +21,7 @@ import "core:math"
norm_float64 :: proc(r: ^Rand = nil) -> f64 {
rn :: 3.442619855899
- @(static)
+ @(static, rodata)
kn := [128]u32{
0x76ad2212, 0x00000000, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
@@ -50,7 +50,7 @@ norm_float64 :: proc(r: ^Rand = nil) -> f64 {
0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
0x7ba90bdc, 0x7a722176, 0x77d664e5,
}
- @(static)
+ @(static, rodata)
wn := [128]f32{
1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
@@ -85,7 +85,7 @@ norm_float64 :: proc(r: ^Rand = nil) -> f64 {
1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
}
- @(static)
+ @(static, rodata)
fn := [128]f32{
1.00000000, 0.9635997, 0.9362827, 0.9130436, 0.89228165,
0.87324303, 0.8555006, 0.8387836, 0.8229072, 0.8077383,
diff --git a/core/mem/raw.odin b/core/mem/raw.odin
index 56790e959..f56206957 100644
--- a/core/mem/raw.odin
+++ b/core/mem/raw.odin
@@ -11,12 +11,15 @@ Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array
Raw_Map :: runtime.Raw_Map
Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
-Raw_Complex64 :: struct {real, imag: f32}
-Raw_Complex128 :: struct {real, imag: f64}
-Raw_Quaternion128 :: struct {imag, jmag, kmag: f32, real: f32}
-Raw_Quaternion256 :: struct {imag, jmag, kmag: f64, real: f64}
-Raw_Quaternion128_Vector_Scalar :: struct {vector: [3]f32, scalar: f32}
-Raw_Quaternion256_Vector_Scalar :: struct {vector: [3]f64, scalar: f64}
+Raw_Complex32 :: runtime.Raw_Complex32
+Raw_Complex64 :: runtime.Raw_Complex64
+Raw_Complex128 :: runtime.Raw_Complex128
+Raw_Quaternion64 :: runtime.Raw_Quaternion64
+Raw_Quaternion128 :: runtime.Raw_Quaternion128
+Raw_Quaternion256 :: runtime.Raw_Quaternion256
+Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar
+Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar
+Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar
make_any :: proc "contextless" (data: rawptr, id: typeid) -> any {
return transmute(any)Raw_Any{data, id}
diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin
new file mode 100644
index 000000000..f5e428d87
--- /dev/null
+++ b/core/mem/rollback_stack_allocator.odin
@@ -0,0 +1,341 @@
+package mem
+
+// The Rollback Stack Allocator was designed for the test runner to be fast,
+// able to grow, and respect the Tracking Allocator's requirement for
+// individual frees. It is not overly concerned with fragmentation, however.
+//
+// It has support for expansion when configured with a block allocator and
+// limited support for out-of-order frees.
+//
+// Allocation has constant-time best and usual case performance.
+// At worst, it is linear according to the number of memory blocks.
+//
+// Allocation follows a first-fit strategy when there are multiple memory
+// blocks.
+//
+// Freeing has constant-time best and usual case performance.
+// At worst, it is linear according to the number of memory blocks and number
+// of freed items preceding the last item in a block.
+//
+// Resizing has constant-time performance, if it's the last item in a block, or
+// the new size is smaller. Naturally, this becomes linear-time if there are
+// multiple blocks to search for the pointer's owning block. Otherwise, the
+// allocator defaults to a combined alloc & free operation internally.
+//
+// Out-of-order freeing is accomplished by collapsing a run of freed items
+// from the last allocation backwards.
+//
+// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
+// the requested alignment.
+
+import "base:runtime"
+
+ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
+
+// This limitation is due to the size of `prev_ptr`, but it is only for the
+// head block; any allocation in excess of the allocator's `block_size` is
+// valid, so long as the block allocator can handle it.
+//
+// This is because allocations over the block size are not split up if the item
+// within is freed; they are immediately returned to the block allocator.
+ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
+
+
+Rollback_Stack_Header :: bit_field u64 {
+ prev_offset: uintptr | 32,
+ is_free: bool | 1,
+ prev_ptr: uintptr | 31,
+}
+
+Rollback_Stack_Block :: struct {
+ next_block: ^Rollback_Stack_Block,
+ last_alloc: rawptr,
+ offset: uintptr,
+ buffer: []byte,
+}
+
+Rollback_Stack :: struct {
+ head: ^Rollback_Stack_Block,
+ block_size: int,
+ block_allocator: Allocator,
+}
+
+
+@(private="file", require_results)
+rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
+ start := raw_data(block.buffer)
+ end := start[block.offset:]
+ return start < ptr && ptr <= end
+}
+
+@(private="file", require_results)
+rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
+ parent: ^Rollback_Stack_Block,
+ block: ^Rollback_Stack_Block,
+ header: ^Rollback_Stack_Header,
+ err: Allocator_Error,
+) {
+ for block = stack.head; block != nil; block = block.next_block {
+ if rb_ptr_in_bounds(block, ptr) {
+ header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
+ return
+ }
+ parent = block
+ }
+ return nil, nil, nil, .Invalid_Pointer
+}
+
+@(private="file", require_results)
+rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
+ block: ^Rollback_Stack_Block,
+ header: ^Rollback_Stack_Header,
+ ok: bool,
+) {
+ for block = stack.head; block != nil; block = block.next_block {
+ if block.last_alloc == ptr {
+ header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
+ return block, header, true
+ }
+ }
+ return nil, nil, false
+}
+
+@(private="file")
+rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
+ header := header
+ for block.offset > 0 && header.is_free {
+ block.offset = header.prev_offset
+ block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
+ header = cast(^Rollback_Stack_Header)(raw_data(block.buffer)[header.prev_ptr - size_of(Rollback_Stack_Header):])
+ }
+}
+
+@(private="file", require_results)
+rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
+ parent, block, header := rb_find_ptr(stack, ptr) or_return
+ if header.is_free {
+ return .Invalid_Pointer
+ }
+ header.is_free = true
+ if block.last_alloc == ptr {
+ block.offset = header.prev_offset
+ rb_rollback_block(block, header)
+ }
+ if parent != nil && block.offset == 0 {
+ parent.next_block = block.next_block
+ runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
+ }
+ return nil
+}
+
+@(private="file")
+rb_free_all :: proc(stack: ^Rollback_Stack) {
+ for block := stack.head.next_block; block != nil; /**/ {
+ next_block := block.next_block
+ runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
+ block = next_block
+ }
+
+ stack.head.next_block = nil
+ stack.head.last_alloc = nil
+ stack.head.offset = 0
+}
+
+@(private="file", require_results)
+rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+ if ptr != nil {
+ if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
+ // `block.offset` should never underflow because it is contingent
+ // on `old_size` in the first place, assuming sane arguments.
+ assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
+
+ if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
+ // Prevent singleton allocations from fragmenting by forbidding
+ // them to shrink, removing the possibility of overflow bugs.
+ if len(block.buffer) <= stack.block_size {
+ block.offset += cast(uintptr)size - cast(uintptr)old_size
+ }
+ #no_bounds_check return (cast([^]byte)ptr)[:size], nil
+ }
+ }
+ }
+
+ result = rb_alloc(stack, size, alignment) or_return
+ runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
+ err = rb_free(stack, ptr)
+
+ return
+}
+
+@(private="file", require_results)
+rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+ parent: ^Rollback_Stack_Block
+ for block := stack.head; /**/; block = block.next_block {
+ when !ODIN_DISABLE_ASSERT {
+ allocated_new_block: bool
+ }
+
+ if block == nil {
+ if stack.block_allocator.procedure == nil {
+ return nil, .Out_Of_Memory
+ }
+
+ minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
+ new_block_size := max(minimum_size_required, stack.block_size)
+ block = rb_make_block(new_block_size, stack.block_allocator) or_return
+ parent.next_block = block
+ when !ODIN_DISABLE_ASSERT {
+ allocated_new_block = true
+ }
+ }
+
+ start := raw_data(block.buffer)[block.offset:]
+ padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
+
+ if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
+ when !ODIN_DISABLE_ASSERT {
+ if allocated_new_block {
+ panic("Rollback Stack Allocator allocated a new block but did not use it.")
+ }
+ }
+ parent = block
+ continue
+ }
+
+ header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
+ ptr := start[padding:]
+
+ header^ = {
+ prev_offset = block.offset,
+ prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
+ is_free = false,
+ }
+
+ block.last_alloc = ptr
+ block.offset += padding + cast(uintptr)size
+
+ if len(block.buffer) > stack.block_size {
+ // This block exceeds the allocator's standard block size and is considered a singleton.
+ // Prevent any further allocations on it.
+ block.offset = cast(uintptr)len(block.buffer)
+ }
+
+ #no_bounds_check return ptr[:size], nil
+ }
+
+ return nil, .Out_Of_Memory
+}
+
+@(private="file", require_results)
+rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
+ buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
+
+ block = cast(^Rollback_Stack_Block)raw_data(buffer)
+ #no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
+ return
+}
+
+
+rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
+ MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
+ assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
+
+ block := cast(^Rollback_Stack_Block)raw_data(buffer)
+ block^ = {}
+ #no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
+
+ stack^ = {}
+ stack.head = block
+ stack.block_size = len(block.buffer)
+}
+
+rollback_stack_init_dynamic :: proc(
+ stack: ^Rollback_Stack,
+ block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
+ block_allocator := context.allocator,
+ location := #caller_location,
+) -> Allocator_Error {
+ assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location)
+ when size_of(int) > 4 {
+ // It's impossible to specify an argument in excess when your integer
+ // size is insufficient; check only on platforms with big enough ints.
+ assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
+ }
+
+ block := rb_make_block(block_size, block_allocator) or_return
+
+ stack^ = {}
+ stack.head = block
+ stack.block_size = block_size
+ stack.block_allocator = block_allocator
+
+ return nil
+}
+
+rollback_stack_init :: proc {
+ rollback_stack_init_buffered,
+ rollback_stack_init_dynamic,
+}
+
+rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
+ if stack.block_allocator.procedure != nil {
+ rb_free_all(stack)
+ free(stack.head, stack.block_allocator)
+ }
+ stack^ = {}
+}
+
+@(require_results)
+rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
+ return Allocator {
+ data = stack,
+ procedure = rollback_stack_allocator_proc,
+ }
+}
+
+@(require_results)
+rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr, old_size: int, location := #caller_location,
+) -> (result: []byte, err: Allocator_Error) {
+ stack := cast(^Rollback_Stack)allocator_data
+
+ switch mode {
+ case .Alloc, .Alloc_Non_Zeroed:
+ assert(size >= 0, "Size must be positive or zero.", location)
+ assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
+ result = rb_alloc(stack, size, alignment) or_return
+
+ if mode == .Alloc {
+ zero_slice(result)
+ }
+
+ case .Free:
+ err = rb_free(stack, old_memory)
+
+ case .Free_All:
+ rb_free_all(stack)
+
+ case .Resize, .Resize_Non_Zeroed:
+ assert(size >= 0, "Size must be positive or zero.", location)
+ assert(old_size >= 0, "Old size must be positive or zero.", location)
+ assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
+ result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
+
+ #no_bounds_check if mode == .Resize && size > old_size {
+ zero_slice(result[old_size:])
+ }
+
+ case .Query_Features:
+ set := (^Allocator_Mode_Set)(old_memory)
+ if set != nil {
+ set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
+ }
+ return nil, nil
+
+ case .Query_Info:
+ return nil, .Mode_Not_Implemented
+ }
+
+ return
+}
diff --git a/core/mem/tlsf/LICENSE b/core/mem/tlsf/LICENSE
new file mode 100644
index 000000000..9d668ce02
--- /dev/null
+++ b/core/mem/tlsf/LICENSE
@@ -0,0 +1,36 @@
+Original BSD-3 license:
+
+Two Level Segregated Fit memory allocator, version 3.1.
+Written by Matthew Conte
+ http://tlsf.baisoku.org
+
+Based on the original documentation by Miguel Masmano:
+ http://www.gii.upv.es/tlsf/main/docs
+
+This implementation was written to the specification
+of the document, therefore no GPL restrictions apply.
+
+Copyright (c) 2006-2016, Matthew Conte
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin
new file mode 100644
index 000000000..76ecbb4b1
--- /dev/null
+++ b/core/mem/tlsf/tlsf.odin
@@ -0,0 +1,156 @@
+/*
+ Copyright 2024 Jeroen van Rijn <nom@duclavier.com>.
+ Made available under Odin's BSD-3 license.
+
+ List of contributors:
+ Matt Conte: Original C implementation, see LICENSE file in this package
+ Jeroen van Rijn: Source port
+*/
+
+// package mem_tlsf implements a Two Level Segregated Fit memory allocator.
+package mem_tlsf
+
+import "base:runtime"
+
+Error :: enum byte {
+ None = 0,
+ Invalid_Backing_Allocator = 1,
+ Invalid_Alignment = 2,
+ Backing_Buffer_Too_Small = 3,
+ Backing_Buffer_Too_Large = 4,
+ Backing_Allocator_Error = 5,
+}
+
+
+Allocator :: struct {
+ // Empty lists point at this block to indicate they are free.
+ block_null: Block_Header,
+
+ // Bitmaps for free lists.
+ fl_bitmap: u32 `fmt:"-"`,
+ sl_bitmap: [FL_INDEX_COUNT]u32 `fmt:"-"`,
+
+ // Head of free lists.
+ blocks: [FL_INDEX_COUNT][SL_INDEX_COUNT]^Block_Header `fmt:"-"`,
+
+ // Keep track of pools so we can deallocate them.
+ // If `pool.allocator` is blank, we don't do anything.
+ // We also use this linked list of pools to report
+ // statistics like how much memory is still available,
+ // fragmentation, etc.
+ pool: Pool,
+}
+#assert(size_of(Allocator) % ALIGN_SIZE == 0)
+
+
+
+
+// Produce a `runtime.Allocator` whose procedure dispatches into the
+// given TLSF allocator `t`.
+@(require_results)
+allocator :: proc(t: ^Allocator) -> runtime.Allocator {
+	result := runtime.Allocator{
+		data      = t,
+		procedure = allocator_proc,
+	}
+	return result
+}
+
+// Initialize the TLSF allocator `control` over a caller-provided buffer.
+// The buffer must be `ALIGN_SIZE`-aligned; its usable size (minus pool
+// overhead) must fall within [BLOCK_SIZE_MIN, BLOCK_SIZE_MAX].
+// The caller retains ownership of `buf`; `destroy` will not free it.
+@(require_results)
+init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
+	assert(control != nil)
+	if uintptr(raw_data(buf)) % ALIGN_SIZE != 0 {
+		return .Invalid_Alignment
+	}
+
+	// NOTE(review): `len(buf) - POOL_OVERHEAD` mixes int and uint operands;
+	// for a buffer smaller than POOL_OVERHEAD this wraps around to a huge
+	// value and is then rejected by the Too_Large check — confirm intended.
+	pool_bytes := align_down(len(buf) - POOL_OVERHEAD, ALIGN_SIZE)
+	if pool_bytes < BLOCK_SIZE_MIN {
+		return .Backing_Buffer_Too_Small
+	} else if pool_bytes > BLOCK_SIZE_MAX {
+		return .Backing_Buffer_Too_Large
+	}
+
+	// Point all free lists at the null block, then seed the pool.
+	clear(control)
+	return pool_add(control, buf[:])
+}
+
+// Initialize the TLSF allocator `control` with a pool allocated from
+// `backing`. The backing buffer is owned by `control` and freed by `destroy`.
+// `new_pool_size` is currently unused — presumably intended for growing with
+// additional pools later; TODO confirm.
+@(require_results)
+init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
+	assert(control != nil)
+	pool_bytes := align_up(uint(initial_pool_size) + POOL_OVERHEAD, ALIGN_SIZE)
+	if pool_bytes < BLOCK_SIZE_MIN {
+		return .Backing_Buffer_Too_Small
+	} else if pool_bytes > BLOCK_SIZE_MAX {
+		return .Backing_Buffer_Too_Large
+	}
+
+	buf, backing_err := runtime.make_aligned([]byte, pool_bytes, ALIGN_SIZE, backing)
+	if backing_err != nil {
+		return .Backing_Allocator_Error
+	}
+	if err := init_from_buffer(control, buf); err != nil {
+		// Don't leak the backing allocation on failure, and don't leave
+		// `control.pool` referring to memory we just gave back.
+		runtime.delete(buf, backing)
+		return err
+	}
+	// Record the pool so `destroy` can return it to `backing`.
+	control.pool = Pool{
+		data      = buf,
+		allocator = backing,
+	}
+	return nil
+}
+init :: proc{init_from_buffer, init_from_allocator}
+
+// Tear down the allocator, returning any backing memory we allocated.
+// Blocks are embedded in the backing memory, so there is no need to call
+// `pool_remove` for them; only the `Pool` tracking entities and the
+// backing buffers themselves must be freed.
+destroy :: proc(control: ^Allocator) {
+	if control == nil { return }
+
+	// Additional pools: each tracking struct and its data are heap-allocated.
+	for p := control.pool.next; p != nil; {
+		next := p.next
+
+		// Free the allocation on the backing allocator
+		runtime.delete(p.data, p.allocator)
+		free(p, p.allocator)
+
+		p = next
+	}
+
+	// The first pool is embedded in `control` itself, so only its backing
+	// buffer needs freeing — and only when we allocated it ourselves in
+	// `init_from_allocator` (a blank allocator means a caller-owned buffer
+	// from `init_from_buffer`).
+	if control.pool.allocator.procedure != nil {
+		runtime.delete(control.pool.data, control.pool.allocator)
+	}
+	control.pool = {}
+}
+
+// Implementation of `runtime.Allocator_Proc` dispatching into a TLSF
+// `Allocator`. Sizes/alignments arrive as int and are forwarded as uint.
+allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
+	size, alignment: int,
+	old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, runtime.Allocator_Error) {
+
+	control := (^Allocator)(allocator_data)
+	if control == nil {
+		return nil, .Invalid_Argument
+	}
+
+	switch mode {
+	case .Alloc:
+		return alloc_bytes(control, uint(size), uint(alignment))
+	case .Alloc_Non_Zeroed:
+		return alloc_bytes_non_zeroed(control, uint(size), uint(alignment))
+
+	case .Free:
+		free_with_size(control, old_memory, uint(old_size))
+		return nil, nil
+
+	case .Free_All:
+		// Resets all free lists; does NOT return pooled memory to any backing allocator.
+		clear(control)
+		return nil, nil
+
+	case .Resize:
+		return resize(control, old_memory, uint(old_size), uint(size), uint(alignment))
+
+	case .Resize_Non_Zeroed:
+		return resize_non_zeroed(control, old_memory, uint(old_size), uint(size), uint(alignment))
+
+	case .Query_Features:
+		// Caller passes a ^Allocator_Mode_Set via `old_memory` to receive the feature set.
+		set := (^runtime.Allocator_Mode_Set)(old_memory)
+		if set != nil {
+			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
+		}
+		return nil, nil
+
+	case .Query_Info:
+		return nil, .Mode_Not_Implemented
+	}
+
+	return nil, nil
+} \ No newline at end of file
diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin
new file mode 100644
index 000000000..6f33e516c
--- /dev/null
+++ b/core/mem/tlsf/tlsf_internal.odin
@@ -0,0 +1,738 @@
+/*
+ Copyright 2024 Jeroen van Rijn <nom@duclavier.com>.
+ Made available under Odin's BSD-3 license.
+
+ List of contributors:
+ Matt Conte: Original C implementation, see LICENSE file in this package
+ Jeroen van Rijn: Source port
+*/
+
+
+package mem_tlsf
+
+import "base:intrinsics"
+import "base:runtime"
+// import "core:fmt"
+
+// log2 of number of linear subdivisions of block sizes.
+// Larger values require more memory in the control structure.
+// Values of 4 or 5 are typical.
+TLSF_SL_INDEX_COUNT_LOG2 :: #config(TLSF_SL_INDEX_COUNT_LOG2, 5)
+
+// All allocation sizes and addresses are aligned to 4/8 bytes
+ALIGN_SIZE_LOG2 :: 3 when size_of(uintptr) == 8 else 2
+
+// We can increase this to support larger allocation sizes,
+// at the expense of more overhead in the TLSF structure
+FL_INDEX_MAX :: 32 when size_of(uintptr) == 8 else 30
+#assert(FL_INDEX_MAX < 36)
+
+ALIGN_SIZE :: 1 << ALIGN_SIZE_LOG2
+SL_INDEX_COUNT :: 1 << TLSF_SL_INDEX_COUNT_LOG2
+FL_INDEX_SHIFT :: TLSF_SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2
+FL_INDEX_COUNT :: FL_INDEX_MAX - FL_INDEX_SHIFT + 1
+SMALL_BLOCK_SIZE :: 1 << FL_INDEX_SHIFT
+
+/*
+We support allocations of sizes up to (1 << `FL_INDEX_MAX`) bits.
+However, because we linearly subdivide the second-level lists, and
+our minimum size granularity is 4 bytes, it doesn't make sense to
+create first-level lists for sizes smaller than `SL_INDEX_COUNT` * 4,
+or (1 << (`TLSF_SL_INDEX_COUNT_LOG2` + 2)) bytes, as there we will be
+trying to split size ranges into more slots than we have available.
+Instead, we calculate the minimum threshold size, and place all
+blocks below that size into the 0th first-level list.
+*/
+
+// SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type
+#assert(size_of(uint) * 8 >= SL_INDEX_COUNT)
+
+// Ensure we've properly tuned our sizes.
+#assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
+
+#assert(size_of(Allocator) % ALIGN_SIZE == 0)
+
+Pool :: struct {
+ data: []u8 `fmt:"-"`,
+ allocator: runtime.Allocator,
+ next: ^Pool,
+}
+
+
+/*
+Block header structure.
+
+There are several implementation subtleties involved:
+- The `prev_phys_block` field is only valid if the previous block is free.
+- The `prev_phys_block` field is actually stored at the end of the
+ previous block. It appears at the beginning of this structure only to
+ simplify the implementation.
+- The `next_free` / `prev_free` fields are only valid if the block is free.
+*/
+Block_Header :: struct {
+ prev_phys_block: ^Block_Header,
+ size: uint, // The size of this block, excluding the block header
+
+ // Next and previous free blocks.
+ next_free: ^Block_Header,
+ prev_free: ^Block_Header,
+}
+#assert(offset_of(Block_Header, prev_phys_block) == 0)
+
+/*
+Since block sizes are always at least a multiple of 4, the two least
+significant bits of the size field are used to store the block status:
+- bit 0: whether block is busy or free
+- bit 1: whether previous block is busy or free
+*/
+BLOCK_HEADER_FREE :: uint(1 << 0)
+BLOCK_HEADER_PREV_FREE :: uint(1 << 1)
+
+/*
+The size of the block header exposed to used blocks is the `size` field.
+The `prev_phys_block` field is stored *inside* the previous free block.
+*/
+BLOCK_HEADER_OVERHEAD :: uint(size_of(uint))
+
+POOL_OVERHEAD :: 2 * BLOCK_HEADER_OVERHEAD
+
+// User data starts directly after the size field in a used block.
+BLOCK_START_OFFSET :: offset_of(Block_Header, size) + size_of(Block_Header{}.size)
+
+/*
+A free block must be large enough to store its header minus the size of
+the `prev_phys_block` field, and no larger than the number of addressable
+bits for `FL_INDEX`.
+*/
+BLOCK_SIZE_MIN :: uint(size_of(Block_Header) - size_of(^Block_Header))
+BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
+
+/*
+ TLSF achieves O(1) cost for `alloc` and `free` operations by limiting
+ the search for a free block to a free list of guaranteed size
+ adequate to fulfill the request, combined with efficient free list
+ queries using bitmasks and architecture-specific bit-manipulation
+ routines.
+
+ NOTE: TLSF spec relies on ffs/fls returning value 0..31.
+*/
+
+// Find-first-set: index (0..31) of the least significant set bit,
+// or -1 when `word` is zero.
+@(require_results)
+ffs :: proc "contextless" (word: u32) -> (bit: i32) {
+	if word == 0 {
+		return -1
+	}
+	return i32(intrinsics.count_trailing_zeros(word))
+}
+
+// Find-last-set: index (0..31) of the most significant set bit.
+// NOTE(review): for word == 0, count_leading_zeros returns 32 and the
+// subtraction relies on unsigned wraparound plus the i32 conversion to
+// yield -1 — confirm this is the intended contract.
+@(require_results)
+fls :: proc "contextless" (word: u32) -> (bit: i32) {
+	N :: (size_of(u32) * 8) - 1
+	return i32(N - intrinsics.count_leading_zeros(word))
+}
+
+// Find-last-set for the native `uint` width (31 or 63 max index).
+// Same word==0 wraparound caveat as `fls` above — verify -1 is intended.
+@(require_results)
+fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
+	N :: (size_of(uint) * 8) - 1
+	return i32(N - intrinsics.count_leading_zeros(size))
+}
+
+@(require_results)
+block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
+ return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
+}
+
+block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
+ old_size := block.size
+ block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
+}
+
+@(require_results)
+block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
+ return block_size(block) == 0
+}
+
+@(require_results)
+block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
+ return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
+}
+
+block_set_free :: proc "contextless" (block: ^Block_Header) {
+ block.size |= BLOCK_HEADER_FREE
+}
+
+block_set_used :: proc "contextless" (block: ^Block_Header) {
+ block.size &~= BLOCK_HEADER_FREE
+}
+
+@(require_results)
+block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
+ return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
+}
+
+block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
+ block.size |= BLOCK_HEADER_PREV_FREE
+}
+
+block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
+ block.size &~= BLOCK_HEADER_PREV_FREE
+}
+
+@(require_results)
+block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
+ return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
+}
+
+@(require_results)
+block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) {
+ return rawptr(uintptr(block) + BLOCK_START_OFFSET)
+}
+
+// Return location of next block after block of given size.
+@(require_results)
+offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
+ return (^Block_Header)(uintptr(ptr) + uintptr(size))
+}
+
+@(require_results)
+offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
+ return (^Block_Header)(uintptr(ptr) - uintptr(size))
+}
+
+// Return location of previous block.
+@(require_results)
+block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
+ assert(block_is_prev_free(block), "previous block must be free")
+ return block.prev_phys_block
+}
+
+// Return location of next existing block.
+@(require_results)
+block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
+ return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
+}
+
+// Link a new block with its physical neighbor, return the neighbor.
+@(require_results)
+block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
+ next = block_next(block)
+ next.prev_phys_block = block
+ return
+}
+
+block_mark_as_free :: proc(block: ^Block_Header) {
+ // Link the block to the next block, first.
+ next := block_link_next(block)
+ block_set_prev_free(next)
+ block_set_free(block)
+}
+
+block_mark_as_used :: proc(block: ^Block_Header) {
+ next := block_next(block)
+ block_set_prev_used(next)
+ block_set_used(block)
+}
+
+// Round `x` up to the nearest multiple of `align`, which must be a power of two.
+@(require_results)
+align_up :: proc(x, align: uint) -> (aligned: uint) {
+	assert(0 == (align & (align - 1)), "must align to a power of two")
+	mask := align - 1
+	return (x + mask) &~ mask
+}
+
+// Round `x` down to the nearest multiple of `align`, which must be a power of two.
+@(require_results)
+align_down :: proc(x, align: uint) -> (aligned: uint) {
+	assert(0 == (align & (align - 1)), "must align to a power of two")
+	// Clearing the low bits is equivalent to x - (x & (align - 1)).
+	return x &~ (align - 1)
+}
+
+@(require_results)
+align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
+ assert(0 == (align & (align - 1)), "must align to a power of two")
+ align_mask := uintptr(align) - 1
+ _ptr := uintptr(ptr)
+ _aligned := (_ptr + align_mask) &~ (align_mask)
+ return rawptr(_aligned)
+}
+
+// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
+// Returns 0 both for a zero request and for a request whose aligned size
+// reaches BLOCK_SIZE_MAX — callers must treat 0 as "unsatisfiable" for
+// non-zero input. NOTE(review): despite the comment above, BLOCK_SIZE_MIN
+// is not enforced here — confirm block_trim_free/split guarantee it.
+@(require_results)
+adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
+	if size == 0 {
+		return 0
+	}
+
+	// aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
+	// (The `min` is redundant given the guard, but harmless.)
+	if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
+		adjusted = min(aligned, BLOCK_SIZE_MAX)
+	}
+	return
+}
+
+// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
+// Same as `adjust_request_size`, but reports an oversized request as
+// `.Out_Of_Memory` instead of silently returning 0.
+@(require_results)
+adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
+	if size == 0 {
+		return 0, nil
+	}
+
+	// aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
+	if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
+		adjusted = min(aligned, BLOCK_SIZE_MAX)
+	} else {
+		err = .Out_Of_Memory
+	}
+	return
+}
+
+// TLSF utility functions. In most cases these are direct translations of
+// the documentation in the research paper.
+
+@(optimization_mode="speed", require_results)
+mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
+ if size < SMALL_BLOCK_SIZE {
+ // Store small blocks in first list.
+ sl = i32(size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
+ } else {
+ fl = fls_uint(size)
+ sl = i32(size >> (uint(fl) - TLSF_SL_INDEX_COUNT_LOG2)) ~ (1 << TLSF_SL_INDEX_COUNT_LOG2)
+ fl -= (FL_INDEX_SHIFT - 1)
+ }
+ return
+}
+
+@(optimization_mode="speed", require_results)
+mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
+ rounded = size
+ if size >= SMALL_BLOCK_SIZE {
+ round := uint(1 << (uint(fls_uint(size) - TLSF_SL_INDEX_COUNT_LOG2))) - 1
+ rounded += round
+ }
+ return
+}
+
+// This version rounds up to the next block size (for allocations)
+@(optimization_mode="speed", require_results)
+mapping_search :: proc(size: uint) -> (fl, sl: i32) {
+ return mapping_insert(mapping_round(size))
+}
+
+@(require_results)
+search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
+ // First, search for a block in the list associated with the given fl/sl index.
+ fl := fli^; sl := sli^
+
+ sl_map := control.sl_bitmap[fli^] & (~u32(0) << uint(sl))
+ if sl_map == 0 {
+ // No block exists. Search in the next largest first-level list.
+ fl_map := control.fl_bitmap & (~u32(0) << uint(fl + 1))
+ if fl_map == 0 {
+ // No free blocks available, memory has been exhausted.
+ return {}
+ }
+
+ fl = ffs(fl_map)
+ fli^ = fl
+ sl_map = control.sl_bitmap[fl]
+ }
+ assert(sl_map != 0, "internal error - second level bitmap is null")
+ sl = ffs(sl_map)
+ sli^ = sl
+
+ // Return the first block in the free list.
+ return control.blocks[fl][sl]
+}
+
+// Remove a free block from the free list.
+remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
+ prev := block.prev_free
+ next := block.next_free
+ assert(prev != nil, "prev_free can not be nil")
+ assert(next != nil, "next_free can not be nil")
+ next.prev_free = prev
+ prev.next_free = next
+
+ // If this block is the head of the free list, set new head.
+ if control.blocks[fl][sl] == block {
+ control.blocks[fl][sl] = next
+
+ // If the new head is nil, clear the bitmap
+ if next == &control.block_null {
+ control.sl_bitmap[fl] &~= (u32(1) << uint(sl))
+
+ // If the second bitmap is now empty, clear the fl bitmap
+ if control.sl_bitmap[fl] == 0 {
+ control.fl_bitmap &~= (u32(1) << uint(fl))
+ }
+ }
+ }
+}
+
+// Insert a free block into the free block list.
+insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
+ current := control.blocks[fl][sl]
+ assert(current != nil, "free lists cannot have a nil entry")
+ assert(block != nil, "cannot insert a nil entry into the free list")
+ block.next_free = current
+ block.prev_free = &control.block_null
+ current.prev_free = block
+
+ assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE), "block not properly aligned")
+
+ // Insert the new block at the head of the list, and mark the first- and second-level bitmaps appropriately.
+ control.blocks[fl][sl] = block
+ control.fl_bitmap |= (u32(1) << uint(fl))
+ control.sl_bitmap[fl] |= (u32(1) << uint(sl))
+}
+
+// Remove a given block from the free list.
+block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
+ fl, sl := mapping_insert(block_size(block))
+ remove_free_block(control, block, fl, sl)
+}
+
+// Insert a given block into the free list.
+block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
+ fl, sl := mapping_insert(block_size(block))
+ insert_free_block(control, block, fl, sl)
+}
+
+@(require_results)
+block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
+ return block_size(block) >= size_of(Block_Header) + size
+}
+
+// Split a block into two, the second of which is free.
+@(require_results)
+block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
+ // Calculate the amount of space left in the remaining block.
+ remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
+
+ remain_size := block_size(block) - (size + BLOCK_HEADER_OVERHEAD)
+
+ assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE),
+ "remaining block not aligned properly")
+
+ assert(block_size(block) == remain_size + size + BLOCK_HEADER_OVERHEAD)
+ block_set_size(remaining, remain_size)
+ assert(block_size(remaining) >= BLOCK_SIZE_MIN, "block split with invalid size")
+
+ block_set_size(block, size)
+ block_mark_as_free(remaining)
+
+ return remaining
+}
+
+// Absorb a free block's storage into an adjacent previous free block.
+@(require_results)
+block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
+ assert(!block_is_last(prev), "previous block can't be last")
+ // Note: Leaves flags untouched.
+ prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD
+ _ = block_link_next(prev)
+ return prev
+}
+
+// Merge a just-freed block with an adjacent previous free block.
+@(require_results)
+block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
+ merged = block
+ if (block_is_prev_free(block)) {
+ prev := block_prev(block)
+ assert(prev != nil, "prev physical block can't be nil")
+ assert(block_is_free(prev), "prev block is not free though marked as such")
+ block_remove(control, prev)
+ merged = block_absorb(prev, block)
+ }
+ return merged
+}
+
+// Merge a just-freed block with an adjacent free block.
+@(require_results)
+block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
+ merged = block
+ next := block_next(block)
+ assert(next != nil, "next physical block can't be nil")
+
+ if (block_is_free(next)) {
+ assert(!block_is_last(block), "previous block can't be last")
+ block_remove(control, next)
+ merged = block_absorb(block, next)
+ }
+ return merged
+}
+
+// Trim any trailing block space off the end of a free block, return to pool.
+block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
+ assert(block_is_free(block), "block must be free")
+ if (block_can_split(block, size)) {
+ remaining_block := block_split(block, size)
+ _ = block_link_next(block)
+ block_set_prev_free(remaining_block)
+ block_insert(control, remaining_block)
+ }
+}
+
+// Trim any trailing block space off the end of a used block, return to pool.
+block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
+ assert(!block_is_free(block), "Block must be used")
+ if (block_can_split(block, size)) {
+ // If the next block is free, we must coalesce.
+ remaining_block := block_split(block, size)
+ block_set_prev_used(remaining_block)
+
+ remaining_block = block_merge_next(control, remaining_block)
+ block_insert(control, remaining_block)
+ }
+}
+
+// Trim leading block space, return to pool.
+@(require_results)
+block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
+ remaining = block
+ if block_can_split(block, size) {
+ // We want the 2nd block.
+ remaining = block_split(block, size - BLOCK_HEADER_OVERHEAD)
+ block_set_prev_free(remaining)
+
+ _ = block_link_next(block)
+ block_insert(control, block)
+ }
+ return remaining
+}
+
+@(require_results)
+block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
+ fl, sl: i32
+ if size != 0 {
+ fl, sl = mapping_search(size)
+
+ /*
+ `mapping_search` can futz with the size, so for excessively large sizes it can sometimes wind up
+ with indices that are off the end of the block array. So, we protect against that here,
+ since this is the only call site of `mapping_search`. Note that we don't need to check `sl`,
+ as it comes from a modulo operation that guarantees it's always in range.
+ */
+ if fl < FL_INDEX_COUNT {
+ block = search_suitable_block(control, &fl, &sl)
+ }
+ }
+
+ if block != nil {
+ assert(block_size(block) >= size)
+ remove_free_block(control, block, fl, sl)
+ }
+ return block
+}
+
+@(require_results)
+block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ if block != nil {
+ assert(size != 0, "Size must be non-zero")
+ block_trim_free(control, block, size)
+ block_mark_as_used(block)
+ res = ([^]byte)(block_to_ptr(block))[:size]
+ }
+ return
+}
+
+// Clear control structure and point all empty lists at the null block
+clear :: proc(control: ^Allocator) {
+ control.block_null.next_free = &control.block_null
+ control.block_null.prev_free = &control.block_null
+
+ control.fl_bitmap = 0
+ for i in 0..<FL_INDEX_COUNT {
+ control.sl_bitmap[i] = 0
+ for j in 0..<SL_INDEX_COUNT {
+ control.blocks[i][j] = &control.block_null
+ }
+ }
+}
+
+@(require_results)
+pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
+ assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
+
+ pool_overhead := POOL_OVERHEAD
+ pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
+
+ if pool_bytes < BLOCK_SIZE_MIN {
+ return .Backing_Buffer_Too_Small
+ } else if pool_bytes > BLOCK_SIZE_MAX {
+ return .Backing_Buffer_Too_Large
+ }
+
+ // Create the main free block. Offset the start of the block slightly,
+ // so that the `prev_phys_block` field falls outside of the pool -
+ // it will never be used.
+ block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+ block_set_size(block, pool_bytes)
+ block_set_free(block)
+ block_set_prev_used(block)
+ block_insert(control, block)
+
+ // Split the block to create a zero-size sentinel block
+ next := block_link_next(block)
+ block_set_size(next, 0)
+ block_set_used(next)
+ block_set_prev_free(next)
+ return
+}
+
+pool_remove :: proc(control: ^Allocator, pool: []u8) {
+ block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+ assert(block_is_free(block), "Block should be free")
+ assert(!block_is_free(block_next(block)), "Next block should not be free")
+ assert(block_size(block_next(block)) == 0, "Next block size should be zero")
+
+ fl, sl := mapping_insert(block_size(block))
+ remove_free_block(control, block, fl, sl)
+}
+
+// Allocate `size` bytes aligned to `align`, without zero-initializing them.
+// For alignments above ALIGN_SIZE the request is padded so the returned
+// pointer can be advanced to an aligned address, with the leading gap
+// split off and returned to the free lists.
+@(require_results)
+alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	assert(control != nil)
+	adjust := adjust_request_size(size, ALIGN_SIZE)
+
+	// The gap before the aligned payload must be able to hold a full block header.
+	GAP_MINIMUM :: size_of(Block_Header)
+	size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
+
+	// Only over-allocate when a stricter-than-default alignment was requested.
+	aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
+	// adjust_request_size returns 0 for unsatisfiable (oversized) requests.
+	if aligned_size == 0 && size > 0 {
+		return nil, .Out_Of_Memory
+	}
+
+	block := block_locate_free(control, aligned_size)
+	if block == nil {
+		return nil, .Out_Of_Memory
+	}
+	ptr := block_to_ptr(block)
+	aligned := align_ptr(ptr, align)
+	gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+
+	// A nonzero gap smaller than a header can't be split off; push the
+	// aligned address forward to the next aligned slot that leaves room.
+	if gap != 0 && gap < GAP_MINIMUM {
+		gap_remain := GAP_MINIMUM - gap
+		offset := uintptr(max(gap_remain, align))
+		next_aligned := rawptr(uintptr(aligned) + offset)
+
+		aligned = align_ptr(next_aligned, align)
+
+		gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+	}
+
+	if gap != 0 {
+		assert(gap >= GAP_MINIMUM, "gap size too small")
+		block = block_trim_free_leading(control, block, gap)
+	}
+
+	return block_prepare_used(control, block, adjust)
+}
+
+// Allocate `size` bytes aligned to `align`, zero-initialized.
+@(require_results)
+alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	res, err = alloc_bytes_non_zeroed(control, size, align)
+	// BUG FIX: the condition was inverted (`err != nil`), so successful
+	// allocations were returned with uninitialized contents and the zeroing
+	// only ran on failure (where `res` is nil). Zero on success instead.
+	if err == nil {
+		intrinsics.mem_zero(raw_data(res), len(res))
+	}
+	return
+}
+
+
+// Return `ptr` to the allocator, coalescing with free physical neighbors.
+// Freeing nil is a no-op. A block that is already free trips the assert
+// (double free) in debug builds.
+free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
+	assert(control != nil)
+	// `size` is currently ignored — the block header records the real size.
+	if ptr == nil {
+		return
+	}
+
+	block := block_from_ptr(ptr)
+	assert(!block_is_free(block), "block already marked as free") // double free
+	block_mark_as_free(block)
+	// Merge with both neighbors before reinserting to limit fragmentation.
+	block = block_merge_prev(control, block)
+	block = block_merge_next(control, block)
+	block_insert(control, block)
+}
+
+
+// Resize the allocation at `ptr` to `new_size` bytes, zeroing any newly
+// exposed tail. nil ptr behaves like alloc; new_size == 0 behaves like free.
+// Grows in place when the next physical block is free and large enough,
+// otherwise allocates fresh memory, copies, and frees the old block.
+@(require_results)
+resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	assert(control != nil)
+	if ptr != nil && new_size == 0 {
+		free_with_size(control, ptr, old_size)
+		return
+	} else if ptr == nil {
+		return alloc_bytes(control, new_size, alignment)
+	}
+
+	block := block_from_ptr(ptr)
+	next := block_next(block)
+
+	// `combined` is the size available if we absorb the next (free) block.
+	curr_size := block_size(block)
+	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+	assert(!block_is_free(block), "block already marked as free") // double free
+
+	// Number of bytes whose contents must survive the resize.
+	min_size := min(curr_size, new_size, old_size)
+
+	// Can't grow in place: relocate via a fresh allocation.
+	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+		res = alloc_bytes(control, new_size, alignment) or_return
+		if res != nil {
+			copy(res, ([^]byte)(ptr)[:min_size])
+			free_with_size(control, ptr, curr_size)
+		}
+		return
+	}
+	// Grow in place by absorbing the free next block.
+	if adjust > curr_size {
+		_ = block_merge_next(control, block)
+		block_mark_as_used(block)
+	}
+
+	// Give back any excess and hand out the (possibly shrunk) block.
+	block_trim_used(control, block, adjust)
+	res = ([^]byte)(ptr)[:new_size]
+
+	// Zero the newly exposed tail (the non-zeroed variant skips this step).
+	if min_size < new_size {
+		to_zero := ([^]byte)(ptr)[min_size:new_size]
+		runtime.mem_zero(raw_data(to_zero), len(to_zero))
+	}
+	return
+}
+
+// Resize the allocation at `ptr` to `new_size` bytes without zeroing any
+// newly exposed tail. Otherwise identical in structure to `resize` above.
+// nil ptr behaves like alloc; new_size == 0 behaves like free.
+@(require_results)
+resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+	assert(control != nil)
+	if ptr != nil && new_size == 0 {
+		free_with_size(control, ptr, old_size)
+		return
+	} else if ptr == nil {
+		return alloc_bytes_non_zeroed(control, new_size, alignment)
+	}
+
+	block := block_from_ptr(ptr)
+	next := block_next(block)
+
+	// `combined` is the size available if we absorb the next (free) block.
+	curr_size := block_size(block)
+	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+	assert(!block_is_free(block), "block already marked as free") // double free
+
+	// Number of bytes whose contents must survive the resize.
+	min_size := min(curr_size, new_size, old_size)
+
+	// Can't grow in place: relocate via a fresh allocation.
+	// NOTE(review): this frees with `old_size` where `resize` uses
+	// `curr_size`; harmless today since free_with_size ignores the size.
+	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+		res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
+		if res != nil {
+			copy(res, ([^]byte)(ptr)[:min_size])
+			free_with_size(control, ptr, old_size)
+		}
+		return
+	}
+
+	// Grow in place by absorbing the free next block.
+	if adjust > curr_size {
+		_ = block_merge_next(control, block)
+		block_mark_as_used(block)
+	}
+
+	// Give back any excess and hand out the (possibly shrunk) block.
+	block_trim_used(control, block, adjust)
+	res = ([^]byte)(ptr)[:new_size]
+	return
+} \ No newline at end of file
diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin
index bc624617d..1b57e5fb4 100644
--- a/core/mem/tracking_allocator.odin
+++ b/core/mem/tracking_allocator.odin
@@ -47,6 +47,7 @@ tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
}
+// Clear only the current allocation data while keeping the totals intact.
tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -55,6 +56,19 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_unlock(&t.mutex)
}
+// Reset all of a Tracking Allocator's allocation data back to zero.
+tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
+ sync.mutex_lock(&t.mutex)
+ clear(&t.allocation_map)
+ clear(&t.bad_free_array)
+ t.total_memory_allocated = 0
+ t.total_allocation_count = 0
+ t.total_memory_freed = 0
+ t.total_free_count = 0
+ t.peak_memory_allocated = 0
+ t.current_memory_allocated = 0
+ sync.mutex_unlock(&t.mutex)
+}
@(require_results)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
diff --git a/core/odin/ast/ast.odin b/core/odin/ast/ast.odin
index be541befa..229f03d3d 100644
--- a/core/odin/ast/ast.odin
+++ b/core/odin/ast/ast.odin
@@ -538,7 +538,7 @@ Foreign_Import_Decl :: struct {
import_tok: tokenizer.Token,
name: ^Ident,
collection_name: string,
- fullpaths: []string,
+ fullpaths: []^Expr,
comment: ^Comment_Group,
}
@@ -753,7 +753,7 @@ Array_Type :: struct {
using node: Expr,
open: tokenizer.Pos,
tag: ^Expr,
- len: ^Expr, // Ellipsis node for [?]T arrray types, nil for slice types
+ len: ^Expr, // Ellipsis node for [?]T array types, nil for slice types
close: tokenizer.Pos,
elem: ^Expr,
}
diff --git a/core/odin/ast/clone.odin b/core/odin/ast/clone.odin
index bca740dd4..b0a1673b2 100644
--- a/core/odin/ast/clone.odin
+++ b/core/odin/ast/clone.odin
@@ -278,7 +278,9 @@ clone_node :: proc(node: ^Node) -> ^Node {
r.foreign_library = clone(r.foreign_library)
r.body = clone(r.body)
case ^Foreign_Import_Decl:
+ r.attributes = clone_dynamic_array(r.attributes)
r.name = auto_cast clone(r.name)
+ r.fullpaths = clone_array(r.fullpaths)
case ^Proc_Group:
r.args = clone(r.args)
case ^Attribute:
diff --git a/core/odin/ast/walk.odin b/core/odin/ast/walk.odin
index 63107a2e2..7304f237c 100644
--- a/core/odin/ast/walk.odin
+++ b/core/odin/ast/walk.odin
@@ -320,6 +320,7 @@ walk :: proc(v: ^Visitor, node: ^Node) {
if n.comment != nil {
walk(v, n.comment)
}
+ walk_expr_list(v, n.fullpaths)
case ^Proc_Group:
walk_expr_list(v, n.args)
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index e32fbdced..6b0aa2888 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -1190,12 +1190,12 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
error(p, name.pos, "illegal foreign import name: '_'")
}
- fullpaths: [dynamic]string
+ fullpaths: [dynamic]^ast.Expr
if allow_token(p, .Open_Brace) {
for p.curr_tok.kind != .Close_Brace &&
p.curr_tok.kind != .EOF {
- path := expect_token(p, .String)
- append(&fullpaths, path.text)
+ path := parse_expr(p, false)
+ append(&fullpaths, path)
allow_token(p, .Comma) or_break
}
@@ -1203,7 +1203,9 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
} else {
path := expect_token(p, .String)
reserve(&fullpaths, 1)
- append(&fullpaths, path.text)
+ bl := ast.new(ast.Basic_Lit, path.pos, end_pos(path))
+ bl.tok = path
+ append(&fullpaths, bl)
}
if len(fullpaths) == 0 {
@@ -1453,7 +1455,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
case "unroll":
return parse_unrolled_for_loop(p, tag)
case "reverse":
- stmt := parse_for_stmt(p)
+ stmt := parse_stmt(p)
if range, is_range := stmt.derived.(^ast.Range_Stmt); is_range {
if range.reverse {
@@ -3513,6 +3515,25 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
case op.kind == .Colon:
expect_token_after(p, .Colon, "identifier list")
if .Label in flags && len(lhs) == 1 {
+ is_partial := false
+ is_reverse := false
+
+ partial_token: tokenizer.Token
+ if p.curr_tok.kind == .Hash {
+ name := peek_token(p)
+ if name.kind == .Ident && name.text == "partial" &&
+ peek_token(p, 1).kind == .Switch {
+ partial_token = expect_token(p, .Hash)
+ expect_token(p, .Ident)
+ is_partial = true
+ } else if name.kind == .Ident && name.text == "reverse" &&
+ peek_token(p, 1).kind == .For {
+ partial_token = expect_token(p, .Hash)
+ expect_token(p, .Ident)
+ is_reverse = true
+ }
+ }
+
#partial switch p.curr_tok.kind {
case .Open_Brace, .If, .For, .Switch:
label := lhs[0]
@@ -3527,6 +3548,22 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
case ^ast.Type_Switch_Stmt: n.label = label
case ^ast.Range_Stmt: n.label = label
}
+
+ if is_partial {
+ #partial switch n in stmt.derived_stmt {
+ case ^ast.Switch_Stmt: n.partial = true
+ case ^ast.Type_Switch_Stmt: n.partial = true
+ case:
+ error(p, partial_token.pos, "incorrect use of directive, use '%s: #partial switch'", partial_token.text)
+ }
+ }
+ if is_reverse {
+ #partial switch n in stmt.derived_stmt {
+ case ^ast.Range_Stmt: n.reverse = true
+ case:
+ error(p, partial_token.pos, "incorrect use of directive, use '%s: #reverse for'", partial_token.text)
+ }
+ }
}
return stmt
diff --git a/core/os/dir_windows.odin b/core/os/dir_windows.odin
index 491507313..9ca78948e 100644
--- a/core/os/dir_windows.odin
+++ b/core/os/dir_windows.odin
@@ -87,8 +87,12 @@ read_dir :: proc(fd: Handle, n: int, allocator := context.allocator) -> (fi: []F
find_data := &win32.WIN32_FIND_DATAW{}
find_handle := win32.FindFirstFileW(raw_data(wpath_search), find_data)
+ if find_handle == win32.INVALID_HANDLE_VALUE {
+ err = Errno(win32.GetLastError())
+ return dfi[:], err
+ }
defer win32.FindClose(find_handle)
- for n != 0 && find_handle != nil {
+ for n != 0 {
fi: File_Info
fi = find_data_to_file_info(path, find_data)
if fi.name != "" {
diff --git a/core/os/os2/internal_util.odin b/core/os/os2/internal_util.odin
index 59d845350..e26cf7439 100644
--- a/core/os/os2/internal_util.odin
+++ b/core/os/os2/internal_util.odin
@@ -111,7 +111,7 @@ next_random :: proc(r: ^[2]u64) -> u64 {
@(require_results)
random_string :: proc(buf: []byte) -> string {
- @static digits := "0123456789"
+ @(static, rodata) digits := "0123456789"
u := next_random(&random_string_seed)
diff --git a/core/os/os_darwin.odin b/core/os/os_darwin.odin
index a688e1ac3..877a90bf1 100644
--- a/core/os/os_darwin.odin
+++ b/core/os/os_darwin.odin
@@ -442,7 +442,7 @@ F_GETPATH :: 50 // return the full path of the fd
foreign libc {
@(link_name="__error") __error :: proc() -> ^c.int ---
- @(link_name="open") _unix_open :: proc(path: cstring, flags: i32, mode: u16) -> Handle ---
+ @(link_name="open") _unix_open :: proc(path: cstring, flags: i32, #c_vararg args: ..any) -> Handle ---
@(link_name="close") _unix_close :: proc(handle: Handle) -> c.int ---
@(link_name="read") _unix_read :: proc(handle: Handle, buffer: rawptr, count: c.size_t) -> int ---
@(link_name="write") _unix_write :: proc(handle: Handle, buffer: rawptr, count: c.size_t) -> int ---
diff --git a/core/os/os_freebsd.odin b/core/os/os_freebsd.odin
index cdd44d301..8fe179478 100644
--- a/core/os/os_freebsd.odin
+++ b/core/os/os_freebsd.odin
@@ -112,15 +112,15 @@ EOWNERDEAD: Errno : 96
O_RDONLY :: 0x00000
O_WRONLY :: 0x00001
O_RDWR :: 0x00002
-O_CREATE :: 0x00040
-O_EXCL :: 0x00080
-O_NOCTTY :: 0x00100
-O_TRUNC :: 0x00200
-O_NONBLOCK :: 0x00800
-O_APPEND :: 0x00400
-O_SYNC :: 0x01000
-O_ASYNC :: 0x02000
-O_CLOEXEC :: 0x80000
+O_NONBLOCK :: 0x00004
+O_APPEND :: 0x00008
+O_ASYNC :: 0x00040
+O_SYNC :: 0x00080
+O_CREATE :: 0x00200
+O_TRUNC :: 0x00400
+O_EXCL :: 0x00800
+O_NOCTTY :: 0x08000
+O_CLOEXEC :: 0100000
SEEK_DATA :: 3
@@ -140,6 +140,8 @@ RTLD_NOLOAD :: 0x02000
MAX_PATH :: 1024
+KINFO_FILE_SIZE :: 1392
+
args := _alloc_command_line_arguments()
Unix_File_Time :: struct {
@@ -191,6 +193,21 @@ OS_Stat :: struct {
lspare: [10]u64,
}
+KInfo_File :: struct {
+ structsize: c.int,
+ type: c.int,
+ fd: c.int,
+ ref_count: c.int,
+ flags: c.int,
+ pad0: c.int,
+ offset: i64,
+
+ // NOTE(Feoramund): This field represents a complicated union that I am
+ // avoiding implementing for now. I only need the path data below.
+ _union: [336]byte,
+
+ path: [MAX_PATH]c.char,
+}
// since FreeBSD v12
Dirent :: struct {
@@ -254,6 +271,8 @@ X_OK :: 1 // Test for execute permission
W_OK :: 2 // Test for write permission
R_OK :: 4 // Test for read permission
+F_KINFO :: 22
+
foreign libc {
@(link_name="__error") __errno_location :: proc() -> ^c.int ---
@@ -274,6 +293,7 @@ foreign libc {
@(link_name="unlink") _unix_unlink :: proc(path: cstring) -> c.int ---
@(link_name="rmdir") _unix_rmdir :: proc(path: cstring) -> c.int ---
@(link_name="mkdir") _unix_mkdir :: proc(path: cstring, mode: mode_t) -> c.int ---
+ @(link_name="fcntl") _unix_fcntl :: proc(fd: Handle, cmd: c.int, arg: uintptr) -> c.int ---
@(link_name="fdopendir") _unix_fdopendir :: proc(fd: Handle) -> Dir ---
@(link_name="closedir") _unix_closedir :: proc(dirp: Dir) -> c.int ---
@@ -365,7 +385,7 @@ seek :: proc(fd: Handle, offset: i64, whence: int) -> (i64, Errno) {
}
file_size :: proc(fd: Handle) -> (i64, Errno) {
- s, err := fstat(fd)
+ s, err := _fstat(fd)
if err != ERROR_NONE {
return -1, err
}
@@ -591,9 +611,26 @@ _readlink :: proc(path: string) -> (string, Errno) {
return "", Errno{}
}
-// XXX FreeBSD
absolute_path_from_handle :: proc(fd: Handle) -> (string, Errno) {
- return "", Errno(ENOSYS)
+ // NOTE(Feoramund): The situation isn't ideal, but this was the best way I
+ // could find to implement this. There are a couple outstanding bug reports
+ // regarding the desire to retrieve an absolute path from a handle, but to
+ // my knowledge, there hasn't been any work done on it.
+ //
+ // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=198570
+ //
+ // This may be unreliable, according to a comment from 2023.
+
+ kinfo: KInfo_File
+ kinfo.structsize = KINFO_FILE_SIZE
+
+ res := _unix_fcntl(fd, F_KINFO, cast(uintptr)&kinfo)
+ if res == -1 {
+ return "", Errno(get_last_error())
+ }
+
+ path := strings.clone_from_cstring_bounded(cast(cstring)&kinfo.path[0], len(kinfo.path))
+ return path, ERROR_NONE
}
absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
diff --git a/core/os/os_netbsd.odin b/core/os/os_netbsd.odin
index e8e551340..c0f237bf5 100644
--- a/core/os/os_netbsd.odin
+++ b/core/os/os_netbsd.odin
@@ -5,7 +5,6 @@ foreign import libc "system:c"
import "base:runtime"
import "core:strings"
-import "core:sys/unix"
import "core:c"
Handle :: distinct i32
@@ -328,6 +327,11 @@ foreign dl {
@(link_name="dlerror") _unix_dlerror :: proc() -> cstring ---
}
+@(private)
+foreign libc {
+ _lwp_self :: proc() -> i32 ---
+}
+
// NOTE(phix): Perhaps share the following functions with FreeBSD if they turn out to be the same in the end.
is_path_separator :: proc(r: rune) -> bool {
@@ -721,7 +725,7 @@ exit :: proc "contextless" (code: int) -> ! {
}
current_thread_id :: proc "contextless" () -> int {
- return cast(int) unix.pthread_self()
+ return int(_lwp_self())
}
dlopen :: proc(filename: string, flags: int) -> rawptr {
diff --git a/core/path/filepath/path_unix.odin b/core/path/filepath/path_unix.odin
index a4b27b027..b44a6a344 100644
--- a/core/path/filepath/path_unix.odin
+++ b/core/path/filepath/path_unix.odin
@@ -56,7 +56,7 @@ foreign libc {
@(link_name="free") _unix_free :: proc(ptr: rawptr) ---
}
-when ODIN_OS == .Darwin {
+when ODIN_OS == .Darwin || ODIN_OS == .FreeBSD {
@(private)
foreign libc {
@(link_name="__error") __error :: proc() -> ^i32 ---
diff --git a/core/simd/x86/aes.odin b/core/simd/x86/aes.odin
new file mode 100644
index 000000000..3a32de0d6
--- /dev/null
+++ b/core/simd/x86/aes.odin
@@ -0,0 +1,49 @@
+//+build i386, amd64
+package simd_x86
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesdec :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesdec(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesdeclast :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesdeclast(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesenc :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesenc(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesenclast :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesenclast(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesimc :: #force_inline proc "c" (a: __m128i) -> __m128i {
+ return aesimc(a)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aeskeygenassist :: #force_inline proc "c" (a: __m128i, $IMM8: u8) -> __m128i {
+ return aeskeygenassist(a, u8(IMM8))
+}
+
+
+@(private, default_calling_convention = "none")
+foreign _ {
+ @(link_name = "llvm.x86.aesni.aesdec")
+ aesdec :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesdeclast")
+ aesdeclast :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesenc")
+ aesenc :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesenclast")
+ aesenclast :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesimc")
+ aesimc :: proc(a: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aeskeygenassist")
+ aeskeygenassist :: proc(a: __m128i, imm8: u8) -> __m128i ---
+}
diff --git a/core/slice/permute.odin b/core/slice/permute.odin
new file mode 100644
index 000000000..42b6d4129
--- /dev/null
+++ b/core/slice/permute.odin
@@ -0,0 +1,105 @@
+package slice
+
+import "base:runtime"
+
+// An in-place permutation iterator.
+Permutation_Iterator :: struct($T: typeid) {
+ index: int,
+ slice: []T,
+ counters: []int,
+}
+
+/*
+Make an iterator to permute a slice in-place.
+
+*Allocates Using Provided Allocator*
+
+This procedure allocates some state to assist in permutation and does not make
+a copy of the underlying slice. If you want to permute a slice without altering
+the underlying data, use `clone` to create a copy, then permute that instead.
+
+Inputs:
+- slice: The slice to permute.
+- allocator: (default is context.allocator)
+
+Returns:
+- iter: The iterator, to be passed to `permute`.
+- error: An `Allocator_Error`, if allocation failed.
+*/
+make_permutation_iterator :: proc(
+ slice: []$T,
+ allocator := context.allocator,
+) -> (
+ iter: Permutation_Iterator(T),
+ error: runtime.Allocator_Error,
+) #optional_allocator_error {
+ iter.slice = slice
+ iter.counters = make([]int, len(iter.slice), allocator) or_return
+
+ return
+}
+/*
+Free the state allocated by `make_permutation_iterator`.
+
+Inputs:
+- iter: The iterator created by `make_permutation_iterator`.
+- allocator: The allocator used to create the iterator. (default is context.allocator)
+*/
+destroy_permutation_iterator :: proc(
+ iter: Permutation_Iterator($T),
+ allocator := context.allocator,
+) {
+ delete(iter.counters, allocator = allocator)
+}
+/*
+Permute a slice in-place.
+
+Note that the first iteration will always be the original, unpermuted slice.
+
+Inputs:
+- iter: The iterator created by `make_permutation_iterator`.
+
+Returns:
+- ok: True if the permutation succeeded, false if the iteration is complete.
+*/
+permute :: proc(iter: ^Permutation_Iterator($T)) -> (ok: bool) {
+ // This is an iterative, resumable implementation of Heap's algorithm.
+ //
+ // The original algorithm was described by B. R. Heap as "Permutations by
+ // interchanges" in The Computer Journal, 1963.
+ //
+ // This implementation is based on the nonrecursive version described by
+ // Robert Sedgewick in "Permutation Generation Methods" which was published
+ // in ACM Computing Surveys in 1977.
+
+ i := iter.index
+
+ if i == 0 {
+ iter.index = 1
+ return true
+ }
+
+ n := len(iter.counters)
+ #no_bounds_check for i < n {
+ if iter.counters[i] < i {
+ if i & 1 == 0 {
+ iter.slice[0], iter.slice[i] = iter.slice[i], iter.slice[0]
+ } else {
+ iter.slice[iter.counters[i]], iter.slice[i] = iter.slice[i], iter.slice[iter.counters[i]]
+ }
+
+ iter.counters[i] += 1
+ i = 1
+
+ break
+ } else {
+ iter.counters[i] = 0
+ i += 1
+ }
+ }
+ if i == n {
+ return false
+ }
+ iter.index = i
+ return true
+}
diff --git a/core/strconv/generic_float.odin b/core/strconv/generic_float.odin
index 6dc11c0be..b049f0fe1 100644
--- a/core/strconv/generic_float.odin
+++ b/core/strconv/generic_float.odin
@@ -375,7 +375,7 @@ decimal_to_float_bits :: proc(d: ^decimal.Decimal, info: ^Float_Info) -> (b: u64
return
}
- @static power_table := [?]int{1, 3, 6, 9, 13, 16, 19, 23, 26}
+ @(static, rodata) power_table := [?]int{1, 3, 6, 9, 13, 16, 19, 23, 26}
exp = 0
for d.decimal_point > 0 {
diff --git a/core/strconv/strconv.odin b/core/strconv/strconv.odin
index 94842617e..902f1cdc5 100644
--- a/core/strconv/strconv.odin
+++ b/core/strconv/strconv.odin
@@ -835,17 +835,21 @@ Example:
n, _, ok = strconv.parse_f64_prefix("12.34e2")
fmt.printfln("%.3f %v", n, ok)
+
+ n, _, ok = strconv.parse_f64_prefix("13.37 hellope")
+ fmt.printfln("%.3f %v", n, ok)
}
Output:
0.000 false
1234.000 true
+ 13.370 true
**Returns**
- value: The parsed 64-bit floating point number.
- nr: The length of the parsed substring.
-- ok: `false` if a base 10 float could not be found, or if the input string contained more than just the number.
+- ok: `false` if a base 10 float could not be found
*/
parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
common_prefix_len_ignore_case :: proc "contextless" (s, prefix: string) -> int {
@@ -878,13 +882,16 @@ parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
s = s[1:]
fallthrough
case 'i', 'I':
- n = common_prefix_len_ignore_case(s, "infinity")
- if 3 < n && n < 8 { // "inf" or "infinity"
- n = 3
- }
- if n == 3 || n == 8 {
+ m := common_prefix_len_ignore_case(s, "infinity")
+ if 3 <= m && m < 9 { // "inf" to "infinity"
f = 0h7ff00000_00000000 if sign == 1 else 0hfff00000_00000000
- n = nsign + 3
+ if m == 8 {
+ // We only count the entire prefix if it is precisely "infinity".
+ n = nsign + m
+ } else {
+ // The string was either only "inf" or incomplete.
+ n = nsign + 3
+ }
ok = true
return
}
@@ -1088,7 +1095,7 @@ parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
}
trunc_block: if !trunc {
- @static pow10 := [?]f64{
+ @(static, rodata) pow10 := [?]f64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22,
@@ -1124,6 +1131,275 @@ parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
ok = !overflow
return
}
+/*
+Parses a 128-bit complex number from a string
+
+**Inputs**
+- str: The input string containing a 128-bit complex number.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_complex128_example :: proc() {
+ n: int
+ c, ok := strconv.parse_complex128("3+1i", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+
+ c, ok = strconv.parse_complex128("5+7i hellope", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+ }
+
+Output:
+
+ 3+1i 4 true
+ 5+7i 4 false
+
+**Returns**
+- value: The parsed 128-bit complex number.
+- ok: `false` if a complex number could not be found, or if the input string contained more than just the number.
+*/
+parse_complex128 :: proc(str: string, n: ^int = nil) -> (value: complex128, ok: bool) {
+ real_value, imag_value: f64
+ nr_r, nr_i: int
+
+ real_value, nr_r, _ = parse_f64_prefix(str)
+ imag_value, nr_i, _ = parse_f64_prefix(str[nr_r:])
+
+ i_parsed := len(str) >= nr_r + nr_i + 1 && str[nr_r + nr_i] == 'i'
+ if !i_parsed {
+ // No `i` means we refuse to treat the second float we parsed as an
+ // imaginary value.
+ imag_value = 0
+ nr_i = 0
+ }
+
+ ok = i_parsed && len(str) == nr_r + nr_i + 1
+
+ if n != nil {
+ n^ = nr_r + nr_i + (1 if i_parsed else 0)
+ }
+
+ value = complex(real_value, imag_value)
+ return
+}
+/*
+Parses a 64-bit complex number from a string
+
+**Inputs**
+- str: The input string containing a 64-bit complex number.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_complex64_example :: proc() {
+ n: int
+ c, ok := strconv.parse_complex64("3+1i", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+
+ c, ok = strconv.parse_complex64("5+7i hellope", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+ }
+
+Output:
+
+ 3+1i 4 true
+ 5+7i 4 false
+
+**Returns**
+- value: The parsed 64-bit complex number.
+- ok: `false` if a complex number could not be found, or if the input string contained more than just the number.
+*/
+parse_complex64 :: proc(str: string, n: ^int = nil) -> (value: complex64, ok: bool) {
+ v: complex128 = ---
+ v, ok = parse_complex128(str, n)
+ return cast(complex64)v, ok
+}
+/*
+Parses a 32-bit complex number from a string
+
+**Inputs**
+- str: The input string containing a 32-bit complex number.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_complex32_example :: proc() {
+ n: int
+ c, ok := strconv.parse_complex32("3+1i", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+
+ c, ok = strconv.parse_complex32("5+7i hellope", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+ }
+
+Output:
+
+ 3+1i 4 true
+ 5+7i 4 false
+
+**Returns**
+- value: The parsed 32-bit complex number.
+- ok: `false` if a complex number could not be found, or if the input string contained more than just the number.
+*/
+parse_complex32 :: proc(str: string, n: ^int = nil) -> (value: complex32, ok: bool) {
+ v: complex128 = ---
+ v, ok = parse_complex128(str, n)
+ return cast(complex32)v, ok
+}
+/*
+Parses a 256-bit quaternion from a string
+
+**Inputs**
+- str: The input string containing a 256-bit quaternion.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_quaternion256_example :: proc() {
+ n: int
+ q, ok := strconv.parse_quaternion256("1+2i+3j+4k", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+
+ q, ok = strconv.parse_quaternion256("1+2i+3j+4k hellope", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+ }
+
+Output:
+
+ 1+2i+3j+4k 10 true
+ 1+2i+3j+4k 10 false
+
+**Returns**
+- value: The parsed 256-bit quaternion.
+- ok: `false` if a quaternion could not be found, or if the input string contained more than just the quaternion.
+*/
+parse_quaternion256 :: proc(str: string, n: ^int = nil) -> (value: quaternion256, ok: bool) {
+ iterate_and_assign :: proc (iter: ^string, terminator: byte, nr_total: ^int, state: bool) -> (value: f64, ok: bool) {
+ if !state {
+ return
+ }
+
+ nr: int
+ value, nr, _ = parse_f64_prefix(iter^)
+ iter^ = iter[nr:]
+
+ if len(iter) > 0 && iter[0] == terminator {
+ iter^ = iter[1:]
+ nr_total^ += nr + 1
+ ok = true
+ } else {
+ value = 0
+ }
+
+ return
+ }
+
+ real_value, imag_value, jmag_value, kmag_value: f64
+ nr: int
+
+ real_value, nr, _ = parse_f64_prefix(str)
+ iter := str[nr:]
+
+ // Need to have parsed at least something in order to get started.
+ ok = nr > 0
+
+ // Quaternion parsing is done this way to honour the rest of the API with
+ // regards to partial parsing. Otherwise, we could error out early.
+ imag_value, ok = iterate_and_assign(&iter, 'i', &nr, ok)
+ jmag_value, ok = iterate_and_assign(&iter, 'j', &nr, ok)
+ kmag_value, ok = iterate_and_assign(&iter, 'k', &nr, ok)
+
+ if len(iter) != 0 {
+ ok = false
+ }
+
+ if n != nil {
+ n^ = nr
+ }
+
+ value = quaternion(
+ real = real_value,
+ imag = imag_value,
+ jmag = jmag_value,
+ kmag = kmag_value)
+ return
+}
+/*
+Parses a 128-bit quaternion from a string
+
+**Inputs**
+- str: The input string containing a 128-bit quaternion.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_quaternion128_example :: proc() {
+ n: int
+ q, ok := strconv.parse_quaternion128("1+2i+3j+4k", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+
+ q, ok = strconv.parse_quaternion128("1+2i+3j+4k hellope", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+ }
+
+Output:
+
+ 1+2i+3j+4k 10 true
+ 1+2i+3j+4k 10 false
+
+**Returns**
+- value: The parsed 128-bit quaternion.
+- ok: `false` if a quaternion could not be found, or if the input string contained more than just the quaternion.
+*/
+parse_quaternion128 :: proc(str: string, n: ^int = nil) -> (value: quaternion128, ok: bool) {
+ v: quaternion256 = ---
+ v, ok = parse_quaternion256(str, n)
+ return cast(quaternion128)v, ok
+}
+/*
+Parses a 64-bit quaternion from a string
+
+**Inputs**
+- str: The input string containing a 64-bit quaternion.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_quaternion64_example :: proc() {
+ n: int
+ q, ok := strconv.parse_quaternion64("1+2i+3j+4k", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+
+ q, ok = strconv.parse_quaternion64("1+2i+3j+4k hellope", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+ }
+
+Output:
+
+ 1+2i+3j+4k 10 true
+ 1+2i+3j+4k 10 false
+
+**Returns**
+- value: The parsed 64-bit quaternion.
+- ok: `false` if a quaternion could not be found, or if the input string contained more than just the quaternion.
+*/
+parse_quaternion64 :: proc(str: string, n: ^int = nil) -> (value: quaternion64, ok: bool) {
+ v: quaternion256 = ---
+ v, ok = parse_quaternion256(str, n)
+ return cast(quaternion64)v, ok
+}
/*
Appends a boolean value as a string to the given buffer
diff --git a/core/strings/builder.odin b/core/strings/builder.odin
index 72eb815f9..11885b689 100644
--- a/core/strings/builder.odin
+++ b/core/strings/builder.odin
@@ -350,9 +350,9 @@ Output:
ab
*/
-write_byte :: proc(b: ^Builder, x: byte) -> (n: int) {
+write_byte :: proc(b: ^Builder, x: byte, loc := #caller_location) -> (n: int) {
n0 := len(b.buf)
- append(&b.buf, x)
+ append(&b.buf, x, loc)
n1 := len(b.buf)
return n1-n0
}
@@ -380,9 +380,9 @@ NOTE: The backing dynamic array may be fixed in capacity or fail to resize, `n`
Returns:
- n: The number of bytes appended
*/
-write_bytes :: proc(b: ^Builder, x: []byte) -> (n: int) {
+write_bytes :: proc(b: ^Builder, x: []byte, loc := #caller_location) -> (n: int) {
n0 := len(b.buf)
- append(&b.buf, ..x)
+ append(&b.buf, ..x, loc=loc)
n1 := len(b.buf)
return n1-n0
}
diff --git a/core/sync/futex_darwin.odin b/core/sync/futex_darwin.odin
index 6ea177d1b..fca9aadfe 100644
--- a/core/sync/futex_darwin.odin
+++ b/core/sync/futex_darwin.odin
@@ -52,7 +52,7 @@ _futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, durati
}
} else {
- timeout_ns := u32(duration) * 1000
+ timeout_ns := u32(duration)
s := __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, u64(expected), timeout_ns)
if s >= 0 {
return true
diff --git a/core/sync/primitives_netbsd.odin b/core/sync/primitives_netbsd.odin
index 042e744e8..594f2ff5c 100644
--- a/core/sync/primitives_netbsd.odin
+++ b/core/sync/primitives_netbsd.odin
@@ -1,8 +1,12 @@
//+private
package sync
-import "core:sys/unix"
+foreign import libc "system:c"
+
+foreign libc {
+ _lwp_self :: proc "c" () -> i32 ---
+}
_current_thread_id :: proc "contextless" () -> int {
- return cast(int) unix.pthread_self()
+ return int(_lwp_self())
}
diff --git a/core/sys/info/platform_darwin.odin b/core/sys/info/platform_darwin.odin
index 122dd42ee..0cae0aa98 100644
--- a/core/sys/info/platform_darwin.odin
+++ b/core/sys/info/platform_darwin.odin
@@ -527,6 +527,7 @@ macos_release_map: map[string]Darwin_To_Release = {
"23D60" = {{23, 3, 0}, "macOS", {"Sonoma", {14, 3, 1}}},
"23E214" = {{23, 4, 0}, "macOS", {"Sonoma", {14, 4, 0}}},
"23E224" = {{23, 4, 0}, "macOS", {"Sonoma", {14, 4, 1}}},
+ "23F79" = {{23, 5, 0}, "macOS", {"Sonoma", {14, 5, 0}}},
}
@(private)
diff --git a/core/sys/linux/sys.odin b/core/sys/linux/sys.odin
index 413c8742b..171829cde 100644
--- a/core/sys/linux/sys.odin
+++ b/core/sys/linux/sys.odin
@@ -487,6 +487,7 @@ connect :: proc "contextless" (sock: Fd, addr: ^$T) -> (Errno)
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
ret := syscall(SYS_connect, sock, addr, size_of(T))
@@ -502,6 +503,7 @@ accept :: proc "contextless" (sock: Fd, addr: ^$T, sockflags: Socket_FD_Flags =
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
addr_len: i32 = size_of(T)
@@ -514,6 +516,7 @@ recvfrom :: proc "contextless" (sock: Fd, buf: []u8, flags: Socket_Msg, addr: ^$
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
addr_len: i32 = size_of(T)
@@ -531,6 +534,7 @@ sendto :: proc "contextless" (sock: Fd, buf: []u8, flags: Socket_Msg, addr: ^$T)
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
ret := syscall(SYS_sendto, sock, raw_data(buf), len(buf), transmute(i32) flags, addr, size_of(T))
@@ -590,6 +594,7 @@ bind :: proc "contextless" (sock: Fd, addr: ^$T) -> (Errno)
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
ret := syscall(SYS_bind, sock, addr, size_of(T))
diff --git a/core/sys/linux/types.odin b/core/sys/linux/types.odin
index 677bac7e0..5053e1e1c 100644
--- a/core/sys/linux/types.odin
+++ b/core/sys/linux/types.odin
@@ -632,6 +632,14 @@ Sock_Addr_In6 :: struct #packed {
}
/*
+ Struct representing Unix Domain Socket address
+*/
+Sock_Addr_Un :: struct #packed {
+ sun_family: Address_Family,
+ sun_path: [108]u8,
+}
+
+/*
Struct representing an arbitrary socket address.
*/
Sock_Addr_Any :: struct #raw_union {
@@ -641,6 +649,7 @@ Sock_Addr_Any :: struct #raw_union {
},
using ipv4: Sock_Addr_In,
using ipv6: Sock_Addr_In6,
+ using uds: Sock_Addr_Un,
}
/*
diff --git a/core/sys/unix/pthread_freebsd.odin b/core/sys/unix/pthread_freebsd.odin
index 3417d3943..5f4dac289 100644
--- a/core/sys/unix/pthread_freebsd.odin
+++ b/core/sys/unix/pthread_freebsd.odin
@@ -95,7 +95,7 @@ sem_t :: struct {
PTHREAD_CANCEL_ENABLE :: 0
PTHREAD_CANCEL_DISABLE :: 1
PTHREAD_CANCEL_DEFERRED :: 0
-PTHREAD_CANCEL_ASYNCHRONOUS :: 1
+PTHREAD_CANCEL_ASYNCHRONOUS :: 2
foreign import "system:pthread"
@@ -119,4 +119,4 @@ foreign pthread {
pthread_setcancelstate :: proc (state: c.int, old_state: ^c.int) -> c.int ---
pthread_setcanceltype :: proc (type: c.int, old_type: ^c.int) -> c.int ---
pthread_cancel :: proc (thread: pthread_t) -> c.int ---
-} \ No newline at end of file
+}
diff --git a/core/sys/unix/pthread_openbsd.odin b/core/sys/unix/pthread_openbsd.odin
index 7ae82e662..855e7d99c 100644
--- a/core/sys/unix/pthread_openbsd.odin
+++ b/core/sys/unix/pthread_openbsd.odin
@@ -49,7 +49,7 @@ sem_t :: distinct rawptr
PTHREAD_CANCEL_ENABLE :: 0
PTHREAD_CANCEL_DISABLE :: 1
PTHREAD_CANCEL_DEFERRED :: 0
-PTHREAD_CANCEL_ASYNCHRONOUS :: 1
+PTHREAD_CANCEL_ASYNCHRONOUS :: 2
foreign import libc "system:c"
@@ -71,4 +71,4 @@ foreign libc {
pthread_setcancelstate :: proc (state: c.int, old_state: ^c.int) -> c.int ---
pthread_setcanceltype :: proc (type: c.int, old_type: ^c.int) -> c.int ---
pthread_cancel :: proc (thread: pthread_t) -> c.int ---
-} \ No newline at end of file
+}
diff --git a/core/sys/unix/pthread_unix.odin b/core/sys/unix/pthread_unix.odin
index 5760560ee..c876a214a 100644
--- a/core/sys/unix/pthread_unix.odin
+++ b/core/sys/unix/pthread_unix.odin
@@ -116,4 +116,5 @@ foreign pthread {
pthread_mutexattr_setpshared :: proc(attrs: ^pthread_mutexattr_t, value: c.int) -> c.int ---
pthread_mutexattr_getpshared :: proc(attrs: ^pthread_mutexattr_t, result: ^c.int) -> c.int ---
+ pthread_testcancel :: proc () ---
}
diff --git a/core/sys/windows/kernel32.odin b/core/sys/windows/kernel32.odin
index eba275522..3c60cfc43 100644..100755
--- a/core/sys/windows/kernel32.odin
+++ b/core/sys/windows/kernel32.odin
@@ -453,9 +453,9 @@ foreign kernel32 {
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setfilecompletionnotificationmodes)
SetFileCompletionNotificationModes :: proc(FileHandle: HANDLE, Flags: u8) -> BOOL ---
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-createiocompletionport)
- CreateIoCompletionPort :: proc(FileHandle: HANDLE, ExistingCompletionPort: HANDLE, CompletionKey: ^uintptr, NumberOfConcurrentThreads: DWORD) -> HANDLE ---
+ CreateIoCompletionPort :: proc(FileHandle: HANDLE, ExistingCompletionPort: HANDLE, CompletionKey: ULONG_PTR, NumberOfConcurrentThreads: DWORD) -> HANDLE ---
//[MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus)
- GetQueuedCompletionStatus :: proc(CompletionPort: HANDLE, lpNumberOfBytesTransferred: ^DWORD, lpCompletionKey: uintptr, lpOverlapped: ^^OVERLAPPED, dwMilliseconds: DWORD) -> BOOL ---
+ GetQueuedCompletionStatus :: proc(CompletionPort: HANDLE, lpNumberOfBytesTransferred: ^DWORD, lpCompletionKey: PULONG_PTR, lpOverlapped: ^^OVERLAPPED, dwMilliseconds: DWORD) -> BOOL ---
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatusex)
GetQueuedCompletionStatusEx :: proc(CompletionPort: HANDLE, lpCompletionPortEntries: ^OVERLAPPED_ENTRY, ulCount: c_ulong, ulNumEntriesRemoved: ^c_ulong, dwMilliseconds: DWORD, fAlertable: BOOL) -> BOOL ---
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-postqueuedcompletionstatus)
@@ -1153,6 +1153,19 @@ foreign kernel32 {
SetCommState :: proc(handle: HANDLE, dcb: ^DCB) -> BOOL ---
}
+COMMTIMEOUTS :: struct {
+ ReadIntervalTimeout: DWORD,
+ ReadTotalTimeoutMultiplier: DWORD,
+ ReadTotalTimeoutConstant: DWORD,
+ WriteTotalTimeoutMultiplier: DWORD,
+ WriteTotalTimeoutConstant: DWORD,
+}
+
+@(default_calling_convention="system")
+foreign kernel32 {
+ GetCommTimeouts :: proc(handle: HANDLE, timeouts: ^COMMTIMEOUTS) -> BOOL ---
+ SetCommTimeouts :: proc(handle: HANDLE, timeouts: ^COMMTIMEOUTS) -> BOOL ---
+}
LPFIBER_START_ROUTINE :: #type proc "system" (lpFiberParameter: LPVOID)
diff --git a/core/testing/events.odin b/core/testing/events.odin
new file mode 100644
index 000000000..bab35aaad
--- /dev/null
+++ b/core/testing/events.odin
@@ -0,0 +1,48 @@
+//+private
+package testing
+
+import "base:runtime"
+import "core:sync/chan"
+import "core:time"
+
+Test_State :: enum {
+ Ready,
+ Running,
+ Successful,
+ Failed,
+}
+
+Update_Channel :: chan.Chan(Channel_Event)
+Update_Channel_Sender :: chan.Chan(Channel_Event, .Send)
+
+Task_Channel :: struct {
+ channel: Update_Channel,
+ test_index: int,
+}
+
+Event_New_Test :: struct {
+ test_index: int,
+}
+
+Event_State_Change :: struct {
+ new_state: Test_State,
+}
+
+Event_Set_Fail_Timeout :: struct {
+ at_time: time.Time,
+ location: runtime.Source_Code_Location,
+}
+
+Event_Log_Message :: struct {
+ level: runtime.Logger_Level,
+ text: string,
+ time: time.Time,
+ formatted_text: string,
+}
+
+Channel_Event :: union {
+ Event_New_Test,
+ Event_State_Change,
+ Event_Set_Fail_Timeout,
+ Event_Log_Message,
+}
diff --git a/core/testing/logging.odin b/core/testing/logging.odin
new file mode 100644
index 000000000..5bbbffeae
--- /dev/null
+++ b/core/testing/logging.odin
@@ -0,0 +1,71 @@
+//+private
+package testing
+
+import "base:runtime"
+import "core:fmt"
+import pkg_log "core:log"
+import "core:strings"
+import "core:sync/chan"
+import "core:time"
+
+Default_Test_Logger_Opts :: runtime.Logger_Options {
+ .Level,
+ .Terminal_Color,
+ .Short_File_Path,
+ .Line,
+ .Procedure,
+ .Date, .Time,
+}
+
+Log_Message :: struct {
+ level: runtime.Logger_Level,
+ text: string,
+ time: time.Time,
+ // `text` may be allocated differently, depending on where a log message
+ // originates from.
+ allocator: runtime.Allocator,
+}
+
+test_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) {
+ t := cast(^T)logger_data
+
+ if level >= .Error {
+ t.error_count += 1
+ }
+
+ cloned_text, clone_error := strings.clone(text, t._log_allocator)
+ assert(clone_error == nil, "Error while cloning string in test thread logger proc.")
+
+ now := time.now()
+
+ chan.send(t.channel, Event_Log_Message {
+ level = level,
+ text = cloned_text,
+ time = now,
+ formatted_text = format_log_text(level, text, options, location, now, t._log_allocator),
+ })
+}
+
+runner_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) {
+ log_messages := cast(^[dynamic]Log_Message)logger_data
+
+ now := time.now()
+
+ append(log_messages, Log_Message {
+ level = level,
+ text = format_log_text(level, text, options, location, now),
+ time = now,
+ allocator = context.allocator,
+ })
+}
+
+format_log_text :: proc(level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location: runtime.Source_Code_Location, at_time: time.Time, allocator := context.allocator) -> string{
+ backing: [1024]byte
+ buf := strings.builder_from_bytes(backing[:])
+
+ pkg_log.do_level_header(options, &buf, level)
+ pkg_log.do_time_header(options, &buf, at_time)
+ pkg_log.do_location_header(options, &buf, location)
+
+ return fmt.aprintf("%s%s", strings.to_string(buf), text, allocator = allocator)
+}
diff --git a/core/testing/reporting.odin b/core/testing/reporting.odin
new file mode 100644
index 000000000..92e144ccc
--- /dev/null
+++ b/core/testing/reporting.odin
@@ -0,0 +1,329 @@
+//+private
+package testing
+
+import "base:runtime"
+import "core:encoding/ansi"
+import "core:fmt"
+import "core:io"
+import "core:mem"
+import "core:path/filepath"
+import "core:strings"
+
+// Definitions of colors for use in the test runner.
+SGR_RESET :: ansi.CSI + ansi.RESET + ansi.SGR
+SGR_READY :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR
+SGR_RUNNING :: ansi.CSI + ansi.FG_YELLOW + ansi.SGR
+SGR_SUCCESS :: ansi.CSI + ansi.FG_GREEN + ansi.SGR
+SGR_FAILED :: ansi.CSI + ansi.FG_RED + ansi.SGR
+
+MAX_PROGRESS_WIDTH :: 100
+
+// More than enough bytes to cover long package names, long test names, dozens
+// of ANSI codes, et cetera.
+LINE_BUFFER_SIZE :: (MAX_PROGRESS_WIDTH * 8 + 224) * runtime.Byte
+
+PROGRESS_COLUMN_SPACING :: 2
+
+Package_Run :: struct {
+ name: string,
+ header: string,
+
+ frame_ready: bool,
+
+ redraw_buffer: [LINE_BUFFER_SIZE]byte,
+ redraw_string: string,
+
+ last_change_state: Test_State,
+ last_change_name: string,
+
+ tests: []Internal_Test,
+ test_states: []Test_State,
+}
+
+Report :: struct {
+ packages: []Package_Run,
+ packages_by_name: map[string]^Package_Run,
+
+ pkg_column_len: int,
+ test_column_len: int,
+ progress_width: int,
+
+ all_tests: []Internal_Test,
+ all_test_states: []Test_State,
+}
+
+// Organize all tests by package and sort out test state data.
+make_report :: proc(internal_tests: []Internal_Test) -> (report: Report, error: runtime.Allocator_Error) {
+ assert(len(internal_tests) > 0, "make_report called with no tests")
+
+ packages: [dynamic]Package_Run
+
+ report.all_tests = internal_tests
+ report.all_test_states = make([]Test_State, len(internal_tests)) or_return
+
+ // First, figure out what belongs where.
+ #no_bounds_check cur_pkg := internal_tests[0].pkg
+ pkg_start: int
+
+ // This loop assumes the tests are sorted by package already.
+ for it, index in internal_tests {
+ if cur_pkg != it.pkg {
+ #no_bounds_check {
+ append(&packages, Package_Run {
+ name = cur_pkg,
+ tests = report.all_tests[pkg_start:index],
+ test_states = report.all_test_states[pkg_start:index],
+ }) or_return
+ }
+
+ when PROGRESS_WIDTH == 0 {
+ report.progress_width = max(report.progress_width, index - pkg_start)
+ }
+
+ pkg_start = index
+ report.pkg_column_len = max(report.pkg_column_len, len(cur_pkg))
+ cur_pkg = it.pkg
+ }
+ report.test_column_len = max(report.test_column_len, len(it.name))
+ }
+
+ // Handle the last (or only) package.
+ #no_bounds_check {
+ append(&packages, Package_Run {
+ name = cur_pkg,
+ header = cur_pkg,
+ tests = report.all_tests[pkg_start:],
+ test_states = report.all_test_states[pkg_start:],
+ }) or_return
+ }
+ when PROGRESS_WIDTH == 0 {
+ report.progress_width = max(report.progress_width, len(internal_tests) - pkg_start)
+ } else {
+ report.progress_width = PROGRESS_WIDTH
+ }
+ report.progress_width = min(report.progress_width, MAX_PROGRESS_WIDTH)
+
+ report.pkg_column_len = PROGRESS_COLUMN_SPACING + max(report.pkg_column_len, len(cur_pkg))
+
+ shrink(&packages) or_return
+
+ for &pkg in packages {
+ pkg.header = fmt.aprintf("%- *[1]s[", pkg.name, report.pkg_column_len)
+ assert(len(pkg.header) > 0, "Error allocating package header string.")
+
+ // This is safe because the array is done resizing, and it has the same
+ // lifetime as the map.
+ report.packages_by_name[pkg.name] = &pkg
+ }
+
+ // It's okay to discard the dynamic array's allocator information here,
+ // because its capacity has been shrunk to its length, it was allocated by
+ // the caller's context allocator, and it will be deallocated by the same.
+ //
+ // `delete_slice` is equivalent to `delete_dynamic_array` in this case.
+ report.packages = packages[:]
+
+ return
+}
+
+destroy_report :: proc(report: ^Report) {
+ for pkg in report.packages {
+ delete(pkg.header)
+ }
+
+ delete(report.packages)
+ delete(report.packages_by_name)
+ delete(report.all_test_states)
+}
+
+redraw_package :: proc(w: io.Writer, report: Report, pkg: ^Package_Run) {
+ if pkg.frame_ready {
+ io.write_string(w, pkg.redraw_string)
+ return
+ }
+
+ // Write the output line here so we can cache it.
+ line_builder := strings.builder_from_bytes(pkg.redraw_buffer[:])
+ line_writer := strings.to_writer(&line_builder)
+
+ highest_run_index: int
+ failed_count: int
+ done_count: int
+ #no_bounds_check for i := 0; i < len(pkg.test_states); i += 1 {
+ switch pkg.test_states[i] {
+ case .Ready:
+ continue
+ case .Running:
+ highest_run_index = max(highest_run_index, i)
+ case .Successful:
+ done_count += 1
+ case .Failed:
+ failed_count += 1
+ done_count += 1
+ }
+ }
+
+ start := max(0, highest_run_index - (report.progress_width - 1))
+ end := min(start + report.progress_width, len(pkg.test_states))
+
+ // This variable is to keep track of the last ANSI code emitted, in
+ // order to avoid repeating the same code over in a sequence.
+ //
+ // This should help reduce screen flicker.
+ last_state := Test_State(-1)
+
+ io.write_string(line_writer, pkg.header)
+
+ #no_bounds_check for state in pkg.test_states[start:end] {
+ switch state {
+ case .Ready:
+ if last_state != state {
+ io.write_string(line_writer, SGR_READY)
+ last_state = state
+ }
+ case .Running:
+ if last_state != state {
+ io.write_string(line_writer, SGR_RUNNING)
+ last_state = state
+ }
+ case .Successful:
+ if last_state != state {
+ io.write_string(line_writer, SGR_SUCCESS)
+ last_state = state
+ }
+ case .Failed:
+ if last_state != state {
+ io.write_string(line_writer, SGR_FAILED)
+ last_state = state
+ }
+ }
+ io.write_byte(line_writer, '|')
+ }
+
+ for _ in 0 ..< report.progress_width - (end - start) {
+ io.write_byte(line_writer, ' ')
+ }
+
+ io.write_string(line_writer, SGR_RESET + "] ")
+
+ ticker: string
+ if done_count == len(pkg.test_states) {
+ ticker = "[package done]"
+ if failed_count > 0 {
+ ticker = fmt.tprintf("%s (" + SGR_FAILED + "%i" + SGR_RESET + " failed)", ticker, failed_count)
+ }
+ } else {
+ if len(pkg.last_change_name) == 0 {
+ #no_bounds_check pkg.last_change_name = pkg.tests[0].name
+ }
+
+ switch pkg.last_change_state {
+ case .Ready:
+ ticker = fmt.tprintf(SGR_READY + "%s" + SGR_RESET, pkg.last_change_name)
+ case .Running:
+ ticker = fmt.tprintf(SGR_RUNNING + "%s" + SGR_RESET, pkg.last_change_name)
+ case .Failed:
+ ticker = fmt.tprintf(SGR_FAILED + "%s" + SGR_RESET, pkg.last_change_name)
+ case .Successful:
+ ticker = fmt.tprintf(SGR_SUCCESS + "%s" + SGR_RESET, pkg.last_change_name)
+ }
+ }
+
+ if done_count == len(pkg.test_states) {
+ fmt.wprintfln(line_writer, " % 4i :: %s",
+ len(pkg.test_states),
+ ticker,
+ )
+ } else {
+ fmt.wprintfln(line_writer, "% 4i/% 4i :: %s",
+ done_count,
+ len(pkg.test_states),
+ ticker,
+ )
+ }
+
+ pkg.redraw_string = strings.to_string(line_builder)
+ pkg.frame_ready = true
+ io.write_string(w, pkg.redraw_string)
+}
+
+redraw_report :: proc(w: io.Writer, report: Report) {
+ // If we print a line longer than the user's terminal can handle, it may
+ // wrap around, shifting the progress report out of alignment.
+ //
+ // There are ways to get the current terminal width, and that would be the
+ // ideal way to handle this, but it would require system-specific code such
+ // as setting STDIN to be non-blocking in order to read the response from
+ // the ANSI DSR escape code, or reading environment variables.
+ //
+ // The DECAWM escape codes control whether or not the terminal will wrap
+ // long lines or overwrite the last visible character.
+ // This should be fine for now.
+ //
+ // Note that we only do this for the animated summary; log messages are
+ // still perfectly fine to wrap, as they're printed in their own batch,
+ // whereas the animation depends on each package being only on one line.
+ //
+ // Of course, if you resize your terminal while it's printing, things can
+ // still break...
+ fmt.wprint(w, ansi.CSI + ansi.DECAWM_OFF)
+ for &pkg in report.packages {
+ redraw_package(w, report, &pkg)
+ }
+ fmt.wprint(w, ansi.CSI + ansi.DECAWM_ON)
+}
+
+needs_to_redraw :: proc(report: Report) -> bool {
+ for pkg in report.packages {
+ if !pkg.frame_ready {
+ return true
+ }
+ }
+
+ return false
+}
+
+draw_status_bar :: proc(w: io.Writer, threads_string: string, total_done_count, total_test_count: int) {
+ if total_done_count == total_test_count {
+ // All tests are done; print a blank line to maintain the same height
+ // of the progress report.
+ fmt.wprintln(w)
+ } else {
+ fmt.wprintfln(w,
+ "%s % 4i/% 4i :: total",
+ threads_string,
+ total_done_count,
+ total_test_count)
+ }
+}
+
+write_memory_report :: proc(w: io.Writer, tracker: ^mem.Tracking_Allocator, pkg, name: string) {
+ fmt.wprintf(w,
+ "<% 10M/% 10M> <% 10M> (% 5i/% 5i) :: %s.%s",
+ tracker.current_memory_allocated,
+ tracker.total_memory_allocated,
+ tracker.peak_memory_allocated,
+ tracker.total_free_count,
+ tracker.total_allocation_count,
+ pkg,
+ name)
+
+ for ptr, entry in tracker.allocation_map {
+ fmt.wprintf(w,
+ "\n +++ leak % 10M @ %p [%s:%i:%s()]",
+ entry.size,
+ ptr,
+ filepath.base(entry.location.file_path),
+ entry.location.line,
+ entry.location.procedure)
+ }
+
+ for entry in tracker.bad_free_array {
+ fmt.wprintf(w,
+ "\n +++ bad free @ %p [%s:%i:%s()]",
+ entry.memory,
+ filepath.base(entry.location.file_path),
+ entry.location.line,
+ entry.location.procedure)
+ }
+}
diff --git a/core/testing/runner.odin b/core/testing/runner.odin
index 0039f1939..328186c35 100644
--- a/core/testing/runner.odin
+++ b/core/testing/runner.odin
@@ -1,73 +1,823 @@
//+private
package testing
+import "base:intrinsics"
+import "base:runtime"
+import "core:bytes"
+import "core:encoding/ansi"
+@require import "core:encoding/base64"
+import "core:fmt"
import "core:io"
+@require import pkg_log "core:log"
+import "core:mem"
import "core:os"
import "core:slice"
+@require import "core:strings"
+import "core:sync/chan"
+import "core:thread"
+import "core:time"
-reset_t :: proc(t: ^T) {
- clear(&t.cleanups)
- t.error_count = 0
+// Specify how many threads to use when running tests.
+TEST_THREADS : int : #config(ODIN_TEST_THREADS, 0)
+// Track the memory used by each test.
+TRACKING_MEMORY : bool : #config(ODIN_TEST_TRACK_MEMORY, true)
+// Always report how much memory is used, even when there are no leaks or bad frees.
+ALWAYS_REPORT_MEMORY : bool : #config(ODIN_TEST_ALWAYS_REPORT_MEMORY, false)
+// Specify how much memory each thread allocator starts with.
+PER_THREAD_MEMORY : int : #config(ODIN_TEST_THREAD_MEMORY, mem.ROLLBACK_STACK_DEFAULT_BLOCK_SIZE)
+// Select a specific set of tests to run by name.
+// Each test is separated by a comma and may optionally include the package name.
+// This may be useful when running tests on multiple packages with `-all-packages`.
+// The format is: `package.test_name,test_name_only,...`
+TEST_NAMES : string : #config(ODIN_TEST_NAMES, "")
+// Show the fancy animated progress report.
+FANCY_OUTPUT : bool : #config(ODIN_TEST_FANCY, true)
+// Copy failed tests to the clipboard when done.
+USE_CLIPBOARD : bool : #config(ODIN_TEST_CLIPBOARD, false)
+// How many test results to show at a time per package.
+PROGRESS_WIDTH : int : #config(ODIN_TEST_PROGRESS_WIDTH, 24)
+// This is the random seed that will be sent to each test.
+// If it is unspecified, it will be set to the system cycle counter at startup.
+SHARED_RANDOM_SEED : u64 : #config(ODIN_TEST_RANDOM_SEED, 0)
+// Set the lowest log level for this test run.
+LOG_LEVEL : string : #config(ODIN_TEST_LOG_LEVEL, "info")
+
+
+get_log_level :: #force_inline proc() -> runtime.Logger_Level {
+ when ODIN_DEBUG {
+ // Always use .Debug in `-debug` mode.
+ return .Debug
+ } else {
+ when LOG_LEVEL == "debug" { return .Debug }
+ else when LOG_LEVEL == "info" { return .Info }
+ else when LOG_LEVEL == "warning" { return .Warning }
+ else when LOG_LEVEL == "error" { return .Error }
+ else when LOG_LEVEL == "fatal" { return .Fatal }
+ else {
+ #panic("Unknown `ODIN_TEST_LOG_LEVEL`: \"" + LOG_LEVEL + "\", possible levels are: \"debug\", \"info\", \"warning\", \"error\", or \"fatal\".")
+ }
+ }
}
+
end_t :: proc(t: ^T) {
for i := len(t.cleanups)-1; i >= 0; i -= 1 {
- c := t.cleanups[i]
+ #no_bounds_check c := t.cleanups[i]
+ context = c.ctx
c.procedure(c.user_data)
}
+
+ delete(t.cleanups)
+ t.cleanups = {}
+}
+
+Task_Data :: struct {
+ it: Internal_Test,
+ t: T,
+ allocator_index: int,
+}
+
+Task_Timeout :: struct {
+ test_index: int,
+ at_time: time.Time,
+ location: runtime.Source_Code_Location,
+}
+
+run_test_task :: proc(task: thread.Task) {
+ data := cast(^Task_Data)(task.data)
+
+ setup_task_signal_handler(task.user_index)
+
+ chan.send(data.t.channel, Event_New_Test {
+ test_index = task.user_index,
+ })
+
+ chan.send(data.t.channel, Event_State_Change {
+ new_state = .Running,
+ })
+
+ context.assertion_failure_proc = test_assertion_failure_proc
+
+ context.logger = {
+ procedure = test_logger_proc,
+ data = &data.t,
+ lowest_level = get_log_level(),
+ options = Default_Test_Logger_Opts,
+ }
+
+ free_all(context.temp_allocator)
+
+ data.it.p(&data.t)
+
+ end_t(&data.t)
+
+ new_state : Test_State = .Failed if failed(&data.t) else .Successful
+
+ chan.send(data.t.channel, Event_State_Change {
+ new_state = new_state,
+ })
}
runner :: proc(internal_tests: []Internal_Test) -> bool {
- stream := os.stream_from_handle(os.stdout)
- w := io.to_writer(stream)
+ BATCH_BUFFER_SIZE :: 32 * mem.Kilobyte
+ POOL_BLOCK_SIZE :: 16 * mem.Kilobyte
+ CLIPBOARD_BUFFER_SIZE :: 16 * mem.Kilobyte
+
+ BUFFERED_EVENTS_PER_CHANNEL :: 16
+ RESERVED_LOG_MESSAGES :: 64
+ RESERVED_TEST_FAILURES :: 64
+
+ ERROR_STRING_TIMEOUT : string : "Test timed out."
+ ERROR_STRING_UNKNOWN : string : "Test failed for unknown reasons."
+ OSC_WINDOW_TITLE : string : ansi.OSC + ansi.WINDOW_TITLE + ";Odin test runner (%i/%i)" + ansi.ST
+
+ safe_delete_string :: proc(s: string, allocator := context.allocator) {
+ // Guard against bad frees on static strings.
+ switch raw_data(s) {
+ case raw_data(ERROR_STRING_TIMEOUT), raw_data(ERROR_STRING_UNKNOWN):
+ return
+ case:
+ delete(s, allocator)
+ }
+ }
+
+ stdout := io.to_writer(os.stream_from_handle(os.stdout))
+ stderr := io.to_writer(os.stream_from_handle(os.stderr))
+
+ // -- Prepare test data.
+
+ alloc_error: mem.Allocator_Error
+
+ when TEST_NAMES != "" {
+ select_internal_tests: [dynamic]Internal_Test
+ defer delete(select_internal_tests)
+
+ {
+ index_list := TEST_NAMES
+ for selector in strings.split_iterator(&index_list, ",") {
+ // Temp allocator is fine since we just need to identify which test it's referring to.
+ split_selector := strings.split(selector, ".", context.temp_allocator)
- t := &T{}
- t.w = w
- reserve(&t.cleanups, 1024)
- defer delete(t.cleanups)
+ found := false
+ switch len(split_selector) {
+ case 1:
+ // Only the test name?
+ #no_bounds_check name := split_selector[0]
+ find_test_by_name: for it in internal_tests {
+ if it.name == name {
+ found = true
+ _, alloc_error = append(&select_internal_tests, it)
+ fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
+ break find_test_by_name
+ }
+ }
+ case 2:
+ #no_bounds_check pkg := split_selector[0]
+ #no_bounds_check name := split_selector[1]
+ find_test_by_pkg_and_name: for it in internal_tests {
+ if it.pkg == pkg && it.name == name {
+ found = true
+ _, alloc_error = append(&select_internal_tests, it)
+ fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
+ break find_test_by_pkg_and_name
+ }
+ }
+ }
+ if !found {
+ fmt.wprintfln(stderr, "No test found for the name: %q", selector)
+ }
+ }
+ }
+
+ // Intentional shadow with user-specified tests.
+ internal_tests := select_internal_tests[:]
+ }
+ total_failure_count := 0
total_success_count := 0
- total_test_count := len(internal_tests)
+ total_done_count := 0
+ total_test_count := len(internal_tests)
+
+ when !FANCY_OUTPUT {
+ // This is strictly for updating the window title when the progress
+ // report is disabled. We're otherwise able to depend on the call to
+ // `needs_to_redraw`.
+ last_done_count := -1
+ }
+
+ if total_test_count == 0 {
+ // Exit early.
+ fmt.wprintln(stdout, "No tests to run.")
+ return true
+ }
+
+ for it in internal_tests {
+ // NOTE(Feoramund): The old test runner skipped over tests with nil
+ // procedures, but I couldn't find any case where they occurred.
+ // This assert stands to prevent any oversight on my part.
+ fmt.assertf(it.p != nil, "Test %s.%s has <nil> procedure.", it.pkg, it.name)
+ }
slice.sort_by(internal_tests, proc(a, b: Internal_Test) -> bool {
- if a.pkg < b.pkg {
- return true
+ if a.pkg == b.pkg {
+ return a.name < b.name
+ } else {
+ return a.pkg < b.pkg
}
- return a.name < b.name
})
- prev_pkg := ""
+ // -- Set thread count.
- for it in internal_tests {
- if it.p == nil {
- total_test_count -= 1
- continue
+ when TEST_THREADS == 0 {
+ thread_count := os.processor_core_count()
+ } else {
+ thread_count := max(1, TEST_THREADS)
+ }
+
+ thread_count = min(thread_count, total_test_count)
+
+ // -- Allocate.
+
+ pool_stack: mem.Rollback_Stack
+ alloc_error = mem.rollback_stack_init(&pool_stack, POOL_BLOCK_SIZE)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for thread pool: %v", alloc_error)
+ defer mem.rollback_stack_destroy(&pool_stack)
+
+ pool: thread.Pool
+ thread.pool_init(&pool, mem.rollback_stack_allocator(&pool_stack), thread_count)
+ defer thread.pool_destroy(&pool)
+
+ task_channels: []Task_Channel = ---
+ task_channels, alloc_error = make([]Task_Channel, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for update channels: %v", alloc_error)
+ defer delete(task_channels)
+
+ for &task_channel, index in task_channels {
+ task_channel.channel, alloc_error = chan.create_buffered(Update_Channel, BUFFERED_EVENTS_PER_CHANNEL, context.allocator)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for update channel #%i: %v", index, alloc_error)
+ }
+ defer for &task_channel in task_channels {
+ chan.destroy(&task_channel.channel)
+ }
+
+ // This buffer is used to batch writes to STDOUT or STDERR, to help reduce
+ // screen flickering.
+ batch_buffer: bytes.Buffer
+ bytes.buffer_init_allocator(&batch_buffer, 0, BATCH_BUFFER_SIZE)
+ batch_writer := io.to_writer(bytes.buffer_to_stream(&batch_buffer))
+ defer bytes.buffer_destroy(&batch_buffer)
+
+ report: Report = ---
+ report, alloc_error = make_report(internal_tests)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for test report: %v", alloc_error)
+ defer destroy_report(&report)
+
+ when FANCY_OUTPUT {
+ // We cannot make use of the ANSI save/restore cursor codes, because they
+ // work by absolute screen coordinates. This will cause unnecessary
+ // scrollback if we print at the bottom of someone's terminal.
+ ansi_redraw_string := fmt.aprintf(
+ // ANSI for "go up N lines then erase the screen from the cursor forward."
+ ansi.CSI + "%i" + ansi.CPL + ansi.CSI + ansi.ED +
+ // We'll combine this with the window title format string, since it
+ // can be printed at the same time.
+ "%s",
+ // 1 extra line for the status bar.
+ 1 + len(report.packages), OSC_WINDOW_TITLE)
+ assert(len(ansi_redraw_string) > 0, "Error allocating ANSI redraw string.")
+ defer delete(ansi_redraw_string)
+
+ thread_count_status_string: string = ---
+ {
+ PADDING :: PROGRESS_COLUMN_SPACING + PROGRESS_WIDTH
+
+ unpadded := fmt.tprintf("%i thread%s", thread_count, "" if thread_count == 1 else "s")
+ thread_count_status_string = fmt.aprintf("%- *[1]s", unpadded, report.pkg_column_len + PADDING)
+ assert(len(thread_count_status_string) > 0, "Error allocating thread count status string.")
}
+ defer delete(thread_count_status_string)
+ }
+
+ task_data_slots: []Task_Data = ---
+ task_data_slots, alloc_error = make([]Task_Data, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task data slots: %v", alloc_error)
+ defer delete(task_data_slots)
- free_all(context.temp_allocator)
- reset_t(t)
- defer end_t(t)
+ // Tests rotate through these allocators as they finish.
+ task_allocators: []mem.Rollback_Stack = ---
+ task_allocators, alloc_error = make([]mem.Rollback_Stack, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task allocators: %v", alloc_error)
+ defer delete(task_allocators)
- if prev_pkg != it.pkg {
- prev_pkg = it.pkg
- logf(t, "[Package: %s]", it.pkg)
+ when TRACKING_MEMORY {
+ task_memory_trackers: []mem.Tracking_Allocator = ---
+ task_memory_trackers, alloc_error = make([]mem.Tracking_Allocator, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for memory trackers: %v", alloc_error)
+ defer delete(task_memory_trackers)
+ }
+
+ #no_bounds_check for i in 0 ..< thread_count {
+ alloc_error = mem.rollback_stack_init(&task_allocators[i], PER_THREAD_MEMORY)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error)
+ when TRACKING_MEMORY {
+ mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i]))
}
+ }
- logf(t, "[Test: %s]", it.name)
+ defer #no_bounds_check for i in 0 ..< thread_count {
+ when TRACKING_MEMORY {
+ mem.tracking_allocator_destroy(&task_memory_trackers[i])
+ }
+ mem.rollback_stack_destroy(&task_allocators[i])
+ }
- run_internal_test(t, it)
+ task_timeouts: [dynamic]Task_Timeout = ---
+ task_timeouts, alloc_error = make([dynamic]Task_Timeout, 0, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task timeouts: %v", alloc_error)
+ defer delete(task_timeouts)
- if failed(t) {
- logf(t, "[%s : FAILURE]", it.name)
- } else {
- logf(t, "[%s : SUCCESS]", it.name)
- total_success_count += 1
+ failed_test_reason_map: map[int]string = ---
+ failed_test_reason_map, alloc_error = make(map[int]string, RESERVED_TEST_FAILURES)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for failed test reasons: %v", alloc_error)
+ defer delete(failed_test_reason_map)
+
+ log_messages: [dynamic]Log_Message = ---
+ log_messages, alloc_error = make([dynamic]Log_Message, 0, RESERVED_LOG_MESSAGES)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for log message queue: %v", alloc_error)
+ defer delete(log_messages)
+
+ sorted_failed_test_reasons: [dynamic]int = ---
+ sorted_failed_test_reasons, alloc_error = make([dynamic]int, 0, RESERVED_TEST_FAILURES)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for sorted failed test reasons: %v", alloc_error)
+ defer delete(sorted_failed_test_reasons)
+
+ when USE_CLIPBOARD {
+ clipboard_buffer: bytes.Buffer
+ bytes.buffer_init_allocator(&clipboard_buffer, 0, CLIPBOARD_BUFFER_SIZE)
+ defer bytes.buffer_destroy(&clipboard_buffer)
+ }
+
+ when SHARED_RANDOM_SEED == 0 {
+ shared_random_seed := cast(u64)intrinsics.read_cycle_counter()
+ } else {
+ shared_random_seed := SHARED_RANDOM_SEED
+ }
+
+ // -- Setup initial tasks.
+
+ // NOTE(Feoramund): This is the allocator that will be used by threads to
+ // persist log messages past their lifetimes. It has its own variable name
+ // in the event it needs to be changed from `context.allocator` without
+ // digging through the source to divine everywhere it is used for that.
+ shared_log_allocator := context.allocator
+
+ context.logger = {
+ procedure = runner_logger_proc,
+ data = &log_messages,
+ lowest_level = get_log_level(),
+ options = Default_Test_Logger_Opts - {.Short_File_Path, .Line, .Procedure},
+ }
+
+ run_index: int
+
+ setup_tasks: for &data, task_index in task_data_slots {
+ setup_next_test: for run_index < total_test_count {
+ #no_bounds_check it := internal_tests[run_index]
+ defer run_index += 1
+
+ data.it = it
+ data.t.seed = shared_random_seed
+ #no_bounds_check data.t.channel = chan.as_send(task_channels[task_index].channel)
+ data.t._log_allocator = shared_log_allocator
+ data.allocator_index = task_index
+
+ #no_bounds_check when TRACKING_MEMORY {
+ task_allocator := mem.tracking_allocator(&task_memory_trackers[task_index])
+ } else {
+ task_allocator := mem.rollback_stack_allocator(&task_allocators[task_index])
+ }
+
+ thread.pool_add_task(&pool, task_allocator, run_test_task, &data, run_index)
+
+ continue setup_tasks
}
}
- logf(t, "----------------------------------------")
- if total_test_count == 0 {
- log(t, "NO TESTS RAN")
+
+ // -- Run tests.
+
+ setup_signal_handler()
+
+ fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_HIDE)
+
+ when FANCY_OUTPUT {
+ signals_were_raised := false
+
+ redraw_report(stdout, report)
+ draw_status_bar(stdout, thread_count_status_string, total_done_count, total_test_count)
+ }
+
+ when TEST_THREADS == 0 {
+ pkg_log.infof("Starting test runner with %i thread%s. Set with -define:ODIN_TEST_THREADS=n.",
+ thread_count,
+ "" if thread_count == 1 else "s")
+ } else {
+ pkg_log.infof("Starting test runner with %i thread%s.",
+ thread_count,
+ "" if thread_count == 1 else "s")
+ }
+
+ when SHARED_RANDOM_SEED == 0 {
+ pkg_log.infof("The random seed sent to every test is: %v. Set with -define:ODIN_TEST_RANDOM_SEED=n.", shared_random_seed)
} else {
- logf(t, "%d/%d SUCCESSFUL", total_success_count, total_test_count)
+ pkg_log.infof("The random seed sent to every test is: %v.", shared_random_seed)
+ }
+
+ when TRACKING_MEMORY {
+ when ALWAYS_REPORT_MEMORY {
+ pkg_log.info("Memory tracking is enabled. Tests will log their memory usage when complete.")
+ } else {
+ pkg_log.info("Memory tracking is enabled. Tests will log their memory usage if there's an issue.")
+ }
+ pkg_log.info("< Final Mem/ Total Mem> < Peak Mem> (#Free/Alloc) :: [package.test_name]")
+ } else when ALWAYS_REPORT_MEMORY {
+ pkg_log.warn("ODIN_TEST_ALWAYS_REPORT_MEMORY is true, but ODIN_TRACK_MEMORY is false.")
+ }
+
+ start_time := time.now()
+
+ thread.pool_start(&pool)
+ main_loop: for !thread.pool_is_empty(&pool) {
+ {
+ events_pending := thread.pool_num_done(&pool) > 0
+
+ if !events_pending {
+ poll_tasks: for &task_channel in task_channels {
+ if chan.len(task_channel.channel) > 0 {
+ events_pending = true
+ break poll_tasks
+ }
+ }
+ }
+
+ if !events_pending {
+ // Keep the main thread from pegging a core at 100% usage.
+ time.sleep(1 * time.Microsecond)
+ }
+ }
+
+ cycle_pool: for task in thread.pool_pop_done(&pool) {
+ data := cast(^Task_Data)(task.data)
+
+ when TRACKING_MEMORY {
+ #no_bounds_check tracker := &task_memory_trackers[data.allocator_index]
+
+ memory_is_in_bad_state := len(tracker.allocation_map) + len(tracker.bad_free_array) > 0
+
+ when ALWAYS_REPORT_MEMORY {
+ should_report := true
+ } else {
+ should_report := memory_is_in_bad_state
+ }
+
+ if should_report {
+ write_memory_report(batch_writer, tracker, data.it.pkg, data.it.name)
+
+ pkg_log.log(.Warning if memory_is_in_bad_state else .Info, bytes.buffer_to_string(&batch_buffer))
+ bytes.buffer_reset(&batch_buffer)
+ }
+
+ mem.tracking_allocator_reset(tracker)
+ }
+
+ free_all(task.allocator)
+
+ if run_index < total_test_count {
+ #no_bounds_check it := internal_tests[run_index]
+ defer run_index += 1
+
+ data.it = it
+ data.t.seed = shared_random_seed
+ data.t.error_count = 0
+
+ thread.pool_add_task(&pool, task.allocator, run_test_task, data, run_index)
+ }
+ }
+
+ handle_events: for &task_channel in task_channels {
+ for ev in chan.try_recv(task_channel.channel) {
+ switch event in ev {
+ case Event_New_Test:
+ task_channel.test_index = event.test_index
+
+ case Event_State_Change:
+ #no_bounds_check report.all_test_states[task_channel.test_index] = event.new_state
+
+ #no_bounds_check it := internal_tests[task_channel.test_index]
+ #no_bounds_check pkg := report.packages_by_name[it.pkg]
+
+ #partial switch event.new_state {
+ case .Failed:
+ if task_channel.test_index not_in failed_test_reason_map {
+ failed_test_reason_map[task_channel.test_index] = ERROR_STRING_UNKNOWN
+ }
+ total_failure_count += 1
+ total_done_count += 1
+ case .Successful:
+ total_success_count += 1
+ total_done_count += 1
+ }
+
+ when ODIN_DEBUG {
+ pkg_log.debugf("Test #%i %s.%s changed state to %v.", task_channel.test_index, it.pkg, it.name, event.new_state)
+ }
+
+ pkg.last_change_state = event.new_state
+ pkg.last_change_name = it.name
+ pkg.frame_ready = false
+
+ case Event_Set_Fail_Timeout:
+ _, alloc_error = append(&task_timeouts, Task_Timeout {
+ test_index = task_channel.test_index,
+ at_time = event.at_time,
+ location = event.location,
+ })
+ fmt.assertf(alloc_error == nil, "Error appending to task timeouts: %v", alloc_error)
+
+ case Event_Log_Message:
+ _, alloc_error = append(&log_messages, Log_Message {
+ level = event.level,
+ text = event.formatted_text,
+ time = event.time,
+ allocator = shared_log_allocator,
+ })
+ fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
+
+ if event.level >= .Error {
+ // Save the message for the final summary.
+ if old_error, ok := failed_test_reason_map[task_channel.test_index]; ok {
+ safe_delete_string(old_error, shared_log_allocator)
+ }
+ failed_test_reason_map[task_channel.test_index] = event.text
+ } else {
+ delete(event.text, shared_log_allocator)
+ }
+ }
+ }
+ }
+
+ check_timeouts: for i := len(task_timeouts) - 1; i >= 0; i -= 1 {
+ #no_bounds_check timeout := &task_timeouts[i]
+
+ if time.since(timeout.at_time) < 0 {
+ continue check_timeouts
+ }
+
+ defer unordered_remove(&task_timeouts, i)
+
+ #no_bounds_check if report.all_test_states[timeout.test_index] > .Running {
+ continue check_timeouts
+ }
+
+ if !thread.pool_stop_task(&pool, timeout.test_index) {
+ // The task may have stopped a split second after we started
+ // checking, but we haven't handled the new state yet.
+ continue check_timeouts
+ }
+
+ #no_bounds_check report.all_test_states[timeout.test_index] = .Failed
+ #no_bounds_check it := internal_tests[timeout.test_index]
+ #no_bounds_check pkg := report.packages_by_name[it.pkg]
+ pkg.frame_ready = false
+
+ if old_error, ok := failed_test_reason_map[timeout.test_index]; ok {
+ safe_delete_string(old_error, shared_log_allocator)
+ }
+ failed_test_reason_map[timeout.test_index] = ERROR_STRING_TIMEOUT
+ total_failure_count += 1
+ total_done_count += 1
+
+ now := time.now()
+ _, alloc_error = append(&log_messages, Log_Message {
+ level = .Error,
+ text = format_log_text(.Error, ERROR_STRING_TIMEOUT, Default_Test_Logger_Opts, timeout.location, now),
+ time = now,
+ allocator = context.allocator,
+ })
+ fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
+
+ find_task_data: for &data in task_data_slots {
+ if data.it.pkg == it.pkg && data.it.name == it.name {
+ end_t(&data.t)
+ break find_task_data
+ }
+ }
+ }
+
+ if should_stop_runner() {
+ fmt.wprintln(stderr, "\nCaught interrupt signal. Stopping all tests.")
+ thread.pool_shutdown(&pool)
+ break main_loop
+ }
+
+ when FANCY_OUTPUT {
+ // Because the bounds checking procs send directly to STDERR with
+ // no way to redirect or handle them, we need to at least try to
+ // let the user see those messages when using the animated progress
+ // report. This flag may be set by the block of code below if a
+ // signal is raised.
+ //
+ // It'll be purely by luck if the output is interleaved properly,
+ // given the nature of non-thread-safe printing.
+ //
+ // At worst, if Odin did not print any error for this signal, we'll
+ // just re-display the progress report. The fatal log error message
+ // should be enough to clue the user in that something dire has
+ // occurred.
+ bypass_progress_overwrite := false
+ }
+
+ if test_index, reason, ok := should_stop_test(); ok {
+ #no_bounds_check report.all_test_states[test_index] = .Failed
+ #no_bounds_check it := internal_tests[test_index]
+ #no_bounds_check pkg := report.packages_by_name[it.pkg]
+ pkg.frame_ready = false
+
+ fmt.assertf(thread.pool_stop_task(&pool, test_index),
+ "A signal (%v) was raised to stop test #%i %s.%s, but it was unable to be found.",
+ reason, test_index, it.pkg, it.name)
+
+ if test_index not_in failed_test_reason_map {
+ // We only write a new error message here if there wasn't one
+ // already, because the message we can provide based only on
+ // the signal won't be very useful, whereas asserts and panics
+ // will provide a user-written error message.
+ failed_test_reason_map[test_index] = fmt.aprintf("Signal caught: %v", reason, allocator = shared_log_allocator)
+ pkg_log.fatalf("Caught signal to stop test #%i %s.%s for: %v.", test_index, it.pkg, it.name, reason)
+
+ }
+
+ when FANCY_OUTPUT {
+ bypass_progress_overwrite = true
+ signals_were_raised = true
+ }
+
+ total_failure_count += 1
+ total_done_count += 1
+ }
+
+ // -- Redraw.
+
+ when FANCY_OUTPUT {
+ if len(log_messages) == 0 && !needs_to_redraw(report) {
+ continue main_loop
+ }
+
+ if !bypass_progress_overwrite {
+ fmt.wprintf(stdout, ansi_redraw_string, total_done_count, total_test_count)
+ }
+ } else {
+ if total_done_count != last_done_count {
+ fmt.wprintf(stdout, OSC_WINDOW_TITLE, total_done_count, total_test_count)
+ last_done_count = total_done_count
+ }
+
+ if len(log_messages) == 0 {
+ continue main_loop
+ }
+ }
+
+ // Because each thread has its own messenger channel, log messages
+ // arrive in chunks that are in-order, but when they're merged with the
+ // logs from other threads, they become out-of-order.
+ slice.stable_sort_by(log_messages[:], proc(a, b: Log_Message) -> bool {
+ return time.diff(a.time, b.time) > 0
+ })
+
+ for message in log_messages {
+ fmt.wprintln(batch_writer, message.text)
+ delete(message.text, message.allocator)
+ }
+
+ fmt.wprint(stderr, bytes.buffer_to_string(&batch_buffer))
+ clear(&log_messages)
+ bytes.buffer_reset(&batch_buffer)
+
+ when FANCY_OUTPUT {
+ redraw_report(batch_writer, report)
+ draw_status_bar(batch_writer, thread_count_status_string, total_done_count, total_test_count)
+ fmt.wprint(stdout, bytes.buffer_to_string(&batch_buffer))
+ bytes.buffer_reset(&batch_buffer)
+ }
+ }
+
+ // -- All tests are complete, or the runner has been interrupted.
+
+ // NOTE(Feoramund): If you've arrived here after receiving signal 11 or
+ // SIGSEGV on the main runner thread, while using a UNIX-like platform,
+ // there is the possibility that you may have encountered a rare edge case
+ // involving the joining of threads.
+ //
+ // At the time of writing, the thread library is undergoing a rewrite that
+ // should solve this problem; it is not an issue with the test runner itself.
+ thread.pool_join(&pool)
+
+ finished_in := time.since(start_time)
+
+ when !FANCY_OUTPUT {
+ // One line to space out the results, since we don't have the status
+ // bar in plain mode.
+ fmt.wprintln(batch_writer)
+ }
+
+ fmt.wprintf(batch_writer,
+ "Finished %i test%s in %v.",
+ total_done_count,
+ "" if total_done_count == 1 else "s",
+ finished_in)
+
+ if total_done_count != total_test_count {
+ not_run_count := total_test_count - total_done_count
+ fmt.wprintf(batch_writer,
+ " " + SGR_READY + "%i" + SGR_RESET + " %s left undone.",
+ not_run_count,
+ "test was" if not_run_count == 1 else "tests were")
+ }
+
+ if total_success_count == total_test_count {
+ fmt.wprintfln(batch_writer,
+ " %s " + SGR_SUCCESS + "successful." + SGR_RESET,
+ "The test was" if total_test_count == 1 else "All tests were")
+ } else if total_failure_count > 0 {
+ if total_failure_count == total_test_count {
+ fmt.wprintfln(batch_writer,
+ " %s " + SGR_FAILED + "failed." + SGR_RESET,
+ "The test" if total_test_count == 1 else "All tests")
+ } else {
+ fmt.wprintfln(batch_writer,
+ " " + SGR_FAILED + "%i" + SGR_RESET + " test%s failed.",
+ total_failure_count,
+ "" if total_failure_count == 1 else "s")
+ }
+
+ for test_index in failed_test_reason_map {
+ _, alloc_error = append(&sorted_failed_test_reasons, test_index)
+ fmt.assertf(alloc_error == nil, "Error appending to sorted failed test reasons: %v", alloc_error)
+ }
+
+ slice.sort(sorted_failed_test_reasons[:])
+
+ for test_index in sorted_failed_test_reasons {
+ #no_bounds_check last_error := failed_test_reason_map[test_index]
+ #no_bounds_check it := internal_tests[test_index]
+ pkg_and_name := fmt.tprintf("%s.%s", it.pkg, it.name)
+ fmt.wprintfln(batch_writer, " - %- *[1]s\t%s",
+ pkg_and_name,
+ report.pkg_column_len + report.test_column_len,
+ last_error)
+ safe_delete_string(last_error, shared_log_allocator)
+ }
+
+ if total_success_count > 0 {
+ when USE_CLIPBOARD {
+ clipboard_writer := io.to_writer(bytes.buffer_to_stream(&clipboard_buffer))
+ fmt.wprint(clipboard_writer, "-define:ODIN_TEST_NAMES=")
+ for test_index in sorted_failed_test_reasons {
+ #no_bounds_check it := internal_tests[test_index]
+ fmt.wprintf(clipboard_writer, "%s.%s,", it.pkg, it.name)
+ }
+
+ encoded_names := base64.encode(bytes.buffer_to_bytes(&clipboard_buffer), allocator = context.temp_allocator)
+
+ fmt.wprintf(batch_writer,
+ ansi.OSC + ansi.CLIPBOARD + ";c;%s" + ansi.ST +
+ "\nThe name%s of the failed test%s been copied to your clipboard.",
+ encoded_names,
+ "" if total_failure_count == 1 else "s",
+ " has" if total_failure_count == 1 else "s have")
+ } else {
+ fmt.wprintf(batch_writer, "\nTo run only the failed test%s, use:\n\t-define:ODIN_TEST_NAMES=",
+ "" if total_failure_count == 1 else "s")
+ for test_index in sorted_failed_test_reasons {
+ #no_bounds_check it := internal_tests[test_index]
+ fmt.wprintf(batch_writer, "%s.%s,", it.pkg, it.name)
+ }
+ fmt.wprint(batch_writer, "\n\nIf your terminal supports OSC 52, you may use -define:ODIN_TEST_CLIPBOARD to have this copied directly to your clipboard.")
+ }
+
+ fmt.wprintln(batch_writer)
+ }
}
+
+ fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_SHOW)
+
+ when FANCY_OUTPUT {
+ if signals_were_raised {
+ fmt.wprintln(batch_writer, `
+Signals were raised during this test run. Log messages are likely to have collided with each other.
+To partly mitigate this, redirect STDERR to a file or use the -define:ODIN_TEST_FANCY=false option.`)
+ }
+ }
+
+ fmt.wprintln(stderr, bytes.buffer_to_string(&batch_buffer))
+
return total_success_count == total_test_count
}
diff --git a/core/testing/runner_other.odin b/core/testing/runner_other.odin
deleted file mode 100644
index f3271d209..000000000
--- a/core/testing/runner_other.odin
+++ /dev/null
@@ -1,14 +0,0 @@
-//+private
-//+build !windows
-package testing
-
-import "core:time"
-
-run_internal_test :: proc(t: ^T, it: Internal_Test) {
- // TODO(bill): Catch panics on other platforms
- it.p(t)
-}
-
-_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
-
-} \ No newline at end of file
diff --git a/core/testing/runner_windows.odin b/core/testing/runner_windows.odin
deleted file mode 100644
index 15264355b..000000000
--- a/core/testing/runner_windows.odin
+++ /dev/null
@@ -1,235 +0,0 @@
-//+private
-//+build windows
-package testing
-
-import win32 "core:sys/windows"
-import "base:runtime"
-import "base:intrinsics"
-import "core:time"
-
-Sema :: struct {
- count: i32,
-}
-
-sema_reset :: proc "contextless" (s: ^Sema) {
- intrinsics.atomic_store(&s.count, 0)
-}
-sema_wait :: proc "contextless" (s: ^Sema) {
- for {
- original_count := s.count
- for original_count == 0 {
- win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), win32.INFINITE)
- original_count = s.count
- }
- if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
- return
- }
- }
-}
-sema_wait_with_timeout :: proc "contextless" (s: ^Sema, duration: time.Duration) -> bool {
- if duration <= 0 {
- return false
- }
- for {
-
- original_count := intrinsics.atomic_load(&s.count)
- for start := time.tick_now(); original_count == 0; /**/ {
- if intrinsics.atomic_load(&s.count) != original_count {
- remaining := duration - time.tick_since(start)
- if remaining < 0 {
- return false
- }
- ms := u32(remaining/time.Millisecond)
- if !win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), ms) {
- return false
- }
- }
- original_count = s.count
- }
- if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
- return true
- }
- }
-}
-
-sema_post :: proc "contextless" (s: ^Sema, count := 1) {
- intrinsics.atomic_add(&s.count, i32(count))
- if count == 1 {
- win32.WakeByAddressSingle(&s.count)
- } else {
- win32.WakeByAddressAll(&s.count)
- }
-}
-
-
-
-Thread_Proc :: #type proc(^Thread)
-
-MAX_USER_ARGUMENTS :: 8
-
-Thread :: struct {
- using specific: Thread_Os_Specific,
- procedure: Thread_Proc,
-
- t: ^T,
- it: Internal_Test,
- success: bool,
-
- init_context: Maybe(runtime.Context),
-
- creation_allocator: runtime.Allocator,
-
- internal_fail_timeout: time.Duration,
- internal_fail_timeout_loc: runtime.Source_Code_Location,
-}
-
-Thread_Os_Specific :: struct {
- win32_thread: win32.HANDLE,
- win32_thread_id: win32.DWORD,
- done: bool, // see note in `is_done`
-}
-
-thread_create :: proc(procedure: Thread_Proc) -> ^Thread {
- __windows_thread_entry_proc :: proc "system" (t_: rawptr) -> win32.DWORD {
- t := (^Thread)(t_)
- context = t.init_context.? or_else runtime.default_context()
-
- t.procedure(t)
-
- if t.init_context == nil {
- if context.temp_allocator.data == &runtime.global_default_temp_allocator_data {
- runtime.default_temp_allocator_destroy(auto_cast context.temp_allocator.data)
- }
- }
-
- intrinsics.atomic_store(&t.done, true)
- return 0
- }
-
-
- thread := new(Thread)
- if thread == nil {
- return nil
- }
- thread.creation_allocator = context.allocator
-
- win32_thread_id: win32.DWORD
- win32_thread := win32.CreateThread(nil, 0, __windows_thread_entry_proc, thread, win32.CREATE_SUSPENDED, &win32_thread_id)
- if win32_thread == nil {
- free(thread, thread.creation_allocator)
- return nil
- }
- thread.procedure = procedure
- thread.win32_thread = win32_thread
- thread.win32_thread_id = win32_thread_id
- thread.init_context = context
-
- return thread
-}
-
-thread_start :: proc "contextless" (thread: ^Thread) {
- win32.ResumeThread(thread.win32_thread)
-}
-
-thread_join_and_destroy :: proc(thread: ^Thread) {
- if thread.win32_thread != win32.INVALID_HANDLE {
- win32.WaitForSingleObject(thread.win32_thread, win32.INFINITE)
- win32.CloseHandle(thread.win32_thread)
- thread.win32_thread = win32.INVALID_HANDLE
- }
- free(thread, thread.creation_allocator)
-}
-
-thread_terminate :: proc "contextless" (thread: ^Thread, exit_code: int) {
- win32.TerminateThread(thread.win32_thread, u32(exit_code))
-}
-
-
-_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
- assert(global_fail_timeout_thread == nil, "set_fail_timeout previously called", loc)
-
- thread := thread_create(proc(thread: ^Thread) {
- t := thread.t
- timeout := thread.internal_fail_timeout
- if !sema_wait_with_timeout(&global_fail_timeout_semaphore, timeout) {
- fail_now(t, "TIMEOUT", thread.internal_fail_timeout_loc)
- }
- })
- thread.internal_fail_timeout = duration
- thread.internal_fail_timeout_loc = loc
- thread.t = t
- global_fail_timeout_thread = thread
- thread_start(thread)
-}
-
-global_fail_timeout_thread: ^Thread
-global_fail_timeout_semaphore: Sema
-
-global_threaded_runner_semaphore: Sema
-global_exception_handler: rawptr
-global_current_thread: ^Thread
-global_current_t: ^T
-
-run_internal_test :: proc(t: ^T, it: Internal_Test) {
- thread := thread_create(proc(thread: ^Thread) {
- exception_handler_proc :: proc "system" (ExceptionInfo: ^win32.EXCEPTION_POINTERS) -> win32.LONG {
- switch ExceptionInfo.ExceptionRecord.ExceptionCode {
- case
- win32.EXCEPTION_DATATYPE_MISALIGNMENT,
- win32.EXCEPTION_BREAKPOINT,
- win32.EXCEPTION_ACCESS_VIOLATION,
- win32.EXCEPTION_ILLEGAL_INSTRUCTION,
- win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED,
- win32.EXCEPTION_STACK_OVERFLOW:
-
- sema_post(&global_threaded_runner_semaphore)
- return win32.EXCEPTION_EXECUTE_HANDLER
- }
-
- return win32.EXCEPTION_CONTINUE_SEARCH
- }
- global_exception_handler = win32.AddVectoredExceptionHandler(0, exception_handler_proc)
-
- context.assertion_failure_proc = proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! {
- errorf(global_current_t, "%s %s", prefix, message, loc=loc)
- intrinsics.trap()
- }
-
- t := thread.t
-
- global_fail_timeout_thread = nil
- sema_reset(&global_fail_timeout_semaphore)
-
- thread.it.p(t)
-
- sema_post(&global_fail_timeout_semaphore)
- if global_fail_timeout_thread != nil do thread_join_and_destroy(global_fail_timeout_thread)
-
- thread.success = true
- sema_post(&global_threaded_runner_semaphore)
- })
-
- sema_reset(&global_threaded_runner_semaphore)
- global_current_t = t
-
- t._fail_now = proc() -> ! {
- intrinsics.trap()
- }
-
- thread.t = t
- thread.it = it
- thread.success = false
- thread_start(thread)
-
- sema_wait(&global_threaded_runner_semaphore)
- thread_terminate(thread, int(!thread.success))
- thread_join_and_destroy(thread)
-
- win32.RemoveVectoredExceptionHandler(global_exception_handler)
-
- if !thread.success && t.error_count == 0 {
- t.error_count += 1
- }
-
- return
-}
diff --git a/core/testing/signal_handler.odin b/core/testing/signal_handler.odin
new file mode 100644
index 000000000..891f6bbb6
--- /dev/null
+++ b/core/testing/signal_handler.odin
@@ -0,0 +1,33 @@
+//+private
+package testing
+
+import "base:runtime"
+import pkg_log "core:log"
+
+Stop_Reason :: enum {
+ Unknown,
+ Illegal_Instruction,
+ Arithmetic_Error,
+ Segmentation_Fault,
+}
+
+test_assertion_failure_proc :: proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! {
+ pkg_log.fatalf("%s: %s", prefix, message, location = loc)
+ runtime.trap()
+}
+
+setup_signal_handler :: proc() {
+ _setup_signal_handler()
+}
+
+setup_task_signal_handler :: proc(test_index: int) {
+ _setup_task_signal_handler(test_index)
+}
+
+should_stop_runner :: proc() -> bool {
+ return _should_stop_runner()
+}
+
+should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) {
+ return _should_stop_test()
+}
diff --git a/core/testing/signal_handler_libc.odin b/core/testing/signal_handler_libc.odin
new file mode 100644
index 000000000..0ab34776e
--- /dev/null
+++ b/core/testing/signal_handler_libc.odin
@@ -0,0 +1,149 @@
+//+private
+//+build windows, linux, darwin, freebsd, openbsd, netbsd, haiku
+package testing
+
+import "base:intrinsics"
+import "core:c/libc"
+import "core:encoding/ansi"
+import "core:sync"
+import "core:os"
+@require import "core:sys/unix"
+
+@(private="file") stop_runner_flag: libc.sig_atomic_t
+
+@(private="file") stop_test_gate: sync.Mutex
+@(private="file") stop_test_index: libc.sig_atomic_t
+@(private="file") stop_test_reason: libc.sig_atomic_t
+@(private="file") stop_test_alert: libc.sig_atomic_t
+
+@(private="file", thread_local)
+local_test_index: libc.sig_atomic_t
+
+@(private="file")
+stop_runner_callback :: proc "c" (sig: libc.int) {
+ prev := intrinsics.atomic_add(&stop_runner_flag, 1)
+
+ // If the flag was already set (if this is the second signal sent for example),
+ // consider this a forced (not graceful) exit.
+ if prev > 0 {
+ os.exit(int(sig))
+ }
+}
+
+@(private="file")
+stop_test_callback :: proc "c" (sig: libc.int) {
+ if local_test_index == -1 {
+ // We're the test runner, and we ourselves have caught a signal from
+ // which there is no recovery.
+ //
+ // The most we can do now is make sure the user's cursor is visible,
+ // nuke the entire processs, and hope a useful core dump survives.
+
+ // NOTE(Feoramund): Using these write calls in a signal handler is
+ // undefined behavior in C99 but possibly tolerated in POSIX 2008.
+ // Either way, we may as well try to salvage what we can.
+ show_cursor := ansi.CSI + ansi.DECTCEM_SHOW
+ libc.fwrite(raw_data(show_cursor), size_of(byte), len(show_cursor), libc.stdout)
+ libc.fflush(libc.stdout)
+
+ // This is an attempt at being compliant by avoiding printf.
+ sigbuf: [8]byte
+ sigstr: string
+ {
+ signum := cast(int)sig
+ i := len(sigbuf) - 2
+ for signum > 0 {
+ m := signum % 10
+ signum /= 10
+ sigbuf[i] = cast(u8)('0' + m)
+ i -= 1
+ }
+ sigstr = cast(string)sigbuf[1 + i:len(sigbuf) - 1]
+ }
+
+ advisory_a := `
+The test runner's main thread has caught an unrecoverable error (signal `
+ advisory_b := `) and will now forcibly terminate.
+This is a dire bug and should be reported to the Odin developers.
+`
+ libc.fwrite(raw_data(advisory_a), size_of(byte), len(advisory_a), libc.stderr)
+ libc.fwrite(raw_data(sigstr), size_of(byte), len(sigstr), libc.stderr)
+ libc.fwrite(raw_data(advisory_b), size_of(byte), len(advisory_b), libc.stderr)
+
+ // Try to get a core dump.
+ libc.abort()
+ }
+
+ if sync.mutex_guard(&stop_test_gate) {
+ intrinsics.atomic_store(&stop_test_index, local_test_index)
+ intrinsics.atomic_store(&stop_test_reason, cast(libc.sig_atomic_t)sig)
+ intrinsics.atomic_store(&stop_test_alert, 1)
+
+ for {
+ // Idle until this thread is terminated by the runner,
+ // otherwise we may continue to generate signals.
+ intrinsics.cpu_relax()
+
+ when ODIN_OS != .Windows {
+ // NOTE(Feoramund): Some UNIX-like platforms may require this.
+ //
+ // During testing, I found that NetBSD 10.0 refused to
+ // terminate a task thread, even when its thread had been
+ // properly set to PTHREAD_CANCEL_ASYNCHRONOUS.
+ //
+ // The runner would stall after returning from `pthread_cancel`.
+
+ unix.pthread_testcancel()
+ }
+ }
+ }
+}
+
+_setup_signal_handler :: proc() {
+ local_test_index = -1
+
+ // Catch user interrupt / CTRL-C.
+ libc.signal(libc.SIGINT, stop_runner_callback)
+ // Catch polite termination request.
+ libc.signal(libc.SIGTERM, stop_runner_callback)
+
+ // For tests:
+ // Catch asserts and panics.
+ libc.signal(libc.SIGILL, stop_test_callback)
+ // Catch arithmetic errors.
+ libc.signal(libc.SIGFPE, stop_test_callback)
+ // Catch segmentation faults (illegal memory access).
+ libc.signal(libc.SIGSEGV, stop_test_callback)
+}
+
+_setup_task_signal_handler :: proc(test_index: int) {
+ local_test_index = cast(libc.sig_atomic_t)test_index
+}
+
+_should_stop_runner :: proc() -> bool {
+ return intrinsics.atomic_load(&stop_runner_flag) == 1
+}
+
+@(private="file")
+unlock_stop_test_gate :: proc(_: int, _: Stop_Reason, ok: bool) {
+ if ok {
+ sync.mutex_unlock(&stop_test_gate)
+ }
+}
+
+@(deferred_out=unlock_stop_test_gate)
+_should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) {
+ if intrinsics.atomic_load(&stop_test_alert) == 1 {
+ intrinsics.atomic_store(&stop_test_alert, 0)
+
+ test_index = cast(int)intrinsics.atomic_load(&stop_test_index)
+ switch intrinsics.atomic_load(&stop_test_reason) {
+ case libc.SIGFPE: reason = .Arithmetic_Error
+ case libc.SIGILL: reason = .Illegal_Instruction
+ case libc.SIGSEGV: reason = .Segmentation_Fault
+ }
+ ok = true
+ }
+
+ return
+}
diff --git a/core/testing/signal_handler_other.odin b/core/testing/signal_handler_other.odin
new file mode 100644
index 000000000..04981f5af
--- /dev/null
+++ b/core/testing/signal_handler_other.odin
@@ -0,0 +1,19 @@
+//+private
+//+build !windows !linux !darwin !freebsd !openbsd !netbsd !haiku
+package testing
+
+_setup_signal_handler :: proc() {
+ // Do nothing.
+}
+
+_setup_task_signal_handler :: proc(test_index: int) {
+ // Do nothing.
+}
+
+_should_stop_runner :: proc() -> bool {
+ return false
+}
+
+_should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) {
+ return 0, {}, false
+}
diff --git a/core/testing/testing.odin b/core/testing/testing.odin
index a8c5ffa48..92b4d391d 100644
--- a/core/testing/testing.odin
+++ b/core/testing/testing.odin
@@ -1,10 +1,11 @@
package testing
-import "core:fmt"
-import "core:io"
-import "core:time"
import "base:intrinsics"
+import "base:runtime"
+import pkg_log "core:log"
import "core:reflect"
+import "core:sync/chan"
+import "core:time"
_ :: reflect // alias reflect to nothing to force visibility for -vet
@@ -22,44 +23,55 @@ Internal_Test :: struct {
Internal_Cleanup :: struct {
procedure: proc(rawptr),
user_data: rawptr,
+ ctx: runtime.Context,
}
T :: struct {
error_count: int,
- w: io.Writer,
+ // If your test needs to perform random operations, it's advised to use
+ // this value to seed a local random number generator rather than relying
+ // on the non-thread-safe global one.
+ //
+ // This way, your results will be deterministic.
+ //
+ // This value is chosen at startup of the test runner, logged, and may be
+ // specified by the user. It is the same for all tests of a single run.
+ seed: u64,
+
+ channel: Update_Channel_Sender,
cleanups: [dynamic]Internal_Cleanup,
+ // This allocator is shared between the test runner and its threads for
+ // cloning log strings, so they can outlive the lifetime of individual
+ // tests during channel transmission.
+ _log_allocator: runtime.Allocator,
+
_fail_now: proc() -> !,
}
+@(deprecated="prefer `log.error`")
error :: proc(t: ^T, args: ..any, loc := #caller_location) {
- fmt.wprintf(t.w, "%v: ", loc)
- fmt.wprintln(t.w, ..args)
- t.error_count += 1
+ pkg_log.error(..args, location = loc)
}
+@(deprecated="prefer `log.errorf`")
errorf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
- fmt.wprintf(t.w, "%v: ", loc)
- fmt.wprintf(t.w, format, ..args)
- fmt.wprintln(t.w)
- t.error_count += 1
+ pkg_log.errorf(format, ..args, location = loc)
}
fail :: proc(t: ^T, loc := #caller_location) {
- error(t, "FAIL", loc=loc)
- t.error_count += 1
+ pkg_log.error("FAIL", location=loc)
}
fail_now :: proc(t: ^T, msg := "", loc := #caller_location) {
if msg != "" {
- error(t, "FAIL:", msg, loc=loc)
+ pkg_log.error("FAIL:", msg, location=loc)
} else {
- error(t, "FAIL", loc=loc)
+ pkg_log.error("FAIL", location=loc)
}
- t.error_count += 1
if t._fail_now != nil {
t._fail_now()
}
@@ -69,32 +81,34 @@ failed :: proc(t: ^T) -> bool {
return t.error_count != 0
}
+@(deprecated="prefer `log.info`")
log :: proc(t: ^T, args: ..any, loc := #caller_location) {
- fmt.wprintln(t.w, ..args)
+ pkg_log.info(..args, location = loc)
}
+@(deprecated="prefer `log.infof`")
logf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
- fmt.wprintf(t.w, format, ..args)
- fmt.wprintln(t.w)
+ pkg_log.infof(format, ..args, location = loc)
}
-// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete
-// cleanup procedures will be called in LIFO (last added, first called) order.
+// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete.
+// Cleanup procedures will be called in LIFO (last added, first called) order.
+// Each procedure will use a copy of the context at the time of registering.
cleanup :: proc(t: ^T, procedure: proc(rawptr), user_data: rawptr) {
- append(&t.cleanups, Internal_Cleanup{procedure, user_data})
+ append(&t.cleanups, Internal_Cleanup{procedure, user_data, context})
}
expect :: proc(t: ^T, ok: bool, msg: string = "", loc := #caller_location) -> bool {
if !ok {
- error(t, msg, loc=loc)
+ pkg_log.error(msg, location=loc)
}
return ok
}
expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_location) -> bool {
if !ok {
- errorf(t, format, ..args, loc=loc)
+ pkg_log.errorf(format, ..args, location=loc)
}
return ok
}
@@ -102,12 +116,15 @@ expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_loc
expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
if !ok {
- errorf(t, "expected %v, got %v", expected, value, loc=loc)
+ pkg_log.errorf("expected %v, got %v", expected, value, location=loc)
}
return ok
}
set_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
- _fail_timeout(t, duration, loc)
+ chan.send(t.channel, Event_Set_Fail_Timeout {
+ at_time = time.time_add(time.now(), duration),
+ location = loc,
+ })
}
diff --git a/core/text/i18n/qt_linguist.odin b/core/text/i18n/qt_linguist.odin
index 0e75df873..bdd3f5fd7 100644
--- a/core/text/i18n/qt_linguist.odin
+++ b/core/text/i18n/qt_linguist.odin
@@ -162,8 +162,6 @@ parse_qt_linguist_file :: proc(filename: string, options := DEFAULT_PARSE_OPTION
context.allocator = allocator
data, data_ok := os.read_entire_file(filename)
- defer delete(data)
-
if !data_ok { return {}, .File_Error }
return parse_qt_linguist_from_bytes(data, options, pluralizer, allocator)
diff --git a/core/thread/thread_pool.odin b/core/thread/thread_pool.odin
index fddcac89e..da5e116ff 100644
--- a/core/thread/thread_pool.odin
+++ b/core/thread/thread_pool.odin
@@ -44,6 +44,29 @@ Pool :: struct {
tasks_done: [dynamic]Task,
}
+Pool_Thread_Data :: struct {
+ pool: ^Pool,
+ task: Task,
+}
+
+@(private="file")
+pool_thread_runner :: proc(t: ^Thread) {
+ data := cast(^Pool_Thread_Data)t.data
+ pool := data.pool
+
+ for intrinsics.atomic_load(&pool.is_running) {
+ sync.wait(&pool.sem_available)
+
+ if task, ok := pool_pop_waiting(pool); ok {
+ data.task = task
+ pool_do_work(pool, task)
+ data.task = {}
+ }
+ }
+
+ sync.post(&pool.sem_available, 1)
+}
+
// Once initialized, the pool's memory address is not allowed to change until
// it is destroyed.
//
@@ -58,21 +81,11 @@ pool_init :: proc(pool: ^Pool, allocator: mem.Allocator, thread_count: int) {
pool.is_running = true
for _, i in pool.threads {
- t := create(proc(t: ^Thread) {
- pool := (^Pool)(t.data)
-
- for intrinsics.atomic_load(&pool.is_running) {
- sync.wait(&pool.sem_available)
-
- if task, ok := pool_pop_waiting(pool); ok {
- pool_do_work(pool, task)
- }
- }
-
- sync.post(&pool.sem_available, 1)
- })
+ t := create(pool_thread_runner)
+ data := new(Pool_Thread_Data)
+ data.pool = pool
t.user_index = i
- t.data = pool
+ t.data = data
pool.threads[i] = t
}
}
@@ -82,6 +95,8 @@ pool_destroy :: proc(pool: ^Pool) {
delete(pool.tasks_done)
for &t in pool.threads {
+ data := cast(^Pool_Thread_Data)t.data
+ free(data, pool.allocator)
destroy(t)
}
@@ -103,7 +118,7 @@ pool_join :: proc(pool: ^Pool) {
yield()
-started_count: int
+ started_count: int
for started_count < len(pool.threads) {
started_count = 0
for t in pool.threads {
@@ -138,6 +153,94 @@ pool_add_task :: proc(pool: ^Pool, allocator: mem.Allocator, procedure: Task_Pro
sync.post(&pool.sem_available, 1)
}
+// Forcibly stop a running task by its user index.
+//
+// This will terminate the underlying thread. Ideally, you should use some
+// means of communication to stop a task, as thread termination may leave
+// resources unclaimed.
+//
+// The thread will be restarted to accept new tasks.
+//
+// Returns true if the task was found and terminated.
+pool_stop_task :: proc(pool: ^Pool, user_index: int, exit_code: int = 1) -> bool {
+ sync.guard(&pool.mutex)
+
+ for t, i in pool.threads {
+ data := cast(^Pool_Thread_Data)t.data
+ if data.task.user_index == user_index && data.task.procedure != nil {
+ terminate(t, exit_code)
+
+ append(&pool.tasks_done, data.task)
+ intrinsics.atomic_add(&pool.num_done, 1)
+ intrinsics.atomic_sub(&pool.num_outstanding, 1)
+ intrinsics.atomic_sub(&pool.num_in_processing, 1)
+
+ destroy(t)
+
+ replacement := create(pool_thread_runner)
+ replacement.user_index = t.user_index
+ replacement.data = data
+ data.task = {}
+ pool.threads[i] = replacement
+
+ start(replacement)
+ return true
+ }
+ }
+
+ return false
+}
+
+// Forcibly stop all running tasks.
+//
+// The same notes from `pool_stop_task` apply here.
+pool_stop_all_tasks :: proc(pool: ^Pool, exit_code: int = 1) {
+ sync.guard(&pool.mutex)
+
+ for t, i in pool.threads {
+ data := cast(^Pool_Thread_Data)t.data
+ if data.task.procedure != nil {
+ terminate(t, exit_code)
+
+ append(&pool.tasks_done, data.task)
+ intrinsics.atomic_add(&pool.num_done, 1)
+ intrinsics.atomic_sub(&pool.num_outstanding, 1)
+ intrinsics.atomic_sub(&pool.num_in_processing, 1)
+
+ destroy(t)
+
+ replacement := create(pool_thread_runner)
+ replacement.user_index = t.user_index
+ replacement.data = data
+ data.task = {}
+ pool.threads[i] = replacement
+
+ start(replacement)
+ }
+ }
+}
+
+// Force the pool to stop all of its threads and put it into a state where
+// it will no longer run any more tasks.
+//
+// The pool must still be destroyed after this.
+pool_shutdown :: proc(pool: ^Pool, exit_code: int = 1) {
+ intrinsics.atomic_store(&pool.is_running, false)
+ sync.guard(&pool.mutex)
+
+ for t in pool.threads {
+ terminate(t, exit_code)
+
+ data := cast(^Pool_Thread_Data)t.data
+ if data.task.procedure != nil {
+ append(&pool.tasks_done, data.task)
+ intrinsics.atomic_add(&pool.num_done, 1)
+ intrinsics.atomic_sub(&pool.num_outstanding, 1)
+ intrinsics.atomic_sub(&pool.num_in_processing, 1)
+ }
+ }
+}
+
// Number of tasks waiting to be processed. Only informational, mostly for
// debugging. Don't rely on this value being consistent with other num_*
// values.
diff --git a/core/thread/thread_unix.odin b/core/thread/thread_unix.odin
index acc0e05cb..5291917da 100644
--- a/core/thread/thread_unix.odin
+++ b/core/thread/thread_unix.odin
@@ -2,11 +2,11 @@
// +private
package thread
-import "base:intrinsics"
import "core:sync"
import "core:sys/unix"
+import "core:time"
-CAS :: intrinsics.atomic_compare_exchange_strong
+CAS :: sync.atomic_compare_exchange_strong
// NOTE(tetra): Aligned here because of core/unix/pthread_linux.odin/pthread_t.
// Also see core/sys/darwin/mach_darwin.odin/semaphore_t.
@@ -32,11 +32,13 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
t.id = sync.current_thread_id()
- for (.Started not_in t.flags) {
- sync.wait(&t.cond, &t.mutex)
+ for (.Started not_in sync.atomic_load(&t.flags)) {
+ // HACK: use a timeout so in the event that the condition is signalled at THIS comment's exact point
+ // (after checking flags, before starting the wait) it gets itself out of that deadlock after a ms.
+ sync.wait_with_timeout(&t.cond, &t.mutex, time.Millisecond)
}
- if .Joined in t.flags {
+ if .Joined in sync.atomic_load(&t.flags) {
return nil
}
@@ -60,11 +62,11 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
t.procedure(t)
}
- intrinsics.atomic_store(&t.flags, t.flags + { .Done })
+ sync.atomic_or(&t.flags, { .Done })
sync.unlock(&t.mutex)
- if .Self_Cleanup in t.flags {
+ if .Self_Cleanup in sync.atomic_load(&t.flags) {
t.unix_thread = {}
// NOTE(ftphikari): It doesn't matter which context 'free' received, right?
context = {}
@@ -122,13 +124,12 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
}
_start :: proc(t: ^Thread) {
- // sync.guard(&t.mutex)
- t.flags += { .Started }
+ sync.atomic_or(&t.flags, { .Started })
sync.signal(&t.cond)
}
_is_done :: proc(t: ^Thread) -> bool {
- return .Done in intrinsics.atomic_load(&t.flags)
+ return .Done in sync.atomic_load(&t.flags)
}
_join :: proc(t: ^Thread) {
@@ -139,7 +140,7 @@ _join :: proc(t: ^Thread) {
}
// Preserve other flags besides `.Joined`, like `.Started`.
- unjoined := intrinsics.atomic_load(&t.flags) - {.Joined}
+ unjoined := sync.atomic_load(&t.flags) - {.Joined}
joined := unjoined + {.Joined}
// Try to set `t.flags` from unjoined to joined. If it returns joined,
diff --git a/core/time/datetime/datetime.odin b/core/time/datetime/datetime.odin
index e15ced5a5..89fa2ce98 100644
--- a/core/time/datetime/datetime.odin
+++ b/core/time/datetime/datetime.odin
@@ -127,13 +127,13 @@ days_remaining :: proc "contextless" (date: Date) -> (days_remaining: i64, err:
return delta.days, .None
}
-last_day_of_month :: proc "contextless" (#any_int year: i64, #any_int month: i8) -> (day: i64, err: Error) {
+last_day_of_month :: proc "contextless" (#any_int year: i64, #any_int month: i8) -> (day: i8, err: Error) {
// Not using formula 2.27 from the book. This is far simpler and gives the same answer.
validate(Date{year, month, 1}) or_return
month_days := MONTH_DAYS
- day = i64(month_days[month])
+ day = month_days[month]
if month == 2 && is_leap_year(year) {
day += 1
}
diff --git a/core/time/iso8061.odin b/core/time/iso8601.odin
index 528e0b00a..528e0b00a 100644
--- a/core/time/iso8061.odin
+++ b/core/time/iso8601.odin
diff --git a/core/time/time.odin b/core/time/time.odin
index 4807af840..4575b36f7 100644
--- a/core/time/time.odin
+++ b/core/time/time.odin
@@ -389,6 +389,7 @@ is_leap_year :: proc "contextless" (year: int) -> (leap: bool) {
return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0)
}
+@(rodata)
days_before := [?]i32{
0,
31,
diff --git a/core/time/time_orca.odin b/core/time/time_orca.odin
new file mode 100644
index 000000000..d222c8247
--- /dev/null
+++ b/core/time/time_orca.odin
@@ -0,0 +1,24 @@
+//+private
+//+build orca
+package time
+
+_IS_SUPPORTED :: false
+
+_now :: proc "contextless" () -> Time {
+ return {}
+}
+
+_sleep :: proc "contextless" (d: Duration) {
+}
+
+_tick_now :: proc "contextless" () -> Tick {
+ // mul_div_u64 :: proc "contextless" (val, num, den: i64) -> i64 {
+ // q := val / den
+ // r := val % den
+ // return q * num + r * num / den
+ // }
+ return {}
+}
+
+_yield :: proc "contextless" () {
+}
diff --git a/core/unicode/tables.odin b/core/unicode/tables.odin
index f43827413..dfa5caaa2 100644
--- a/core/unicode/tables.odin
+++ b/core/unicode/tables.odin
@@ -12,6 +12,7 @@ package unicode
@(private) pLo :: pLl | pLu // a letter that is neither upper nor lower case.
@(private) pLmask :: pLo
+@(rodata)
char_properties := [MAX_LATIN1+1]u8{
0x00 = pC, // '\x00'
0x01 = pC, // '\x01'
@@ -272,6 +273,7 @@ char_properties := [MAX_LATIN1+1]u8{
}
+@(rodata)
alpha_ranges := [?]i32{
0x00d8, 0x00f6,
0x00f8, 0x01f5,
@@ -427,6 +429,7 @@ alpha_ranges := [?]i32{
0xffda, 0xffdc,
}
+@(rodata)
alpha_singlets := [?]i32{
0x00aa,
0x00b5,
@@ -462,6 +465,7 @@ alpha_singlets := [?]i32{
0xfe74,
}
+@(rodata)
space_ranges := [?]i32{
0x0009, 0x000d, // tab and newline
0x0020, 0x0020, // space
@@ -477,6 +481,7 @@ space_ranges := [?]i32{
0xfeff, 0xfeff,
}
+@(rodata)
unicode_spaces := [?]i32{
0x0009, // tab
0x000a, // LF
@@ -494,6 +499,7 @@ unicode_spaces := [?]i32{
0xfeff, // unknown
}
+@(rodata)
to_upper_ranges := [?]i32{
0x0061, 0x007a, 468, // a-z A-Z
0x00e0, 0x00f6, 468,
@@ -532,6 +538,7 @@ to_upper_ranges := [?]i32{
0xff41, 0xff5a, 468,
}
+@(rodata)
to_upper_singlets := [?]i32{
0x00ff, 621,
0x0101, 499,
@@ -875,6 +882,7 @@ to_upper_singlets := [?]i32{
0x1ff3, 509,
}
+@(rodata)
to_lower_ranges := [?]i32{
0x0041, 0x005a, 532, // A-Z a-z
0x00c0, 0x00d6, 532, // - -
@@ -914,6 +922,7 @@ to_lower_ranges := [?]i32{
0xff21, 0xff3a, 532, // - -
}
+@(rodata)
to_lower_singlets := [?]i32{
0x0100, 501,
0x0102, 501,
@@ -1250,6 +1259,7 @@ to_lower_singlets := [?]i32{
0x1ffc, 491,
}
+@(rodata)
to_title_singlets := [?]i32{
0x01c4, 501,
0x01c6, 499,