aboutsummaryrefslogtreecommitdiff
path: root/core
diff options
context:
space:
mode:
authorAndreas T Jonsson <mail@andreasjonsson.se>2024-04-25 22:04:40 +0200
committerAndreas T Jonsson <mail@andreasjonsson.se>2024-04-25 22:04:40 +0200
commit9a008d10f3d8f97ff11ba948d6939bec7e6beb6d (patch)
tree8b2fd91da22620b6e0a7158a66429f1af4aca210 /core
parent3000508c027c9d30c168266d0ae276cc14de3982 (diff)
parentf745fff640ab1582bdfdd18a7239c58fa37db753 (diff)
Merge branch 'master' into netbsd
Diffstat (limited to 'core')
-rw-r--r--core/c/frontend/tokenizer/doc.odin55
-rw-r--r--core/compress/gzip/example.odin2
-rw-r--r--core/compress/gzip/gzip.odin2
-rw-r--r--core/compress/shoco/model.odin2
-rw-r--r--core/compress/shoco/shoco.odin2
-rw-r--r--core/compress/zlib/example.odin2
-rw-r--r--core/compress/zlib/zlib.odin2
-rw-r--r--core/container/bit_array/bit_array.odin2
-rw-r--r--core/container/bit_array/doc.odin75
-rw-r--r--core/crypto/rand_bsd.odin2
-rw-r--r--core/crypto/rand_darwin.odin14
-rw-r--r--core/crypto/rand_generic.odin2
-rw-r--r--core/crypto/rand_js.odin2
-rw-r--r--core/crypto/rand_linux.odin2
-rw-r--r--core/crypto/rand_windows.odin2
-rw-r--r--core/dynlib/doc.odin5
-rw-r--r--core/dynlib/lib.odin2
-rw-r--r--core/encoding/base32/base32.odin2
-rw-r--r--core/encoding/base64/base64.odin173
-rw-r--r--core/encoding/cbor/cbor.odin673
-rw-r--r--core/encoding/cbor/coding.odin886
-rw-r--r--core/encoding/cbor/doc.odin141
-rw-r--r--core/encoding/cbor/marshal.odin575
-rw-r--r--core/encoding/cbor/tags.odin381
-rw-r--r--core/encoding/cbor/unmarshal.odin932
-rw-r--r--core/encoding/csv/reader.odin26
-rw-r--r--core/encoding/csv/writer.odin7
-rw-r--r--core/encoding/entity/entity.odin2
-rw-r--r--core/encoding/entity/generated.odin2
-rw-r--r--core/encoding/hex/hex.odin2
-rw-r--r--core/encoding/json/marshal.odin2
-rw-r--r--core/encoding/json/parser.odin2
-rw-r--r--core/encoding/json/tokenizer.odin2
-rw-r--r--core/encoding/json/types.odin2
-rw-r--r--core/encoding/json/unmarshal.odin2
-rw-r--r--core/encoding/json/validator.odin2
-rw-r--r--core/encoding/varint/doc.odin2
-rw-r--r--core/encoding/varint/leb128.odin2
-rw-r--r--core/encoding/xml/debug_print.odin2
-rw-r--r--core/encoding/xml/helpers.odin2
-rw-r--r--core/encoding/xml/tokenizer.odin2
-rw-r--r--core/encoding/xml/xml_reader.odin2
-rw-r--r--core/fmt/fmt.odin33
-rw-r--r--core/fmt/fmt_js.odin4
-rw-r--r--core/image/netpbm/doc.odin39
-rw-r--r--core/io/io.odin25
-rw-r--r--core/math/big/prime.odin14
-rw-r--r--core/math/math.odin29
-rw-r--r--core/math/rand/rand.odin4
-rw-r--r--core/net/socket_linux.odin8
-rw-r--r--core/net/url.odin17
-rw-r--r--core/odin/parser/parser.odin21
-rw-r--r--core/odin/tokenizer/tokenizer.odin1
-rw-r--r--core/os/stat.odin4
-rw-r--r--core/reflect/reflect.odin21
-rw-r--r--core/sys/darwin/CoreFoundation/CFBase.odin34
-rw-r--r--core/sys/darwin/CoreFoundation/CFString.odin203
-rw-r--r--core/sys/darwin/Foundation/NSApplication.odin2
-rw-r--r--core/sys/darwin/Foundation/NSString.odin15
-rw-r--r--core/sys/darwin/Security/SecBase.odin386
-rw-r--r--core/sys/darwin/Security/SecRandom.odin19
-rw-r--r--core/sys/darwin/core_foundation.odin98
-rw-r--r--core/sys/darwin/security.odin26
-rw-r--r--core/sys/info/doc.odin128
-rw-r--r--core/sys/linux/sys.odin12
-rw-r--r--core/text/edit/text_edit.odin29
-rw-r--r--core/text/i18n/doc.odin185
-rw-r--r--core/text/i18n/i18n.odin4
-rw-r--r--core/text/table/doc.odin31
69 files changed, 4859 insertions, 534 deletions
diff --git a/core/c/frontend/tokenizer/doc.odin b/core/c/frontend/tokenizer/doc.odin
index 9b1734fc4..43747dfe8 100644
--- a/core/c/frontend/tokenizer/doc.odin
+++ b/core/c/frontend/tokenizer/doc.odin
@@ -1,34 +1,31 @@
/*
-package demo
-
-import tokenizer "core:c/frontend/tokenizer"
-import preprocessor "core:c/frontend/preprocessor"
-import "core:fmt"
-
-main :: proc() {
- t := &tokenizer.Tokenizer{};
- tokenizer.init_defaults(t);
-
- cpp := &preprocessor.Preprocessor{};
- cpp.warn, cpp.err = t.warn, t.err;
- preprocessor.init_lookup_tables(cpp);
- preprocessor.init_default_macros(cpp);
- cpp.include_paths = {"my/path/to/include"};
-
- tok := tokenizer.tokenize_file(t, "the/source/file.c", 1);
-
- tok = preprocessor.preprocess(cpp, tok);
- if tok != nil {
- for t := tok; t.kind != .EOF; t = t.next {
- fmt.println(t.lit);
+Example:
+ package demo
+
+ import tokenizer "core:c/frontend/tokenizer"
+ import preprocessor "core:c/frontend/preprocessor"
+ import "core:fmt"
+
+ main :: proc() {
+ t := &tokenizer.Tokenizer{};
+ tokenizer.init_defaults(t);
+
+ cpp := &preprocessor.Preprocessor{};
+ cpp.warn, cpp.err = t.warn, t.err;
+ preprocessor.init_lookup_tables(cpp);
+ preprocessor.init_default_macros(cpp);
+ cpp.include_paths = {"my/path/to/include"};
+
+ tok := tokenizer.tokenize_file(t, "the/source/file.c", 1);
+
+ tok = preprocessor.preprocess(cpp, tok);
+ if tok != nil {
+ for t := tok; t.kind != .EOF; t = t.next {
+ fmt.println(t.lit);
+ }
}
- }
- fmt.println("[Done]");
-}
+ fmt.println("[Done]");
+ }
*/
-
-
package c_frontend_tokenizer
-
-
diff --git a/core/compress/gzip/example.odin b/core/compress/gzip/example.odin
index 635134e40..09540aafc 100644
--- a/core/compress/gzip/example.odin
+++ b/core/compress/gzip/example.odin
@@ -1,5 +1,5 @@
//+build ignore
-package gzip
+package compress_gzip
/*
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
diff --git a/core/compress/gzip/gzip.odin b/core/compress/gzip/gzip.odin
index 50945fc77..57ed3c3c5 100644
--- a/core/compress/gzip/gzip.odin
+++ b/core/compress/gzip/gzip.odin
@@ -1,4 +1,4 @@
-package gzip
+package compress_gzip
/*
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
diff --git a/core/compress/shoco/model.odin b/core/compress/shoco/model.odin
index bbc38903d..f62236c00 100644
--- a/core/compress/shoco/model.odin
+++ b/core/compress/shoco/model.odin
@@ -5,7 +5,7 @@
*/
// package shoco is an implementation of the shoco short string compressor
-package shoco
+package compress_shoco
DEFAULT_MODEL :: Shoco_Model {
min_char = 39,
diff --git a/core/compress/shoco/shoco.odin b/core/compress/shoco/shoco.odin
index e65acb0bc..3c1f412ba 100644
--- a/core/compress/shoco/shoco.odin
+++ b/core/compress/shoco/shoco.odin
@@ -9,7 +9,7 @@
*/
// package shoco is an implementation of the shoco short string compressor
-package shoco
+package compress_shoco
import "base:intrinsics"
import "core:compress"
diff --git a/core/compress/zlib/example.odin b/core/compress/zlib/example.odin
index 19017b279..fedd6671d 100644
--- a/core/compress/zlib/example.odin
+++ b/core/compress/zlib/example.odin
@@ -1,5 +1,5 @@
//+build ignore
-package zlib
+package compress_zlib
/*
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
diff --git a/core/compress/zlib/zlib.odin b/core/compress/zlib/zlib.odin
index d4dc6e3d7..b7f381f2b 100644
--- a/core/compress/zlib/zlib.odin
+++ b/core/compress/zlib/zlib.odin
@@ -1,5 +1,5 @@
//+vet !using-param
-package zlib
+package compress_zlib
/*
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
diff --git a/core/container/bit_array/bit_array.odin b/core/container/bit_array/bit_array.odin
index dbd2e0d3a..a8720715c 100644
--- a/core/container/bit_array/bit_array.odin
+++ b/core/container/bit_array/bit_array.odin
@@ -1,4 +1,4 @@
-package dynamic_bit_array
+package container_dynamic_bit_array
import "base:intrinsics"
import "core:mem"
diff --git a/core/container/bit_array/doc.odin b/core/container/bit_array/doc.odin
index 52e252d8a..77e1904a8 100644
--- a/core/container/bit_array/doc.odin
+++ b/core/container/bit_array/doc.odin
@@ -1,53 +1,52 @@
-package dynamic_bit_array
-
/*
- The Bit Array can be used in several ways:
+The Bit Array can be used in several ways:
- -- By default you don't need to instantiate a Bit Array:
+- By default you don't need to instantiate a Bit Array:
- package test
+ package test
- import "core:fmt"
- import "core:container/bit_array"
+ import "core:fmt"
+ import "core:container/bit_array"
- main :: proc() {
- using bit_array
+ main :: proc() {
+ using bit_array
- bits: Bit_Array
+ bits: Bit_Array
- // returns `true`
- fmt.println(set(&bits, 42))
+ // returns `true`
+ fmt.println(set(&bits, 42))
- // returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
- was_set, was_retrieved := get(&bits, -1)
- fmt.println(was_set, was_retrieved)
- destroy(&bits)
- }
+ // returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
+ was_set, was_retrieved := get(&bits, -1)
+ fmt.println(was_set, was_retrieved)
+ destroy(&bits)
+ }
- -- A Bit Array can optionally allow for negative indices, if the mininum value was given during creation:
+- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
- package test
+ package test
- import "core:fmt"
- import "core:container/bit_array"
+ import "core:fmt"
+ import "core:container/bit_array"
- main :: proc() {
- Foo :: enum int {
- Negative_Test = -42,
- Bar = 420,
- Leaves = 69105,
- }
+ main :: proc() {
+ Foo :: enum int {
+ Negative_Test = -42,
+ Bar = 420,
+ Leaves = 69105,
+ }
- using bit_array
+ using bit_array
- bits := create(int(max(Foo)), int(min(Foo)))
- defer destroy(bits)
+ bits := create(int(max(Foo)), int(min(Foo)))
+ defer destroy(bits)
- fmt.printf("Set(Bar): %v\n", set(bits, Foo.Bar))
- fmt.printf("Get(Bar): %v, %v\n", get(bits, Foo.Bar))
- fmt.printf("Set(Negative_Test): %v\n", set(bits, Foo.Negative_Test))
- fmt.printf("Get(Leaves): %v, %v\n", get(bits, Foo.Leaves))
- fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
- fmt.printf("Freed.\n")
- }
-*/ \ No newline at end of file
+ fmt.printf("Set(Bar): %v\n", set(bits, Foo.Bar))
+ fmt.printf("Get(Bar): %v, %v\n", get(bits, Foo.Bar))
+ fmt.printf("Set(Negative_Test): %v\n", set(bits, Foo.Negative_Test))
+ fmt.printf("Get(Leaves): %v, %v\n", get(bits, Foo.Leaves))
+ fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
+ fmt.printf("Freed.\n")
+ }
+*/
+package container_dynamic_bit_array
diff --git a/core/crypto/rand_bsd.odin b/core/crypto/rand_bsd.odin
index 658221769..bdcc0a433 100644
--- a/core/crypto/rand_bsd.odin
+++ b/core/crypto/rand_bsd.odin
@@ -11,6 +11,6 @@ _rand_bytes :: proc(dst: []byte) {
arc4random_buf(raw_data(dst), len(dst))
}
-_has_rand_bytes :: proc () -> bool {
+_has_rand_bytes :: proc() -> bool {
return true
}
diff --git a/core/crypto/rand_darwin.odin b/core/crypto/rand_darwin.odin
index 2864b46dd..c1a3d1dbc 100644
--- a/core/crypto/rand_darwin.odin
+++ b/core/crypto/rand_darwin.odin
@@ -1,16 +1,18 @@
package crypto
import "core:fmt"
-import "core:sys/darwin"
+
+import CF "core:sys/darwin/CoreFoundation"
+import Sec "core:sys/darwin/Security"
_rand_bytes :: proc(dst: []byte) {
- res := darwin.SecRandomCopyBytes(count=len(dst), bytes=raw_data(dst))
- if res != .Success {
- msg := darwin.CFStringCopyToOdinString(darwin.SecCopyErrorMessageString(res))
- panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", res, msg))
+ err := Sec.RandomCopyBytes(count=len(dst), bytes=raw_data(dst))
+ if err != .Success {
+ msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
+ panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg))
}
}
-_has_rand_bytes :: proc () -> bool {
+_has_rand_bytes :: proc() -> bool {
return true
}
diff --git a/core/crypto/rand_generic.odin b/core/crypto/rand_generic.odin
index cab76418b..157476683 100644
--- a/core/crypto/rand_generic.odin
+++ b/core/crypto/rand_generic.odin
@@ -11,6 +11,6 @@ _rand_bytes :: proc(dst: []byte) {
unimplemented("crypto: rand_bytes not supported on this OS")
}
-_has_rand_bytes :: proc () -> bool {
+_has_rand_bytes :: proc() -> bool {
return false
}
diff --git a/core/crypto/rand_js.odin b/core/crypto/rand_js.odin
index cb2711404..90f60b99b 100644
--- a/core/crypto/rand_js.odin
+++ b/core/crypto/rand_js.odin
@@ -19,6 +19,6 @@ _rand_bytes :: proc(dst: []byte) {
}
}
-_has_rand_bytes :: proc () -> bool {
+_has_rand_bytes :: proc() -> bool {
return true
}
diff --git a/core/crypto/rand_linux.odin b/core/crypto/rand_linux.odin
index 05c05597d..a9dc37415 100644
--- a/core/crypto/rand_linux.odin
+++ b/core/crypto/rand_linux.odin
@@ -35,6 +35,6 @@ _rand_bytes :: proc (dst: []byte) {
}
}
-_has_rand_bytes :: proc () -> bool {
+_has_rand_bytes :: proc() -> bool {
return true
}
diff --git a/core/crypto/rand_windows.odin b/core/crypto/rand_windows.odin
index e1d9f6118..5cafe7fb5 100644
--- a/core/crypto/rand_windows.odin
+++ b/core/crypto/rand_windows.odin
@@ -22,6 +22,6 @@ _rand_bytes :: proc(dst: []byte) {
}
}
-_has_rand_bytes :: proc () -> bool {
+_has_rand_bytes :: proc() -> bool {
return true
}
diff --git a/core/dynlib/doc.odin b/core/dynlib/doc.odin
index 849e03a71..f5c91c54e 100644
--- a/core/dynlib/doc.odin
+++ b/core/dynlib/doc.odin
@@ -1,6 +1,5 @@
-//+build ignore
/*
-Package core:dynlib implements loading of shared libraries/DLLs and their symbols.
+Package `core:dynlib` implements loading of shared libraries/DLLs and their symbols.
The behaviour of dynamically loaded libraries is specific to the target platform of the program.
For in depth detail on the underlying behaviour please refer to your target platform's documentation.
@@ -8,4 +7,4 @@ For in depth detail on the underlying behaviour please refer to your target plat
See `example` directory for an example library exporting 3 symbols and a host program loading them automatically
by defining a symbol table struct.
*/
-package dynlib \ No newline at end of file
+package dynlib
diff --git a/core/dynlib/lib.odin b/core/dynlib/lib.odin
index 30d55edae..3d41cbe2e 100644
--- a/core/dynlib/lib.odin
+++ b/core/dynlib/lib.odin
@@ -135,7 +135,7 @@ initialize_symbols :: proc(
prefixed_symbol_buf: [2048]u8 = ---
count = 0
- for field, i in reflect.struct_fields_zipped(T) {
+ for field in reflect.struct_fields_zipped(T) {
// Calculate address of struct member
field_ptr := rawptr(uintptr(symbol_table) + field.offset)
diff --git a/core/encoding/base32/base32.odin b/core/encoding/base32/base32.odin
index 7ab35afd0..962a3ead4 100644
--- a/core/encoding/base32/base32.odin
+++ b/core/encoding/base32/base32.odin
@@ -1,4 +1,4 @@
-package base32
+package encoding_base32
// @note(zh): Encoding utility for Base32
// A secondary param can be used to supply a custom alphabet to
diff --git a/core/encoding/base64/base64.odin b/core/encoding/base64/base64.odin
index cf2ea1c12..1013a7d0b 100644
--- a/core/encoding/base64/base64.odin
+++ b/core/encoding/base64/base64.odin
@@ -1,4 +1,8 @@
-package base64
+package encoding_base64
+
+import "core:io"
+import "core:mem"
+import "core:strings"
// @note(zh): Encoding utility for Base64
// A secondary param can be used to supply a custom alphabet to
@@ -39,59 +43,132 @@ DEC_TABLE := [128]int {
49, 50, 51, -1, -1, -1, -1, -1,
}
-encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> string #no_bounds_check {
- length := len(data)
- if length == 0 {
- return ""
- }
+encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> (encoded: string, err: mem.Allocator_Error) #optional_allocator_error {
+ out_length := encoded_len(data)
+ if out_length == 0 {
+ return
+ }
+
+ out := strings.builder_make(0, out_length, allocator) or_return
+ ioerr := encode_into(strings.to_stream(&out), data, ENC_TBL)
+
+ assert(ioerr == nil, "string builder should not IO error")
+ assert(strings.builder_cap(out) == out_length, "buffer resized, `encoded_len` was wrong")
+
+ return strings.to_string(out), nil
+}
+
+encode_into :: proc(w: io.Writer, data: []byte, ENC_TBL := ENC_TABLE) -> io.Error {
+ length := len(data)
+ if length == 0 {
+ return nil
+ }
+
+ c0, c1, c2, block: int
+ out: [4]byte
+ for i := 0; i < length; i += 3 {
+ #no_bounds_check {
+ c0, c1, c2 = int(data[i]), -1, -1
+
+ if i + 1 < length { c1 = int(data[i + 1]) }
+ if i + 2 < length { c2 = int(data[i + 2]) }
+
+ block = (c0 << 16) | (max(c1, 0) << 8) | max(c2, 0)
+
+ out[0] = ENC_TBL[block >> 18 & 63]
+ out[1] = ENC_TBL[block >> 12 & 63]
+ out[2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63]
+ out[3] = c2 == -1 ? PADDING : ENC_TBL[block & 63]
+ }
+ io.write_full(w, out[:]) or_return
+ }
+ return nil
+}
- out_length := ((4 * length / 3) + 3) &~ 3
- out := make([]byte, out_length, allocator)
+encoded_len :: proc(data: []byte) -> int {
+ length := len(data)
+ if length == 0 {
+ return 0
+ }
- c0, c1, c2, block: int
+ return ((4 * length / 3) + 3) &~ 3
+}
- for i, d := 0, 0; i < length; i, d = i + 3, d + 4 {
- c0, c1, c2 = int(data[i]), -1, -1
+decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> (decoded: []byte, err: mem.Allocator_Error) #optional_allocator_error {
+ out_length := decoded_len(data)
- if i + 1 < length { c1 = int(data[i + 1]) }
- if i + 2 < length { c2 = int(data[i + 2]) }
+ out := strings.builder_make(0, out_length, allocator) or_return
+ ioerr := decode_into(strings.to_stream(&out), data, DEC_TBL)
- block = (c0 << 16) | (max(c1, 0) << 8) | max(c2, 0)
+ assert(ioerr == nil, "string builder should not IO error")
+ assert(strings.builder_cap(out) == out_length, "buffer resized, `decoded_len` was wrong")
+
+ return out.buf[:], nil
+}
- out[d] = ENC_TBL[block >> 18 & 63]
- out[d + 1] = ENC_TBL[block >> 12 & 63]
- out[d + 2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63]
- out[d + 3] = c2 == -1 ? PADDING : ENC_TBL[block & 63]
- }
- return string(out)
+decode_into :: proc(w: io.Writer, data: string, DEC_TBL := DEC_TABLE) -> io.Error {
+ length := decoded_len(data)
+ if length == 0 {
+ return nil
+ }
+
+ c0, c1, c2, c3: int
+ b0, b1, b2: int
+ buf: [3]byte
+ i, j: int
+ for ; j + 3 <= length; i, j = i + 4, j + 3 {
+ #no_bounds_check {
+ c0 = DEC_TBL[data[i]]
+ c1 = DEC_TBL[data[i + 1]]
+ c2 = DEC_TBL[data[i + 2]]
+ c3 = DEC_TBL[data[i + 3]]
+
+ b0 = (c0 << 2) | (c1 >> 4)
+ b1 = (c1 << 4) | (c2 >> 2)
+ b2 = (c2 << 6) | c3
+
+ buf[0] = byte(b0)
+ buf[1] = byte(b1)
+ buf[2] = byte(b2)
+ }
+
+ io.write_full(w, buf[:]) or_return
+ }
+
+ rest := length - j
+ if rest > 0 {
+ #no_bounds_check {
+ c0 = DEC_TBL[data[i]]
+ c1 = DEC_TBL[data[i + 1]]
+ c2 = DEC_TBL[data[i + 2]]
+
+ b0 = (c0 << 2) | (c1 >> 4)
+ b1 = (c1 << 4) | (c2 >> 2)
+ }
+
+ switch rest {
+ case 1: io.write_byte(w, byte(b0)) or_return
+ case 2: io.write_full(w, {byte(b0), byte(b1)}) or_return
+ }
+ }
+
+ return nil
}
-decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check {
- length := len(data)
- if length == 0 {
- return nil
- }
-
- pad_count := data[length - 1] == PADDING ? (data[length - 2] == PADDING ? 2 : 1) : 0
- out_length := ((length * 6) >> 3) - pad_count
- out := make([]byte, out_length, allocator)
-
- c0, c1, c2, c3: int
- b0, b1, b2: int
-
- for i, j := 0, 0; i < length; i, j = i + 4, j + 3 {
- c0 = DEC_TBL[data[i]]
- c1 = DEC_TBL[data[i + 1]]
- c2 = DEC_TBL[data[i + 2]]
- c3 = DEC_TBL[data[i + 3]]
-
- b0 = (c0 << 2) | (c1 >> 4)
- b1 = (c1 << 4) | (c2 >> 2)
- b2 = (c2 << 6) | c3
-
- out[j] = byte(b0)
- out[j + 1] = byte(b1)
- out[j + 2] = byte(b2)
- }
- return out
+decoded_len :: proc(data: string) -> int {
+ length := len(data)
+ if length == 0 {
+ return 0
+ }
+
+ padding: int
+ if data[length - 1] == PADDING {
+ if length > 1 && data[length - 2] == PADDING {
+ padding = 2
+ } else {
+ padding = 1
+ }
+ }
+
+ return ((length * 6) >> 3) - padding
}
diff --git a/core/encoding/cbor/cbor.odin b/core/encoding/cbor/cbor.odin
new file mode 100644
index 000000000..d0e406ab1
--- /dev/null
+++ b/core/encoding/cbor/cbor.odin
@@ -0,0 +1,673 @@
+package encoding_cbor
+
+import "base:intrinsics"
+
+import "core:encoding/json"
+import "core:io"
+import "core:mem"
+import "core:strconv"
+import "core:strings"
+
+// If we are decoding a stream of either a map or list, the initial capacity will be this value.
+INITIAL_STREAMED_CONTAINER_CAPACITY :: 8
+
+// If we are decoding a stream of either text or bytes, the initial capacity will be this value.
+INITIAL_STREAMED_BYTES_CAPACITY :: 16
+
+// The default maximum amount of bytes to allocate on a buffer/container at once to prevent
+// malicious input from causing massive allocations.
+DEFAULT_MAX_PRE_ALLOC :: mem.Kilobyte
+
+// Known/common headers are defined, undefined headers can still be valid.
+// Higher 3 bits is for the major type and lower 5 bits for the additional information.
+Header :: enum u8 {
+ U8 = (u8(Major.Unsigned) << 5) | u8(Add.One_Byte),
+ U16 = (u8(Major.Unsigned) << 5) | u8(Add.Two_Bytes),
+ U32 = (u8(Major.Unsigned) << 5) | u8(Add.Four_Bytes),
+ U64 = (u8(Major.Unsigned) << 5) | u8(Add.Eight_Bytes),
+
+ Neg_U8 = (u8(Major.Negative) << 5) | u8(Add.One_Byte),
+ Neg_U16 = (u8(Major.Negative) << 5) | u8(Add.Two_Bytes),
+ Neg_U32 = (u8(Major.Negative) << 5) | u8(Add.Four_Bytes),
+ Neg_U64 = (u8(Major.Negative) << 5) | u8(Add.Eight_Bytes),
+
+ False = (u8(Major.Other) << 5) | u8(Add.False),
+ True = (u8(Major.Other) << 5) | u8(Add.True),
+
+ Nil = (u8(Major.Other) << 5) | u8(Add.Nil),
+ Undefined = (u8(Major.Other) << 5) | u8(Add.Undefined),
+
+ Simple = (u8(Major.Other) << 5) | u8(Add.One_Byte),
+
+ F16 = (u8(Major.Other) << 5) | u8(Add.Two_Bytes),
+ F32 = (u8(Major.Other) << 5) | u8(Add.Four_Bytes),
+ F64 = (u8(Major.Other) << 5) | u8(Add.Eight_Bytes),
+
+ Break = (u8(Major.Other) << 5) | u8(Add.Break),
+}
+
+// The higher 3 bits of the header which denotes what type of value it is.
+Major :: enum u8 {
+ Unsigned,
+ Negative,
+ Bytes,
+ Text,
+ Array,
+ Map,
+ Tag,
+ Other,
+}
+
+// The lower 3 bits of the header which denotes additional information for the type of value.
+Add :: enum u8 {
+ False = 20,
+ True = 21,
+ Nil = 22,
+ Undefined = 23,
+
+ One_Byte = 24,
+ Two_Bytes = 25,
+ Four_Bytes = 26,
+ Eight_Bytes = 27,
+
+ Length_Unknown = 31,
+ Break = Length_Unknown,
+}
+
+Value :: union {
+ u8,
+ u16,
+ u32,
+ u64,
+
+ Negative_U8,
+ Negative_U16,
+ Negative_U32,
+ Negative_U64,
+
+ // Pointers so the size of the Value union stays small.
+ ^Bytes,
+ ^Text,
+ ^Array,
+ ^Map,
+ ^Tag,
+
+ Simple,
+ f16,
+ f32,
+ f64,
+ bool,
+ Undefined,
+ Nil,
+}
+
+Bytes :: []byte
+Text :: string
+
+Array :: []Value
+
+Map :: []Map_Entry
+Map_Entry :: struct {
+ key: Value, // Can be any unsigned, negative, float, Simple, bool, Text.
+ value: Value,
+}
+
+Tag :: struct {
+ number: Tag_Number,
+ value: Value, // Value based on the number.
+}
+
+Tag_Number :: u64
+
+Nil :: distinct rawptr
+Undefined :: distinct rawptr
+
+// A distinct atom-like number, range from `0..=19` and `32..=max(u8)`.
+Simple :: distinct u8
+Atom :: Simple
+
+Unmarshal_Error :: union #shared_nil {
+ io.Error,
+ mem.Allocator_Error,
+ Decode_Data_Error,
+ Unmarshal_Data_Error,
+ Maybe(Unsupported_Type_Error),
+}
+
+Marshal_Error :: union #shared_nil {
+ io.Error,
+ mem.Allocator_Error,
+ Encode_Data_Error,
+ Marshal_Data_Error,
+ Maybe(Unsupported_Type_Error),
+}
+
+Decode_Error :: union #shared_nil {
+ io.Error,
+ mem.Allocator_Error,
+ Decode_Data_Error,
+}
+
+Encode_Error :: union #shared_nil {
+ io.Error,
+ mem.Allocator_Error,
+ Encode_Data_Error,
+}
+
+Decode_Data_Error :: enum {
+ None,
+ Bad_Major, // An invalid major type was encountered.
+ Bad_Argument, // A general unexpected value (most likely invalid additional info in header).
+ Bad_Tag_Value, // When the type of value for the given tag is not valid.
+ Nested_Indefinite_Length, // When an streamed/indefinite length container nests another, this is not allowed.
+ Nested_Tag, // When a tag's value is another tag, this is not allowed.
+ Length_Too_Big, // When the length of a container (map, array, bytes, string) is more than `max(int)`.
+ Disallowed_Streaming, // When the `.Disallow_Streaming` flag is set and a streaming header is encountered.
+ Break, // When the `break` header was found without any stream to break off.
+}
+
+Encode_Data_Error :: enum {
+ None,
+ Invalid_Simple, // When a simple is being encoded that is out of the range `0..=19` and `32..=max(u8)`.
+ Int_Too_Big, // When an int is being encoded that is larger than `max(u64)` or smaller than `min(u64)`.
+ Bad_Tag_Value, // When the type of value is not supported by the tag implementation.
+}
+
+Unmarshal_Data_Error :: enum {
+ None,
+ Invalid_Parameter, // When the given `any` can not be unmarshalled into.
+ Non_Pointer_Parameter, // When the given `any` is not a pointer.
+}
+
+Marshal_Data_Error :: enum {
+ None,
+ Invalid_CBOR_Tag, // When the struct tag `cbor_tag:""` is not a registered name or number.
+}
+
+// Error that is returned when a type couldn't be marshalled into or out of, as much information
+// as possible/available is added.
+Unsupported_Type_Error :: struct {
+ id: typeid,
+ hdr: Header,
+ add: Add,
+}
+
+_unsupported :: proc(v: any, hdr: Header, add: Add = nil) -> Maybe(Unsupported_Type_Error) {
+ return Unsupported_Type_Error{
+ id = v.id,
+ hdr = hdr,
+ add = add,
+ }
+}
+
+// Actual value is `-1 - x` (be careful of overflows).
+
+Negative_U8 :: distinct u8
+Negative_U16 :: distinct u16
+Negative_U32 :: distinct u32
+Negative_U64 :: distinct u64
+
+// Turns the CBOR negative unsigned int type into a signed integer type.
+negative_to_int :: proc {
+ negative_u8_to_int,
+ negative_u16_to_int,
+ negative_u32_to_int,
+ negative_u64_to_int,
+}
+
+negative_u8_to_int :: #force_inline proc(u: Negative_U8) -> i16 {
+ return -1 - i16(u)
+}
+
+negative_u16_to_int :: #force_inline proc(u: Negative_U16) -> i32 {
+ return -1 - i32(u)
+}
+
+negative_u32_to_int :: #force_inline proc(u: Negative_U32) -> i64 {
+ return -1 - i64(u)
+}
+
+negative_u64_to_int :: #force_inline proc(u: Negative_U64) -> i128 {
+ return -1 - i128(u)
+}
+
+// Utility for converting between the different errors when they are subsets of the other.
+err_conv :: proc {
+ encode_to_marshal_err,
+ encode_to_marshal_err_p2,
+ decode_to_unmarshal_err,
+ decode_to_unmarshal_err_p,
+ decode_to_unmarshal_err_p2,
+}
+
+encode_to_marshal_err :: #force_inline proc(err: Encode_Error) -> Marshal_Error {
+ switch e in err {
+ case nil: return nil
+ case io.Error: return e
+ case mem.Allocator_Error: return e
+ case Encode_Data_Error: return e
+ case: return nil
+ }
+}
+
+encode_to_marshal_err_p2 :: #force_inline proc(v: $T, v2: $T2, err: Encode_Error) -> (T, T2, Marshal_Error) {
+ return v, v2, err_conv(err)
+}
+
+decode_to_unmarshal_err :: #force_inline proc(err: Decode_Error) -> Unmarshal_Error {
+ switch e in err {
+ case nil: return nil
+ case io.Error: return e
+ case mem.Allocator_Error: return e
+ case Decode_Data_Error: return e
+ case: return nil
+ }
+}
+
+decode_to_unmarshal_err_p :: #force_inline proc(v: $T, err: Decode_Error) -> (T, Unmarshal_Error) {
+ return v, err_conv(err)
+}
+
+decode_to_unmarshal_err_p2 :: #force_inline proc(v: $T, v2: $T2, err: Decode_Error) -> (T, T2, Unmarshal_Error) {
+ return v, v2, err_conv(err)
+}
+
+// Recursively frees all memory allocated when decoding the passed value.
+destroy :: proc(val: Value, allocator := context.allocator) {
+ context.allocator = allocator
+ #partial switch v in val {
+ case ^Map:
+ if v == nil { return }
+ for entry in v {
+ destroy(entry.key)
+ destroy(entry.value)
+ }
+ delete(v^)
+ free(v)
+ case ^Array:
+ if v == nil { return }
+ for entry in v {
+ destroy(entry)
+ }
+ delete(v^)
+ free(v)
+ case ^Text:
+ if v == nil { return }
+ delete(v^)
+ free(v)
+ case ^Bytes:
+ if v == nil { return }
+ delete(v^)
+ free(v)
+ case ^Tag:
+ if v == nil { return }
+ destroy(v.value)
+ free(v)
+ }
+}
+
+/*
+to_diagnostic_format either writes or returns a human-readable representation of the value,
+optionally formatted, defined as the diagnostic format in [[RFC 8949 Section 8;https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation]].
+
+Incidentally, if the CBOR does not contain any of the additional types defined on top of JSON
+this will also be valid JSON.
+*/
+to_diagnostic_format :: proc {
+ to_diagnostic_format_string,
+ to_diagnostic_format_writer,
+}
+
+// Turns the given CBOR value into a human-readable string.
+// See docs on the proc group `diagnose` for more info.
+to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator) -> (string, mem.Allocator_Error) #optional_allocator_error {
+ b := strings.builder_make(allocator)
+ w := strings.to_stream(&b)
+ err := to_diagnostic_format_writer(w, val, padding)
+ if err == .EOF {
+ // The string builder stream only returns .EOF, and only if it can't write (out of memory).
+ return "", .Out_Of_Memory
+ }
+ assert(err == nil)
+
+ return strings.to_string(b), nil
+}
+
+// Writes the given CBOR value into the writer as human-readable text.
+// See docs on the proc group `diagnose` for more info.
+to_diagnostic_format_writer :: proc(w: io.Writer, val: Value, padding := 0) -> io.Error {
+ @(require_results)
+ indent :: proc(padding: int) -> int {
+ padding := padding
+ if padding != -1 {
+ padding += 1
+ }
+ return padding
+ }
+
+ @(require_results)
+ dedent :: proc(padding: int) -> int {
+ padding := padding
+ if padding != -1 {
+ padding -= 1
+ }
+ return padding
+ }
+
+ comma :: proc(w: io.Writer, padding: int) -> io.Error {
+ _ = io.write_string(w, ", " if padding == -1 else ",") or_return
+ return nil
+ }
+
+ newline :: proc(w: io.Writer, padding: int) -> io.Error {
+ if padding != -1 {
+ io.write_string(w, "\n") or_return
+ for _ in 0..<padding {
+ io.write_string(w, "\t") or_return
+ }
+ }
+ return nil
+ }
+
+ padding := padding
+ switch v in val {
+ case u8: io.write_uint(w, uint(v)) or_return
+ case u16: io.write_uint(w, uint(v)) or_return
+ case u32: io.write_uint(w, uint(v)) or_return
+ case u64: io.write_u64(w, v) or_return
+ case Negative_U8: io.write_int(w, int(negative_to_int(v))) or_return
+ case Negative_U16: io.write_int(w, int(negative_to_int(v))) or_return
+ case Negative_U32: io.write_int(w, int(negative_to_int(v))) or_return
+ case Negative_U64: io.write_i128(w, i128(negative_to_int(v))) or_return
+
+ // NOTE: not using io.write_float because it removes the sign,
+ // which we want for the diagnostic format.
+ case f16:
+ buf: [64]byte
+ str := strconv.append_float(buf[:], f64(v), 'f', 2*size_of(f16), 8*size_of(f16))
+ if str[0] == '+' && str != "+Inf" { str = str[1:] }
+ io.write_string(w, str) or_return
+ case f32:
+ buf: [128]byte
+ str := strconv.append_float(buf[:], f64(v), 'f', 2*size_of(f32), 8*size_of(f32))
+ if str[0] == '+' && str != "+Inf" { str = str[1:] }
+ io.write_string(w, str) or_return
+ case f64:
+ buf: [256]byte
+ str := strconv.append_float(buf[:], f64(v), 'f', 2*size_of(f64), 8*size_of(f64))
+ if str[0] == '+' && str != "+Inf" { str = str[1:] }
+ io.write_string(w, str) or_return
+
+ case bool: io.write_string(w, "true" if v else "false") or_return
+ case Nil: io.write_string(w, "nil") or_return
+ case Undefined: io.write_string(w, "undefined") or_return
+ case ^Bytes:
+ io.write_string(w, "h'") or_return
+ for b in v { io.write_int(w, int(b), 16) or_return }
+ io.write_string(w, "'") or_return
+ case ^Text:
+ io.write_string(w, `"`) or_return
+ io.write_string(w, v^) or_return
+ io.write_string(w, `"`) or_return
+ case ^Array:
+ if v == nil || len(v) == 0 {
+ io.write_string(w, "[]") or_return
+ return nil
+ }
+
+ io.write_string(w, "[") or_return
+
+ padding = indent(padding)
+ newline(w, padding) or_return
+
+ for entry, i in v {
+ to_diagnostic_format(w, entry, padding) or_return
+ if i != len(v)-1 {
+ comma(w, padding) or_return
+ newline(w, padding) or_return
+ }
+ }
+
+ padding = dedent(padding)
+ newline(w, padding) or_return
+
+ io.write_string(w, "]") or_return
+ case ^Map:
+ if v == nil || len(v) == 0 {
+ io.write_string(w, "{}") or_return
+ return nil
+ }
+
+ io.write_string(w, "{") or_return
+
+ padding = indent(padding)
+ newline(w, padding) or_return
+
+ for entry, i in v {
+ to_diagnostic_format(w, entry.key, padding) or_return
+ io.write_string(w, ": ") or_return
+ to_diagnostic_format(w, entry.value, padding) or_return
+ if i != len(v)-1 {
+ comma(w, padding) or_return
+ newline(w, padding) or_return
+ }
+ }
+
+ padding = dedent(padding)
+ newline(w, padding) or_return
+
+ io.write_string(w, "}") or_return
+ case ^Tag:
+ io.write_u64(w, v.number) or_return
+ io.write_string(w, "(") or_return
+ to_diagnostic_format(w, v.value, padding) or_return
+ io.write_string(w, ")") or_return
+ case Simple:
+ io.write_string(w, "simple(") or_return
+ io.write_uint(w, uint(v)) or_return
+ io.write_string(w, ")") or_return
+ }
+ return nil
+}
+
+/*
+Converts from JSON to CBOR.
+
+Everything is copied to the given allocator, the passed in JSON value can be deleted after.
+*/
+from_json :: proc(val: json.Value, allocator := context.allocator) -> (Value, mem.Allocator_Error) #optional_allocator_error {
+	// Recursive worker; relies on `context.allocator` being set by the entry point.
+	internal :: proc(val: json.Value) -> (ret: Value, err: mem.Allocator_Error) {
+		switch v in val {
+		case json.Null: return Nil{}, nil
+		case json.Integer:
+			i, major := _int_to_uint(v)
+			#partial switch major {
+			case .Unsigned: return i, nil
+			case .Negative: return Negative_U64(i), nil
+			case: unreachable()
+			}
+		case json.Float: return v, nil
+		case json.Boolean: return v, nil
+		case json.String:
+			container := new(Text) or_return
+
+			// We need the string to have a nil byte at the end so we clone to cstring.
+			container^ = string(strings.clone_to_cstring(v) or_return)
+			return container, nil
+		case json.Array:
+			arr := new(Array) or_return
+			arr^ = make([]Value, len(v)) or_return
+			for _, i in arr {
+				arr[i] = internal(v[i]) or_return
+			}
+			return arr, nil
+		case json.Object:
+			m := new(Map) or_return
+			dm := make([dynamic]Map_Entry, 0, len(v)) or_return
+			for mkey, mval in v {
+				// Use `internal` directly (consistent with the array branch) and
+				// propagate the allocator error `append` can return instead of
+				// silently dropping the entry on allocation failure.
+				append(&dm, Map_Entry{internal(mkey) or_return, internal(mval) or_return}) or_return
+			}
+			m^ = dm[:]
+			return m, nil
+		}
+		return nil, nil
+	}
+
+	context.allocator = allocator
+	return internal(val)
+}
+
+/*
+Converts from CBOR to JSON.
+
+NOTE: overflow on integers or floats is not handled.
+
+Everything is copied to the given allocator, the passed in CBOR value can be `destroy`'ed after.
+
+If a CBOR map with non-string keys is encountered it is turned into an array of tuples.
+*/
+to_json :: proc(val: Value, allocator := context.allocator) -> (json.Value, mem.Allocator_Error) #optional_allocator_error {
+	// Recursive worker; relies on `context.allocator` being set by the entry point.
+	internal :: proc(val: Value) -> (ret: json.Value, err: mem.Allocator_Error) {
+		switch v in val {
+		case Simple: return json.Integer(v), nil
+
+		case u8:  return json.Integer(v), nil
+		case u16: return json.Integer(v), nil
+		case u32: return json.Integer(v), nil
+		case u64: return json.Integer(v), nil
+
+		case Negative_U8:  return json.Integer(negative_to_int(v)), nil
+		case Negative_U16: return json.Integer(negative_to_int(v)), nil
+		case Negative_U32: return json.Integer(negative_to_int(v)), nil
+		case Negative_U64: return json.Integer(negative_to_int(v)), nil
+
+		case f16: return json.Float(v), nil
+		case f32: return json.Float(v), nil
+		case f64: return json.Float(v), nil
+
+		case bool: return json.Boolean(v), nil
+
+		case Undefined: return json.Null{}, nil
+		case Nil:       return json.Null{}, nil
+
+		case ^Bytes: return json.String(strings.clone(string(v^)) or_return), nil
+		case ^Text:  return json.String(strings.clone(v^) or_return), nil
+
+		case ^Map:
+			// Reports whether every key is a byte- or text-string, in which
+			// case the map can be represented as a JSON object.
+			keys_all_strings :: proc(m: ^Map) -> bool {
+				for entry in m {
+					#partial switch kv in entry.key {
+					case ^Bytes:
+					case ^Text:
+					case: return false
+					}
+				}
+				// Fix: previously returned `false`, which made the object
+				// branch below unreachable and forced *every* map into the
+				// array-of-tuples fallback.
+				return true
+			}
+
+			if keys_all_strings(v) {
+				obj := make(json.Object, len(v)) or_return
+				for entry in v {
+					k: string
+					#partial switch kv in entry.key {
+					case ^Bytes: k = string(kv^)
+					case ^Text:  k = kv^
+					case: unreachable()
+					}
+
+					val := internal(entry.value) or_return
+					// Clone the key: `k` aliases memory owned by the CBOR value,
+					// and the documented contract is that the CBOR value may be
+					// destroyed after conversion.
+					obj[strings.clone(k) or_return] = val
+				}
+				return obj, nil
+			} else {
+				// Resort to an array of tuples if keys aren't all strings.
+				arr := make(json.Array, 0, len(v)) or_return
+				for entry in v {
+					entry_arr := make(json.Array, 0, 2) or_return
+					append(&entry_arr, internal(entry.key) or_return) or_return
+					append(&entry_arr, internal(entry.value) or_return) or_return
+					append(&arr, entry_arr) or_return
+				}
+				return arr, nil
+			}
+
+		case ^Array:
+			arr := make(json.Array, 0, len(v)) or_return
+			for entry in v {
+				append(&arr, internal(entry) or_return) or_return
+			}
+			return arr, nil
+
+		case ^Tag:
+			obj := make(json.Object, 2) or_return
+			obj[strings.clone("number") or_return] = internal(v.number) or_return
+			obj[strings.clone("value") or_return] = internal(v.value) or_return
+			return obj, nil
+
+		case: return json.Null{}, nil
+		}
+	}
+
+	context.allocator = allocator
+	return internal(val)
+}
+
+// Proc group: converts a signed integer into the unsigned magnitude plus the
+// CBOR major type (`.Unsigned` or `.Negative`) it should be encoded with.
+_int_to_uint :: proc {
+	_i8_to_uint,
+	_i16_to_uint,
+	_i32_to_uint,
+	_i64_to_uint,
+	_i128_to_uint,
+}
+
+// Narrows a u128 to u64, erroring when the value does not fit the largest
+// (8 byte) CBOR integer argument.
+_u128_to_u64 :: #force_inline proc(v: u128) -> (u64, Encode_Data_Error) {
+	if v <= u128(max(u64)) {
+		return u64(v), nil
+	}
+	return 0, .Int_Too_Big
+}
+
+// CBOR stores a negative integer n as the unsigned value -(n + 1).
+_i8_to_uint :: #force_inline proc(v: i8) -> (u: u8, m: Major) {
+	if v >= 0 {
+		return u8(v), .Unsigned
+	}
+	return u8(-(v + 1)), .Negative
+}
+
+// CBOR stores a negative integer n as the unsigned value -(n + 1).
+_i16_to_uint :: #force_inline proc(v: i16) -> (u: u16, m: Major) {
+	if v >= 0 {
+		return u16(v), .Unsigned
+	}
+	return u16(-(v + 1)), .Negative
+}
+
+// CBOR stores a negative integer n as the unsigned value -(n + 1).
+_i32_to_uint :: #force_inline proc(v: i32) -> (u: u32, m: Major) {
+	if v >= 0 {
+		return u32(v), .Unsigned
+	}
+	return u32(-(v + 1)), .Negative
+}
+
+// CBOR stores a negative integer n as the unsigned value -(n + 1).
+_i64_to_uint :: #force_inline proc(v: i64) -> (u: u64, m: Major) {
+	if v >= 0 {
+		return u64(v), .Unsigned
+	}
+	return u64(-(v + 1)), .Negative
+}
+
+// Like the narrower overloads, but the magnitude may exceed u64, in which case
+// `.Int_Too_Big` is reported via `_u128_to_u64`.
+_i128_to_uint :: proc(v: i128) -> (u: u64, m: Major, err: Encode_Data_Error) {
+	if v >= 0 {
+		m = .Unsigned
+		u, err = _u128_to_u64(u128(v))
+		return
+	}
+
+	// CBOR stores a negative integer n as the unsigned value -(n + 1).
+	m = .Negative
+	u, err = _u128_to_u64(u128(-(v + 1)))
+	return
+}
diff --git a/core/encoding/cbor/coding.odin b/core/encoding/cbor/coding.odin
new file mode 100644
index 000000000..0d276a7a1
--- /dev/null
+++ b/core/encoding/cbor/coding.odin
@@ -0,0 +1,886 @@
+package encoding_cbor
+
+import "base:intrinsics"
+import "base:runtime"
+
+import "core:bytes"
+import "core:encoding/endian"
+import "core:io"
+import "core:slice"
+import "core:strings"
+
+// Per-encoder behavior flags; combine into an `Encoder_Flags` bit_set.
+Encoder_Flag :: enum {
+	// CBOR defines a tag header that also acts as a file/binary header,
+	// this way decoders can check the first header of the binary and see if it is CBOR.
+	Self_Described_CBOR,
+
+	// Integers are stored in the smallest integer type it fits.
+	// This involves checking each int against the max of all its smaller types.
+	Deterministic_Int_Size,
+
+	// Floats are stored in the smallest size float type without losing precision.
+	// This involves casting each float down to its smaller types and checking if it changed.
+	Deterministic_Float_Size,
+
+	// Sort maps by their keys in bytewise lexicographic order of their deterministic encoding.
+	// NOTE: In order to do this, all keys of a map have to be pre-computed, sorted, and
+	// then written, this involves temporary allocations for the keys and a copy of the map itself.
+	Deterministic_Map_Sorting,
+}
+
+Encoder_Flags :: bit_set[Encoder_Flag]
+
+// Flags for fully deterministic output (if you are not using streaming/indeterminate length).
+ENCODE_FULLY_DETERMINISTIC :: Encoder_Flags{.Deterministic_Int_Size, .Deterministic_Float_Size, .Deterministic_Map_Sorting}
+
+// Flags for the smallest encoding output.
+ENCODE_SMALL :: Encoder_Flags{.Deterministic_Int_Size, .Deterministic_Float_Size}
+
+// Destination and configuration used by `encode_into_encoder`.
+Encoder :: struct {
+	flags: Encoder_Flags,
+	writer: io.Writer,
+	temp_allocator: runtime.Allocator,
+}
+
+// Per-decoder behavior flags; combine into a `Decoder_Flags` bit_set.
+Decoder_Flag :: enum {
+	// Rejects (with an error `.Disallowed_Streaming`) when a streaming CBOR header is encountered.
+	Disallow_Streaming,
+
+	// Pre-allocates buffers and containers with the size that was set in the CBOR header.
+	// This should only be enabled when you control both ends of the encoding, if you don't,
+	// attackers can craft input that causes massive (`max(u64)`) byte allocations for a few bytes of
+	// CBOR.
+	Trusted_Input,
+
+	// Makes the decoder shrink excess capacity off allocated buffers/containers before returning.
+	Shrink_Excess,
+}
+
+Decoder_Flags :: bit_set[Decoder_Flag]
+
+// Source and configuration used by `decode_from_decoder`.
+Decoder :: struct {
+	// The max amount of bytes allowed to pre-allocate when `.Trusted_Input` is not set on the
+	// flags.
+	max_pre_alloc: int,
+
+	flags: Decoder_Flags,
+	reader: io.Reader,
+}
+
+/*
+Decodes both deterministic and non-deterministic CBOR into a `Value` variant.
+
+`Text` and `Bytes` can safely be cast to cstrings because of an added 0 byte.
+
+Allocations are done using the given allocator,
+*no* allocations are done on the `context.temp_allocator`.
+
+A value can be (fully and recursively) deallocated using the `destroy` proc in this package.
+
+Disable streaming/indeterminate lengths with the `.Disallow_Streaming` flag.
+
+Shrink excess bytes in buffers and containers with the `.Shrink_Excess` flag.
+
+Mark the input as trusted input with the `.Trusted_Input` flag, this turns off the safety feature
+of not pre-allocating more than `max_pre_alloc` bytes before reading into the bytes. You should only
+do this when you own both sides of the encoding and are sure there can't be malicious bytes used as
+an input.
+*/
+// Proc group: decode CBOR from a string, an `io.Reader`, or a fully configured `Decoder`.
+decode_from :: proc {
+	decode_from_string,
+	decode_from_reader,
+	decode_from_decoder,
+}
+// Convenience alias for `decode_from`.
+decode :: decode_from
+
+// Decodes the given string as CBOR.
+// See docs on the proc group `decode` for more information.
+decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+	sr: strings.Reader
+	strings.reader_init(&sr, s)
+	stream := strings.reader_to_stream(&sr)
+	return decode_from_reader(stream, flags, allocator)
+}
+
+// Reads a CBOR value from the given reader.
+// See docs on the proc group `decode` for more information.
+decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+	d := Decoder{
+		max_pre_alloc = DEFAULT_MAX_PRE_ALLOC,
+		flags         = flags,
+		reader        = r,
+	}
+	return decode_from_decoder(d, allocator=allocator)
+}
+
+// Reads a CBOR value from the given decoder.
+// See docs on the proc group `decode` for more information.
+decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+	context.allocator = allocator
+
+	d := d
+
+	// Guard against a zero-valued `Decoder`: fall back to the default pre-allocation budget.
+	if d.max_pre_alloc <= 0 {
+		d.max_pre_alloc = DEFAULT_MAX_PRE_ALLOC
+	}
+
+	v, err = _decode_from_decoder(d)
+	// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+}
+
+// Decodes a single CBOR item. If `hdr` is the zero header, a header byte is
+// read from the stream first; otherwise the given (already consumed) header is used.
+// Returns `.Break` as the error when a break header is read, which callers decoding
+// indefinite-length containers treat as the end-of-stream marker.
+_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value, err: Decode_Error) {
+	hdr := hdr
+	r := d.reader
+	if hdr == Header(0) { hdr = _decode_header(r) or_return }
+	// First handle the headers that identify the value on their own.
+	switch hdr {
+	case .U8:  return _decode_u8 (r)
+	case .U16: return _decode_u16(r)
+	case .U32: return _decode_u32(r)
+	case .U64: return _decode_u64(r)
+
+	case .Neg_U8:  return Negative_U8 (_decode_u8 (r) or_return), nil
+	case .Neg_U16: return Negative_U16(_decode_u16(r) or_return), nil
+	case .Neg_U32: return Negative_U32(_decode_u32(r) or_return), nil
+	case .Neg_U64: return Negative_U64(_decode_u64(r) or_return), nil
+
+	case .Simple: return _decode_simple(r)
+
+	case .F16: return _decode_f16(r)
+	case .F32: return _decode_f32(r)
+	case .F64: return _decode_f64(r)
+
+	case .True:  return true, nil
+	case .False: return false, nil
+
+	case .Nil:       return Nil{}, nil
+	case .Undefined: return Undefined{}, nil
+
+	case .Break: return nil, .Break
+	}
+
+	// Otherwise dispatch on the major type (upper 3 bits) with its additional info (lower 5 bits).
+	maj, add := _header_split(hdr)
+	switch maj {
+	case .Unsigned: return _decode_tiny_u8(add)
+	case .Negative: return Negative_U8(_decode_tiny_u8(add) or_return), nil
+	case .Bytes:    return _decode_bytes_ptr(d, add)
+	case .Text:     return _decode_text_ptr(d, add)
+	case .Array:    return _decode_array_ptr(d, add)
+	case .Map:      return _decode_map_ptr(d, add)
+	case .Tag:      return _decode_tag_ptr(d, add)
+	case .Other:    return _decode_tiny_simple(add)
+	case:           return nil, .Bad_Major
+	}
+}
+
+/*
+Encodes the CBOR value into a binary CBOR.
+
+Flags can be used to control the output (mainly determinism, which coincidently affects size).
+
+The default flags `ENCODE_SMALL` (`.Deterministic_Int_Size`, `.Deterministic_Float_Size`) will try
+to put ints and floats into their smallest possible byte size without losing equality.
+
+Adding the `.Self_Described_CBOR` flag will wrap the value in a tag that lets generic decoders know
+the contents are CBOR from just reading the first byte.
+
+Adding the `.Deterministic_Map_Sorting` flag will sort the encoded maps by the byte content of the
+encoded key. This flag has a cost on performance and memory efficiency because all keys in a map
+have to be precomputed, sorted and only then written to the output.
+
+Empty flags will do nothing extra to the value.
+
+The allocations for the `.Deterministic_Map_Sorting` flag are done using the given temp_allocator,
+but are followed by the necessary `delete` and `free` calls if the allocator supports them.
+This is helpful when the CBOR size is so big that you don't want to collect all the temporary
+allocations until the end.
+*/
+// Proc group: encode a CBOR value into freshly allocated bytes, a `strings.Builder`,
+// an `io.Writer`, or a fully configured `Encoder`.
+encode_into :: proc {
+	encode_into_bytes,
+	encode_into_builder,
+	encode_into_writer,
+	encode_into_encoder,
+}
+// Convenience alias for `encode_into`.
+encode :: encode_into
+
+// Encodes the CBOR value into binary CBOR allocated on the given allocator.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (data: []byte, err: Encode_Error) {
+	builder := strings.builder_make(allocator) or_return
+	encode_into_builder(&builder, v, flags, temp_allocator) or_return
+	data = builder.buf[:]
+	return
+}
+
+// Encodes the CBOR value into binary CBOR appended to the given builder.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
+	return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator)
+}
+
+// Encodes the CBOR value into binary CBOR written to the given writer.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
+	return encode_into_encoder(Encoder{flags, w, temp_allocator}, v)
+}
+
+// Encodes the CBOR value into binary CBOR written to the given encoder.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_encoder :: proc(e: Encoder, v: Value) -> Encode_Error {
+	e := e
+
+	// Fall back to the context's temp allocator if none was configured on the encoder.
+	if e.temp_allocator.procedure == nil {
+		e.temp_allocator = context.temp_allocator
+	}
+
+	// Emit the self-described tag once, then clear the flag so the recursive
+	// `encode` calls below don't emit it again for nested values.
+	if .Self_Described_CBOR in e.flags {
+		_encode_u64(e, TAG_SELF_DESCRIBED_CBOR, .Tag) or_return
+		e.flags &~= { .Self_Described_CBOR }
+	}
+
+	// Dispatch on the concrete variant of the value.
+	switch v_spec in v {
+	case u8:           return _encode_u8(e.writer, v_spec, .Unsigned)
+	case u16:          return _encode_u16(e, v_spec, .Unsigned)
+	case u32:          return _encode_u32(e, v_spec, .Unsigned)
+	case u64:          return _encode_u64(e, v_spec, .Unsigned)
+	case Negative_U8:  return _encode_u8(e.writer, u8(v_spec), .Negative)
+	case Negative_U16: return _encode_u16(e, u16(v_spec), .Negative)
+	case Negative_U32: return _encode_u32(e, u32(v_spec), .Negative)
+	case Negative_U64: return _encode_u64(e, u64(v_spec), .Negative)
+	case ^Bytes:       return _encode_bytes(e, v_spec^)
+	case ^Text:        return _encode_text(e, v_spec^)
+	case ^Array:       return _encode_array(e, v_spec^)
+	case ^Map:         return _encode_map(e, v_spec^)
+	case ^Tag:         return _encode_tag(e, v_spec^)
+	case Simple:       return _encode_simple(e.writer, v_spec)
+	case f16:          return _encode_f16(e.writer, v_spec)
+	case f32:          return _encode_f32(e, v_spec)
+	case f64:          return _encode_f64(e, v_spec)
+	case bool:         return _encode_bool(e.writer, v_spec)
+	case Nil:          return _encode_nil(e.writer)
+	case Undefined:    return _encode_undefined(e.writer)
+	case:              return nil
+	}
+}
+
+// Reads a single header byte from the stream.
+_decode_header :: proc(r: io.Reader) -> (hdr: Header, err: io.Error) {
+	b := _decode_u8(r) or_return
+	return Header(b), nil
+}
+
+// Splits a header byte into its major type (upper 3 bits) and additional info (lower 5 bits).
+_header_split :: proc(hdr: Header) -> (Major, Add) {
+	raw := u8(hdr)
+	return Major(raw >> 5), Add(raw & 0x1f)
+}
+
+// Reads exactly one byte from the stream.
+_decode_u8 :: proc(r: io.Reader) -> (v: u8, err: io.Error) {
+	buf: [1]byte = ---
+	io.read_full(r, buf[:]) or_return
+	return buf[0], nil
+}
+
+// Proc group: encode an unsigned integer of any width under the given major type.
+_encode_uint :: proc {
+	_encode_u8,
+	_encode_u16,
+	_encode_u32,
+	_encode_u64,
+}
+
+// Encodes a u8 under the given major type. Values small enough to fit the
+// 5 additional bits are embedded in the header itself; larger ones get a
+// one-byte follow-up argument.
+_encode_u8 :: proc(w: io.Writer, v: u8, major: Major = .Unsigned) -> (err: io.Error) {
+	hdr := u8(major) << 5
+	if v >= u8(Add.One_Byte) {
+		_, err = io.write_full(w, {hdr | u8(Add.One_Byte), v})
+		return
+	}
+
+	_, err = io.write_full(w, {hdr | v})
+	return
+}
+
+// Extracts a "tiny" unsigned value embedded directly in the header's additional bits.
+_decode_tiny_u8 :: proc(additional: Add) -> (u8, Decode_Data_Error) {
+	if additional >= .One_Byte {
+		return 0, .Bad_Argument
+	}
+	return u8(additional), nil
+}
+
+// Reads a big-endian u16 argument from the stream.
+_decode_u16 :: proc(r: io.Reader) -> (v: u16, err: io.Error) {
+	bytes: [2]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	return endian.unchecked_get_u16be(bytes[:]), nil
+}
+
+// Encodes a u16, using the smallest representation when `.Deterministic_Int_Size` is set.
+_encode_u16 :: proc(e: Encoder, v: u16, major: Major = .Unsigned) -> Encode_Error {
+	if .Deterministic_Int_Size in e.flags {
+		return _encode_deterministic_uint(e.writer, v, major)
+	}
+	return _encode_u16_exact(e.writer, v, major)
+}
+
+// Writes a header byte followed by exactly two big-endian argument bytes.
+_encode_u16_exact :: proc(w: io.Writer, v: u16, major: Major = .Unsigned) -> (err: io.Error) {
+	bytes: [3]byte = ---
+	bytes[0] = (u8(major) << 5) | u8(Add.Two_Bytes)
+	endian.unchecked_put_u16be(bytes[1:], v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+// Reads a big-endian u32 argument from the stream.
+_decode_u32 :: proc(r: io.Reader) -> (v: u32, err: io.Error) {
+	bytes: [4]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	return endian.unchecked_get_u32be(bytes[:]), nil
+}
+
+// Encodes a u32, using the smallest representation when `.Deterministic_Int_Size` is set.
+_encode_u32 :: proc(e: Encoder, v: u32, major: Major = .Unsigned) -> Encode_Error {
+	if .Deterministic_Int_Size in e.flags {
+		return _encode_deterministic_uint(e.writer, v, major)
+	}
+	return _encode_u32_exact(e.writer, v, major)
+}
+
+// Writes a header byte followed by exactly four big-endian argument bytes.
+_encode_u32_exact :: proc(w: io.Writer, v: u32, major: Major = .Unsigned) -> (err: io.Error) {
+	bytes: [5]byte = ---
+	bytes[0] = (u8(major) << 5) | u8(Add.Four_Bytes)
+	endian.unchecked_put_u32be(bytes[1:], v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+// Reads a big-endian u64 argument from the stream.
+_decode_u64 :: proc(r: io.Reader) -> (v: u64, err: io.Error) {
+	bytes: [8]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	return endian.unchecked_get_u64be(bytes[:]), nil
+}
+
+// Encodes a u64, using the smallest representation when `.Deterministic_Int_Size` is set.
+_encode_u64 :: proc(e: Encoder, v: u64, major: Major = .Unsigned) -> Encode_Error {
+	if .Deterministic_Int_Size in e.flags {
+		return _encode_deterministic_uint(e.writer, v, major)
+	}
+	return _encode_u64_exact(e.writer, v, major)
+}
+
+// Writes a header byte followed by exactly eight big-endian argument bytes.
+_encode_u64_exact :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> (err: io.Error) {
+	bytes: [9]byte = ---
+	bytes[0] = (u8(major) << 5) | u8(Add.Eight_Bytes)
+	endian.unchecked_put_u64be(bytes[1:], v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+// Decodes a byte string and places it behind a heap allocation,
+// freeing that allocation again if decoding fails.
+_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes) -> (v: ^Bytes, err: Decode_Error) {
+	ptr := new(Bytes) or_return
+	defer if err != nil { free(ptr) }
+
+	ptr^ = _decode_bytes(d, add, type) or_return
+	return ptr, nil
+}
+
+// Decodes a (possibly indefinite-length/streamed) byte or text string.
+// The result has a trailing zero byte just past its length so it can be cast to a cstring.
+_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator) -> (v: Bytes, err: Decode_Error) {
+	context.allocator = allocator
+
+	add := add
+	n, scap := _decode_len_str(d, add) or_return
+
+	buf := strings.builder_make(0, scap) or_return
+	defer if err != nil { strings.builder_destroy(&buf) }
+	buf_stream := strings.to_stream(&buf)
+
+	if n == -1 {
+		// Indefinite length: concatenate definite-length chunks of the same
+		// major type until a break header is read.
+		indefinite_loop: for {
+			header := _decode_header(d.reader) or_return
+			maj: Major
+			maj, add = _header_split(header)
+			#partial switch maj {
+			case type:
+				iter_n, iter_cap := _decode_len_str(d, add) or_return
+				if iter_n == -1 {
+					return nil, .Nested_Indefinite_Length
+				}
+				reserve(&buf.buf, len(buf.buf) + iter_cap) or_return
+				io.copy_n(buf_stream, d.reader, i64(iter_n)) or_return
+
+			case .Other:
+				if add != .Break { return nil, .Bad_Argument }
+				break indefinite_loop
+
+			case:
+				return nil, .Bad_Major
+			}
+		}
+	} else {
+		io.copy_n(buf_stream, d.reader, i64(n)) or_return
+	}
+
+	// Write zero byte so this can be converted to cstring.
+	strings.write_byte(&buf, 0)
+
+	if .Shrink_Excess in d.flags { shrink(&buf.buf) }
+
+	// Fix: take the result slice only *after* the zero-byte append and the
+	// shrink above. Both can reallocate the builder's backing array, which
+	// would leave a slice taken earlier pointing at freed memory.
+	v = buf.buf[:len(buf.buf)-1]
+	return
+}
+
+// Writes a string header (major type + byte length) followed by the raw bytes.
+_encode_bytes :: proc(e: Encoder, val: Bytes, major: Major = .Bytes) -> (err: Encode_Error) {
+	assert(len(val) >= 0)
+	length := u64(len(val))
+	_encode_u64(e, length, major) or_return
+	_, err = io.write_full(e.writer, val[:])
+	return
+}
+
+// Decodes a text string and places it behind a heap allocation,
+// freeing that allocation again if decoding fails.
+_decode_text_ptr :: proc(d: Decoder, add: Add) -> (v: ^Text, err: Decode_Error) {
+	ptr := new(Text) or_return
+	defer if err != nil { free(ptr) }
+
+	ptr^ = _decode_text(d, add) or_return
+	return ptr, nil
+}
+
+// Decodes a text string; identical wire handling to `_decode_bytes`, just typed as `Text`.
+_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator) -> (v: Text, err: Decode_Error) {
+	raw := _decode_bytes(d, add, .Text, allocator) or_return
+	return Text(raw), nil
+}
+
+// Encodes a text string; same wire layout as a byte string but with the `.Text` major type.
+_encode_text :: proc(e: Encoder, val: Text) -> Encode_Error {
+	return _encode_bytes(e, transmute([]byte)val, .Text)
+}
+
+// Decodes an array and places it behind a heap allocation,
+// freeing that allocation again if decoding fails.
+_decode_array_ptr :: proc(d: Decoder, add: Add) -> (v: ^Array, err: Decode_Error) {
+	ptr := new(Array) or_return
+	defer if err != nil { free(ptr) }
+
+	ptr^ = _decode_array(d, add) or_return
+	return ptr, nil
+}
+
+// Decodes an array of values; `add` carries either a definite length or the streaming marker.
+_decode_array :: proc(d: Decoder, add: Add) -> (v: Array, err: Decode_Error) {
+	n, scap := _decode_len_container(d, add) or_return
+	array := make([dynamic]Value, 0, scap) or_return
+	// On failure, recursively free everything decoded so far.
+	defer if err != nil {
+		for entry in array { destroy(entry) }
+		delete(array)
+	}
+
+	// n == -1 means indefinite length: read items until a `.Break` is returned.
+	for i := 0; n == -1 || i < n; i += 1 {
+		val, verr := _decode_from_decoder(d)
+		if n == -1 && verr == .Break {
+			break
+		} else if verr != nil {
+			err = verr
+			return
+		}
+
+		append(&array, val) or_return
+	}
+
+	if .Shrink_Excess in d.flags { shrink(&array) }
+
+	v = array[:]
+	return
+}
+
+// Writes an array header (major type + item count) followed by each encoded item.
+_encode_array :: proc(e: Encoder, arr: Array) -> Encode_Error {
+	assert(len(arr) >= 0)
+	// Fix: propagate the header write error; it was previously discarded,
+	// unlike every other `_encode_u64` call site in this file.
+	_encode_u64(e, u64(len(arr)), .Array) or_return
+	for val in arr {
+		encode(e, val) or_return
+	}
+	return nil
+}
+
+// Decodes a map and places it behind a heap allocation,
+// freeing that allocation again if decoding fails.
+_decode_map_ptr :: proc(d: Decoder, add: Add) -> (v: ^Map, err: Decode_Error) {
+	ptr := new(Map) or_return
+	defer if err != nil { free(ptr) }
+
+	ptr^ = _decode_map(d, add) or_return
+	return ptr, nil
+}
+
+// Decodes a map as a flat list of key/value entries; `add` carries either a
+// definite entry count or the streaming marker.
+_decode_map :: proc(d: Decoder, add: Add) -> (v: Map, err: Decode_Error) {
+	n, scap := _decode_len_container(d, add) or_return
+	items := make([dynamic]Map_Entry, 0, scap) or_return
+	// On failure, recursively free all entries decoded so far.
+	defer if err != nil {
+		for entry in items {
+			destroy(entry.key)
+			destroy(entry.value)
+		}
+		delete(items)
+	}
+
+	// n == -1 means indefinite length: a `.Break` in key position ends the map.
+	for i := 0; n == -1 || i < n; i += 1 {
+		key, kerr := _decode_from_decoder(d)
+		if n == -1 && kerr == .Break {
+			break
+		} else if kerr != nil {
+			return nil, kerr
+		}
+
+		value := _decode_from_decoder(d) or_return
+
+		append(&items, Map_Entry{
+			key   = key,
+			value = value,
+		}) or_return
+	}
+
+	if .Shrink_Excess in d.flags { shrink(&items) }
+
+	v = items[:]
+	return
+}
+
+// Writes a map header (major type + entry count) followed by each key/value pair.
+// With `.Deterministic_Map_Sorting` the entries are first encoded to temporary
+// buffers and sorted bytewise by encoded key, per the deterministic encoding rules.
+_encode_map :: proc(e: Encoder, m: Map) -> (err: Encode_Error) {
+	assert(len(m) >= 0)
+	_encode_u64(e, u64(len(m)), .Map) or_return
+
+	if .Deterministic_Map_Sorting not_in e.flags {
+		for entry in m {
+			encode(e, entry.key)   or_return
+			encode(e, entry.value) or_return
+		}
+		return
+	}
+
+	// Deterministic_Map_Sorting needs us to sort the entries by the byte contents of the
+	// encoded key.
+	//
+	// This means we have to store and sort them before writing incurring extra (temporary) allocations.
+
+	Map_Entry_With_Key :: struct {
+		encoded_key: []byte,
+		entry:       Map_Entry,
+	}
+
+	entries := make([]Map_Entry_With_Key, len(m), e.temp_allocator) or_return
+	defer delete(entries, e.temp_allocator)
+
+	// Pre-encode every key into its own temporary buffer.
+	for &entry, i in entries {
+		entry.entry = m[i]
+
+		buf := strings.builder_make(e.temp_allocator) or_return
+
+		// Re-use the encoder's flags but point its writer at the temp buffer.
+		ke := e
+		ke.writer = strings.to_stream(&buf)
+
+		encode(ke, entry.entry.key) or_return
+		entry.encoded_key = buf.buf[:]
+	}
+
+	// Sort lexicographic on the bytes of the key.
+	slice.sort_by_cmp(entries, proc(a, b: Map_Entry_With_Key) -> slice.Ordering {
+		return slice.Ordering(bytes.compare(a.encoded_key, b.encoded_key))
+	})
+
+	// Write the pre-encoded keys (freeing each as we go) followed by the values.
+	for entry in entries {
+		io.write_full(e.writer, entry.encoded_key) or_return
+		delete(entry.encoded_key, e.temp_allocator)
+
+		encode(e, entry.entry.value) or_return
+	}
+
+	return nil
+}
+
+// Decodes a tag and places it behind a heap allocation. The self-described-CBOR
+// tag is transparent: its inner value is decoded and returned directly.
+_decode_tag_ptr :: proc(d: Decoder, add: Add) -> (v: Value, err: Decode_Error) {
+	tag := _decode_tag(d, add) or_return
+	if t, ok := tag.?; ok {
+		// If allocating the wrapper fails, free the already-decoded inner value.
+		defer if err != nil { destroy(t.value) }
+		tp := new(Tag) or_return
+		tp^ = t
+		return tp, nil
+	}
+
+	// no error, no tag, this was the self described CBOR tag, skip it.
+	return _decode_from_decoder(d)
+}
+
+// Decodes a tag number and its wrapped value. Returns `nil` (no tag, no error)
+// for the self-described-CBOR tag, and rejects a tag directly wrapping a tag.
+_decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error) {
+	num := _decode_uint_as_u64(d.reader, add) or_return
+
+	// CBOR can be wrapped in a tag that decoders can use to see/check if the binary data is CBOR.
+	// We can ignore it here.
+	if num == TAG_SELF_DESCRIBED_CBOR {
+		return
+	}
+
+	t := Tag{
+		number = num,
+		value  = _decode_from_decoder(d) or_return,
+	}
+
+	// A tag whose immediate content is another tag is rejected by this decoder.
+	if nested, ok := t.value.(^Tag); ok {
+		destroy(nested)
+		return nil, .Nested_Tag
+	}
+
+	return t, nil
+}
+
+// Reads a header's argument as a u64, whether it is embedded in the additional
+// bits or follows the header as 1, 2, 4 or 8 bytes.
+_decode_uint_as_u64 :: proc(r: io.Reader, add: Add) -> (nr: u64, err: Decode_Error) {
+	#partial switch add {
+	case .One_Byte:    nr = u64(_decode_u8(r)  or_return)
+	case .Two_Bytes:   nr = u64(_decode_u16(r) or_return)
+	case .Four_Bytes:  nr = u64(_decode_u32(r) or_return)
+	case .Eight_Bytes: nr = u64(_decode_u64(r) or_return)
+	case:              nr = u64(_decode_tiny_u8(add) or_return)
+	}
+	return
+}
+
+// Writes the tag number (under the `.Tag` major type) followed by the wrapped value.
+_encode_tag :: proc(e: Encoder, val: Tag) -> Encode_Error {
+	_encode_u64(e, val.number, .Tag) or_return
+	return encode(e, val.value)
+}
+
+// Reads a one-byte simple value that follows a `.Simple` header.
+_decode_simple :: proc(r: io.Reader) -> (v: Simple, err: io.Error) {
+	b: [1]byte = ---
+	io.read_full(r, b[:]) or_return
+	return Simple(b[0]), nil
+}
+
+// Encodes a simple value. Values below `Add.False` are embedded in the header
+// itself; values up to and including `Add.Break` collide with the well-known
+// headers (false/true/nil/undefined/.../break) and are rejected; anything
+// larger is written as a one-byte follow-up argument.
+_encode_simple :: proc(w: io.Writer, v: Simple) -> (err: Encode_Error) {
+	header := u8(Major.Other) << 5
+
+	if v < Simple(Add.False) {
+		header |= u8(v)
+		_, err = io.write_full(w, {header})
+		return
+	} else if v <= Simple(Add.Break) {
+		return .Invalid_Simple
+	}
+
+	header |= u8(Add.One_Byte)
+	_, err = io.write_full(w, {header, u8(v)})
+	return
+}
+
+// Extracts a simple value embedded directly in the header's additional bits.
+// Values at or above `Add.False` overlap well-known headers and are rejected.
+_decode_tiny_simple :: proc(add: Add) -> (Simple, Decode_Data_Error) {
+	if add >= Add.False {
+		return 0, .Bad_Argument
+	}
+	return Simple(add), nil
+}
+
+// Reads a big-endian f16 (bits transmuted from u16).
+_decode_f16 :: proc(r: io.Reader) -> (v: f16, err: io.Error) {
+	bytes: [2]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	n := endian.unchecked_get_u16be(bytes[:])
+	return transmute(f16)n, nil
+}
+
+// Writes the f16 header followed by the float's big-endian bits.
+_encode_f16 :: proc(w: io.Writer, v: f16) -> (err: io.Error) {
+	bytes: [3]byte = ---
+	bytes[0] = u8(Header.F16)
+	endian.unchecked_put_u16be(bytes[1:], transmute(u16)v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+// Reads a big-endian f32 (bits transmuted from u32).
+_decode_f32 :: proc(r: io.Reader) -> (v: f32, err: io.Error) {
+	bytes: [4]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	n := endian.unchecked_get_u32be(bytes[:])
+	return transmute(f32)n, nil
+}
+
+// Encodes an f32, downcasting losslessly when `.Deterministic_Float_Size` is set.
+_encode_f32 :: proc(e: Encoder, v: f32) -> io.Error {
+	if .Deterministic_Float_Size in e.flags {
+		return _encode_deterministic_float(e.writer, v)
+	}
+	return _encode_f32_exact(e.writer, v)
+}
+
+// Writes the f32 header followed by the float's big-endian bits.
+_encode_f32_exact :: proc(w: io.Writer, v: f32) -> (err: io.Error) {
+	bytes: [5]byte = ---
+	bytes[0] = u8(Header.F32)
+	endian.unchecked_put_u32be(bytes[1:], transmute(u32)v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+// Reads a big-endian f64 (bits transmuted from u64).
+_decode_f64 :: proc(r: io.Reader) -> (v: f64, err: io.Error) {
+	bytes: [8]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	n := endian.unchecked_get_u64be(bytes[:])
+	return transmute(f64)n, nil
+}
+
+// Encodes an f64, downcasting losslessly when `.Deterministic_Float_Size` is set.
+_encode_f64 :: proc(e: Encoder, v: f64) -> io.Error {
+	if .Deterministic_Float_Size in e.flags {
+		return _encode_deterministic_float(e.writer, v)
+	}
+	return _encode_f64_exact(e.writer, v)
+}
+
+// Writes the f64 header followed by the float's big-endian bits.
+_encode_f64_exact :: proc(w: io.Writer, v: f64) -> (err: io.Error) {
+	bytes: [9]byte = ---
+	bytes[0] = u8(Header.F64)
+	endian.unchecked_put_u64be(bytes[1:], transmute(u64)v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+// Writes the one-byte `true` or `false` header.
+_encode_bool :: proc(w: io.Writer, v: bool) -> (err: io.Error) {
+	hdr := u8(Header.False)
+	if v { hdr = u8(Header.True) }
+	_, err = io.write_full(w, {hdr})
+	return
+}
+
+// Writes the one-byte `undefined` header.
+_encode_undefined :: proc(w: io.Writer) -> io.Error {
+	_, werr := io.write_full(w, {u8(Header.Undefined)})
+	return werr
+}
+
+// Writes the one-byte `nil`/null header.
+_encode_nil :: proc(w: io.Writer) -> io.Error {
+	_, werr := io.write_full(w, {u8(Header.Nil)})
+	return werr
+}
+
+// Streaming
+
+// Begins an indefinite-length (streamed) item; only string and container
+// major types may be streamed.
+encode_stream_begin :: proc(w: io.Writer, major: Major) -> (err: io.Error) {
+	assert(major >= Major(.Bytes) && major <= Major(.Map), "illegal stream type")
+
+	header := (u8(major) << 5) | u8(Add.Length_Unknown)
+	_, err = io.write_full(w, {header})
+	return
+}
+
+// Terminates an indefinite-length item by writing the break header.
+encode_stream_end :: proc(w: io.Writer) -> io.Error {
+	header := (u8(Major.Other) << 5) | u8(Add.Break)
+	_, err := io.write_full(w, {header})
+	return err
+}
+
+// Streamed chunks/items use the same wire format as their definite-length counterparts.
+encode_stream_bytes      :: _encode_bytes
+encode_stream_text       :: _encode_text
+encode_stream_array_item :: encode
+
+// Writes one key/value pair of a streamed map.
+encode_stream_map_entry :: proc(e: Encoder, key: Value, val: Value) -> Encode_Error {
+	encode(e, key) or_return
+	return encode(e, val)
+}
+
+// For `Bytes` and `Text` strings: Decodes the number of items the header says follows.
+// If the number is not specified -1 is returned and streaming should be initiated.
+// A suitable starting capacity is also returned for a buffer that is allocated up the stack.
+_decode_len_str :: proc(d: Decoder, add: Add) -> (n: int, scap: int, err: Decode_Error) {
+	if add == .Length_Unknown {
+		if .Disallow_Streaming in d.flags {
+			return -1, -1, .Disallowed_Streaming
+		}
+		return -1, INITIAL_STREAMED_BYTES_CAPACITY, nil
+	}
+
+	_n := _decode_uint_as_u64(d.reader, add) or_return
+	if _n > u64(max(int)) { return -1, -1, .Length_Too_Big }
+	n = int(_n)
+
+	scap = n + 1 // Space for zero byte.
+	// For untrusted input, clamp the pre-allocation so a malicious length
+	// can't force a huge up-front allocation; the buffer still grows as needed.
+	if .Trusted_Input not_in d.flags {
+		scap = min(d.max_pre_alloc, scap)
+	}
+
+	return
+}
+
+// For `Array` and `Map` types: Decodes the number of items the header says follows.
+// If the number is not specified -1 is returned and streaming should be initiated.
+// A suitable starting capacity is also returned for a buffer that is allocated up the stack.
+_decode_len_container :: proc(d: Decoder, add: Add) -> (n: int, scap: int, err: Decode_Error) {
+	if add == .Length_Unknown {
+		if .Disallow_Streaming in d.flags {
+			return -1, -1, .Disallowed_Streaming
+		}
+		return -1, INITIAL_STREAMED_CONTAINER_CAPACITY, nil
+	}
+
+	_n := _decode_uint_as_u64(d.reader, add) or_return
+	if _n > u64(max(int)) { return -1, -1, .Length_Too_Big }
+	n = int(_n)
+
+	scap = n
+	// For untrusted input, express the byte budget in number of `Value`s.
+	if .Trusted_Input not_in d.flags {
+		// NOTE: if this is a map it will be twice this.
+		scap = min(d.max_pre_alloc / size_of(Value), scap)
+	}
+
+	return
+}
+
+// Deterministic encoding is (among other things) encoding all values into their smallest
+// possible representation.
+// See section 4 of RFC 8949.
+
+// Proc group: encode an unsigned integer in the smallest representation that fits it.
+_encode_deterministic_uint :: proc {
+	_encode_u8,
+	_encode_deterministic_u16,
+	_encode_deterministic_u32,
+	_encode_deterministic_u64,
+	_encode_deterministic_u128,
+}
+
+// Encodes a u16 in the smallest representation that fits it.
+_encode_deterministic_u16 :: proc(w: io.Writer, v: u16, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u16(max(u8)): return _encode_u8(w, u8(v), major)
+	case:                   return _encode_u16_exact(w, v, major)
+	}
+}
+
+// Encodes a u32 in the smallest representation that fits it.
+_encode_deterministic_u32 :: proc(w: io.Writer, v: u32, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u32(max(u8)):  return _encode_u8(w, u8(v), major)
+	case v <= u32(max(u16)): return _encode_u16_exact(w, u16(v), major)
+	case:                    return _encode_u32_exact(w, u32(v), major)
+	}
+}
+
+// Encodes a u64 in the smallest representation that fits it.
+_encode_deterministic_u64 :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u64(max(u8)):  return _encode_u8(w, u8(v), major)
+	case v <= u64(max(u16)): return _encode_u16_exact(w, u16(v), major)
+	case v <= u64(max(u32)): return _encode_u32_exact(w, u32(v), major)
+	case:                    return _encode_u64_exact(w, u64(v), major)
+	}
+}
+
+// Encodes a u128 in the smallest representation that fits it;
+// errors when the value exceeds the 8-byte maximum CBOR can carry.
+_encode_deterministic_u128 :: proc(w: io.Writer, v: u128, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u128(max(u8)):  return _encode_u8(w, u8(v), major)
+	case v <= u128(max(u16)): return _encode_u16_exact(w, u16(v), major)
+	case v <= u128(max(u32)): return _encode_u32_exact(w, u32(v), major)
+	case v <= u128(max(u64)): return _encode_u64_exact(w, u64(v), major)
+	case:                     return .Int_Too_Big
+	}
+}
+
+// Encodes a negative integer deterministically (smallest representation, `.Negative` major type).
+_encode_deterministic_negative :: #force_inline proc(w: io.Writer, v: $T) -> Encode_Error
+	where T == Negative_U8 || T == Negative_U16 || T == Negative_U32 || T == Negative_U64 {
+	return _encode_deterministic_uint(w, v, .Negative)
+}
+
+// A Deterministic float is a float in the smallest type that stays the same after down casting.
+_encode_deterministic_float :: proc {
+	_encode_f16,
+	_encode_deterministic_f32,
+	_encode_deterministic_f64,
+}
+
+// Encodes an f32 as f16 when the round trip through f16 is lossless.
+_encode_deterministic_f32 :: proc(w: io.Writer, v: f32) -> io.Error {
+	if (f32(f16(v)) == v) {
+		return _encode_f16(w, f16(v))
+	}
+
+	return _encode_f32_exact(w, v)
+}
+
+// Encodes an f64 as f16 or f32 when the round trip through the smaller type is lossless.
+_encode_deterministic_f64 :: proc(w: io.Writer, v: f64) -> io.Error {
+	if (f64(f16(v)) == v) {
+		return _encode_f16(w, f16(v))
+	}
+
+	if (f64(f32(v)) == v) {
+		return _encode_f32_exact(w, f32(v))
+	}
+
+	return _encode_f64_exact(w, v)
+}
diff --git a/core/encoding/cbor/doc.odin b/core/encoding/cbor/doc.odin
new file mode 100644
index 000000000..937b1b61b
--- /dev/null
+++ b/core/encoding/cbor/doc.odin
@@ -0,0 +1,141 @@
+/*
+Package cbor encodes, decodes, marshals and unmarshals types from/into RFC 8949 compatible CBOR binary.
+Also provided are conversion to and from JSON and the CBOR diagnostic format.
+
+**Allocations:**
+
+In general, when in the following table it says allocations are done on the `temp_allocator`, these allocations
+are still attempted to be deallocated.
+This allows you to use an allocator with freeing implemented as the `temp_allocator` which is handy with big CBOR.
+
+- *Encoding*: If the `.Deterministic_Map_Sorting` flag is set on the encoder, this allocates on the given `temp_allocator`
+ some space for the keys of maps in order to sort them and then write them.
+  Other than that there are no allocations (only for the final bytes if you use `cbor.encode_into_bytes`).
+
+- *Decoding*: Allocates everything on the given allocator and input given can be deleted after decoding.
+ *No* temporary allocations are done.
+
+- *Marshal*: Same allocation strategy as encoding.
+
+- *Unmarshal*: Allocates everything on the given allocator and input given can be deleted after unmarshalling.
+ Some temporary allocations are done on the given `temp_allocator`.
+
+**Determinism:**
+
+CBOR defines a deterministic en/decoder, which among other things uses the smallest type possible for integers and floats,
+and sorts map keys by their (encoded) lexical bytewise order.
+
+You can enable this behaviour using a combination of flags, also available as the `cbor.ENCODE_FULLY_DETERMINISTIC` constant.
+If you just want the small size that comes with this, but not the map sorting (which has a performance cost) you can use the
+`cbor.ENCODE_SMALL` constant for the flags.
+
+A deterministic float is a float in the smallest type (f16, f32, f64) that hasn't changed after conversion.
+A deterministic integer is an integer in the smallest representation (u8, u16, u32, u64) it fits in.
+
+**Untrusted Input:**
+
+By default input is treated as untrusted, this means the sizes that are encoded in the CBOR are not blindly trusted.
+If you were to trust these sizes, and allocate space for them an attacker would be able to cause massive allocations with small payloads.
+
+The decoder has a `max_pre_alloc` field that specifies the maximum amount of bytes (roughly) to pre allocate, a KiB by default.
+
+This does mean reallocations are more common though, you can, if you know the input is trusted, add the `.Trusted_Input` flag to the decoder.
+
+**Tags:**
+
+CBOR describes tags that you can wrap values with to assign a number to describe what type of data will follow.
+
+More information and a list of default tags can be found here: [[RFC 8949 Section 3.4;https://www.rfc-editor.org/rfc/rfc8949.html#name-tagging-of-items]].
+
+A list of registered extension types can be found here: [[IANA CBOR assignments;https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml]].
+
+Tags can either be assigned to a distinct Odin type (used by default),
+or be used with struct tags (`cbor_tag:"base64"`, or `cbor_tag:"1"` for example).
+
+By default, the following tags are supported/provided by this implementation:
+
+- *1/epoch*: Assign this tag to `time.Time` or integer fields to use the defined seconds since epoch format.
+
+- *24/cbor*: Assign this tag to string or byte fields to store encoded CBOR (not decoding it).
+
+- *34/base64*: Assign this tag to string or byte fields to store and decode the contents in base64.
+
+- *2 & 3*: Used automatically by the implementation to encode and decode big numbers into/from `core:math/big`.
+
+- *55799*: Self described CBOR, used when `.Self_Described_CBOR` flag is used to wrap the entire binary.
+ This shows other implementations that we are dealing with CBOR by just looking at the first byte of input.
+
+- *1010*: An extension tag that defines a string type followed by its value, this is used by this implementation to support Odin's unions.
+
+Users can provide their own tag implementations using the `cbor.tag_register_type(...)` to register a tag for a distinct Odin type
+used automatically when it is encountered during marshal and unmarshal.
+Or with `cbor.tag_register_number(...)` to register a tag number along with an identifier for convenience that can be used with struct tags,
+e.g. `cbor_tag:"69"` or `cbor_tag:"my_tag"`.
+
+You can look at the default tags provided for pointers on how these implementations work.
+
+Example:
+ package main
+
+ import "core:encoding/cbor"
+ import "core:fmt"
+ import "core:time"
+
+ Possibilities :: union {
+ string,
+ int,
+ }
+
+ Data :: struct {
+ str: string,
+ neg: cbor.Negative_U16, // Store a CBOR value directly.
+ now: time.Time `cbor_tag:"epoch"`, // Wrapped in the epoch tag.
+ ignore_this: ^Data `cbor:"-"`, // Ignored by implementation.
+ renamed: f32 `cbor:"renamed :)"`, // Renamed when encoded.
+ my_union: Possibilities, // Union support.
+ }
+
+ main :: proc() {
+ now := time.Time{_nsec = 1701117968 * 1e9}
+
+ data := Data{
+ str = "Hello, World!",
+ neg = 300,
+ now = now,
+ ignore_this = &Data{},
+ renamed = 123123.125,
+ my_union = 3,
+ }
+
+ // Marshal the struct into binary CBOR.
+ binary, err := cbor.marshal(data, cbor.ENCODE_FULLY_DETERMINISTIC)
+ assert(err == nil)
+ defer delete(binary)
+
+ // Decode the binary data into a `cbor.Value`.
+ decoded, derr := cbor.decode(string(binary))
+ assert(derr == nil)
+ defer cbor.destroy(decoded)
+
+ // Turn the CBOR into a human readable representation defined as the diagnostic format in [[RFC 8949 Section 8;https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation]].
+ diagnosis, eerr := cbor.to_diagnostic_format(decoded)
+ assert(eerr == nil)
+ defer delete(diagnosis)
+
+ fmt.println(diagnosis)
+ }
+
+Output:
+ {
+ "my_union": 1010([
+ "int",
+ 3
+ ]),
+ "neg": -301,
+ "now": 1(1701117968),
+ "renamed :)": 123123.12500000,
+ "str": "Hello, World!"
+ }
+*/
+package encoding_cbor
+
diff --git a/core/encoding/cbor/marshal.odin b/core/encoding/cbor/marshal.odin
new file mode 100644
index 000000000..37c9dd180
--- /dev/null
+++ b/core/encoding/cbor/marshal.odin
@@ -0,0 +1,575 @@
+package encoding_cbor
+
+import "base:intrinsics"
+import "base:runtime"
+
+import "core:bytes"
+import "core:io"
+import "core:mem"
+import "core:reflect"
+import "core:slice"
+import "core:strconv"
+import "core:strings"
+import "core:unicode/utf8"
+
+/*
+Marshal a value into binary CBOR.
+
+Flags can be used to control the output (mainly determinism, which coincidently affects size).
+
+The default flags `ENCODE_SMALL` (`.Deterministic_Int_Size`, `.Deterministic_Float_Size`) will try
+to put ints and floats into their smallest possible byte size without losing equality.
+
+Adding the `.Self_Described_CBOR` flag will wrap the value in a tag that lets generic decoders know
+the contents are CBOR from just reading the first byte.
+
+Adding the `.Deterministic_Map_Sorting` flag will sort the encoded maps by the byte content of the
+encoded key. This flag has a cost on performance and memory efficiency because all keys in a map
+have to be precomputed, sorted and only then written to the output.
+
+Empty flags will do nothing extra to the value.
+
+The allocations for the `.Deterministic_Map_Sorting` flag are done using the given `temp_allocator`,
+but are followed by the necessary `delete` and `free` calls if the allocator supports them.
+This is helpful when the CBOR size is so big that you don't want to collect all the temporary
+allocations until the end.
+*/
+marshal_into :: proc {
+	marshal_into_bytes,   // Marshal into a newly allocated byte slice.
+	marshal_into_builder, // Marshal into the given strings.Builder.
+	marshal_into_writer,  // Marshal into the given io.Writer.
+	marshal_into_encoder, // Marshal using an already configured Encoder.
+}
+
+// `marshal` is an alias of the `marshal_into` proc group; see its docs for details.
+marshal :: marshal_into
+
+// Marshals the given value into a CBOR byte stream (allocated using the given allocator).
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (bytes: []byte, err: Marshal_Error) {
+	builder, builder_err := strings.builder_make(allocator)
+	if builder_err != nil {
+		// The builder as a stream also returns .EOF if it ran out of memory so this is consistent.
+		return nil, .EOF
+	}
+
+	// Only destroy the builder on failure; on success the caller owns its buffer.
+	defer if err != nil { strings.builder_destroy(&builder) }
+
+	err = marshal_into_builder(&builder, v, flags, temp_allocator)
+	if err != nil {
+		return
+	}
+
+	bytes = builder.buf[:]
+	return
+}
+
+// Marshals the given value into a CBOR byte stream written to the given builder.
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
+	// Wrap the builder in a writer and delegate to the writer variant.
+	return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator)
+}
+
+// Marshals the given value into a CBOR byte stream written to the given writer.
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
+	// Build an Encoder out of the arguments and delegate to the encoder variant.
+	encoder := Encoder{flags, w, temp_allocator}
+	return marshal_into_encoder(encoder, v)
+}
+
+// Marshals the given value into a CBOR byte stream written to the given encoder.
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_encoder :: proc(e: Encoder, v: any) -> (err: Marshal_Error) {
+	e := e
+
+	// A nil allocator procedure means no temp allocator was given; use the context's.
+	if e.temp_allocator.procedure == nil {
+		e.temp_allocator = context.temp_allocator
+	}
+
+	// Wrap the value in the self-described CBOR tag, then clear the flag so
+	// recursive calls for nested values don't wrap again.
+	if .Self_Described_CBOR in e.flags {
+		err_conv(_encode_u64(e, TAG_SELF_DESCRIBED_CBOR, .Tag)) or_return
+		e.flags &~= { .Self_Described_CBOR }
+	}
+
+	if v == nil {
+		return _encode_nil(e.writer)
+	}
+
+	// Check if type has a tag implementation to use.
+	if impl, ok := _tag_implementations_type[v.id]; ok {
+		return impl->marshal(e, v)
+	}
+
+	// Work on the base type so named/distinct types hit the variant switch below.
+	ti := runtime.type_info_base(type_info_of(v.id))
+	a := any{v.data, ti.id}
+
+	#partial switch info in ti.variant {
+	case runtime.Type_Info_Named:
+		// `type_info_base` above already unwrapped named types.
+		unreachable()
+
+	case runtime.Type_Info_Pointer:
+		// Only the distinct pointer-backed CBOR value types are handled here;
+		// any other pointer falls through to `_unsupported` at the bottom.
+		switch vv in v {
+		case Undefined: return _encode_undefined(e.writer)
+		case Nil: return _encode_nil(e.writer)
+		}
+
+	case runtime.Type_Info_Integer:
+		// Distinct CBOR integer types are written directly with their major type.
+		switch vv in v {
+		case Simple: return err_conv(_encode_simple(e.writer, vv))
+		case Negative_U8: return _encode_u8(e.writer, u8(vv), .Negative)
+		case Negative_U16: return err_conv(_encode_u16(e, u16(vv), .Negative))
+		case Negative_U32: return err_conv(_encode_u32(e, u32(vv), .Negative))
+		case Negative_U64: return err_conv(_encode_u64(e, u64(vv), .Negative))
+		}
+
+		// Every sized/endian integer variant is normalized and encoded as a uint,
+		// with signed values converted via `_int_to_uint` (which picks the major type).
+		switch i in a {
+		case i8: return _encode_uint(e.writer, _int_to_uint(i))
+		case i16: return err_conv(_encode_uint(e, _int_to_uint(i)))
+		case i32: return err_conv(_encode_uint(e, _int_to_uint(i)))
+		case i64: return err_conv(_encode_uint(e, _int_to_uint(i)))
+		case i128: return err_conv(_encode_uint(e, _int_to_uint(i128(i)) or_return))
+		case int: return err_conv(_encode_uint(e, _int_to_uint(i64(i))))
+
+		case u8: return _encode_uint(e.writer, i)
+		case u16: return err_conv(_encode_uint(e, i))
+		case u32: return err_conv(_encode_uint(e, i))
+		case u64: return err_conv(_encode_uint(e, i))
+		case u128: return err_conv(_encode_uint(e, _u128_to_u64(u128(i)) or_return))
+		case uint: return err_conv(_encode_uint(e, u64(i)))
+		case uintptr: return err_conv(_encode_uint(e, u64(i)))
+
+		case i16le: return err_conv(_encode_uint(e, _int_to_uint(i16(i))))
+		case i32le: return err_conv(_encode_uint(e, _int_to_uint(i32(i))))
+		case i64le: return err_conv(_encode_uint(e, _int_to_uint(i64(i))))
+		case i128le: return err_conv(_encode_uint(e, _int_to_uint(i128(i)) or_return))
+
+		case u16le: return err_conv(_encode_uint(e, u16(i)))
+		case u32le: return err_conv(_encode_uint(e, u32(i)))
+		case u64le: return err_conv(_encode_uint(e, u64(i)))
+		case u128le: return err_conv(_encode_uint(e, _u128_to_u64(u128(i)) or_return))
+
+		case i16be: return err_conv(_encode_uint(e, _int_to_uint(i16(i))))
+		case i32be: return err_conv(_encode_uint(e, _int_to_uint(i32(i))))
+		case i64be: return err_conv(_encode_uint(e, _int_to_uint(i64(i))))
+		case i128be: return err_conv(_encode_uint(e, _int_to_uint(i128(i)) or_return))
+
+		case u16be: return err_conv(_encode_uint(e, u16(i)))
+		case u32be: return err_conv(_encode_uint(e, u32(i)))
+		case u64be: return err_conv(_encode_uint(e, u64(i)))
+		case u128be: return err_conv(_encode_uint(e, _u128_to_u64(u128(i)) or_return))
+		}
+
+	case runtime.Type_Info_Rune:
+		// A rune is encoded as a one-rune text string.
+		buf, w := utf8.encode_rune(a.(rune))
+		return err_conv(_encode_text(e, string(buf[:w])))
+
+	case runtime.Type_Info_Float:
+		switch f in a {
+		case f16: return _encode_f16(e.writer, f)
+		case f32: return _encode_f32(e, f)
+		case f64: return _encode_f64(e, f)
+
+		case f16le: return _encode_f16(e.writer, f16(f))
+		case f32le: return _encode_f32(e, f32(f))
+		case f64le: return _encode_f64(e, f64(f))
+
+		case f16be: return _encode_f16(e.writer, f16(f))
+		case f32be: return _encode_f32(e, f32(f))
+		case f64be: return _encode_f64(e, f64(f))
+		}
+
+	case runtime.Type_Info_Complex:
+		// Complex numbers become a two-element array: [real, imaginary].
+		switch z in a {
+		case complex32:
+			arr: [2]Value = {real(z), imag(z)}
+			return err_conv(_encode_array(e, arr[:]))
+		case complex64:
+			arr: [2]Value = {real(z), imag(z)}
+			return err_conv(_encode_array(e, arr[:]))
+		case complex128:
+			arr: [2]Value = {real(z), imag(z)}
+			return err_conv(_encode_array(e, arr[:]))
+		}
+
+	case runtime.Type_Info_Quaternion:
+		// Quaternions become a four-element array: [i, j, k, real].
+		switch q in a {
+		case quaternion64:
+			arr: [4]Value = {imag(q), jmag(q), kmag(q), real(q)}
+			return err_conv(_encode_array(e, arr[:]))
+		case quaternion128:
+			arr: [4]Value = {imag(q), jmag(q), kmag(q), real(q)}
+			return err_conv(_encode_array(e, arr[:]))
+		case quaternion256:
+			arr: [4]Value = {imag(q), jmag(q), kmag(q), real(q)}
+			return err_conv(_encode_array(e, arr[:]))
+		}
+
+	case runtime.Type_Info_String:
+		switch s in a {
+		case string: return err_conv(_encode_text(e, s))
+		case cstring: return err_conv(_encode_text(e, string(s)))
+		}
+
+	case runtime.Type_Info_Boolean:
+		switch b in a {
+		case bool: return _encode_bool(e.writer, b)
+		case b8: return _encode_bool(e.writer, bool(b))
+		case b16: return _encode_bool(e.writer, bool(b))
+		case b32: return _encode_bool(e.writer, bool(b))
+		case b64: return _encode_bool(e.writer, bool(b))
+		}
+
+	case runtime.Type_Info_Array:
+		// Byte arrays are encoded as a CBOR byte string instead of an array.
+		if info.elem.id == byte {
+			raw := ([^]byte)(v.data)
+			return err_conv(_encode_bytes(e, raw[:info.count]))
+		}
+
+		err_conv(_encode_u64(e, u64(info.count), .Array)) or_return
+		for i in 0..<info.count {
+			data := uintptr(v.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Enumerated_Array:
+		// index := runtime.type_info_base(info.index).variant.(runtime.Type_Info_Enum)
+		err_conv(_encode_u64(e, u64(info.count), .Array)) or_return
+		for i in 0..<info.count {
+			data := uintptr(v.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Dynamic_Array:
+		// Dynamic byte arrays are encoded as a CBOR byte string instead of an array.
+		if info.elem.id == byte {
+			raw := (^[dynamic]byte)(v.data)
+			return err_conv(_encode_bytes(e, raw[:]))
+		}
+
+		array := (^mem.Raw_Dynamic_Array)(v.data)
+		err_conv(_encode_u64(e, u64(array.len), .Array)) or_return
+		for i in 0..<array.len {
+			data := uintptr(array.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Slice:
+		// Byte slices are encoded as a CBOR byte string instead of an array.
+		if info.elem.id == byte {
+			raw := (^[]byte)(v.data)
+			return err_conv(_encode_bytes(e, raw^))
+		}
+
+		array := (^mem.Raw_Slice)(v.data)
+		err_conv(_encode_u64(e, u64(array.len), .Array)) or_return
+		for i in 0..<array.len {
+			data := uintptr(array.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Map:
+		m := (^mem.Raw_Map)(v.data)
+		err_conv(_encode_u64(e, u64(runtime.map_len(m^)), .Map)) or_return
+		if m != nil {
+			if info.map_info == nil {
+				return _unsupported(v.id, nil)
+			}
+
+			map_cap := uintptr(runtime.map_cap(m^))
+			ks, vs, hs, _, _ := runtime.map_kvh_data_dynamic(m^, info.map_info)
+
+			// Without deterministic sorting, write entries in bucket order,
+			// skipping empty/tombstone buckets.
+			if .Deterministic_Map_Sorting not_in e.flags {
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := rawptr(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, bucket_index))
+
+					marshal_into(e, any{ key, info.key.id }) or_return
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+
+				return
+			}
+
+			// Deterministic_Map_Sorting needs us to sort the entries by the byte contents of the
+			// encoded key.
+			//
+			// This means we have to store and sort them before writing incurring extra (temporary) allocations.
+			//
+			// If the map key is a `string` or `cstring` we only allocate space for a dynamic array of entries
+			// we sort.
+			//
+			// If the map key is of another type we also allocate space for encoding the key into.
+
+			// To sort a string/cstring we need to first sort by their encoded header/length.
+			// This fits in 9 bytes at most.
+			pre_key :: #force_inline proc(e: Encoder, str: string) -> (res: [10]byte) {
+				e := e
+				builder := strings.builder_from_slice(res[:])
+				e.writer = strings.to_stream(&builder)
+
+				// The last byte (res[9]) stores how many header bytes were written.
+				assert(_encode_u64(e, u64(len(str)), .Text) == nil)
+				res[9] = u8(len(builder.buf))
+				assert(res[9] < 10)
+				return
+			}
+
+			Encoded_Entry_Fast :: struct($T: typeid) {
+				pre_key: [10]byte,
+				key: T,
+				val_idx: uintptr,
+			}
+
+			Encoded_Entry :: struct {
+				key: ^[dynamic]byte,
+				val_idx: uintptr,
+			}
+
+			switch info.key.id {
+			case string:
+				entries := make([dynamic]Encoded_Entry_Fast(^[]byte), 0, map_cap, e.temp_allocator) or_return
+				defer delete(entries)
+
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := (^[]byte)(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					append(&entries, Encoded_Entry_Fast(^[]byte){
+						pre_key = pre_key(e, string(key^)),
+						key = key,
+						val_idx = bucket_index,
+					})
+				}
+
+				// Sort by the encoded header first, then by the raw key bytes.
+				slice.sort_by_cmp(entries[:], proc(a, b: Encoded_Entry_Fast(^[]byte)) -> slice.Ordering {
+					a, b := a, b
+					pre_cmp := slice.Ordering(bytes.compare(a.pre_key[:a.pre_key[9]], b.pre_key[:b.pre_key[9]]))
+					if pre_cmp != .Equal {
+						return pre_cmp
+					}
+
+					return slice.Ordering(bytes.compare(a.key^, b.key^))
+				})
+
+				for &entry in entries {
+					io.write_full(e.writer, entry.pre_key[:entry.pre_key[9]]) or_return
+					io.write_full(e.writer, entry.key^) or_return
+
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, entry.val_idx))
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+				return
+
+			case cstring:
+				entries := make([dynamic]Encoded_Entry_Fast(^cstring), 0, map_cap, e.temp_allocator) or_return
+				defer delete(entries)
+
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := (^cstring)(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					append(&entries, Encoded_Entry_Fast(^cstring){
+						pre_key = pre_key(e, string(key^)),
+						key = key,
+						val_idx = bucket_index,
+					})
+				}
+
+				// Sort by the encoded header first, then by the raw key bytes.
+				slice.sort_by_cmp(entries[:], proc(a, b: Encoded_Entry_Fast(^cstring)) -> slice.Ordering {
+					a, b := a, b
+					pre_cmp := slice.Ordering(bytes.compare(a.pre_key[:a.pre_key[9]], b.pre_key[:b.pre_key[9]]))
+					if pre_cmp != .Equal {
+						return pre_cmp
+					}
+
+					ab := transmute([]byte)string(a.key^)
+					bb := transmute([]byte)string(b.key^)
+					return slice.Ordering(bytes.compare(ab, bb))
+				})
+
+				for &entry in entries {
+					io.write_full(e.writer, entry.pre_key[:entry.pre_key[9]]) or_return
+					io.write_full(e.writer, transmute([]byte)string(entry.key^)) or_return
+
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, entry.val_idx))
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+				return
+
+			case:
+				// Non-string keys: fully encode each key into a temp buffer, sort
+				// by encoded bytes, then write keys and values in order.
+				entries := make([dynamic]Encoded_Entry, 0, map_cap, e.temp_allocator) or_return
+				defer delete(entries)
+
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := rawptr(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					key_builder := strings.builder_make(0, 8, e.temp_allocator) or_return
+					marshal_into(Encoder{e.flags, strings.to_stream(&key_builder), e.temp_allocator}, any{ key, info.key.id }) or_return
+					append(&entries, Encoded_Entry{ &key_builder.buf, bucket_index }) or_return
+				}
+
+				slice.sort_by_cmp(entries[:], proc(a, b: Encoded_Entry) -> slice.Ordering {
+					return slice.Ordering(bytes.compare(a.key[:], b.key[:]))
+				})
+
+				for entry in entries {
+					io.write_full(e.writer, entry.key[:]) or_return
+					// Eagerly free the temp key buffer once written.
+					delete(entry.key^)
+
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, entry.val_idx))
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+				return
+			}
+		}
+
+	case runtime.Type_Info_Struct:
+		// The distinct Tag struct is encoded directly as a CBOR tag.
+		switch vv in v {
+		case Tag: return err_conv(_encode_tag(e, vv))
+		}
+
+		// Resolves a field's encoded name: the `cbor:"..."` struct tag if set,
+		// otherwise the Odin field name.
+		field_name :: #force_inline proc(info: runtime.Type_Info_Struct, i: int) -> string {
+			if cbor_name := string(reflect.struct_tag_get(reflect.Struct_Tag(info.tags[i]), "cbor")); cbor_name != "" {
+				return cbor_name
+			} else {
+				return info.names[i]
+			}
+		}
+
+		// Writes one field: its name, an optional `cbor_tag` wrapper, then its value.
+		marshal_entry :: #force_inline proc(e: Encoder, info: runtime.Type_Info_Struct, v: any, name: string, i: int) -> Marshal_Error {
+			err_conv(_encode_text(e, name)) or_return
+
+			id := info.types[i].id
+			data := rawptr(uintptr(v.data) + info.offsets[i])
+			field_any := any{data, id}
+
+			// A `cbor_tag` struct tag is either a registered identifier, a registered
+			// number, or a plain number to emit before the field value.
+			if tag := string(reflect.struct_tag_get(reflect.Struct_Tag(info.tags[i]), "cbor_tag")); tag != "" {
+				if impl, ok := _tag_implementations_id[tag]; ok {
+					return impl->marshal(e, field_any)
+				}
+
+				nr, ok := strconv.parse_u64_of_base(tag, 10)
+				if !ok { return .Invalid_CBOR_Tag }
+
+				if impl, nok := _tag_implementations_nr[nr]; nok {
+					return impl->marshal(e, field_any)
+				}
+
+				err_conv(_encode_u64(e, nr, .Tag)) or_return
+			}
+
+			return marshal_into(e, field_any)
+		}
+
+		// Count the fields that are not skipped (`cbor:"-"`) for the map header.
+		n: u64; {
+			for _, i in info.names {
+				if field_name(info, i) != "-" {
+					n += 1
+				}
+			}
+			err_conv(_encode_u64(e, n, .Map)) or_return
+		}
+
+		if .Deterministic_Map_Sorting in e.flags {
+			Name :: struct {
+				name: string,
+				field: int,
+			}
+			entries := make([dynamic]Name, 0, n, e.temp_allocator) or_return
+			defer delete(entries)
+
+			for _, i in info.names {
+				fname := field_name(info, i)
+				if fname == "-" {
+					continue
+				}
+
+				append(&entries, Name{fname, i}) or_return
+			}
+
+			// Sort lexicographic on the bytes of the key.
+			slice.sort_by_cmp(entries[:], proc(a, b: Name) -> slice.Ordering {
+				return slice.Ordering(bytes.compare(transmute([]byte)a.name, transmute([]byte)b.name))
+			})
+
+			for entry in entries {
+				marshal_entry(e, info, v, entry.name, entry.field) or_return
+			}
+		} else {
+			for _, i in info.names {
+				fname := field_name(info, i)
+				if fname == "-" {
+					continue
+				}
+
+				marshal_entry(e, info, v, fname, i) or_return
+			}
+		}
+		return
+
+	case runtime.Type_Info_Union:
+		// The CBOR Value union is encoded through the encoder directly.
+		switch vv in v {
+		case Value: return err_conv(encode(e, vv))
+		}
+
+		id := reflect.union_variant_typeid(v)
+		if v.data == nil || id == nil {
+			return _encode_nil(e.writer)
+		}
+
+		// A single-variant union needs no type discriminator.
+		if len(info.variants) == 1 {
+			return marshal_into(e, any{v.data, id})
+		}
+
+		// Encode a non-nil multi-variant union as the `TAG_OBJECT_TYPE`.
+		// Which is a tag of an array, where the first element is the textual id/type of the object
+		// that follows it.
+
+		err_conv(_encode_u16(e, TAG_OBJECT_TYPE, .Tag)) or_return
+		_encode_u8(e.writer, 2, .Array) or_return
+
+		vti := reflect.union_variant_type_info(v)
+		#partial switch vt in vti.variant {
+		case reflect.Type_Info_Named:
+			err_conv(_encode_text(e, vt.name)) or_return
+		case:
+			// Unnamed variant: render its type into a temp buffer as the discriminator.
+			builder := strings.builder_make(e.temp_allocator) or_return
+			defer strings.builder_destroy(&builder)
+			reflect.write_type(&builder, vti)
+			err_conv(_encode_text(e, strings.to_string(builder))) or_return
+		}
+
+		return marshal_into(e, any{v.data, vti.id})
+
+	case runtime.Type_Info_Enum:
+		// Enums are encoded as their backing integer type.
+		return marshal_into(e, any{v.data, info.base.id})
+
+	case runtime.Type_Info_Bit_Set:
+		// Store bit_set as big endian just like the protocol.
+		do_byte_swap := !reflect.bit_set_is_big_endian(v)
+		switch ti.size * 8 {
+		case 0:
+			return _encode_u8(e.writer, 0)
+		case 8:
+			x := (^u8)(v.data)^
+			return _encode_u8(e.writer, x)
+		case 16:
+			x := (^u16)(v.data)^
+			if do_byte_swap { x = intrinsics.byte_swap(x) }
+			return err_conv(_encode_u16(e, x))
+		case 32:
+			x := (^u32)(v.data)^
+			if do_byte_swap { x = intrinsics.byte_swap(x) }
+			return err_conv(_encode_u32(e, x))
+		case 64:
+			x := (^u64)(v.data)^
+			if do_byte_swap { x = intrinsics.byte_swap(x) }
+			return err_conv(_encode_u64(e, x))
+		case:
+			panic("unknown bit_size size")
+		}
+	}
+
+	return _unsupported(v.id, nil)
+}
diff --git a/core/encoding/cbor/tags.odin b/core/encoding/cbor/tags.odin
new file mode 100644
index 000000000..3dc79a5dd
--- /dev/null
+++ b/core/encoding/cbor/tags.odin
@@ -0,0 +1,381 @@
+package encoding_cbor
+
+import "base:runtime"
+
+import "core:encoding/base64"
+import "core:io"
+import "core:math"
+import "core:math/big"
+import "core:mem"
+import "core:reflect"
+import "core:strings"
+import "core:time"
+
+// Tags defined in RFC 7049 that we provide implementations for.
+
+// UTC time in seconds, unmarshalled into a `core:time` `time.Time` or integer.
+// Use the struct tag `cbor_tag:"1"` or `cbor_tag:"epoch"` to have your `time.Time` field en/decoded as epoch time.
+TAG_EPOCH_TIME_NR :: 1
+TAG_EPOCH_TIME_ID :: "epoch"
+
+// Using `core:math/big`, big integers are properly encoded and decoded during marshal and unmarshal.
+// These fields use this tag by default, no struct tag required.
+TAG_UNSIGNED_BIG_NR :: 2
+// Using `core:math/big`, big integers are properly encoded and decoded during marshal and unmarshal.
+// These fields use this tag by default, no struct tag required.
+TAG_NEGATIVE_BIG_NR :: 3
+
+// TAG_DECIMAL_FRACTION :: 4 // NOTE: We could probably implement this with `math/fixed`.
+
+// Sometimes it is beneficial to carry an embedded CBOR data item that is not meant to be decoded
+// immediately at the time the enclosing data item is being decoded. Tag number 24 (CBOR data item)
+// can be used to tag the embedded byte string as a single data item encoded in CBOR format.
+// Use the struct tag `cbor_tag:"24"` or `cbor_tag:"cbor"` to keep a non-decoded field (string or bytes) of raw CBOR.
+TAG_CBOR_NR :: 24
+TAG_CBOR_ID :: "cbor"
+
+// The contents of this tag are base64 encoded during marshal and decoded during unmarshal.
+// Use the struct tag `cbor_tag:"34"` or `cbor_tag:"base64"` to have your string or bytes field en/decoded as base64.
+TAG_BASE64_NR :: 34
+TAG_BASE64_ID :: "base64"
+
+// A tag that is used to detect that the contents of a binary buffer (like a file) are CBOR.
+// This tag would wrap everything else, decoders can then check for this header and see if the
+// given content is definitely CBOR.
+// Added by the encoder if it has the flag `.Self_Described_CBOR`, decoded by default.
+TAG_SELF_DESCRIBED_CBOR :: 55799
+
+// A tag that is used to assign a textual type to the object following it.
+// The tag's value must be an array of 2 items, where the first is text (describing the following type)
+// and the second is any valid CBOR value.
+//
+// See the registration: https://datatracker.ietf.org/doc/draft-rundgren-cotx/05/
+//
+// We use this in Odin to marshal and unmarshal unions.
+TAG_OBJECT_TYPE :: 1010
+
+// A tag implementation that handles marshals and unmarshals for the tag it is registered on.
+// `data` is an opaque pointer handed back to the `marshal`/`unmarshal` procedures (available
+// through `self`), so implementations can carry state.
+Tag_Implementation :: struct {
+	data: rawptr,
+	unmarshal: Tag_Unmarshal_Proc,
+	marshal: Tag_Marshal_Proc,
+}
+
+// Procedure responsible for unmarshalling the tag out of the reader into the given `any`.
+Tag_Unmarshal_Proc :: #type proc(self: ^Tag_Implementation, d: Decoder, tag_nr: Tag_Number, v: any) -> Unmarshal_Error
+
+// Procedure responsible for marshalling the tag in the given `any` into the given encoder.
+Tag_Marshal_Proc :: #type proc(self: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error
+
+// When encountering a tag in the CBOR being unmarshalled, the implementation is used to unmarshal it.
+// When encountering a struct tag like `cbor_tag:"Tag_Number"`, the implementation is used to marshal it.
+// NOTE(review): these registries are package-level mutable maps; registration is assumed to
+// happen at startup before concurrent use — confirm thread-safety expectations.
+_tag_implementations_nr: map[Tag_Number]Tag_Implementation
+
+// Same as the number implementations but friendlier to use as a struct tag.
+// Instead of `cbor_tag:"34"` you can use `cbor_tag:"base64"`.
+_tag_implementations_id: map[string]Tag_Implementation
+
+// Tag implementations that are always used by a type, if that type is encountered in marshal it
+// will rely on the implementation to marshal it.
+//
+// This is good for types that don't make sense or can't marshal in its default form.
+_tag_implementations_type: map[typeid]Tag_Implementation
+
+// Register a custom tag implementation to be used when marshalling that type and unmarshalling that tag number.
+// Registering the same `nr` or `type` again overwrites the previous implementation.
+tag_register_type :: proc(impl: Tag_Implementation, nr: Tag_Number, type: typeid) {
+	_tag_implementations_nr[nr] = impl
+	_tag_implementations_type[type] = impl
+}
+
+// Register a custom tag implementation to be used when unmarshalling that tag number or marshalling
+// a field with the struct tag `cbor_tag:"nr"` (or the friendlier `cbor_tag:"id"`).
+// Registering the same `nr` or `id` again overwrites the previous implementation.
+tag_register_number :: proc(impl: Tag_Implementation, nr: Tag_Number, id: string) {
+	_tag_implementations_nr[nr] = impl
+	_tag_implementations_id[id] = impl
+}
+
+// Controls initialization of default tag implementations.
+// JS and WASI default to a panic allocator so we don't want to do it on those
+// (registration inserts into package-level maps, which allocates).
+INITIALIZE_DEFAULT_TAGS :: #config(CBOR_INITIALIZE_DEFAULT_TAGS, !ODIN_DEFAULT_TO_PANIC_ALLOCATOR && !ODIN_DEFAULT_TO_NIL_ALLOCATOR)
+
+// Runs at program init (unless disabled above) and registers the package's default tags.
+@(private, init, disabled=!INITIALIZE_DEFAULT_TAGS)
+tags_initialize_defaults :: proc() {
+	tags_register_defaults()
+}
+
+// Registers tags that have implementations provided by this package.
+// This is done by default and can be controlled with the `CBOR_INITIALIZE_DEFAULT_TAGS` define.
+tags_register_defaults :: proc() {
+	// Opt-in tags, enabled per field with the `cbor_tag` struct tag.
+	tag_register_number({nil, tag_time_unmarshal, tag_time_marshal}, TAG_EPOCH_TIME_NR, TAG_EPOCH_TIME_ID)
+	tag_register_number({nil, tag_base64_unmarshal, tag_base64_marshal}, TAG_BASE64_NR, TAG_BASE64_ID)
+	tag_register_number({nil, tag_cbor_unmarshal, tag_cbor_marshal}, TAG_CBOR_NR, TAG_CBOR_ID)
+
+	// These following tags are registered at the type level and don't require an opt-in struct tag.
+	// Encoding these types on its own makes no sense or no data is lost to encode it.
+
+	// En/Decoding of `big.Int` fields by default.
+	tag_register_type({nil, tag_big_unmarshal, tag_big_marshal}, TAG_UNSIGNED_BIG_NR, big.Int)
+	tag_register_type({nil, tag_big_unmarshal, tag_big_marshal}, TAG_NEGATIVE_BIG_NR, big.Int)
+}
+
+// Tag number 1 contains a numerical value counting the number of seconds from 1970-01-01T00:00Z
+// in UTC time to the represented point in civil time.
+//
+// See RFC 8949 section 3.4.2.
+//
+// Decodes into a `time.Time` destination directly; any other destination receives
+// the raw epoch number via the generic value unmarshaller.
+@(private)
+tag_time_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, _: Tag_Number, v: any) -> (err: Unmarshal_Error) {
+	hdr := _decode_header(d.reader) or_return
+	#partial switch hdr {
+	case .U8, .U16, .U32, .U64, .Neg_U8, .Neg_U16, .Neg_U32, .Neg_U64:
+		// Integer epoch: whole seconds, no fractional part.
+		switch &dst in v {
+		case time.Time:
+			i: i64
+			_unmarshal_any_ptr(d, &i, hdr) or_return
+			dst = time.unix(i64(i), 0)
+			return
+		case:
+			return _unmarshal_value(d, v, hdr)
+		}
+
+	case .F16, .F32, .F64:
+		// Float epoch: split into whole seconds and nanoseconds.
+		switch &dst in v {
+		case time.Time:
+			f: f64
+			_unmarshal_any_ptr(d, &f, hdr) or_return
+			whole, fract := math.modf(f)
+			dst = time.unix(i64(whole), i64(fract * 1e9))
+			return
+		case:
+			return _unmarshal_value(d, v, hdr)
+		}
+
+	case:
+		maj, add := _header_split(hdr)
+		if maj == .Other {
+			// Tiny integer: the value is packed into the header's additional bits.
+			i := _decode_tiny_u8(add) or_return
+
+			switch &dst in v {
+			case time.Time:
+				dst = time.unix(i64(i), 0)
+				// Fix: previously fell through to `.Bad_Tag_Value` after a successful
+				// assignment; return success like the other arms do.
+				return
+			case:
+				if _assign_int(v, i) { return }
+			}
+		}
+
+		// Only numbers and floats are allowed in this tag.
+		return .Bad_Tag_Value
+	}
+
+	// NOTE(review): unreachable in practice, every switch arm above returns.
+	return _unsupported(v, hdr)
+}
+
+// Marshals a `time.Time` value as the epoch-time tag (RFC 8949 section 3.4.2):
+// the tag header followed by the number of whole seconds since the Unix epoch.
+@(private)
+tag_time_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	t, is_time := v.(time.Time)
+	if !is_time {
+		// Only registered for `time.Time`; anything else is a programming error.
+		unreachable()
+	}
+
+	// Tag header first, then its value.
+	_encode_u8(e.writer, TAG_EPOCH_TIME_NR, .Tag) or_return
+
+	// NOTE: we lose sub-second precision here, which is one of the reasons for
+	// this tag being opt-in.
+	secs := time.time_to_unix(t)
+	return err_conv(_encode_uint(e, _int_to_uint(secs)))
+}
+
+// Unmarshals tag 2 (unsigned bignum) or tag 3 (negative bignum) into a `big.Int`.
+// The tag's content must be a byte string holding the big-endian magnitude.
+//
+// NOTE(review): RFC 8949 section 3.4.3 defines a tag-3 value as `-1 - n`; this
+// implementation only flips the sign (`-n`). It round-trips with `tag_big_marshal`
+// below, but may be off by one against other CBOR implementations — confirm intent.
+@(private)
+tag_big_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, tnr: Tag_Number, v: any) -> (err: Unmarshal_Error) {
+	hdr := _decode_header(d.reader) or_return
+	maj, add := _header_split(hdr)
+	if maj != .Bytes {
+		// Only bytes are supported in this tag.
+		return .Bad_Tag_Value
+	}
+
+	switch &dst in v {
+	case big.Int:
+		// The byte string is only needed to seed the big.Int, free it after.
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+		defer delete(bytes)
+
+		if err := big.int_from_bytes_big(&dst, bytes); err != nil {
+			return .Bad_Tag_Value
+		}
+
+		// Tag 3 carries the magnitude of a negative number.
+		if tnr == TAG_NEGATIVE_BIG_NR {
+			dst.sign = .Negative
+		}
+
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+// Marshals a `big.Int` as tag 2 (unsigned) or tag 3 (negative), with the magnitude
+// written as a big-endian byte string.
+//
+// NOTE(review): the magnitude is written as-is for negative values; RFC 8949
+// section 3.4.3 encodes `-1 - n`. Consistent with `tag_big_unmarshal` above.
+@(private)
+tag_big_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	switch &vv in v {
+	case big.Int:
+		// An uninitialized Int is encoded as an empty unsigned bignum.
+		if !big.int_is_initialized(&vv) {
+			_encode_u8(e.writer, TAG_UNSIGNED_BIG_NR, .Tag) or_return
+			return _encode_u8(e.writer, 0, .Bytes)
+		}
+
+		// NOTE: using the panic_allocator because all procedures should only allocate if the Int
+		// is uninitialized (which we checked).
+
+		is_neg, err := big.is_negative(&vv, mem.panic_allocator())
+		assert(err == nil, "should only error if not initialized, which has been checked")
+
+		tnr: u8 = TAG_NEGATIVE_BIG_NR if is_neg else TAG_UNSIGNED_BIG_NR
+		_encode_u8(e.writer, tnr, .Tag) or_return
+
+		size_in_bytes, berr := big.int_to_bytes_size(&vv, false, mem.panic_allocator())
+		assert(berr == nil, "should only error if not initialized, which has been checked")
+		assert(size_in_bytes >= 0)
+
+		// Byte-string header with the exact length, then the magnitude,
+		// most-significant byte first (big-endian).
+		err_conv(_encode_u64(e, u64(size_in_bytes), .Bytes)) or_return
+
+		for offset := (size_in_bytes*8)-8; offset >= 0; offset -= 8 {
+			bits, derr := big.int_bitfield_extract(&vv, offset, 8, mem.panic_allocator())
+			assert(derr == nil, "should only error if not initialized or invalid argument (offset and count), which won't happen")
+
+			io.write_full(e.writer, {u8(bits & 255)}) or_return
+		}
+		return nil
+
+	case: unreachable()
+	}
+}
+
+// Unmarshals tag 24 (embedded CBOR data item): the tag's content must be a byte
+// string, which is stored raw (not decoded) into the destination buffer field.
+@(private)
+tag_cbor_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, _: Tag_Number, v: any) -> Unmarshal_Error {
+	hdr := _decode_header(d.reader) or_return
+	major, add := _header_split(hdr)
+	#partial switch major {
+	case .Bytes:
+		// Reuse the generic bytes unmarshaller to fill strings/slices/arrays.
+		ti := reflect.type_info_base(type_info_of(v.id))
+		return _unmarshal_bytes(d, v, ti, hdr, add)
+
+	case: return .Bad_Tag_Value
+	}
+}
+
+// Marshals a field holding raw CBOR as tag 24 (embedded CBOR data item):
+// the tag header followed by the field's contents.
+// Only strings and byte buffers (array/slice/dynamic array of `byte`) are valid.
+@(private)
+tag_cbor_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	_encode_u8(e.writer, TAG_CBOR_NR, .Tag) or_return
+
+	ti := runtime.type_info_base(type_info_of(v.id))
+	#partial switch t in ti.variant {
+	case runtime.Type_Info_String:
+		// Strings carry the raw CBOR as-is.
+		return marshal_into(e, v)
+	case runtime.Type_Info_Array:
+		// Buffers must be buffers of bytes.
+		if reflect.type_info_base(t.elem).id != byte { return .Bad_Tag_Value }
+		return marshal_into(e, v)
+	case runtime.Type_Info_Slice:
+		if reflect.type_info_base(t.elem).id != byte { return .Bad_Tag_Value }
+		return marshal_into(e, v)
+	case runtime.Type_Info_Dynamic_Array:
+		if reflect.type_info_base(t.elem).id != byte { return .Bad_Tag_Value }
+		return marshal_into(e, v)
+	case:
+		return .Bad_Tag_Value
+	}
+}
+
+// Unmarshals tag 34 (base64 content): reads the tag's text/bytes payload,
+// base64-decodes it, and stores the result into a string, cstring, or byte
+// buffer (slice/dynamic array/array) destination.
+@(private)
+tag_base64_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, _: Tag_Number, v: any) -> (err: Unmarshal_Error) {
+	hdr := _decode_header(d.reader) or_return
+	major, add := _header_split(hdr)
+	ti := reflect.type_info_base(type_info_of(v.id))
+
+	if major != .Text && major != .Bytes {
+		return .Bad_Tag_Value
+	}
+
+	// The encoded payload is only needed until it has been decoded.
+	bytes := string(err_conv(_decode_bytes(d, add, allocator=context.temp_allocator)) or_return)
+	defer delete(bytes, context.temp_allocator)
+
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_String:
+
+		if t.is_cstring {
+			// Ownership of the builder's buffer is transferred to the cstring.
+			// NOTE(review): relies on the byte after the decoded data (within the
+			// `length+1` capacity) being zero for NUL-termination — confirm the
+			// builder's backing allocation is zeroed.
+			length := base64.decoded_len(bytes)
+			builder := strings.builder_make(0, length+1)
+			base64.decode_into(strings.to_stream(&builder), bytes) or_return
+
+			raw := (^cstring)(v.data)
+			raw^ = cstring(raw_data(builder.buf))
+		} else {
+			raw := (^string)(v.data)
+			raw^ = string(base64.decode(bytes) or_return)
+		}
+
+		return
+
+	case reflect.Type_Info_Slice:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		// The slice takes ownership of the decoded buffer.
+		raw := (^[]byte)(v.data)
+		raw^ = base64.decode(bytes) or_return
+		return
+
+	case reflect.Type_Info_Dynamic_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		// The dynamic array takes ownership of the decoded buffer.
+		decoded := base64.decode(bytes) or_return
+
+		raw := (^mem.Raw_Dynamic_Array)(v.data)
+		raw.data = raw_data(decoded)
+		raw.len = len(decoded)
+		raw.cap = len(decoded)
+		raw.allocator = context.allocator
+		return
+
+	case reflect.Type_Info_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		if base64.decoded_len(bytes) > t.count { return _unsupported(v, hdr) }
+
+		// Fix: decode into a temporary buffer, copy it into the fixed array, and
+		// free it. Previously the decoded buffer leaked and `v.data` was sliced
+		// with the *encoded* length (`len(bytes)`), which can exceed the array's
+		// bounds.
+		decoded := base64.decode(bytes) or_return
+		defer delete(decoded)
+
+		copy(([^]byte)(v.data)[:t.count], decoded)
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+// Marshals a string, cstring, or byte buffer field as tag 34: the tag header
+// followed by the base64 encoding of the field's contents as a text item.
+@(private)
+tag_base64_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	_encode_u8(e.writer, TAG_BASE64_NR, .Tag) or_return
+
+	ti := runtime.type_info_base(type_info_of(v.id))
+	a := any{v.data, ti.id}
+
+	// Resolve the field to a raw byte view; no copies are made here.
+	bytes: []byte
+	switch val in a {
+	case string: bytes = transmute([]byte)val
+	case cstring: bytes = transmute([]byte)string(val)
+	case []byte: bytes = val
+	case [dynamic]byte: bytes = val[:]
+	case:
+		// Fixed arrays don't match a concrete type above; go through reflection.
+		#partial switch t in ti.variant {
+		case runtime.Type_Info_Array:
+			if t.elem.id != byte { return .Bad_Tag_Value }
+			bytes = ([^]byte)(v.data)[:t.count]
+		case:
+			return .Bad_Tag_Value
+		}
+	}
+
+	// Text header with the exact encoded length, then stream the base64 directly.
+	out_len := base64.encoded_len(bytes)
+	err_conv(_encode_u64(e, u64(out_len), .Text)) or_return
+	return base64.encode_into(e.writer, bytes)
+}
diff --git a/core/encoding/cbor/unmarshal.odin b/core/encoding/cbor/unmarshal.odin
new file mode 100644
index 000000000..a1524d9f4
--- /dev/null
+++ b/core/encoding/cbor/unmarshal.odin
@@ -0,0 +1,932 @@
+package encoding_cbor
+
+import "base:intrinsics"
+import "base:runtime"
+
+import "core:io"
+import "core:mem"
+import "core:reflect"
+import "core:strings"
+import "core:unicode/utf8"
+
+/*
+Unmarshals the given CBOR into the given pointer using reflection.
+Types that require allocation are allocated using the given allocator.
+
+Some temporary allocations are done on the given `temp_allocator`, but, if you want to,
+this can be set to a "normal" allocator, because the necessary `delete` and `free` calls are still made.
+This is helpful when the CBOR size is so big that you don't want to collect all the temporary allocations until the end.
+
+Disable streaming/indeterminate lengths with the `.Disallow_Streaming` flag.
+
+Shrink excess bytes in buffers and containers with the `.Shrink_Excess` flag.
+
+Mark the input as trusted input with the `.Trusted_Input` flag, this turns off the safety feature
+of not pre-allocating more than `max_pre_alloc` bytes before reading into the bytes. You should only
+do this when you own both sides of the encoding and are sure there can't be malicious bytes used as
+an input.
+
+Returns `.Unexpected_EOF` (never a plain `.EOF`) when the input ends before the value is complete.
+*/
+unmarshal :: proc {
+	unmarshal_from_reader,
+	unmarshal_from_string,
+}
+
+// Unmarshals from an `io.Reader`, see docs on the proc group `unmarshal` for more info.
+unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+	err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator)
+
+	// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+}
+
+// Unmarshals from a string, see docs on the proc group `unmarshal` for more info.
+unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+	// Wrap the string in a reader; the reader lives on the stack for the duration of the call.
+	sr: strings.Reader
+	r := strings.to_reader(&sr, s)
+
+	err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator)
+
+	// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
+	// NOTE(review): `unmarshal_from_reader` already performs this conversion; kept as defense.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+}
+
+// Unmarshals using a fully configured `Decoder`, see docs on the proc group `unmarshal` for more info.
+unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+	d := d // Local mutable copy of the decoder.
+
+	err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator)
+
+	// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+
+}
+
+// Dereferences the pointer wrapped in `v` and unmarshals the next CBOR value into
+// its pointee. `v` must be a non-nil, non-rawptr pointer.
+// If `hdr` is nil the header byte is read from the decoder first.
+_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator) -> Unmarshal_Error {
+	// All further allocations in this unmarshal use the caller-provided allocators.
+	context.allocator = allocator
+	context.temp_allocator = temp_allocator
+	v := v
+
+	if v == nil || v.id == nil {
+		return .Invalid_Parameter
+	}
+
+	v = reflect.any_base(v)
+	ti := type_info_of(v.id)
+	if !reflect.is_pointer(ti) || ti.id == rawptr {
+		return .Non_Pointer_Parameter
+	}
+
+	// Unwrap one level of pointer and unmarshal into the pointee.
+	data := any{(^rawptr)(v.data)^, ti.variant.(reflect.Type_Info_Pointer).elem.id}
+	return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return))
+}
+
+// Unmarshals a single CBOR item — whose header byte `hdr` has already been read —
+// into the destination `v` using reflection. This is the central dispatch of the
+// unmarshaller; container and tag handling is delegated to the `_unmarshal_*` helpers.
+_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Error) {
+	v := v
+	ti := reflect.type_info_base(type_info_of(v.id))
+	r := d.reader
+
+	// If it's a union with only one variant, then treat it as that variant
+	if u, ok := ti.variant.(reflect.Type_Info_Union); ok && len(u.variants) == 1 {
+		#partial switch hdr {
+		case .Nil, .Undefined, nil: // no-op.
+		case:
+			variant := u.variants[0]
+			v.id = variant.id
+			ti = reflect.type_info_base(variant)
+			if !reflect.is_pointer_internally(variant) {
+				// Select the (only) variant by writing the union's tag.
+				tag := any{rawptr(uintptr(v.data) + u.tag_offset), u.tag_type.id}
+				assert(_assign_int(tag, 1))
+			}
+		}
+	}
+
+	// Allow generic unmarshal by doing it into a `Value`.
+	switch &dst in v {
+	case Value:
+		dst = err_conv(_decode_from_decoder(d, hdr)) or_return
+		return
+	}
+
+	// First: headers whose payload follows as a fixed-size scalar.
+	switch hdr {
+	case .U8:
+		decoded := _decode_u8(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .U16:
+		decoded := _decode_u16(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .U32:
+		decoded := _decode_u32(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .U64:
+		decoded := _decode_u64(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U8:
+		decoded := Negative_U8(_decode_u8(r) or_return)
+
+		// A `Negative_*` destination keeps the raw CBOR representation,
+		// widening is allowed.
+		switch &dst in v {
+		case Negative_U8:
+			dst = decoded
+			return
+		case Negative_U16:
+			dst = Negative_U16(decoded)
+			return
+		case Negative_U32:
+			dst = Negative_U32(decoded)
+			return
+		case Negative_U64:
+			dst = Negative_U64(decoded)
+			return
+		}
+
+		// A negative value can never fit an unsigned destination.
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U16:
+		decoded := Negative_U16(_decode_u16(r) or_return)
+
+		switch &dst in v {
+		case Negative_U16:
+			dst = decoded
+			return
+		case Negative_U32:
+			dst = Negative_U32(decoded)
+			return
+		case Negative_U64:
+			dst = Negative_U64(decoded)
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U32:
+		decoded := Negative_U32(_decode_u32(r) or_return)
+
+		switch &dst in v {
+		case Negative_U32:
+			dst = decoded
+			return
+		case Negative_U64:
+			dst = Negative_U64(decoded)
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U64:
+		decoded := Negative_U64(_decode_u64(r) or_return)
+
+		switch &dst in v {
+		case Negative_U64:
+			dst = decoded
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Simple:
+		decoded := _decode_simple(r) or_return
+
+		// NOTE: Because this is a special type and not to be treated as a general integer,
+		// We only put the value of it in fields that are explicitly of type `Simple`.
+		switch &dst in v {
+		case Simple:
+			dst = decoded
+			return
+		case:
+			return _unsupported(v, hdr)
+		}
+
+	case .F16:
+		decoded := _decode_f16(r) or_return
+		if !_assign_float(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .F32:
+		decoded := _decode_f32(r) or_return
+		if !_assign_float(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .F64:
+		decoded := _decode_f64(r) or_return
+		if !_assign_float(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .True:
+		if !_assign_bool(v, true) { return _unsupported(v, hdr) }
+		return
+
+	case .False:
+		if !_assign_bool(v, false) { return _unsupported(v, hdr) }
+		return
+
+	case .Nil, .Undefined:
+		// Nil/undefined zeroes the destination.
+		mem.zero(v.data, ti.size)
+		return
+
+	case .Break:
+		// Propagated to the enclosing indeterminate-length container loop.
+		return .Break
+	}
+
+	// Second: headers that carry a value/length in the additional bits of the header byte.
+	maj, add := _header_split(hdr)
+	switch maj {
+	case .Unsigned:
+		decoded := _decode_tiny_u8(add) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr, add) }
+		return
+
+	case .Negative:
+		decoded := Negative_U8(_decode_tiny_u8(add) or_return)
+
+		switch &dst in v {
+		case Negative_U8:
+			dst = decoded
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr, add) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr, add) }
+		return
+
+	case .Other:
+		decoded := _decode_tiny_simple(add) or_return
+
+		// NOTE: Because this is a special type and not to be treated as a general integer,
+		// We only put the value of it in fields that are explicitly of type `Simple`.
+		switch &dst in v {
+		case Simple:
+			dst = decoded
+			return
+		case:
+			return _unsupported(v, hdr, add)
+		}
+
+	case .Tag:
+		// A `Tag`/`^Tag` destination keeps the tag wrapper itself.
+		switch &dst in v {
+		case ^Tag:
+			tval := err_conv(_decode_tag_ptr(d, add)) or_return
+			if t, is_tag := tval.(^Tag); is_tag {
+				dst = t
+				return
+			}
+
+			destroy(tval)
+			return .Bad_Tag_Value
+		case Tag:
+			t := err_conv(_decode_tag(d, add)) or_return
+			if t, is_tag := t.?; is_tag {
+				dst = t
+				return
+			}
+
+			return .Bad_Tag_Value
+		}
+
+		nr := err_conv(_decode_uint_as_u64(r, add)) or_return
+
+		// Custom tag implementations.
+		if impl, ok := _tag_implementations_nr[nr]; ok {
+			return impl->unmarshal(d, nr, v)
+		} else if nr == TAG_OBJECT_TYPE {
+			return _unmarshal_union(d, v, ti, hdr)
+		} else {
+			// Discard the tag info and unmarshal as its value.
+			return _unmarshal_value(d, v, _decode_header(r) or_return)
+		}
+
+		// NOTE(review): unreachable, every branch above returns.
+		return _unsupported(v, hdr, add)
+
+	case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add)
+	case .Text: return _unmarshal_string(d, v, ti, hdr, add)
+	case .Array: return _unmarshal_array(d, v, ti, hdr, add)
+	case .Map: return _unmarshal_map(d, v, ti, hdr, add)
+
+	case: return .Bad_Major
+	}
+}
+
+// Unmarshals a CBOR byte string into a string, cstring, or byte buffer
+// (slice/dynamic array/fixed array) destination.
+// Slice/dynamic-array/string destinations take ownership of the decoded buffer;
+// the fixed-array destination copies into place.
+_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_String:
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+
+		if t.is_cstring {
+			raw := (^cstring)(v.data)
+			assert_safe_for_cstring(string(bytes))
+			raw^ = cstring(raw_data(bytes))
+		} else {
+			// String has same memory layout as a slice, so we can directly use it as a slice.
+			raw := (^mem.Raw_String)(v.data)
+			raw^ = transmute(mem.Raw_String)bytes
+		}
+
+		return
+
+	case reflect.Type_Info_Slice:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+		raw := (^mem.Raw_Slice)(v.data)
+		raw^ = transmute(mem.Raw_Slice)bytes
+		return
+
+	case reflect.Type_Info_Dynamic_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+		raw := (^mem.Raw_Dynamic_Array)(v.data)
+		raw.data = raw_data(bytes)
+		raw.len = len(bytes)
+		raw.cap = len(bytes)
+		raw.allocator = context.allocator
+		return
+
+	case reflect.Type_Info_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		bytes := err_conv(_decode_bytes(d, add, allocator=context.temp_allocator)) or_return
+		defer delete(bytes, context.temp_allocator)
+
+		// Too many bytes for the fixed array.
+		if len(bytes) > t.count { return _unsupported(v, hdr) }
+
+		// Copy into array type, delete original.
+		slice := ([^]byte)(v.data)[:len(bytes)]
+		n := copy(slice, bytes)
+		assert(n == len(bytes))
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+// Unmarshals a CBOR text string into a string/cstring destination, an enum
+// (matched by variant name), or a rune (the text must be exactly one rune).
+_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_String:
+		// The destination takes ownership of the decoded text.
+		text := err_conv(_decode_text(d, add)) or_return
+
+		if t.is_cstring {
+			raw := (^cstring)(v.data)
+
+			assert_safe_for_cstring(text)
+			raw^ = cstring(raw_data(text))
+		} else {
+			raw := (^string)(v.data)
+			raw^ = text
+		}
+		return
+
+	// Enum by its variant name.
+	case reflect.Type_Info_Enum:
+		text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
+		defer delete(text, context.temp_allocator)
+
+		for name, i in t.names {
+			if name == text {
+				if !_assign_int(any{v.data, ti.id}, t.values[i]) { return _unsupported(v, hdr) }
+				return
+			}
+		}
+		// No matching variant name: falls through to `_unsupported` below.
+
+	case reflect.Type_Info_Rune:
+		text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
+		defer delete(text, context.temp_allocator)
+
+		r := (^rune)(v.data)
+		dr, n := utf8.decode_rune(text)
+		// Reject invalid UTF-8 and strings longer than a single rune.
+		if dr == utf8.RUNE_ERROR || n < len(text) {
+			return _unsupported(v, hdr)
+		}
+
+		r^ = dr
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+// Unmarshals a CBOR array into a slice, dynamic array, fixed/enumerated array,
+// complex, or quaternion destination (the latter two as 2/4 float elements).
+_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	// Decodes `length` elements (or until `.Break` when length == -1) of type `elemt`
+	// into the raw dynamic array `da`, growing it when `growable` is set.
+	assign_array :: proc(
+		d: Decoder,
+		da: ^mem.Raw_Dynamic_Array,
+		elemt: ^reflect.Type_Info,
+		length: int,
+		growable := true,
+	) -> (out_of_space: bool, err: Unmarshal_Error) {
+		for idx: uintptr = 0; length == -1 || idx < uintptr(length); idx += 1 {
+			elem_ptr := rawptr(uintptr(da.data) + idx*uintptr(elemt.size))
+			elem := any{elem_ptr, elemt.id}
+
+			hdr := _decode_header(d.reader) or_return
+
+			// Double size if out of capacity.
+			if da.cap <= da.len {
+				// Not growable, error out.
+				if !growable { return true, .Out_Of_Memory }
+
+				cap := 2 * da.cap
+				ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap)
+
+				// NOTE: Might be lying here, but it is at least an allocator error.
+				if !ok { return false, .Out_Of_Memory }
+			}
+
+			err = _unmarshal_value(d, elem, hdr)
+			if length == -1 && err == .Break { break }
+			if err != nil { return }
+
+			da.len += 1
+		}
+
+		return false, nil
+	}
+
+	// Allow generically storing the values array.
+	switch &dst in v {
+	case ^Array:
+		dst = err_conv(_decode_array_ptr(d, add)) or_return
+		return
+	case Array:
+		dst = err_conv(_decode_array(d, add)) or_return
+		return
+	}
+
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_Slice:
+		length, scap := err_conv(_decode_len_container(d, add)) or_return
+
+		data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
+		defer if err != nil { mem.free_bytes(data) }
+
+		// NOTE(review): the backing allocation holds `scap` elements while `cap` is
+		// set from `length`; assumed `scap >= length` when the length is known —
+		// verify against `_decode_len_container`.
+		da := mem.Raw_Dynamic_Array{raw_data(data), 0, length, context.allocator }
+
+		assign_array(d, &da, t.elem, length) or_return
+
+		if .Shrink_Excess in d.flags {
+			// Ignoring an error here, but this is not critical to succeed.
+			_ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len)
+		}
+
+		raw := (^mem.Raw_Slice)(v.data)
+		raw.data = da.data
+		raw.len = da.len
+		return
+
+	case reflect.Type_Info_Dynamic_Array:
+		length, scap := err_conv(_decode_len_container(d, add)) or_return
+
+		data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
+		defer if err != nil { mem.free_bytes(data) }
+
+		raw := (^mem.Raw_Dynamic_Array)(v.data)
+		raw.data = raw_data(data)
+		raw.len = 0
+		raw.cap = length
+		raw.allocator = context.allocator
+
+		_ = assign_array(d, raw, t.elem, length) or_return
+
+		if .Shrink_Excess in d.flags {
+			// Ignoring an error here, but this is not critical to succeed.
+			_ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len)
+		}
+		return
+
+	case reflect.Type_Info_Array:
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, t.count)
+
+		// NOTE(review): always false because of the `min` above.
+		if length > t.count {
+			return _unsupported(v, hdr)
+		}
+
+		// The fixed array's own storage backs the (non-growable) dynamic array.
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+
+		out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case reflect.Type_Info_Enumerated_Array:
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, t.count)
+
+		// NOTE(review): always false because of the `min` above.
+		if length > t.count {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+
+		out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case reflect.Type_Info_Complex:
+		// A complex is decoded as an array of 2 floats (real, imaginary).
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, 2)
+
+		// NOTE(review): always false because of the `min` above.
+		if length > 2 {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, context.allocator }
+
+		info: ^runtime.Type_Info
+		switch ti.id {
+		case complex32: info = type_info_of(f16)
+		case complex64: info = type_info_of(f32)
+		case complex128: info = type_info_of(f64)
+		case: unreachable()
+		}
+
+		out_of_space := assign_array(d, &da, info, 2, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case reflect.Type_Info_Quaternion:
+		// A quaternion is decoded as an array of 4 floats.
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, 4)
+
+		// NOTE(review): always false because of the `min` above.
+		if length > 4 {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, context.allocator }
+
+		info: ^runtime.Type_Info
+		switch ti.id {
+		case quaternion64: info = type_info_of(f16)
+		case quaternion128: info = type_info_of(f32)
+		case quaternion256: info = type_info_of(f64)
+		case: unreachable()
+		}
+
+		out_of_space := assign_array(d, &da, info, 4, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case: return _unsupported(v, hdr)
+	}
+}
+
+_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+ r := d.reader
+ decode_key :: proc(d: Decoder, v: any, allocator := context.allocator) -> (k: string, err: Unmarshal_Error) {
+ entry_hdr := _decode_header(d.reader) or_return
+ entry_maj, entry_add := _header_split(entry_hdr)
+ #partial switch entry_maj {
+ case .Text:
+ k = err_conv(_decode_text(d, entry_add, allocator)) or_return
+ return
+ case .Bytes:
+ bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator)) or_return
+ k = string(bytes)
+ return
+ case:
+ err = _unsupported(v, entry_hdr)
+ return
+ }
+ }
+
+ // Allow generically storing the map array.
+ switch &dst in v {
+ case ^Map:
+ dst = err_conv(_decode_map_ptr(d, add)) or_return
+ return
+ case Map:
+ dst = err_conv(_decode_map(d, add)) or_return
+ return
+ }
+
+ #partial switch t in ti.variant {
+ case reflect.Type_Info_Struct:
+ if t.is_raw_union {
+ return _unsupported(v, hdr)
+ }
+
+ length, _ := err_conv(_decode_len_container(d, add)) or_return
+ unknown := length == -1
+ fields := reflect.struct_fields_zipped(ti.id)
+
+ for idx := 0; idx < len(fields) && (unknown || idx < length); idx += 1 {
+ // Decode key, keys can only be strings.
+ key: string
+ if keyv, kerr := decode_key(d, v, context.temp_allocator); unknown && kerr == .Break {
+ break
+ } else if kerr != nil {
+ err = kerr
+ return
+ } else {
+ key = keyv
+ }
+ defer delete(key, context.temp_allocator)
+
+ // Find matching field.
+ use_field_idx := -1
+ {
+ for field, field_idx in fields {
+ tag_value := string(reflect.struct_tag_get(field.tag, "cbor"))
+ if tag_value == "-" {
+ continue
+ }
+
+ if key == tag_value {
+ use_field_idx = field_idx
+ break
+ }
+
+ if key == field.name {
+ // No break because we want to still check remaining struct tags.
+ use_field_idx = field_idx
+ }
+ }
+
+ // Skips unused map entries.
+ if use_field_idx < 0 {
+ continue
+ }
+ }
+
+ field := fields[use_field_idx]
+ // name := field.name
+ ptr := rawptr(uintptr(v.data) + field.offset)
+ fany := any{ptr, field.type.id}
+ _unmarshal_value(d, fany, _decode_header(r) or_return) or_return
+ }
+ return
+
+ case reflect.Type_Info_Map:
+ if !reflect.is_string(t.key) {
+ return _unsupported(v, hdr)
+ }
+
+ raw_map := (^mem.Raw_Map)(v.data)
+ if raw_map.allocator.procedure == nil {
+ raw_map.allocator = context.allocator
+ }
+
+ defer if err != nil {
+ _ = runtime.map_free_dynamic(raw_map^, t.map_info)
+ }
+
+ length, scap := err_conv(_decode_len_container(d, add)) or_return
+ unknown := length == -1
+ if !unknown {
+ // Reserve space before setting so we can return allocation errors and be efficient on big maps.
+ new_len := uintptr(min(scap, runtime.map_len(raw_map^)+length))
+ runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
+ }
+
+ // Temporary memory to unmarshal keys into before inserting them into the map.
+ elem_backing := mem.alloc_bytes_non_zeroed(t.value.size, t.value.align, context.temp_allocator) or_return
+ defer delete(elem_backing, context.temp_allocator)
+
+ map_backing_value := any{raw_data(elem_backing), t.value.id}
+
+ for idx := 0; unknown || idx < length; idx += 1 {
+ // Decode key, keys can only be strings.
+ key: string
+ if keyv, kerr := decode_key(d, v); unknown && kerr == .Break {
+ break
+ } else if kerr != nil {
+ err = kerr
+ return
+ } else {
+ key = keyv
+ }
+
+ if unknown || idx > scap {
+ // Reserve space for new element so we can return allocator errors.
+ new_len := uintptr(runtime.map_len(raw_map^)+1)
+ runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
+ }
+
+ mem.zero_slice(elem_backing)
+ _unmarshal_value(d, map_backing_value, _decode_header(r) or_return) or_return
+
+ key_ptr := rawptr(&key)
+ key_cstr: cstring
+ if reflect.is_cstring(t.key) {
+ assert_safe_for_cstring(key)
+ key_cstr = cstring(raw_data(key))
+ key_ptr = &key_cstr
+ }
+
+ set_ptr := runtime.__dynamic_map_set_without_hash(raw_map, t.map_info, key_ptr, map_backing_value.data)
+ // We already reserved space for it, so this shouldn't fail.
+ assert(set_ptr != nil)
+ }
+
+ if .Shrink_Excess in d.flags {
+ _, _ = runtime.map_shrink_dynamic(raw_map, t.map_info)
+ }
+ return
+
+ case:
+ return _unsupported(v, hdr)
+ }
+}
+
+// Unmarshal into a union, based on the `TAG_OBJECT_TYPE` tag of the spec, it denotes a tag which
+// contains an array of exactly two elements, the first is a textual representation of the following
+// CBOR value's type.
+_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header) -> (err: Unmarshal_Error) {
+ r := d.reader
+ #partial switch t in ti.variant {
+ case reflect.Type_Info_Union:
+ idhdr: Header
+ target_name: string
+ {
+ vhdr := _decode_header(r) or_return
+ vmaj, vadd := _header_split(vhdr)
+ if vmaj != .Array {
+ return .Bad_Tag_Value
+ }
+
+ n_items, _ := err_conv(_decode_len_container(d, vadd)) or_return
+ if n_items != 2 {
+ return .Bad_Tag_Value
+ }
+
+ idhdr = _decode_header(r) or_return
+ idmaj, idadd := _header_split(idhdr)
+ if idmaj != .Text {
+ return .Bad_Tag_Value
+ }
+
+ target_name = err_conv(_decode_text(d, idadd, context.temp_allocator)) or_return
+ }
+ defer delete(target_name, context.temp_allocator)
+
+ for variant, i in t.variants {
+ tag := i64(i)
+ if !t.no_nil {
+ tag += 1
+ }
+
+ #partial switch vti in variant.variant {
+ case reflect.Type_Info_Named:
+ if vti.name == target_name {
+ reflect.set_union_variant_raw_tag(v, tag)
+ return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+ }
+
+ case:
+ builder := strings.builder_make(context.temp_allocator)
+ defer strings.builder_destroy(&builder)
+
+ reflect.write_type(&builder, variant)
+ variant_name := strings.to_string(builder)
+
+ if variant_name == target_name {
+ reflect.set_union_variant_raw_tag(v, tag)
+ return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+ }
+ }
+ }
+
+ // No variant matched.
+ return _unsupported(v, idhdr)
+
+ case:
+ // Not a union.
+ return _unsupported(v, hdr)
+ }
+}
+
+_assign_int :: proc(val: any, i: $T) -> bool {
+ v := reflect.any_core(val)
+
+ // NOTE: should under/over flow be checked here? `encoding/json` doesn't, but maybe that is a
+ // less strict encoding?.
+
+ switch &dst in v {
+ case i8: dst = i8 (i)
+ case i16: dst = i16 (i)
+ case i16le: dst = i16le (i)
+ case i16be: dst = i16be (i)
+ case i32: dst = i32 (i)
+ case i32le: dst = i32le (i)
+ case i32be: dst = i32be (i)
+ case i64: dst = i64 (i)
+ case i64le: dst = i64le (i)
+ case i64be: dst = i64be (i)
+ case i128: dst = i128 (i)
+ case i128le: dst = i128le (i)
+ case i128be: dst = i128be (i)
+ case u8: dst = u8 (i)
+ case u16: dst = u16 (i)
+ case u16le: dst = u16le (i)
+ case u16be: dst = u16be (i)
+ case u32: dst = u32 (i)
+ case u32le: dst = u32le (i)
+ case u32be: dst = u32be (i)
+ case u64: dst = u64 (i)
+ case u64le: dst = u64le (i)
+ case u64be: dst = u64be (i)
+ case u128: dst = u128 (i)
+ case u128le: dst = u128le (i)
+ case u128be: dst = u128be (i)
+ case int: dst = int (i)
+ case uint: dst = uint (i)
+ case uintptr: dst = uintptr(i)
+ case:
+ ti := type_info_of(v.id)
+ if _, ok := ti.variant.(runtime.Type_Info_Bit_Set); ok {
+ do_byte_swap := !reflect.bit_set_is_big_endian(v)
+ switch ti.size * 8 {
+ case 0: // no-op.
+ case 8:
+ x := (^u8)(v.data)
+ x^ = u8(i)
+ case 16:
+ x := (^u16)(v.data)
+ x^ = do_byte_swap ? intrinsics.byte_swap(u16(i)) : u16(i)
+ case 32:
+ x := (^u32)(v.data)
+ x^ = do_byte_swap ? intrinsics.byte_swap(u32(i)) : u32(i)
+ case 64:
+ x := (^u64)(v.data)
+ x^ = do_byte_swap ? intrinsics.byte_swap(u64(i)) : u64(i)
+ case:
+ panic("unknown bit_size size")
+ }
+ return true
+ }
+ return false
+ }
+ return true
+}
+
+_assign_float :: proc(val: any, f: $T) -> bool {
+ v := reflect.any_core(val)
+
+ // NOTE: should under/over flow be checked here? `encoding/json` doesn't, but maybe that is a
+ // less strict encoding?.
+
+ switch &dst in v {
+ case f16: dst = f16 (f)
+ case f16le: dst = f16le(f)
+ case f16be: dst = f16be(f)
+ case f32: dst = f32 (f)
+ case f32le: dst = f32le(f)
+ case f32be: dst = f32be(f)
+ case f64: dst = f64 (f)
+ case f64le: dst = f64le(f)
+ case f64be: dst = f64be(f)
+
+ case complex32: dst = complex(f16(f), 0)
+ case complex64: dst = complex(f32(f), 0)
+ case complex128: dst = complex(f64(f), 0)
+
+ case quaternion64: dst = quaternion(w=f16(f), x=0, y=0, z=0)
+ case quaternion128: dst = quaternion(w=f32(f), x=0, y=0, z=0)
+ case quaternion256: dst = quaternion(w=f64(f), x=0, y=0, z=0)
+
+ case: return false
+ }
+ return true
+}
+
+_assign_bool :: proc(val: any, b: bool) -> bool {
+ v := reflect.any_core(val)
+ switch &dst in v {
+ case bool: dst = bool(b)
+ case b8: dst = b8 (b)
+ case b16: dst = b16 (b)
+ case b32: dst = b32 (b)
+ case b64: dst = b64 (b)
+ case: return false
+ }
+ return true
+}
+
+// Sanity check that the decoder added a nil byte to the end.
+@(private, disabled=ODIN_DISABLE_ASSERT)
+assert_safe_for_cstring :: proc(s: string, loc := #caller_location) {
+ assert(([^]byte)(raw_data(s))[len(s)] == 0, loc = loc)
+}
diff --git a/core/encoding/csv/reader.odin b/core/encoding/csv/reader.odin
index 44a9fdcc4..f8c72c423 100644
--- a/core/encoding/csv/reader.odin
+++ b/core/encoding/csv/reader.odin
@@ -1,6 +1,6 @@
// package csv reads and writes comma-separated values (CSV) files.
// This package supports the format described in RFC 4180 <https://tools.ietf.org/html/rfc4180.html>
-package csv
+package encoding_csv
import "core:bufio"
import "core:bytes"
@@ -91,7 +91,10 @@ DEFAULT_RECORD_BUFFER_CAPACITY :: 256
// reader_init initializes a new Reader from r
reader_init :: proc(reader: ^Reader, r: io.Reader, buffer_allocator := context.allocator) {
- reader.comma = ','
+ switch reader.comma {
+ case '\x00', '\n', '\r', 0xfffd:
+ reader.comma = ','
+ }
context.allocator = buffer_allocator
reserve(&reader.record_buffer, DEFAULT_RECORD_BUFFER_CAPACITY)
@@ -121,6 +124,7 @@ reader_destroy :: proc(r: ^Reader) {
// read reads a single record (a slice of fields) from r
//
// All \r\n sequences are normalized to \n, including multi-line field
+@(require_results)
read :: proc(r: ^Reader, allocator := context.allocator) -> (record: []string, err: Error) {
if r.reuse_record {
record, err = _read_record(r, &r.last_record, allocator)
@@ -133,6 +137,7 @@ read :: proc(r: ^Reader, allocator := context.allocator) -> (record: []string, e
}
// is_io_error checks where an Error is a specific io.Error kind
+@(require_results)
is_io_error :: proc(err: Error, io_err: io.Error) -> bool {
if v, ok := err.(io.Error); ok {
return v == io_err
@@ -140,10 +145,10 @@ is_io_error :: proc(err: Error, io_err: io.Error) -> bool {
return false
}
-
// read_all reads all the remaining records from r.
// Each record is a slice of fields.
// read_all is defined to read until an EOF, and does not treat, and does not treat EOF as an error
+@(require_results)
read_all :: proc(r: ^Reader, allocator := context.allocator) -> ([][]string, Error) {
context.allocator = allocator
records: [dynamic][]string
@@ -153,13 +158,18 @@ read_all :: proc(r: ^Reader, allocator := context.allocator) -> ([][]string, Err
return records[:], nil
}
if rerr != nil {
- return nil, rerr
+ // allow for a partial read
+ if record != nil {
+ append(&records, record)
+ }
+ return records[:], rerr
}
append(&records, record)
}
}
// read reads a single record (a slice of fields) from the provided input.
+@(require_results)
read_from_string :: proc(input: string, record_allocator := context.allocator, buffer_allocator := context.allocator) -> (record: []string, n: int, err: Error) {
ir: strings.Reader
strings.reader_init(&ir, input)
@@ -175,6 +185,7 @@ read_from_string :: proc(input: string, record_allocator := context.allocator, b
// read_all reads all the remaining records from the provided input.
+@(require_results)
read_all_from_string :: proc(input: string, records_allocator := context.allocator, buffer_allocator := context.allocator) -> ([][]string, Error) {
ir: strings.Reader
strings.reader_init(&ir, input)
@@ -186,7 +197,7 @@ read_all_from_string :: proc(input: string, records_allocator := context.allocat
return read_all(&r, records_allocator)
}
-@private
+@(private, require_results)
is_valid_delim :: proc(r: rune) -> bool {
switch r {
case 0, '"', '\r', '\n', utf8.RUNE_ERROR:
@@ -195,8 +206,9 @@ is_valid_delim :: proc(r: rune) -> bool {
return utf8.valid_rune(r)
}
-@private
+@(private, require_results)
_read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.allocator) -> ([]string, Error) {
+ @(require_results)
read_line :: proc(r: ^Reader) -> ([]byte, io.Error) {
if !r.multiline_fields {
line, err := bufio.reader_read_slice(&r.r, '\n')
@@ -266,6 +278,7 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
unreachable()
}
+ @(require_results)
length_newline :: proc(b: []byte) -> int {
if len(b) > 0 && b[len(b)-1] == '\n' {
return 1
@@ -273,6 +286,7 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
return 0
}
+ @(require_results)
next_rune :: proc(b: []byte) -> rune {
r, _ := utf8.decode_rune(b)
return r
diff --git a/core/encoding/csv/writer.odin b/core/encoding/csv/writer.odin
index d519104f2..132fa0a51 100644
--- a/core/encoding/csv/writer.odin
+++ b/core/encoding/csv/writer.odin
@@ -1,4 +1,4 @@
-package csv
+package encoding_csv
import "core:io"
import "core:strings"
@@ -17,7 +17,10 @@ Writer :: struct {
// writer_init initializes a Writer that writes to w
writer_init :: proc(writer: ^Writer, w: io.Writer) {
- writer.comma = ','
+ switch writer.comma {
+ case '\x00', '\n', '\r', 0xfffd:
+ writer.comma = ','
+ }
writer.w = w
}
diff --git a/core/encoding/entity/entity.odin b/core/encoding/entity/entity.odin
index ec640c69f..cee6230ef 100644
--- a/core/encoding/entity/entity.odin
+++ b/core/encoding/entity/entity.odin
@@ -1,4 +1,4 @@
-package unicode_entity
+package encoding_unicode_entity
/*
A unicode entity encoder/decoder
diff --git a/core/encoding/entity/generated.odin b/core/encoding/entity/generated.odin
index 3d1c02513..d2acde20d 100644
--- a/core/encoding/entity/generated.odin
+++ b/core/encoding/entity/generated.odin
@@ -1,4 +1,4 @@
-package unicode_entity
+package encoding_unicode_entity
/*
------ GENERATED ------ DO NOT EDIT ------ GENERATED ------ DO NOT EDIT ------ GENERATED ------
diff --git a/core/encoding/hex/hex.odin b/core/encoding/hex/hex.odin
index ef0bab1d0..dbffe216b 100644
--- a/core/encoding/hex/hex.odin
+++ b/core/encoding/hex/hex.odin
@@ -1,4 +1,4 @@
-package hex
+package encoding_hex
import "core:strings"
diff --git a/core/encoding/json/marshal.odin b/core/encoding/json/marshal.odin
index 3d57316b3..04ef6d434 100644
--- a/core/encoding/json/marshal.odin
+++ b/core/encoding/json/marshal.odin
@@ -1,4 +1,4 @@
-package json
+package encoding_json
import "core:mem"
import "core:math/bits"
diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin
index 8bcef1339..3973725dc 100644
--- a/core/encoding/json/parser.odin
+++ b/core/encoding/json/parser.odin
@@ -1,4 +1,4 @@
-package json
+package encoding_json
import "core:mem"
import "core:unicode/utf8"
diff --git a/core/encoding/json/tokenizer.odin b/core/encoding/json/tokenizer.odin
index a406a73a5..5c20a2cc3 100644
--- a/core/encoding/json/tokenizer.odin
+++ b/core/encoding/json/tokenizer.odin
@@ -1,4 +1,4 @@
-package json
+package encoding_json
import "core:unicode/utf8"
diff --git a/core/encoding/json/types.odin b/core/encoding/json/types.odin
index 20c806236..73e183615 100644
--- a/core/encoding/json/types.odin
+++ b/core/encoding/json/types.odin
@@ -1,4 +1,4 @@
-package json
+package encoding_json
import "core:strings"
diff --git a/core/encoding/json/unmarshal.odin b/core/encoding/json/unmarshal.odin
index b2052e43c..691303521 100644
--- a/core/encoding/json/unmarshal.odin
+++ b/core/encoding/json/unmarshal.odin
@@ -1,4 +1,4 @@
-package json
+package encoding_json
import "core:mem"
import "core:math"
diff --git a/core/encoding/json/validator.odin b/core/encoding/json/validator.odin
index 961c2dc23..a6873319d 100644
--- a/core/encoding/json/validator.odin
+++ b/core/encoding/json/validator.odin
@@ -1,4 +1,4 @@
-package json
+package encoding_json
import "core:mem"
diff --git a/core/encoding/varint/doc.odin b/core/encoding/varint/doc.odin
index 5e4708a59..c0a09873c 100644
--- a/core/encoding/varint/doc.odin
+++ b/core/encoding/varint/doc.odin
@@ -25,4 +25,4 @@
```
*/
-package varint \ No newline at end of file
+package encoding_varint \ No newline at end of file
diff --git a/core/encoding/varint/leb128.odin b/core/encoding/varint/leb128.odin
index 1cdbb81b0..ca6513f04 100644
--- a/core/encoding/varint/leb128.odin
+++ b/core/encoding/varint/leb128.odin
@@ -8,7 +8,7 @@
// package varint implements variable length integer encoding and decoding using
// the LEB128 format as used by DWARF debug info, Android .dex and other file formats.
-package varint
+package encoding_varint
// In theory we should use the bigint package. In practice, varints bigger than this indicate a corrupted file.
// Instead we'll set limits on the values we'll encode/decode
diff --git a/core/encoding/xml/debug_print.odin b/core/encoding/xml/debug_print.odin
index 2607bec23..be958baaa 100644
--- a/core/encoding/xml/debug_print.odin
+++ b/core/encoding/xml/debug_print.odin
@@ -1,4 +1,4 @@
-package xml
+package encoding_xml
/*
An XML 1.0 / 1.1 parser
diff --git a/core/encoding/xml/helpers.odin b/core/encoding/xml/helpers.odin
index 42a5258b3..a9d4ad493 100644
--- a/core/encoding/xml/helpers.odin
+++ b/core/encoding/xml/helpers.odin
@@ -1,4 +1,4 @@
-package xml
+package encoding_xml
/*
An XML 1.0 / 1.1 parser
diff --git a/core/encoding/xml/tokenizer.odin b/core/encoding/xml/tokenizer.odin
index a223a75d6..0f87c366b 100644
--- a/core/encoding/xml/tokenizer.odin
+++ b/core/encoding/xml/tokenizer.odin
@@ -1,4 +1,4 @@
-package xml
+package encoding_xml
/*
An XML 1.0 / 1.1 parser
diff --git a/core/encoding/xml/xml_reader.odin b/core/encoding/xml/xml_reader.odin
index bf8646bc3..5b4b12948 100644
--- a/core/encoding/xml/xml_reader.odin
+++ b/core/encoding/xml/xml_reader.odin
@@ -24,7 +24,7 @@ MAYBE:
List of contributors:
- Jeroen van Rijn: Initial implementation.
*/
-package xml
+package encoding_xml
// An XML 1.0 / 1.1 parser
import "core:bytes"
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index d3b9d7d69..ba749d102 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -1900,7 +1900,7 @@ fmt_struct :: proc(fi: ^Info, v: any, the_verb: rune, info: runtime.Type_Info_St
// fi.hash = false;
fi.indent += 1
- if hash {
+ if !is_soa && hash {
io.write_byte(fi.writer, '\n', &fi.n)
}
defer {
@@ -1934,6 +1934,9 @@ fmt_struct :: proc(fi: ^Info, v: any, the_verb: rune, info: runtime.Type_Info_St
n = uintptr((^int)(uintptr(v.data) + info.offsets[actual_field_count])^)
}
+ if hash && n > 0 {
+ io.write_byte(fi.writer, '\n', &fi.n)
+ }
for index in 0..<n {
if !hash && index > 0 { io.write_string(fi.writer, ", ", &fi.n) }
@@ -1942,9 +1945,23 @@ fmt_struct :: proc(fi: ^Info, v: any, the_verb: rune, info: runtime.Type_Info_St
if !hash && field_count > 0 { io.write_string(fi.writer, ", ", &fi.n) }
+ if hash {
+ fi.indent -= 1
+ fmt_write_indent(fi)
+ fi.indent += 1
+ }
io.write_string(fi.writer, base_type_name, &fi.n)
io.write_byte(fi.writer, '{', &fi.n)
- defer io.write_byte(fi.writer, '}', &fi.n)
+ if hash { io.write_byte(fi.writer, '\n', &fi.n) }
+ defer {
+ if hash {
+ fi.indent -= 1
+ fmt_write_indent(fi)
+ fi.indent += 1
+ }
+ io.write_byte(fi.writer, '}', &fi.n)
+ if hash { io.write_string(fi.writer, ",\n", &fi.n) }
+ }
fi.record_level += 1
defer fi.record_level -= 1
@@ -2156,14 +2173,18 @@ fmt_named :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Named)
when ODIN_ERROR_POS_STYLE == .Default {
io.write_byte(fi.writer, '(', &fi.n)
io.write_int(fi.writer, int(a.line), 10, &fi.n)
- io.write_byte(fi.writer, ':', &fi.n)
- io.write_int(fi.writer, int(a.column), 10, &fi.n)
+ if a.column != 0 {
+ io.write_byte(fi.writer, ':', &fi.n)
+ io.write_int(fi.writer, int(a.column), 10, &fi.n)
+ }
io.write_byte(fi.writer, ')', &fi.n)
} else when ODIN_ERROR_POS_STYLE == .Unix {
io.write_byte(fi.writer, ':', &fi.n)
io.write_int(fi.writer, int(a.line), 10, &fi.n)
- io.write_byte(fi.writer, ':', &fi.n)
- io.write_int(fi.writer, int(a.column), 10, &fi.n)
+ if a.column != 0 {
+ io.write_byte(fi.writer, ':', &fi.n)
+ io.write_int(fi.writer, int(a.column), 10, &fi.n)
+ }
io.write_byte(fi.writer, ':', &fi.n)
} else {
#panic("Unhandled ODIN_ERROR_POS_STYLE")
diff --git a/core/fmt/fmt_js.odin b/core/fmt/fmt_js.odin
index c70b7c1c0..a0a890a9a 100644
--- a/core/fmt/fmt_js.odin
+++ b/core/fmt/fmt_js.odin
@@ -37,6 +37,8 @@ print :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(w
println :: proc(args: ..any, sep := " ", flush := true) -> int { return wprintln(w=stdout, args=args, sep=sep, flush=flush) }
// printf formats according to the specififed format string and writes to stdout
printf :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush) }
+// printfln formats according to the specified format string and writes to stdout, followed by a newline.
+printfln :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush, newline=true) }
// eprint formats using the default print settings and writes to stderr
eprint :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(w=stderr, args=args, sep=sep, flush=flush) }
@@ -44,3 +46,5 @@ eprint :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(
eprintln :: proc(args: ..any, sep := " ", flush := true) -> int { return wprintln(w=stderr, args=args, sep=sep, flush=flush) }
// eprintf formats according to the specififed format string and writes to stderr
eprintf :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stderr, fmt, ..args, flush=flush) }
+// eprintfln formats according to the specified format string and writes to stderr, followed by a newline.
+eprintfln :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stderr, fmt, ..args, flush=flush, newline=true) }
diff --git a/core/image/netpbm/doc.odin b/core/image/netpbm/doc.odin
index 1b5b46856..7106e023e 100644
--- a/core/image/netpbm/doc.odin
+++ b/core/image/netpbm/doc.odin
@@ -1,5 +1,6 @@
/*
Formats:
+
PBM (P1, P4): Portable Bit Map, stores black and white images (1 channel)
PGM (P2, P5): Portable Gray Map, stores greyscale images (1 channel, 1 or 2 bytes per value)
PPM (P3, P6): Portable Pixel Map, stores colour images (3 channel, 1 or 2 bytes per value)
@@ -7,27 +8,29 @@ Formats:
PFM (Pf, PF): Portable Float Map, stores floating-point images (Pf: 1 channel, PF: 3 channel)
Reading:
- All formats fill out header fields `format`, `width`, `height`, `channels`, `depth`
- Specific formats use more fields
- PGM, PPM, and PAM set `maxval` (maximum of 65535)
- PAM sets `tupltype` if there is one, and can set `channels` to any value (not just 1 or 3)
- PFM sets `scale` (float equivalent of `maxval`) and `little_endian` (endianness of stored floats)
- Currently doesn't support reading multiple images from one binary-format file
+
+- All formats fill out header fields `format`, `width`, `height`, `channels`, `depth`.
+- Specific formats use more fields:
+ PGM, PPM, and PAM set `maxval` (maximum of 65535)
+ PAM sets `tupltype` if there is one, and can set `channels` to any value (not just 1 or 3)
+ PFM sets `scale` (float equivalent of `maxval`) and `little_endian` (endianness of stored floats)
+- Currently doesn't support reading multiple images from one binary-format file.
Writing:
- You can use your own `Netpbm_Info` struct to control how images are written
- All formats require the header field `format` to be specified
- Additional header fields are required for specific formats
- PGM, PPM, and PAM require `maxval` (maximum of 65535)
- PAM also uses `tupltype`, though it may be left as default (empty or nil string)
- PFM requires `scale`, and optionally `little_endian`
+
+- You can use your own `Netpbm_Info` struct to control how images are written.
+- All formats require the header field `format` to be specified.
+- Additional header fields are required for specific formats:
+ PGM, PPM, and PAM require `maxval` (maximum of 65535)
+ PAM also uses `tupltype`, though it may be left as default (empty or nil string)
+ PFM requires `scale`, and optionally `little_endian`
Some syntax differences from the specifications:
- `channels` stores the number of values per pixel, what the PAM specification calls `depth`
- `depth` instead is the number of bits for a single value (32 for PFM, 16 or 8 otherwise)
- `scale` and `little_endian` are separated, so the `header` will always store a positive `scale`
- `little_endian` will only be true for a negative `scale` PFM, every other format will be false
- `little_endian` only describes the netpbm data being read/written, the image buffer will be native
-*/
+- `channels` stores the number of values per pixel, what the PAM specification calls `depth`
+- `depth` instead is the number of bits for a single value (32 for PFM, 16 or 8 otherwise)
+- `scale` and `little_endian` are separated, so the `header` will always store a positive `scale`
+- `little_endian` will only be true for a negative `scale` PFM, every other format will be false
+- `little_endian` only describes the netpbm data being read/written, the image buffer will be native
+*/
package netpbm
diff --git a/core/io/io.odin b/core/io/io.odin
index ea8e240b0..961dbe43e 100644
--- a/core/io/io.odin
+++ b/core/io/io.odin
@@ -29,7 +29,7 @@ Error :: enum i32 {
// Invalid_Write means that a write returned an impossible count
Invalid_Write,
- // Short_Buffer means that a read required a longer buffer than was provided
+ // Short_Buffer means that a read/write required a longer buffer than was provided
Short_Buffer,
// No_Progress is returned by some implementations of `io.Reader` when many calls
@@ -359,6 +359,29 @@ read_at_least :: proc(r: Reader, buf: []byte, min: int) -> (n: int, err: Error)
return
}
+// write_full writes until the entire contents of `buf` has been written or an error occurs.
+write_full :: proc(w: Writer, buf: []byte) -> (n: int, err: Error) {
+ return write_at_least(w, buf, len(buf))
+}
+
+// write_at_least writes at least `buf[:min]` to the writer and returns the amount written.
+// If an error occurs before writing everything it is returned.
+write_at_least :: proc(w: Writer, buf: []byte, min: int) -> (n: int, err: Error) {
+ if len(buf) < min {
+ return 0, .Short_Buffer
+ }
+ for n < min && err == nil {
+ nn: int
+ nn, err = write(w, buf[n:])
+ n += nn
+ }
+
+ if err == nil && n < min {
+ err = .Short_Write
+ }
+ return
+}
+
// copy copies from src to dst till either EOF is reached on src or an error occurs
// It returns the number of bytes copied and the first error that occurred whilst copying, if any.
copy :: proc(dst: Writer, src: Reader) -> (written: i64, err: Error) {
diff --git a/core/math/big/prime.odin b/core/math/big/prime.odin
index b02b7cb4e..5e7c02f37 100644
--- a/core/math/big/prime.odin
+++ b/core/math/big/prime.odin
@@ -1247,6 +1247,20 @@ internal_random_prime :: proc(a: ^Int, size_in_bits: int, trials: int, flags :=
a.digit[0] |= 3
}
if .Second_MSB_On in flags {
+ /*
+ Ensure there's enough space for the bit to be set.
+ */
+ if a.used * _DIGIT_BITS < size_in_bits - 1 {
+ new_size := (size_in_bits - 1) / _DIGIT_BITS
+
+ if new_size % _DIGIT_BITS > 0 {
+ new_size += 1
+ }
+
+ internal_grow(a, new_size) or_return
+ a.used = new_size
+ }
+
internal_int_bitfield_set_single(a, size_in_bits - 2) or_return
}
diff --git a/core/math/math.odin b/core/math/math.odin
index 570c2d255..8d85c2381 100644
--- a/core/math/math.odin
+++ b/core/math/math.odin
@@ -60,6 +60,7 @@ sqrt :: proc{
@(require_results) sin_f32be :: proc "contextless" (θ: f32be) -> f32be { return #force_inline f32be(sin_f32(f32(θ))) }
@(require_results) sin_f64le :: proc "contextless" (θ: f64le) -> f64le { return #force_inline f64le(sin_f64(f64(θ))) }
@(require_results) sin_f64be :: proc "contextless" (θ: f64be) -> f64be { return #force_inline f64be(sin_f64(f64(θ))) }
+// Return the sine of θ in radians.
sin :: proc{
sin_f16, sin_f16le, sin_f16be,
sin_f32, sin_f32le, sin_f32be,
@@ -72,6 +73,7 @@ sin :: proc{
@(require_results) cos_f32be :: proc "contextless" (θ: f32be) -> f32be { return #force_inline f32be(cos_f32(f32(θ))) }
@(require_results) cos_f64le :: proc "contextless" (θ: f64le) -> f64le { return #force_inline f64le(cos_f64(f64(θ))) }
@(require_results) cos_f64be :: proc "contextless" (θ: f64be) -> f64be { return #force_inline f64be(cos_f64(f64(θ))) }
+// Return the cosine of θ in radians.
cos :: proc{
cos_f16, cos_f16le, cos_f16be,
cos_f32, cos_f32le, cos_f32be,
@@ -378,6 +380,7 @@ log10 :: proc{
@(require_results) tan_f64 :: proc "contextless" (θ: f64) -> f64 { return sin(θ)/cos(θ) }
@(require_results) tan_f64le :: proc "contextless" (θ: f64le) -> f64le { return f64le(tan_f64(f64(θ))) }
@(require_results) tan_f64be :: proc "contextless" (θ: f64be) -> f64be { return f64be(tan_f64(f64(θ))) }
+// Return the tangent of θ in radians.
tan :: proc{
tan_f16, tan_f16le, tan_f16be,
tan_f32, tan_f32le, tan_f32be,
@@ -1752,7 +1755,28 @@ atan2_f64be :: proc "contextless" (y, x: f64be) -> f64be {
// TODO(bill): Better atan2_f32
return f64be(atan2_f64(f64(y), f64(x)))
}
-
+/*
+ Return the arc tangent of y/x in radians. Defined on the domain [-∞, ∞] for x and y with a range of [-π, π]
+
+ Special cases:
+ atan2(y, NaN) = NaN
+ atan2(NaN, x) = NaN
+ atan2(+0, x>=0) = + 0
+ atan2(-0, x>=0) = - 0
+ atan2(+0, x<=-0) = + π
+ atan2(-0, x<=-0) = - π
+ atan2(y>0, 0) = + π/2
+ atan2(y<0, 0) = - π/2
+ atan2(+∞, +∞) = + π/4
+ atan2(-∞, +∞) = - π/4
+ atan2(+∞, -∞) = 3π/4
+ atan2(-∞, -∞) = - 3π/4
+ atan2(y, +∞) = 0
+ atan2(y>0, -∞) = + π
+ atan2(y<0, -∞) = - π
+ atan2(+∞, x) = + π/2
+ atan2(-∞, x) = - π/2
+*/
atan2 :: proc{
atan2_f64, atan2_f32, atan2_f16,
atan2_f64le, atan2_f64be,
@@ -1760,6 +1784,7 @@ atan2 :: proc{
atan2_f16le, atan2_f16be,
}
+// Return the arc tangent of x, in radians. Defined on the domain of [-∞, ∞] with a range of [-π/2, π/2]
@(require_results)
atan :: proc "contextless" (x: $T) -> T where intrinsics.type_is_float(T) {
return atan2(x, 1)
@@ -1871,6 +1896,7 @@ asin_f16le :: proc "contextless" (x: f16le) -> f16le {
asin_f16be :: proc "contextless" (x: f16be) -> f16be {
return f16be(asin_f64(f64(x)))
}
+// Return the arc sine of x, in radians. Defined on the domain of [-1, 1] with a range of [-π/2, π/2]
asin :: proc{
asin_f64, asin_f32, asin_f16,
asin_f64le, asin_f64be,
@@ -1985,6 +2011,7 @@ acos_f16le :: proc "contextless" (x: f16le) -> f16le {
acos_f16be :: proc "contextless" (x: f16be) -> f16be {
return f16be(acos_f64(f64(x)))
}
+// Return the arc cosine of x, in radians. Defined on the domain of [-1, 1] with a range of [0, π].
acos :: proc{
acos_f64, acos_f32, acos_f16,
acos_f64le, acos_f64be,
diff --git a/core/math/rand/rand.odin b/core/math/rand/rand.odin
index 560dc8379..d6a20bd1e 100644
--- a/core/math/rand/rand.odin
+++ b/core/math/rand/rand.odin
@@ -789,8 +789,8 @@ shuffle :: proc(array: $T/[]$E, r: ^Rand = nil) {
return
}
- for i := i64(0); i < n; i += 1 {
- j := int63_max(n, r)
+ for i := i64(n - 1); i > 0; i -= 1 {
+ j := int63_max(i + 1, r)
array[i], array[j] = array[j], array[i]
}
}
diff --git a/core/net/socket_linux.odin b/core/net/socket_linux.odin
index ba48959fb..a4d75b92b 100644
--- a/core/net/socket_linux.odin
+++ b/core/net/socket_linux.odin
@@ -258,8 +258,12 @@ _send_tcp :: proc(tcp_sock: TCP_Socket, buf: []byte) -> (int, Network_Error) {
for total_written < len(buf) {
limit := min(int(max(i32)), len(buf) - total_written)
remaining := buf[total_written:][:limit]
- res, errno := linux.send(linux.Fd(tcp_sock), remaining, {})
- if errno != .NONE {
+ res, errno := linux.send(linux.Fd(tcp_sock), remaining, {.NOSIGNAL})
+ if errno == .EPIPE {
+ // If the peer is disconnected when we are trying to send we will get an `EPIPE` error,
+ // so we turn that into a clearer error
+ return total_written, TCP_Send_Error.Connection_Closed
+ } else if errno != .NONE {
return total_written, TCP_Send_Error(errno)
}
total_written += int(res)
diff --git a/core/net/url.odin b/core/net/url.odin
index 7ad88bd1f..16aa57ec5 100644
--- a/core/net/url.odin
+++ b/core/net/url.odin
@@ -21,7 +21,7 @@ import "core:strconv"
import "core:unicode/utf8"
import "core:encoding/hex"
-split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host, path: string, queries: map[string]string) {
+split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host, path: string, queries: map[string]string, fragment: string) {
s := url
i := strings.index(s, "://")
@@ -30,6 +30,12 @@ split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host,
s = s[i+3:]
}
+ i = strings.index(s, "#")
+ if i != -1 {
+ fragment = s[i+1:]
+ s = s[:i]
+ }
+
i = strings.index(s, "?")
if i != -1 {
query_str := s[i+1:]
@@ -62,7 +68,7 @@ split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host,
return
}
-join_url :: proc(scheme, host, path: string, queries: map[string]string, allocator := context.allocator) -> string {
+join_url :: proc(scheme, host, path: string, queries: map[string]string, fragment: string, allocator := context.allocator) -> string {
b := strings.builder_make(allocator)
strings.builder_grow(&b, len(scheme) + 3 + len(host) + 1 + len(path))
@@ -95,6 +101,13 @@ join_url :: proc(scheme, host, path: string, queries: map[string]string, allocat
i += 1
}
+ if fragment != "" {
+ if fragment[0] != '#' {
+ strings.write_string(&b, "#")
+ }
+ strings.write_string(&b, strings.trim_space(fragment))
+ }
+
return strings.to_string(b)
}
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index 9eaef4655..b2ffd3888 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -416,24 +416,28 @@ end_of_line_pos :: proc(p: ^Parser, tok: tokenizer.Token) -> tokenizer.Pos {
}
expect_closing_brace_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
+ return expect_closing_token_of_field_list(p, .Close_Brace, "field list")
+}
+
+expect_closing_token_of_field_list :: proc(p: ^Parser, closing_kind: tokenizer.Token_Kind, msg: string) -> tokenizer.Token {
token := p.curr_tok
- if allow_token(p, .Close_Brace) {
+ if allow_token(p, closing_kind) {
return token
}
if allow_token(p, .Semicolon) && !tokenizer.is_newline(token) {
str := tokenizer.token_to_string(token)
error(p, end_of_line_pos(p, p.prev_tok), "expected a comma, got %s", str)
}
- expect_brace := expect_token(p, .Close_Brace)
+ expect_closing := expect_token_after(p, closing_kind, msg)
- if expect_brace.kind != .Close_Brace {
- for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF && !is_non_inserted_semicolon(p.curr_tok) {
+ if expect_closing.kind != closing_kind {
+ for p.curr_tok.kind != closing_kind && p.curr_tok.kind != .EOF && !is_non_inserted_semicolon(p.curr_tok) {
advance_token(p)
}
return p.curr_tok
}
- return expect_brace
+ return expect_closing
}
expect_closing_parentheses_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
@@ -1354,6 +1358,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
rs := ast.new(ast.Return_Stmt, tok.pos, end)
rs.results = results[:]
+ expect_semicolon(p, rs)
return rs
case .Break, .Continue, .Fallthrough:
@@ -2990,8 +2995,8 @@ parse_literal_value :: proc(p: ^Parser, type: ^ast.Expr) -> ^ast.Comp_Lit {
}
p.expr_level -= 1
- skip_possible_newline(p)
- close := expect_token_after(p, .Close_Brace, "compound literal")
+ skip_possible_newline(p)
+ close := expect_closing_brace_of_field_list(p)
pos := type.pos if type != nil else open.pos
lit := ast.new(ast.Comp_Lit, pos, end_pos(close))
@@ -3054,7 +3059,7 @@ parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Expr {
allow_token(p, .Comma) or_break
}
- close := expect_token_after(p, .Close_Paren, "argument list")
+ close := expect_closing_token_of_field_list(p, .Close_Paren, "argument list")
p.expr_level -= 1
ce := ast.new(ast.Call_Expr, operand.pos, end_pos(close))
diff --git a/core/odin/tokenizer/tokenizer.odin b/core/odin/tokenizer/tokenizer.odin
index 41de3ac8b..62170aa10 100644
--- a/core/odin/tokenizer/tokenizer.odin
+++ b/core/odin/tokenizer/tokenizer.odin
@@ -39,6 +39,7 @@ init :: proc(t: ^Tokenizer, src: string, path: string, err: Error_Handler = defa
t.read_offset = 0
t.line_offset = 0
t.line_count = len(src) > 0 ? 1 : 0
+ t.insert_semicolon = false
t.error_count = 0
t.path = path
diff --git a/core/os/stat.odin b/core/os/stat.odin
index 1b64ad33b..21a4961d1 100644
--- a/core/os/stat.odin
+++ b/core/os/stat.odin
@@ -3,8 +3,8 @@ package os
import "core:time"
File_Info :: struct {
- fullpath: string,
- name: string,
+ fullpath: string, // allocated
+ name: string, // uses `fullpath` as underlying data
size: i64,
mode: File_Mode,
is_dir: bool,
diff --git a/core/reflect/reflect.odin b/core/reflect/reflect.odin
index de5dec2e3..de7379ecc 100644
--- a/core/reflect/reflect.odin
+++ b/core/reflect/reflect.odin
@@ -934,6 +934,27 @@ set_union_value :: proc(dst: any, value: any) -> bool {
panic("expected a union to reflect.set_union_variant_typeid")
}
+@(require_results)
+bit_set_is_big_endian :: proc(value: any, loc := #caller_location) -> bool {
+ if value == nil { return ODIN_ENDIAN == .Big }
+
+ ti := runtime.type_info_base(type_info_of(value.id))
+ if info, ok := ti.variant.(runtime.Type_Info_Bit_Set); ok {
+ if info.underlying == nil { return ODIN_ENDIAN == .Big }
+
+ underlying_ti := runtime.type_info_base(info.underlying)
+ if underlying_info, uok := underlying_ti.variant.(runtime.Type_Info_Integer); uok {
+ switch underlying_info.endianness {
+ case .Platform: return ODIN_ENDIAN == .Big
+ case .Little: return false
+ case .Big: return true
+ }
+ }
+
+ return ODIN_ENDIAN == .Big
+ }
+ panic("expected a bit_set to reflect.bit_set_is_big_endian", loc)
+}
@(require_results)
diff --git a/core/sys/darwin/CoreFoundation/CFBase.odin b/core/sys/darwin/CoreFoundation/CFBase.odin
new file mode 100644
index 000000000..7335f087b
--- /dev/null
+++ b/core/sys/darwin/CoreFoundation/CFBase.odin
@@ -0,0 +1,34 @@
+package CoreFoundation
+
+foreign import CoreFoundation "system:CoreFoundation.framework"
+
+TypeID :: distinct uint
+OptionFlags :: distinct uint
+HashCode :: distinct uint
+Index :: distinct int
+TypeRef :: distinct rawptr
+
+Range :: struct {
+ location: Index,
+ length: Index,
+}
+
+foreign CoreFoundation {
+ // Releases a Core Foundation object.
+ CFRelease :: proc(cf: TypeRef) ---
+}
+
+// Releases a Core Foundation object.
+Release :: proc {
+ ReleaseObject,
+ ReleaseString,
+}
+
+ReleaseObject :: #force_inline proc(cf: TypeRef) {
+ CFRelease(cf)
+}
+
+// Releases a Core Foundation string.
+ReleaseString :: #force_inline proc(theString: String) {
+ CFRelease(TypeRef(theString))
+}
diff --git a/core/sys/darwin/CoreFoundation/CFString.odin b/core/sys/darwin/CoreFoundation/CFString.odin
new file mode 100644
index 000000000..4a167c604
--- /dev/null
+++ b/core/sys/darwin/CoreFoundation/CFString.odin
@@ -0,0 +1,203 @@
+package CoreFoundation
+
+import "base:runtime"
+
+foreign import CoreFoundation "system:CoreFoundation.framework"
+
+String :: distinct TypeRef // same as CFStringRef
+
+StringEncoding :: distinct u32
+
+StringBuiltInEncodings :: enum StringEncoding {
+ MacRoman = 0,
+ WindowsLatin1 = 0x0500,
+ ISOLatin1 = 0x0201,
+ NextStepLatin = 0x0B01,
+ ASCII = 0x0600,
+ Unicode = 0x0100,
+ UTF8 = 0x08000100,
+ NonLossyASCII = 0x0BFF,
+
+ UTF16 = 0x0100,
+ UTF16BE = 0x10000100,
+ UTF16LE = 0x14000100,
+
+ UTF32 = 0x0c000100,
+ UTF32BE = 0x18000100,
+ UTF32LE = 0x1c000100,
+}
+
+StringEncodings :: enum Index {
+ MacJapanese = 1,
+ MacChineseTrad = 2,
+ MacKorean = 3,
+ MacArabic = 4,
+ MacHebrew = 5,
+ MacGreek = 6,
+ MacCyrillic = 7,
+ MacDevanagari = 9,
+ MacGurmukhi = 10,
+ MacGujarati = 11,
+ MacOriya = 12,
+ MacBengali = 13,
+ MacTamil = 14,
+ MacTelugu = 15,
+ MacKannada = 16,
+ MacMalayalam = 17,
+ MacSinhalese = 18,
+ MacBurmese = 19,
+ MacKhmer = 20,
+ MacThai = 21,
+ MacLaotian = 22,
+ MacGeorgian = 23,
+ MacArmenian = 24,
+ MacChineseSimp = 25,
+ MacTibetan = 26,
+ MacMongolian = 27,
+ MacEthiopic = 28,
+ MacCentralEurRoman = 29,
+ MacVietnamese = 30,
+ MacExtArabic = 31,
+ MacSymbol = 33,
+ MacDingbats = 34,
+ MacTurkish = 35,
+ MacCroatian = 36,
+ MacIcelandic = 37,
+ MacRomanian = 38,
+ MacCeltic = 39,
+ MacGaelic = 40,
+ MacFarsi = 0x8C,
+ MacUkrainian = 0x98,
+ MacInuit = 0xEC,
+ MacVT100 = 0xFC,
+ MacHFS = 0xFF,
+ ISOLatin2 = 0x0202,
+ ISOLatin3 = 0x0203,
+ ISOLatin4 = 0x0204,
+ ISOLatinCyrillic = 0x0205,
+ ISOLatinArabic = 0x0206,
+ ISOLatinGreek = 0x0207,
+ ISOLatinHebrew = 0x0208,
+ ISOLatin5 = 0x0209,
+ ISOLatin6 = 0x020A,
+ ISOLatinThai = 0x020B,
+ ISOLatin7 = 0x020D,
+ ISOLatin8 = 0x020E,
+ ISOLatin9 = 0x020F,
+ ISOLatin10 = 0x0210,
+ DOSLatinUS = 0x0400,
+ DOSGreek = 0x0405,
+ DOSBalticRim = 0x0406,
+ DOSLatin1 = 0x0410,
+ DOSGreek1 = 0x0411,
+ DOSLatin2 = 0x0412,
+ DOSCyrillic = 0x0413,
+ DOSTurkish = 0x0414,
+ DOSPortuguese = 0x0415,
+ DOSIcelandic = 0x0416,
+ DOSHebrew = 0x0417,
+ DOSCanadianFrench = 0x0418,
+ DOSArabic = 0x0419,
+ DOSNordic = 0x041A,
+ DOSRussian = 0x041B,
+ DOSGreek2 = 0x041C,
+ DOSThai = 0x041D,
+ DOSJapanese = 0x0420,
+ DOSChineseSimplif = 0x0421,
+ DOSKorean = 0x0422,
+ DOSChineseTrad = 0x0423,
+ WindowsLatin2 = 0x0501,
+ WindowsCyrillic = 0x0502,
+ WindowsGreek = 0x0503,
+ WindowsLatin5 = 0x0504,
+ WindowsHebrew = 0x0505,
+ WindowsArabic = 0x0506,
+ WindowsBalticRim = 0x0507,
+ WindowsVietnamese = 0x0508,
+ WindowsKoreanJohab = 0x0510,
+ ANSEL = 0x0601,
+ JIS_X0201_76 = 0x0620,
+ JIS_X0208_83 = 0x0621,
+ JIS_X0208_90 = 0x0622,
+ JIS_X0212_90 = 0x0623,
+ JIS_C6226_78 = 0x0624,
+ ShiftJIS_X0213 = 0x0628,
+ ShiftJIS_X0213_MenKuTen = 0x0629,
+ GB_2312_80 = 0x0630,
+ GBK_95 = 0x0631,
+ GB_18030_2000 = 0x0632,
+ KSC_5601_87 = 0x0640,
+ KSC_5601_92_Johab = 0x0641,
+ CNS_11643_92_P1 = 0x0651,
+ CNS_11643_92_P2 = 0x0652,
+ CNS_11643_92_P3 = 0x0653,
+ ISO_2022_JP = 0x0820,
+ ISO_2022_JP_2 = 0x0821,
+ ISO_2022_JP_1 = 0x0822,
+ ISO_2022_JP_3 = 0x0823,
+ ISO_2022_CN = 0x0830,
+ ISO_2022_CN_EXT = 0x0831,
+ ISO_2022_KR = 0x0840,
+ EUC_JP = 0x0920,
+ EUC_CN = 0x0930,
+ EUC_TW = 0x0931,
+ EUC_KR = 0x0940,
+ ShiftJIS = 0x0A01,
+ KOI8_R = 0x0A02,
+ Big5 = 0x0A03,
+ MacRomanLatin1 = 0x0A04,
+ HZ_GB_2312 = 0x0A05,
+ Big5_HKSCS_1999 = 0x0A06,
+ VISCII = 0x0A07,
+ KOI8_U = 0x0A08,
+ Big5_E = 0x0A09,
+ NextStepJapanese = 0x0B02,
+ EBCDIC_US = 0x0C01,
+ EBCDIC_CP037 = 0x0C02,
+ UTF7 = 0x04000100,
+ UTF7_IMAP = 0x0A10,
+ ShiftJIS_X0213_00 = 0x0628, // Deprecated. Use `ShiftJIS_X0213` instead.
+}
+
+@(link_prefix = "CF", default_calling_convention = "c")
+foreign CoreFoundation {
+ // Copies the character contents of a string to a local C string buffer after converting the characters to a given encoding.
+ StringGetCString :: proc(theString: String, buffer: [^]byte, bufferSize: Index, encoding: StringEncoding) -> b8 ---
+
+ // Returns the number (in terms of UTF-16 code pairs) of Unicode characters in a string.
+ StringGetLength :: proc(theString: String) -> Index ---
+
+ // Returns the maximum number of bytes a string of a specified length (in Unicode characters) will take up if encoded in a specified encoding.
+ StringGetMaximumSizeForEncoding :: proc(length: Index, encoding: StringEncoding) -> Index ---
+
+ // Fetches a range of the characters from a string into a byte buffer after converting the characters to a specified encoding.
+ StringGetBytes :: proc(thestring: String, range: Range, encoding: StringEncoding, lossByte: u8, isExternalRepresentation: b8, buffer: [^]byte, maxBufLen: Index, usedBufLen: ^Index) -> Index ---
+
+ StringIsEncodingAvailable :: proc(encoding: StringEncoding) -> bool ---
+
+ @(link_name = "__CFStringMakeConstantString")
+ StringMakeConstantString :: proc "c" (#const c: cstring) -> String ---
+}
+
+STR :: StringMakeConstantString
+
+StringCopyToOdinString :: proc(
+ theString: String,
+ allocator := context.allocator,
+) -> (
+ str: string,
+ ok: bool,
+) #optional_ok {
+ length := StringGetLength(theString)
+ max := StringGetMaximumSizeForEncoding(length, StringEncoding(StringBuiltInEncodings.UTF8))
+
+ buf, err := make([]byte, max, allocator)
+ if err != nil do return
+
+ raw_str := runtime.Raw_String {
+ data = raw_data(buf),
+ }
+ StringGetBytes(theString, {0, length}, StringEncoding(StringBuiltInEncodings.UTF8), 0, false, raw_data(buf), max, (^Index)(&raw_str.len))
+
+ return transmute(string)raw_str, true
+}
diff --git a/core/sys/darwin/Foundation/NSApplication.odin b/core/sys/darwin/Foundation/NSApplication.odin
index d332345f9..34221aed6 100644
--- a/core/sys/darwin/Foundation/NSApplication.odin
+++ b/core/sys/darwin/Foundation/NSApplication.odin
@@ -132,7 +132,7 @@ Application_nextEventMatchingMask :: proc "c" (self: ^Application, mask: EventMa
@(objc_type=Application, objc_name="sendEvent")
Application_sendEvent :: proc "c" (self: ^Application, event: ^Event) {
- msgSend(Event, self, "sendEvent:", event)
+ msgSend(nil, self, "sendEvent:", event)
}
@(objc_type=Application, objc_name="updateWindows")
Application_updateWindows :: proc "c" (self: ^Application) {
diff --git a/core/sys/darwin/Foundation/NSString.odin b/core/sys/darwin/Foundation/NSString.odin
index d3c6c454d..b4918b3fb 100644
--- a/core/sys/darwin/Foundation/NSString.odin
+++ b/core/sys/darwin/Foundation/NSString.odin
@@ -23,12 +23,9 @@ StringEncoding :: enum UInteger {
WindowsCP1250 = 15,
ISO2022JP = 21,
MacOSRoman = 30,
-
UTF16 = Unicode,
-
UTF16BigEndian = 0x90000100,
UTF16LittleEndian = 0x94000100,
-
UTF32 = 0x8c000100,
UTF32BigEndian = 0x98000100,
UTF32LittleEndian = 0x9c000100,
@@ -49,12 +46,9 @@ StringCompareOption :: enum UInteger {
unichar :: distinct u16
-@(link_prefix="NS", default_calling_convention="c")
-foreign Foundation {
- StringFromClass :: proc(cls: Class) -> ^String ---
-}
-
AT :: MakeConstantString
+
+// CFString is 'toll-free bridged' with its Cocoa Foundation counterpart, NSString.
MakeConstantString :: proc "c" (#const c: cstring) -> ^String {
foreign Foundation {
__CFStringMakeConstantString :: proc "c" (c: cstring) -> ^String ---
@@ -62,6 +56,10 @@ MakeConstantString :: proc "c" (#const c: cstring) -> ^String {
return __CFStringMakeConstantString(c)
}
+@(link_prefix="NS", default_calling_convention="c")
+foreign Foundation {
+ StringFromClass :: proc(cls: Class) -> ^String ---
+}
@(objc_type=String, objc_name="alloc", objc_is_class_method=true)
String_alloc :: proc "c" () -> ^String {
@@ -73,7 +71,6 @@ String_init :: proc "c" (self: ^String) -> ^String {
return msgSend(^String, self, "init")
}
-
@(objc_type=String, objc_name="initWithString")
String_initWithString :: proc "c" (self: ^String, other: ^String) -> ^String {
return msgSend(^String, self, "initWithString:", other)
diff --git a/core/sys/darwin/Security/SecBase.odin b/core/sys/darwin/Security/SecBase.odin
new file mode 100644
index 000000000..9cc82d6f5
--- /dev/null
+++ b/core/sys/darwin/Security/SecBase.odin
@@ -0,0 +1,386 @@
+package Security
+
+OSStatus :: distinct i32
+
+errSec :: enum OSStatus {
+ Success = 0, // No error.
+ Unimplemented = -4, // Function or operation not implemented.
+ DiskFull = -34, // The disk is full.
+ IO = -36, // I/O error.
+ OpWr = -49, // File already open with with write permission.
+ Param = -50, // One or more parameters passed to a function were not valid.
+ WrPerm = -61, // Write permissions error.
+ Allocate = -108, // Failed to allocate memory.
+ UserCanceled = -128, // User canceled the operation.
+ BadReq = -909, // Bad parameter or invalid state for operation.
+ InternalComponent = -2070,
+ CoreFoundationUnknown = -4960,
+ MissingEntitlement, // A required entitlement isn't present.
+ RestrictedAPI, // Client is restricted and is not permitted to perform this operation.
+ NotAvailable = -25291, // No keychain is available. You may need to restart your computer.
+ ReadOnly = -25292, // This keychain cannot be modified.
+ AuthFailed = -25293, // The user name or passphrase you entered is not correct.
+ NoSuchKeychain = -25294, // The specified keychain could not be found.
+ InvalidKeychain = -25295, // The specified keychain is not a valid keychain file.
+ DuplicateKeychain = -25296, // A keychain with the same name already exists.
+ DuplicateCallback = -25297, // The specified callback function is already installed.
+ InvalidCallback = -25298, // The specified callback function is not valid.
+ DuplicateItem = -25299, // The specified item already exists in the keychain.
+ ItemNotFound = -25300, // The specified item could not be found in the keychain.
+ BufferTooSmall = -25301, // There is not enough memory available to use the specified item.
+ DataTooLarge = -25302, // This item contains information which is too large or in a format that cannot be displayed.
+ NoSuchAttr = -25303, // The specified attribute does not exist.
+ InvalidItemRef = -25304, // The specified item is no longer valid. It may have been deleted from the keychain.
+ InvalidSearchRef = -25305, // Unable to search the current keychain.
+ NoSuchClass = -25306, // The specified item does not appear to be a valid keychain item.
+ NoDefaultKeychain = -25307, // A default keychain could not be found.
+ InteractionNotAllowed = -25308, // User interaction is not allowed.
+ ReadOnlyAttr = -25309, // The specified attribute could not be modified.
+ WrongSecVersion = -25310, // This keychain was created by a different version of the system software and cannot be opened.
+ KeySizeNotAllowed = -25311, // This item specifies a key size which is too large or too small.
+ NoStorageModule = -25312, // A required component (data storage module) could not be loaded. You may need to restart your computer.
+ NoCertificateModule = -25313, // A required component (certificate module) could not be loaded. You may need to restart your computer.
+ NoPolicyModule = -25314, // A required component (policy module) could not be loaded. You may need to restart your computer.
+ InteractionRequired = -25315, // User interaction is required, but is currently not allowed.
+ DataNotAvailable = -25316, // The contents of this item cannot be retrieved.
+ DataNotModifiable = -25317, // The contents of this item cannot be modified.
+ CreateChainFailed = -25318, // One or more certificates required to validate this certificate cannot be found.
+ InvalidPrefsDomain = -25319, // The specified preferences domain is not valid.
+ InDarkWake = -25320, // In dark wake, no UI possible
+ ACLNotSimple = -25240, // The specified access control list is not in standard (simple) form.
+ PolicyNotFound = -25241, // The specified policy cannot be found.
+ InvalidTrustSetting = -25242, // The specified trust setting is invalid.
+ NoAccessForItem = -25243, // The specified item has no access control.
+ InvalidOwnerEdit = -25244, // Invalid attempt to change the owner of this item.
+ TrustNotAvailable = -25245, // No trust results are available.
+ UnsupportedFormat = -25256, // Import/Export format unsupported.
+ UnknownFormat = -25257, // Unknown format in import.
+ KeyIsSensitive = -25258, // Key material must be wrapped for export.
+ MultiplePrivKeys = -25259, // An attempt was made to import multiple private keys.
+ PassphraseRequired = -25260, // Passphrase is required for import/export.
+ InvalidPasswordRef = -25261, // The password reference was invalid.
+ InvalidTrustSettings = -25262, // The Trust Settings Record was corrupted.
+ NoTrustSettings = -25263, // No Trust Settings were found.
+ Pkcs12VerifyFailure = -25264, // MAC verification failed during PKCS12 import (wrong password?)
+ NotSigner = -26267, // A certificate was not signed by its proposed parent.
+ Decode = -26275, // Unable to decode the provided data.
+ ServiceNotAvailable = -67585, // The required service is not available.
+ InsufficientClientID = -67586, // The client ID is not correct.
+ DeviceReset = -67587, // A device reset has occurred.
+ DeviceFailed = -67588, // A device failure has occurred.
+ AppleAddAppACLSubject = -67589, // Adding an application ACL subject failed.
+ ApplePublicKeyIncomplete = -67590, // The public key is incomplete.
+ AppleSignatureMismatch = -67591, // A signature mismatch has occurred.
+ AppleInvalidKeyStartDate = -67592, // The specified key has an invalid start date.
+ AppleInvalidKeyEndDate = -67593, // The specified key has an invalid end date.
+ ConversionError = -67594, // A conversion error has occurred.
+ AppleSSLv2Rollback = -67595, // A SSLv2 rollback error has occurred.
+ QuotaExceeded = -67596, // The quota was exceeded.
+ FileTooBig = -67597, // The file is too big.
+ InvalidDatabaseBlob = -67598, // The specified database has an invalid blob.
+ InvalidKeyBlob = -67599, // The specified database has an invalid key blob.
+ IncompatibleDatabaseBlob = -67600, // The specified database has an incompatible blob.
+ IncompatibleKeyBlob = -67601, // The specified database has an incompatible key blob.
+ HostNameMismatch = -67602, // A host name mismatch has occurred.
+ UnknownCriticalExtensionFlag = -67603, // There is an unknown critical extension flag.
+ NoBasicConstraints = -67604, // No basic constraints were found.
+ NoBasicConstraintsCA = -67605, // No basic CA constraints were found.
+ InvalidAuthorityKeyID = -67606, // The authority key ID is not valid.
+ InvalidSubjectKeyID = -67607, // The subject key ID is not valid.
+ InvalidKeyUsageForPolicy = -67608, // The key usage is not valid for the specified policy.
+ InvalidExtendedKeyUsage = -67609, // The extended key usage is not valid.
+ InvalidIDLinkage = -67610, // The ID linkage is not valid.
+ PathLengthConstraintExceeded = -67611, // The path length constraint was exceeded.
+ InvalidRoot = -67612, // The root or anchor certificate is not valid.
+ CRLExpired = -67613, // The CRL has expired.
+ CRLNotValidYet = -67614, // The CRL is not yet valid.
+ CRLNotFound = -67615, // The CRL was not found.
+ CRLServerDown = -67616, // The CRL server is down.
+ CRLBadURI = -67617, // The CRL has a bad Uniform Resource Identifier.
+ UnknownCertExtension = -67618, // An unknown certificate extension was encountered.
+ UnknownCRLExtension = -67619, // An unknown CRL extension was encountered.
+ CRLNotTrusted = -67620, // The CRL is not trusted.
+ CRLPolicyFailed = -67621, // The CRL policy failed.
+ IDPFailure = -67622, // The issuing distribution point was not valid.
+ SMIMEEmailAddressesNotFound = -67623, // An email address mismatch was encountered.
+ SMIMEBadExtendedKeyUsage = -67624, // The appropriate extended key usage for SMIME was not found.
+ SMIMEBadKeyUsage = -67625, // The key usage is not compatible with SMIME.
+ SMIMEKeyUsageNotCritical = -67626, // The key usage extension is not marked as critical.
+ SMIMENoEmailAddress = -67627, // No email address was found in the certificate.
+ SMIMESubjAltNameNotCritical = -67628, // The subject alternative name extension is not marked as critical.
+ SSLBadExtendedKeyUsage = -67629, // The appropriate extended key usage for SSL was not found.
+ OCSPBadResponse = -67630, // The OCSP response was incorrect or could not be parsed.
+ OCSPBadRequest = -67631, // The OCSP request was incorrect or could not be parsed.
+ OCSPUnavailable = -67632, // OCSP service is unavailable.
+ OCSPStatusUnrecognized = -67633, // The OCSP server did not recognize this certificate.
+ EndOfData = -67634, // An end-of-data was detected.
+ IncompleteCertRevocationCheck = -67635, // An incomplete certificate revocation check occurred.
+ NetworkFailure = -67636, // A network failure occurred.
+ OCSPNotTrustedToAnchor = -67637, // The OCSP response was not trusted to a root or anchor certificate.
+ RecordModified = -67638, // The record was modified.
+ OCSPSignatureError = -67639, // The OCSP response had an invalid signature.
+ OCSPNoSigner = -67640, // The OCSP response had no signer.
+ OCSPResponderMalformedReq = -67641, // The OCSP responder was given a malformed request.
+ OCSPResponderInternalError = -67642, // The OCSP responder encountered an internal error.
+ OCSPResponderTryLater = -67643, // The OCSP responder is busy, try again later.
+ OCSPResponderSignatureRequired = -67644, // The OCSP responder requires a signature.
+ OCSPResponderUnauthorized = -67645, // The OCSP responder rejected this request as unauthorized.
+ OCSPResponseNonceMismatch = -67646, // The OCSP response nonce did not match the request.
+ CodeSigningBadCertChainLength = -67647, // Code signing encountered an incorrect certificate chain length.
+ CodeSigningNoBasicConstraints = -67648, // Code signing found no basic constraints.
+ CodeSigningBadPathLengthConstraint = -67649, // Code signing encountered an incorrect path length constraint.
+ CodeSigningNoExtendedKeyUsage = -67650, // Code signing found no extended key usage.
+ CodeSigningDevelopment = -67651, // Code signing indicated use of a development-only certificate.
+ ResourceSignBadCertChainLength = -67652, // Resource signing has encountered an incorrect certificate chain length.
+ ResourceSignBadExtKeyUsage = -67653, // Resource signing has encountered an error in the extended key usage.
+ TrustSettingDeny = -67654, // The trust setting for this policy was set to Deny.
+ InvalidSubjectName = -67655, // An invalid certificate subject name was encountered.
+ UnknownQualifiedCertStatement = -67656, // An unknown qualified certificate statement was encountered.
+ MobileMeRequestQueued = -67657,
+ MobileMeRequestRedirected = -67658,
+ MobileMeServerError = -67659,
+ MobileMeServerNotAvailable = -67660,
+ MobileMeServerAlreadyExists = -67661,
+ MobileMeServerServiceErr = -67662,
+ MobileMeRequestAlreadyPending = -67663,
+ MobileMeNoRequestPending = -67664,
+ MobileMeCSRVerifyFailure = -67665,
+ MobileMeFailedConsistencyCheck = -67666,
+ NotInitialized = -67667, // A function was called without initializing CSSM.
+ InvalidHandleUsage = -67668, // The CSSM handle does not match with the service type.
+ PVCReferentNotFound = -67669, // A reference to the calling module was not found in the list of authorized callers.
+ FunctionIntegrityFail = -67670, // A function address was not within the verified module.
+ InternalError = -67671, // An internal error has occurred.
+ MemoryError = -67672, // A memory error has occurred.
+ InvalidData = -67673, // Invalid data was encountered.
+ MDSError = -67674, // A Module Directory Service error has occurred.
+ InvalidPointer = -67675, // An invalid pointer was encountered.
+ SelfCheckFailed = -67676, // Self-check has failed.
+ FunctionFailed = -67677, // A function has failed.
+ ModuleManifestVerifyFailed = -67678, // A module manifest verification failure has occurred.
+ InvalidGUID = -67679, // An invalid GUID was encountered.
+ InvalidHandle = -67680, // An invalid handle was encountered.
+ InvalidDBList = -67681, // An invalid DB list was encountered.
+ InvalidPassthroughID = -67682, // An invalid passthrough ID was encountered.
+ InvalidNetworkAddress = -67683, // An invalid network address was encountered.
+ CRLAlreadySigned = -67684, // The certificate revocation list is already signed.
+ InvalidNumberOfFields = -67685, // An invalid number of fields were encountered.
+ VerificationFailure = -67686, // A verification failure occurred.
+ UnknownTag = -67687, // An unknown tag was encountered.
+ InvalidSignature = -67688, // An invalid signature was encountered.
+ InvalidName = -67689, // An invalid name was encountered.
+ InvalidCertificateRef = -67690, // An invalid certificate reference was encountered.
+ InvalidCertificateGroup = -67691, // An invalid certificate group was encountered.
+ TagNotFound = -67692, // The specified tag was not found.
+ InvalidQuery = -67693, // The specified query was not valid.
+ InvalidValue = -67694, // An invalid value was detected.
+ CallbackFailed = -67695, // A callback has failed.
+ ACLDeleteFailed = -67696, // An ACL delete operation has failed.
+ ACLReplaceFailed = -67697, // An ACL replace operation has failed.
+ ACLAddFailed = -67698, // An ACL add operation has failed.
+ ACLChangeFailed = -67699, // An ACL change operation has failed.
+ InvalidAccessCredentials = -67700, // Invalid access credentials were encountered.
+ InvalidRecord = -67701, // An invalid record was encountered.
+ InvalidACL = -67702, // An invalid ACL was encountered.
+ InvalidSampleValue = -67703, // An invalid sample value was encountered.
+ IncompatibleVersion = -67704, // An incompatible version was encountered.
+ PrivilegeNotGranted = -67705, // The privilege was not granted.
+ InvalidScope = -67706, // An invalid scope was encountered.
+ PVCAlreadyConfigured = -67707, // The PVC is already configured.
+ InvalidPVC = -67708, // An invalid PVC was encountered.
+ EMMLoadFailed = -67709, // The EMM load has failed.
+ EMMUnloadFailed = -67710, // The EMM unload has failed.
+ AddinLoadFailed = -67711, // The add-in load operation has failed.
+ InvalidKeyRef = -67712, // An invalid key was encountered.
+ InvalidKeyHierarchy = -67713, // An invalid key hierarchy was encountered.
+ AddinUnloadFailed = -67714, // The add-in unload operation has failed.
+ LibraryReferenceNotFound = -67715, // A library reference was not found.
+ InvalidAddinFunctionTable = -67716, // An invalid add-in function table was encountered.
+ InvalidServiceMask = -67717, // An invalid service mask was encountered.
+ ModuleNotLoaded = -67718, // A module was not loaded.
+ InvalidSubServiceID = -67719, // An invalid subservice ID was encountered.
+ AttributeNotInContext = -67720, // An attribute was not in the context.
+ ModuleManagerInitializeFailed = -67721, // A module failed to initialize.
+ ModuleManagerNotFound = -67722, // A module was not found.
+ EventNotificationCallbackNotFound = -67723, // An event notification callback was not found.
+ InputLengthError = -67724, // An input length error was encountered.
+ OutputLengthError = -67725, // An output length error was encountered.
+ PrivilegeNotSupported = -67726, // The privilege is not supported.
+ DeviceError = -67727, // A device error was encountered.
+ AttachHandleBusy = -67728, // The CSP handle was busy.
+ NotLoggedIn = -67729, // You are not logged in.
+ AlgorithmMismatch = -67730, // An algorithm mismatch was encountered.
+ KeyUsageIncorrect = -67731, // The key usage is incorrect.
+ KeyBlobTypeIncorrect = -67732, // The key blob type is incorrect.
+ KeyHeaderInconsistent = -67733, // The key header is inconsistent.
+ UnsupportedKeyFormat = -67734, // The key header format is not supported.
+ UnsupportedKeySize = -67735, // The key size is not supported.
+ InvalidKeyUsageMask = -67736, // The key usage mask is not valid.
+ UnsupportedKeyUsageMask = -67737, // The key usage mask is not supported.
+ InvalidKeyAttributeMask = -67738, // The key attribute mask is not valid.
+ UnsupportedKeyAttributeMask = -67739, // The key attribute mask is not supported.
+ InvalidKeyLabel = -67740, // The key label is not valid.
+ UnsupportedKeyLabel = -67741, // The key label is not supported.
+ InvalidKeyFormat = -67742, // The key format is not valid.
+ UnsupportedVectorOfBuffers = -67743, // The vector of buffers is not supported.
+ InvalidInputVector = -67744, // The input vector is not valid.
+ InvalidOutputVector = -67745, // The output vector is not valid.
+ InvalidContext = -67746, // An invalid context was encountered.
+ InvalidAlgorithm = -67747, // An invalid algorithm was encountered.
+ InvalidAttributeKey = -67748, // A key attribute was not valid.
+ MissingAttributeKey = -67749, // A key attribute was missing.
+ InvalidAttributeInitVector = -67750, // An init vector attribute was not valid.
+ MissingAttributeInitVector = -67751, // An init vector attribute was missing.
+ InvalidAttributeSalt = -67752, // A salt attribute was not valid.
+ MissingAttributeSalt = -67753, // A salt attribute was missing.
+ InvalidAttributePadding = -67754, // A padding attribute was not valid.
+ MissingAttributePadding = -67755, // A padding attribute was missing.
+ InvalidAttributeRandom = -67756, // A random number attribute was not valid.
+ MissingAttributeRandom = -67757, // A random number attribute was missing.
+ InvalidAttributeSeed = -67758, // A seed attribute was not valid.
+ MissingAttributeSeed = -67759, // A seed attribute was missing.
+ InvalidAttributePassphrase = -67760, // A passphrase attribute was not valid.
+ MissingAttributePassphrase = -67761, // A passphrase attribute was missing.
+ InvalidAttributeKeyLength = -67762, // A key length attribute was not valid.
+ MissingAttributeKeyLength = -67763, // A key length attribute was missing.
+ InvalidAttributeBlockSize = -67764, // A block size attribute was not valid.
+ MissingAttributeBlockSize = -67765, // A block size attribute was missing.
+ InvalidAttributeOutputSize = -67766, // An output size attribute was not valid.
+ MissingAttributeOutputSize = -67767, // An output size attribute was missing.
+ InvalidAttributeRounds = -67768, // The number of rounds attribute was not valid.
+ MissingAttributeRounds = -67769, // The number of rounds attribute was missing.
+ InvalidAlgorithmParms = -67770, // An algorithm parameters attribute was not valid.
+ MissingAlgorithmParms = -67771, // An algorithm parameters attribute was missing.
+ InvalidAttributeLabel = -67772, // A label attribute was not valid.
+ MissingAttributeLabel = -67773, // A label attribute was missing.
+ InvalidAttributeKeyType = -67774, // A key type attribute was not valid.
+ MissingAttributeKeyType = -67775, // A key type attribute was missing.
+ InvalidAttributeMode = -67776, // A mode attribute was not valid.
+ MissingAttributeMode = -67777, // A mode attribute was missing.
+ InvalidAttributeEffectiveBits = -67778, // An effective bits attribute was not valid.
+ MissingAttributeEffectiveBits = -67779, // An effective bits attribute was missing.
+ InvalidAttributeStartDate = -67780, // A start date attribute was not valid.
+ MissingAttributeStartDate = -67781, // A start date attribute was missing.
+ InvalidAttributeEndDate = -67782, // An end date attribute was not valid.
+ MissingAttributeEndDate = -67783, // An end date attribute was missing.
+ InvalidAttributeVersion = -67784, // A version attribute was not valid.
+ MissingAttributeVersion = -67785, // A version attribute was missing.
+ InvalidAttributePrime = -67786, // A prime attribute was not valid.
+ MissingAttributePrime = -67787, // A prime attribute was missing.
+ InvalidAttributeBase = -67788, // A base attribute was not valid.
+ MissingAttributeBase = -67789, // A base attribute was missing.
+ InvalidAttributeSubprime = -67790, // A subprime attribute was not valid.
+ MissingAttributeSubprime = -67791, // A subprime attribute was missing.
+ InvalidAttributeIterationCount = -67792, // An iteration count attribute was not valid.
+ MissingAttributeIterationCount = -67793, // An iteration count attribute was missing.
+ InvalidAttributeDLDBHandle = -67794, // A database handle attribute was not valid.
+ MissingAttributeDLDBHandle = -67795, // A database handle attribute was missing.
+ InvalidAttributeAccessCredentials = -67796, // An access credentials attribute was not valid.
+ MissingAttributeAccessCredentials = -67797, // An access credentials attribute was missing.
+ InvalidAttributePublicKeyFormat = -67798, // A public key format attribute was not valid.
+ MissingAttributePublicKeyFormat = -67799, // A public key format attribute was missing.
+ InvalidAttributePrivateKeyFormat = -67800, // A private key format attribute was not valid.
+ MissingAttributePrivateKeyFormat = -67801, // A private key format attribute was missing.
+ InvalidAttributeSymmetricKeyFormat = -67802, // A symmetric key format attribute was not valid.
+ MissingAttributeSymmetricKeyFormat = -67803, // A symmetric key format attribute was missing.
+ InvalidAttributeWrappedKeyFormat = -67804, // A wrapped key format attribute was not valid.
+ MissingAttributeWrappedKeyFormat = -67805, // A wrapped key format attribute was missing.
+ StagedOperationInProgress = -67806, // A staged operation is in progress.
+ StagedOperationNotStarted = -67807, // A staged operation was not started.
+ VerifyFailed = -67808, // A cryptographic verification failure has occurred.
+ QuerySizeUnknown = -67809, // The query size is unknown.
+ BlockSizeMismatch = -67810, // A block size mismatch occurred.
+ PublicKeyInconsistent = -67811, // The public key was inconsistent.
+ DeviceVerifyFailed = -67812, // A device verification failure has occurred.
+ InvalidLoginName = -67813, // An invalid login name was detected.
+ AlreadyLoggedIn = -67814, // The user is already logged in.
+ InvalidDigestAlgorithm = -67815, // An invalid digest algorithm was detected.
+ InvalidCRLGroup = -67816, // An invalid CRL group was detected.
+ CertificateCannotOperate = -67817, // The certificate cannot operate.
+ CertificateExpired = -67818, // An expired certificate was detected.
+ CertificateNotValidYet = -67819, // The certificate is not yet valid.
+ CertificateRevoked = -67820, // The certificate was revoked.
+ CertificateSuspended = -67821, // The certificate was suspended.
+ InsufficientCredentials = -67822, // Insufficient credentials were detected.
+ InvalidAction = -67823, // The action was not valid.
+ InvalidAuthority = -67824, // The authority was not valid.
+ VerifyActionFailed = -67825, // A verify action has failed.
+ InvalidCertAuthority = -67826, // The certificate authority was not valid.
+ InvalidCRLAuthority = -67827, // The CRL authority was not valid.
+ InvalidCRLEncoding = -67828, // The CRL encoding was not valid.
+ InvalidCRLType = -67829, // The CRL type was not valid.
+ InvalidCRL = -67830, // The CRL was not valid.
+ InvalidFormType = -67831, // The form type was not valid.
+ InvalidID = -67832, // The ID was not valid.
+ InvalidIdentifier = -67833, // The identifier was not valid.
+ InvalidIndex = -67834, // The index was not valid.
+ InvalidPolicyIdentifiers = -67835, // The policy identifiers are not valid.
+ InvalidTimeString = -67836, // The time specified was not valid.
+ InvalidReason = -67837, // The trust policy reason was not valid.
+ InvalidRequestInputs = -67838, // The request inputs are not valid.
+ InvalidResponseVector = -67839, // The response vector was not valid.
+ InvalidStopOnPolicy = -67840, // The stop-on policy was not valid.
+ InvalidTuple = -67841, // The tuple was not valid.
+ MultipleValuesUnsupported = -67842, // Multiple values are not supported.
+ NotTrusted = -67843, // The certificate was not trusted.
+ NoDefaultAuthority = -67844, // No default authority was detected.
+ RejectedForm = -67845, // The trust policy had a rejected form.
+ RequestLost = -67846, // The request was lost.
+ RequestRejected = -67847, // The request was rejected.
+ UnsupportedAddressType = -67848, // The address type is not supported.
+ UnsupportedService = -67849, // The service is not supported.
+ InvalidTupleGroup = -67850, // The tuple group was not valid.
+ InvalidBaseACLs = -67851, // The base ACLs are not valid.
+ InvalidTupleCredentials = -67852, // The tuple credentials are not valid.
+ InvalidEncoding = -67853, // The encoding was not valid.
+ InvalidValidityPeriod = -67854, // The validity period was not valid.
+ InvalidRequestor = -67855, // The requestor was not valid.
+ RequestDescriptor = -67856, // The request descriptor was not valid.
+ InvalidBundleInfo = -67857, // The bundle information was not valid.
+ InvalidCRLIndex = -67858, // The CRL index was not valid.
+ NoFieldValues = -67859, // No field values were detected.
+ UnsupportedFieldFormat = -67860, // The field format is not supported.
+ UnsupportedIndexInfo = -67861, // The index information is not supported.
+ UnsupportedLocality = -67862, // The locality is not supported.
+ UnsupportedNumAttributes = -67863, // The number of attributes is not supported.
+ UnsupportedNumIndexes = -67864, // The number of indexes is not supported.
+ UnsupportedNumRecordTypes = -67865, // The number of record types is not supported.
+ FieldSpecifiedMultiple = -67866, // Too many fields were specified.
+ IncompatibleFieldFormat = -67867, // The field format was incompatible.
+ InvalidParsingModule = -67868, // The parsing module was not valid.
+ DatabaseLocked = -67869, // The database is locked.
+ DatastoreIsOpen = -67870, // The data store is open.
+ MissingValue = -67871, // A missing value was detected.
+ UnsupportedQueryLimits = -67872, // The query limits are not supported.
+ UnsupportedNumSelectionPreds = -67873, // The number of selection predicates is not supported.
+ UnsupportedOperator = -67874, // The operator is not supported.
+ InvalidDBLocation = -67875, // The database location is not valid.
+ InvalidAccessRequest = -67876, // The access request is not valid.
+ InvalidIndexInfo = -67877, // The index information is not valid.
+ InvalidNewOwner = -67878, // The new owner is not valid.
+ InvalidModifyMode = -67879, // The modify mode is not valid.
+ MissingRequiredExtension = -67880, // A required certificate extension is missing.
+ ExtendedKeyUsageNotCritical = -67881, // The extended key usage extension was not marked critical.
+ TimestampMissing = -67882, // A timestamp was expected but was not found.
+ TimestampInvalid = -67883, // The timestamp was not valid.
+ TimestampNotTrusted = -67884, // The timestamp was not trusted.
+ TimestampServiceNotAvailable = -67885, // The timestamp service is not available.
+ TimestampBadAlg = -67886, // An unrecognized or unsupported Algorithm Identifier in timestamp.
+ TimestampBadRequest = -67887, // The timestamp transaction is not permitted or supported.
+ TimestampBadDataFormat = -67888, // The timestamp data submitted has the wrong format.
+ TimestampTimeNotAvailable = -67889, // The time source for the Timestamp Authority is not available.
+ TimestampUnacceptedPolicy = -67890, // The requested policy is not supported by the Timestamp Authority.
+ TimestampUnacceptedExtension = -67891, // The requested extension is not supported by the Timestamp Authority.
+ TimestampAddInfoNotAvailable = -67892, // The additional information requested is not available.
+ TimestampSystemFailure = -67893, // The timestamp request cannot be handled due to system failure.
+ SigningTimeMissing = -67894, // A signing time was expected but was not found.
+ TimestampRejection = -67895, // A timestamp transaction was rejected.
+ TimestampWaiting = -67896, // A timestamp transaction is waiting.
+ TimestampRevocationWarning = -67897, // A timestamp authority revocation warning was issued.
+ TimestampRevocationNotification = -67898, // A timestamp authority revocation notification was issued.
+ CertificatePolicyNotAllowed = -67899, // The requested policy is not allowed for this certificate.
+ CertificateNameNotAllowed = -67900, // The requested name is not allowed for this certificate.
+ CertificateValidityPeriodTooLong = -67901, // The validity period in the certificate exceeds the maximum allowed.
+ CertificateIsCA = -67902, // The verified certificate is a CA rather than an end-entity.
+ CertificateDuplicateExtension = -67903, // The certificate contains multiple extensions with the same extension ID.
+}
diff --git a/core/sys/darwin/Security/SecRandom.odin b/core/sys/darwin/Security/SecRandom.odin
new file mode 100644
index 000000000..0527baca1
--- /dev/null
+++ b/core/sys/darwin/Security/SecRandom.odin
@@ -0,0 +1,19 @@
+package Security
+
+import CF "core:sys/darwin/CoreFoundation"
+
+foreign import Security "system:Security.framework"
+
+// A reference to a random number generator.
+RandomRef :: distinct rawptr
+
+@(link_prefix="Sec", default_calling_convention="c")
+foreign Security {
+ // Default random ref for /dev/random. Synonym for nil.
+ @(link_name="kSecRandomDefault") kSecRandomDefault: RandomRef
+
+ // Generates an array of cryptographically secure random bytes.
+ RandomCopyBytes :: proc(rnd: RandomRef = kSecRandomDefault, count: uint, bytes: [^]byte) -> errSec ---
+
+ CopyErrorMessageString :: proc(status: errSec, reserved: rawptr = nil) -> CF.String ---
+} \ No newline at end of file
diff --git a/core/sys/darwin/core_foundation.odin b/core/sys/darwin/core_foundation.odin
deleted file mode 100644
index 325122216..000000000
--- a/core/sys/darwin/core_foundation.odin
+++ /dev/null
@@ -1,98 +0,0 @@
-//+build darwin
-package darwin
-
-import "base:runtime"
-
-foreign import core_foundation "system:CoreFoundation.framework"
-
-CFTypeRef :: distinct rawptr
-
-CFStringRef :: distinct CFTypeRef
-
-CFIndex :: int
-
-CFRange :: struct {
- location: CFIndex,
- length: CFIndex,
-}
-
-CFStringEncoding :: enum u32 {
- ASCII = 1,
- NEXTSTEP = 2,
- JapaneseEUC = 3,
- UTF8 = 4,
- ISOLatin1 = 5,
- Symbol = 6,
- NonLossyASCII = 7,
- ShiftJIS = 8,
- ISOLatin2 = 9,
- Unicode = 10,
- WindowsCP1251 = 11,
- WindowsCP1252 = 12,
- WindowsCP1253 = 13,
- WindowsCP1254 = 14,
- WindowsCP1250 = 15,
- ISO2022JP = 21,
- MacOSRoman = 30,
-
- UTF16 = Unicode,
-
- UTF16BigEndian = 0x90000100,
- UTF16LittleEndian = 0x94000100,
-
- UTF32 = 0x8c000100,
- UTF32BigEndian = 0x98000100,
- UTF32LittleEndian = 0x9c000100,
-}
-
-foreign core_foundation {
- // Copies the character contents of a string to a local C string buffer after converting the characters to a given encoding.
- CFStringGetCString :: proc(theString: CFStringRef, buffer: [^]byte, bufferSize: CFIndex, encoding: CFStringEncoding) -> Bool ---
-
- // Returns the number (in terms of UTF-16 code pairs) of Unicode characters in a string.
- CFStringGetLength :: proc(theString: CFStringRef) -> CFIndex ---
-
- // Returns the maximum number of bytes a string of a specified length (in Unicode characters) will take up if encoded in a specified encoding.
- CFStringGetMaximumSizeForEncoding :: proc(length: CFIndex, encoding: CFStringEncoding) -> CFIndex ---
-
- // Fetches a range of the characters from a string into a byte buffer after converting the characters to a specified encoding.
- CFStringGetBytes :: proc(
- thestring: CFStringRef,
- range: CFRange,
- encoding: CFStringEncoding,
- lossByte: u8,
- isExternalRepresentation: Bool,
- buffer: [^]byte,
- maxBufLen: CFIndex,
- usedBufLen: ^CFIndex,
- ) -> CFIndex ---
-
- // Releases a Core Foundation object.
- @(link_name="CFRelease")
- _CFRelease :: proc(cf: CFTypeRef) ---
-}
-
-// Releases a Core Foundation object.
-CFRelease :: proc {
- CFReleaseString,
-}
-
-// Releases a Core Foundation string.
-CFReleaseString :: #force_inline proc(theString: CFStringRef) {
- _CFRelease(CFTypeRef(theString))
-}
-
-CFStringCopyToOdinString :: proc(theString: CFStringRef, allocator := context.allocator) -> (str: string, ok: bool) #optional_ok {
- length := CFStringGetLength(theString)
- max := CFStringGetMaximumSizeForEncoding(length, .UTF8)
-
- buf, err := make([]byte, max, allocator)
- if err != nil { return }
-
- raw_str := runtime.Raw_String{
- data = raw_data(buf),
- }
- CFStringGetBytes(theString, {0, length}, .UTF8, 0, false, raw_data(buf), max, &raw_str.len)
-
- return transmute(string)raw_str, true
-}
diff --git a/core/sys/darwin/security.odin b/core/sys/darwin/security.odin
deleted file mode 100644
index 0c58260e7..000000000
--- a/core/sys/darwin/security.odin
+++ /dev/null
@@ -1,26 +0,0 @@
-//+build darwin
-package darwin
-
-foreign import security "system:Security.framework"
-
-// A reference to a random number generator.
-SecRandomRef :: distinct rawptr
-
-OSStatus :: distinct i32
-
-errSec :: enum OSStatus {
- Success = 0, // No error.
- Unimplemented = -4, // Function or operation not implemented.
-
- // Many more...
-}
-
-foreign security {
- // Synonym for nil, uses a cryptographically secure random number generator.
- kSecRandomDefault: SecRandomRef
-
- // Generates an array of cryptographically secure random bytes.
- SecRandomCopyBytes :: proc(rnd: SecRandomRef = kSecRandomDefault, count: uint, bytes: [^]byte) -> errSec ---
-
- SecCopyErrorMessageString :: proc(status: errSec, reserved: rawptr = nil) -> CFStringRef ---
-}
diff --git a/core/sys/info/doc.odin b/core/sys/info/doc.odin
index 81c3fb342..15af0d4b3 100644
--- a/core/sys/info/doc.odin
+++ b/core/sys/info/doc.odin
@@ -1,78 +1,78 @@
/*
- Copyright 2022 Jeroen van Rijn <nom@duclavier.com>.
- Made available under Odin's BSD-3 license.
+Copyright 2022 Jeroen van Rijn <nom@duclavier.com>.
+Made available under Odin's BSD-3 license.
- Package `core:sys/info` gathers system information on:
- Windows, Linux, macOS, FreeBSD & OpenBSD.
+Package `core:sys/info` gathers system information on:
+Windows, Linux, macOS, FreeBSD & OpenBSD.
- Simply import the package and you'll have access to the OS version, RAM amount
- and CPU information.
+Simply import the package and you'll have access to the OS version, RAM amount
+and CPU information.
- On Windows, GPUs will also be enumerated using the registry.
+On Windows, GPUs will also be enumerated using the registry.
- CPU feature flags can be tested against `cpu_features`, where applicable, e.g.
- `if .aes in si.aes { ... }`
-*/
-//+build ignore
-package sysinfo
+CPU feature flags can be tested against `cpu_features`, where applicable, e.g.
+`if .aes in si.aes { ... }`
+
+Example:
+
+ import "core:fmt"
+ import si "core:sys/info"
-import "core:fmt"
-import si "core:sys/info"
+ main :: proc() {
+ fmt.printf("Odin: %v\n", ODIN_VERSION)
+ fmt.printf("OS: %v\n", si.os_version.as_string)
+ fmt.printf("OS: %#v\n", si.os_version)
+ fmt.printf("CPU: %v\n", si.cpu_name)
+ fmt.printf("RAM: %v MiB\n", si.ram.total_ram / 1024 / 1024)
-main :: proc() {
- fmt.printf("Odin: %v\n", ODIN_VERSION)
- fmt.printf("OS: %v\n", si.os_version.as_string)
- fmt.printf("OS: %#v\n", si.os_version)
- fmt.printf("CPU: %v\n", si.cpu_name)
- fmt.printf("RAM: %v MiB\n", si.ram.total_ram / 1024 / 1024)
+ fmt.println()
+ for gpu, i in si.gpus {
+ fmt.printf("GPU #%v:\n", i)
+ fmt.printf("\tVendor: %v\n", gpu.vendor_name)
+ fmt.printf("\tModel: %v\n", gpu.model_name)
+ fmt.printf("\tVRAM: %v MiB\n", gpu.total_ram / 1024 / 1024)
+ }
+ }
- fmt.println()
- for gpu, i in si.gpus {
- fmt.printf("GPU #%v:\n", i)
- fmt.printf("\tVendor: %v\n", gpu.vendor_name)
- fmt.printf("\tModel: %v\n", gpu.model_name)
- fmt.printf("\tVRAM: %v MiB\n", gpu.total_ram / 1024 / 1024)
+- Example Windows output:
+
+ Odin: dev-2022-09
+ OS: Windows 10 Professional (version: 20H2), build: 19042.1466
+ OS: OS_Version{
+ platform = "Windows",
+ major = 10,
+ minor = 0,
+ patch = 0,
+ build = [
+ 19042,
+ 1466,
+ ],
+ version = "20H2",
+ as_string = "Windows 10 Professional (version: 20H2), build: 19042.1466",
}
-}
+ CPU: AMD Ryzen 7 1800X Eight-Core Processor
+ RAM: 65469 MiB
+ GPU #0:
+ Vendor: Advanced Micro Devices, Inc.
+ Model: Radeon RX Vega
+ VRAM: 8176 MiB
-/*
- Example Windows output:
- Odin: dev-2022-09
- OS: Windows 10 Professional (version: 20H2), build: 19042.1466
- OS: OS_Version{
- platform = "Windows",
- major = 10,
- minor = 0,
+- Example macOS output:
+
+ ODIN: dev-2022-09
+ OS: OS_Version{
+ platform = "MacOS",
+ major = 21,
+ minor = 5,
patch = 0,
build = [
- 19042,
- 1466,
+ 0,
+ 0,
],
- version = "20H2",
- as_string = "Windows 10 Professional (version: 20H2), build: 19042.1466",
- }
- CPU: AMD Ryzen 7 1800X Eight-Core Processor
- RAM: 65469 MiB
-
- GPU #0:
- Vendor: Advanced Micro Devices, Inc.
- Model: Radeon RX Vega
- VRAM: 8176 MiB
-
- Example macOS output:
- ODIN: dev-2022-09
- OS: OS_Version{
- platform = "MacOS",
- major = 21,
- minor = 5,
- patch = 0,
- build = [
- 0,
- 0,
- ],
- version = "21F79",
- as_string = "macOS Monterey 12.4 (build 21F79, kernel 21.5.0)",
- }
- CPU: Intel(R) Core(TM) i5-7360U CPU @ 2.30GHz
- RAM: 8192 MiB
+ version = "21F79",
+ as_string = "macOS Monterey 12.4 (build 21F79, kernel 21.5.0)",
+ }
+ CPU: Intel(R) Core(TM) i5-7360U CPU @ 2.30GHz
+ RAM: 8192 MiB
*/
+package sysinfo
diff --git a/core/sys/linux/sys.odin b/core/sys/linux/sys.odin
index 869ce88e3..63fb3b776 100644
--- a/core/sys/linux/sys.odin
+++ b/core/sys/linux/sys.odin
@@ -40,10 +40,10 @@ write :: proc "contextless" (fd: Fd, buf: []u8) -> (int, Errno) {
*/
open :: proc "contextless" (name: cstring, flags: Open_Flags, mode: Mode = {}) -> (Fd, Errno) {
when ODIN_ARCH == .arm64 {
- ret := syscall(SYS_openat, AT_FDCWD, transmute(uintptr) name, transmute(u32) mode)
+ ret := syscall(SYS_openat, AT_FDCWD, transmute(uintptr) name, transmute(u32) flags, transmute(u32) mode)
return errno_unwrap(ret, Fd)
} else {
- ret := syscall(SYS_open, transmute(uintptr) name, transmute(u32) mode)
+ ret := syscall(SYS_open, transmute(uintptr) name, transmute(u32) flags, transmute(u32) mode)
return errno_unwrap(ret, Fd)
}
}
@@ -91,10 +91,10 @@ stat :: proc "contextless" (filename: cstring, stat: ^Stat) -> (Errno) {
*/
fstat :: proc "contextless" (fd: Fd, stat: ^Stat) -> (Errno) {
when size_of(int) == 8 {
- ret := syscall(SYS_fstat, stat)
+ ret := syscall(SYS_fstat, cast(i32) fd, stat)
return Errno(-ret)
} else {
- ret := syscall(SYS_fstat64, stat)
+ ret := syscall(SYS_fstat64, cast(i32) fd, stat)
return Errno(-ret)
}
}
@@ -787,8 +787,8 @@ exit :: proc "contextless" (code: i32) -> ! {
Wait for the process to change state.
Available since Linux 1.0.
*/
-wait4 :: proc "contextless" (pid: Pid, status: ^u32, options: Wait_Options) -> (Pid, Errno) {
- ret := syscall(SYS_wait4, pid, status, transmute(u32) options)
+wait4 :: proc "contextless" (pid: Pid, status: ^u32, options: Wait_Options, rusage: ^RUsage) -> (Pid, Errno) {
+ ret := syscall(SYS_wait4, pid, status, transmute(u32) options, rusage)
return errno_unwrap(ret, Pid)
}
diff --git a/core/text/edit/text_edit.odin b/core/text/edit/text_edit.odin
index caccb6be8..6f21c9860 100644
--- a/core/text/edit/text_edit.odin
+++ b/core/text/edit/text_edit.odin
@@ -137,6 +137,9 @@ clear_all :: proc(s: ^State) -> (cleared: bool) {
// push current text state to the wanted undo|redo stack
undo_state_push :: proc(s: ^State, undo: ^[dynamic]^Undo_State) -> mem.Allocator_Error {
+ if s.builder != nil {
+ return nil
+ }
text := string(s.builder.buf[:])
item := (^Undo_State)(mem.alloc(size_of(Undo_State) + len(text), align_of(Undo_State), s.undo_text_allocator) or_return)
item.selection = s.selection
@@ -154,7 +157,7 @@ undo :: proc(s: ^State, undo, redo: ^[dynamic]^Undo_State) {
undo_state_push(s, redo)
item := pop(undo)
s.selection = item.selection
- #no_bounds_check {
+ #no_bounds_check if s.builder != nil {
strings.builder_reset(s.builder)
strings.write_string(s.builder, string(item.text[:item.len]))
}
@@ -224,13 +227,17 @@ input_rune :: proc(s: ^State, r: rune) {
// insert a single rune into the edit state - deletes the current selection
insert :: proc(s: ^State, at: int, text: string) {
undo_check(s)
- inject_at(&s.builder.buf, at, text)
+ if s.builder != nil {
+ inject_at(&s.builder.buf, at, text)
+ }
}
// remove the wanted range withing, usually the selection within byte indices
remove :: proc(s: ^State, lo, hi: int) {
undo_check(s)
- remove_range(&s.builder.buf, lo, hi)
+ if s.builder != nil {
+ remove_range(&s.builder.buf, lo, hi)
+ }
}
// true if selection head and tail dont match and form a selection of multiple characters
@@ -244,8 +251,8 @@ has_selection :: proc(s: ^State) -> bool {
sorted_selection :: proc(s: ^State) -> (lo, hi: int) {
lo = min(s.selection[0], s.selection[1])
hi = max(s.selection[0], s.selection[1])
- lo = clamp(lo, 0, len(s.builder.buf))
- hi = clamp(hi, 0, len(s.builder.buf))
+ lo = clamp(lo, 0, len(s.builder.buf) if s.builder != nil else 0)
+ hi = clamp(hi, 0, len(s.builder.buf) if s.builder != nil else 0)
return
}
@@ -265,7 +272,10 @@ translate_position :: proc(s: ^State, t: Translation) -> int {
return b == ' ' || b == '\t' || b == '\n'
}
- buf := s.builder.buf[:]
+ buf: []byte
+ if s.builder != nil {
+ buf = s.builder.buf[:]
+ }
pos := clamp(s.selection[0], 0, len(buf))
switch t {
@@ -352,7 +362,10 @@ delete_to :: proc(s: ^State, t: Translation) {
// return the currently selected text
current_selected_text :: proc(s: ^State) -> string {
lo, hi := sorted_selection(s)
- return string(s.builder.buf[lo:hi])
+ if s.builder != nil {
+ return string(s.builder.buf[lo:hi])
+ }
+ return ""
}
// copy & delete the current selection when copy() succeeds
@@ -431,7 +444,7 @@ perform_command :: proc(s: ^State, cmd: Command) {
case .Cut: cut(s)
case .Copy: copy(s)
case .Paste: paste(s)
- case .Select_All: s.selection = {len(s.builder.buf), 0}
+ case .Select_All: s.selection = {len(s.builder.buf) if s.builder != nil else 0, 0}
case .Backspace: delete_to(s, .Left)
case .Delete: delete_to(s, .Right)
case .Delete_Word_Left: delete_to(s, .Word_Left)
diff --git a/core/text/i18n/doc.odin b/core/text/i18n/doc.odin
index ef619451e..54bf8b80f 100644
--- a/core/text/i18n/doc.odin
+++ b/core/text/i18n/doc.odin
@@ -1,111 +1,106 @@
-//+build ignore
-package i18n
/*
- The i18n package is flexible and easy to use.
+The `i18n` package is flexible and easy to use.
- It has one call to get a translation: `get`, which the user can alias into something like `T`.
+It has one call to get a translation: `get`, which the user can alias into something like `T`.
- `get`, referred to as `T` here, has a few different signatures.
- All of them will return the key if the entry can't be found in the active translation catalog.
+`get`, referred to as `T` here, has a few different signatures.
+All of them will return the key if the entry can't be found in the active translation catalog.
- - `T(key)` returns the translation of `key`.
- - `T(key, n)` returns a pluralized translation of `key` according to value `n`.
+- `T(key)` returns the translation of `key`.
+- `T(key, n)` returns a pluralized translation of `key` according to value `n`.
- - `T(section, key)` returns the translation of `key` in `section`.
- - `T(section, key, n)` returns a pluralized translation of `key` in `section` according to value `n`.
+- `T(section, key)` returns the translation of `key` in `section`.
+- `T(section, key, n)` returns a pluralized translation of `key` in `section` according to value `n`.
- By default lookup take place in the global `i18n.ACTIVE` catalog for ease of use.
- If you want to override which translation to use, for example in a language preview dialog, you can use the following:
+By default lookup take place in the global `i18n.ACTIVE` catalog for ease of use.
+If you want to override which translation to use, for example in a language preview dialog, you can use the following:
- - `T(key, n, catalog)` returns the pluralized version of `key` from explictly supplied catalog.
- - `T(section, key, n, catalog)` returns the pluralized version of `key` in `section` from explictly supplied catalog.
+- `T(key, n, catalog)` returns the pluralized version of `key` from explictly supplied catalog.
+- `T(section, key, n, catalog)` returns the pluralized version of `key` in `section` from explictly supplied catalog.
- If a catalog has translation contexts or sections, then ommitting it in the above calls looks up in section "".
+If a catalog has translation contexts or sections, then omitting it in the above calls looks up in section "".
- The default pluralization rule is n != 1, which is to say that passing n == 1 (or not passing n) returns the singular form.
- Passing n != 1 returns plural form 1.
+The default pluralization rule is n != 1, which is to say that passing n == 1 (or not passing n) returns the singular form.
+Passing n != 1 returns plural form 1.
- Should a language not conform to this rule, you can pass a pluralizer procedure to the catalog parser.
- This is a procedure that maps an integer to an integer, taking a value and returning which plural slot should be used.
+Should a language not conform to this rule, you can pass a pluralizer procedure to the catalog parser.
+This is a procedure that maps an integer to an integer, taking a value and returning which plural slot should be used.
- You can also assign it to a loaded catalog after parsing, of course.
+You can also assign it to a loaded catalog after parsing, of course.
- Some code examples follow.
-*/
+Example:
-/*
-```cpp
-import "core:fmt"
-import "core:text/i18n"
-
-T :: i18n.get
-
-mo :: proc() {
- using fmt
-
- err: i18n.Error
-
- /*
- Parse MO file and set it as the active translation so we can omit `get`'s "catalog" parameter.
- */
- i18n.ACTIVE, err = i18n.parse_mo(#load("translations/nl_NL.mo"))
- defer i18n.destroy()
-
- if err != .None { return }
-
- /*
- These are in the .MO catalog.
- */
- println("-----")
- println(T(""))
- println("-----")
- println(T("There are 69,105 leaves here."))
- println("-----")
- println(T("Hellope, World!"))
- println("-----")
- // We pass 1 into `T` to get the singular format string, then 1 again into printf.
- printf(T("There is %d leaf.\n", 1), 1)
- // We pass 42 into `T` to get the plural format string, then 42 again into printf.
- printf(T("There is %d leaf.\n", 42), 42)
-
- /*
- This isn't in the translation catalog, so the key is passed back untranslated.
- */
- println("-----")
- println(T("Come visit us on Discord!"))
-}
-
-qt :: proc() {
- using fmt
-
- err: i18n.Error
-
- /*
- Parse QT file and set it as the active translation so we can omit `get`'s "catalog" parameter.
- */
- i18n.ACTIVE, err = i18n.parse_qt(#load("translations/nl_NL-qt-ts.ts"))
- defer i18n.destroy()
-
- if err != .None {
- return
+ import "core:fmt"
+ import "core:text/i18n"
+
+ T :: i18n.get
+
+ mo :: proc() {
+ using fmt
+
+ err: i18n.Error
+
+ /*
+ Parse MO file and set it as the active translation so we can omit `get`'s "catalog" parameter.
+ */
+ i18n.ACTIVE, err = i18n.parse_mo(#load("translations/nl_NL.mo"))
+ defer i18n.destroy()
+
+ if err != .None { return }
+
+ /*
+ These are in the .MO catalog.
+ */
+ println("-----")
+ println(T(""))
+ println("-----")
+ println(T("There are 69,105 leaves here."))
+ println("-----")
+ println(T("Hellope, World!"))
+ println("-----")
+ // We pass 1 into `T` to get the singular format string, then 1 again into printf.
+ printf(T("There is %d leaf.\n", 1), 1)
+ // We pass 42 into `T` to get the plural format string, then 42 again into printf.
+ printf(T("There is %d leaf.\n", 42), 42)
+
+ /*
+ This isn't in the translation catalog, so the key is passed back untranslated.
+ */
+ println("-----")
+ println(T("Come visit us on Discord!"))
}
- /*
- These are in the .TS catalog. As you can see they have sections.
- */
- println("--- Page section ---")
- println("Page:Text for translation =", T("Page", "Text for translation"))
- println("-----")
- println("Page:Also text to translate =", T("Page", "Also text to translate"))
- println("-----")
- println("--- installscript section ---")
- println("installscript:99 bottles of beer on the wall =", T("installscript", "99 bottles of beer on the wall"))
- println("-----")
- println("--- apple_count section ---")
- println("apple_count:%d apple(s) =")
- println("\t 1 =", T("apple_count", "%d apple(s)", 1))
- println("\t 42 =", T("apple_count", "%d apple(s)", 42))
-}
-```
-*/ \ No newline at end of file
+ qt :: proc() {
+ using fmt
+
+ err: i18n.Error
+
+ /*
+ Parse QT file and set it as the active translation so we can omit `get`'s "catalog" parameter.
+ */
+ i18n.ACTIVE, err = i18n.parse_qt(#load("translations/nl_NL-qt-ts.ts"))
+ defer i18n.destroy()
+
+ if err != .None {
+ return
+ }
+
+ /*
+ These are in the .TS catalog. As you can see they have sections.
+ */
+ println("--- Page section ---")
+ println("Page:Text for translation =", T("Page", "Text for translation"))
+ println("-----")
+ println("Page:Also text to translate =", T("Page", "Also text to translate"))
+ println("-----")
+ println("--- installscript section ---")
+ println("installscript:99 bottles of beer on the wall =", T("installscript", "99 bottles of beer on the wall"))
+ println("-----")
+ println("--- apple_count section ---")
+ println("apple_count:%d apple(s) =")
+ println("\t 1 =", T("apple_count", "%d apple(s)", 1))
+ println("\t 42 =", T("apple_count", "%d apple(s)", 42))
+ }
+*/
+package i18n
diff --git a/core/text/i18n/i18n.odin b/core/text/i18n/i18n.odin
index 151f9e129..64593c4e8 100644
--- a/core/text/i18n/i18n.odin
+++ b/core/text/i18n/i18n.odin
@@ -90,7 +90,7 @@ DEFAULT_PARSE_OPTIONS :: Parse_Options{
- get(key, number), which returns the appropriate plural from the active catalog, or
- get(key, number, catalog) to grab text from a specific one.
*/
-get_single_section :: proc(key: string, number := 0, catalog: ^Translation = ACTIVE) -> (value: string) {
+get_single_section :: proc(key: string, number := 1, catalog: ^Translation = ACTIVE) -> (value: string) {
/*
A lot of languages use singular for 1 item and plural for 0 or more than 1 items. This is our default pluralize rule.
*/
@@ -108,7 +108,7 @@ get_single_section :: proc(key: string, number := 0, catalog: ^Translation = ACT
- get(section, key, number), which returns the appropriate plural from the active catalog, or
- get(section, key, number, catalog) to grab text from a specific one.
*/
-get_by_section :: proc(section, key: string, number := 0, catalog: ^Translation = ACTIVE) -> (value: string) {
+get_by_section :: proc(section, key: string, number := 1, catalog: ^Translation = ACTIVE) -> (value: string) {
/*
A lot of languages use singular for 1 item and plural for 0 or more than 1 items. This is our default pluralize rule.
*/
diff --git a/core/text/table/doc.odin b/core/text/table/doc.odin
index 9b5c1f932..76886bdea 100644
--- a/core/text/table/doc.odin
+++ b/core/text/table/doc.odin
@@ -1,11 +1,8 @@
/*
- package table implements ascii/markdown/html/custom rendering of tables.
+The package `table` implements ASCII/markdown/HTML/custom rendering of tables.
- ---
+**Custom rendering example:**
- Custom rendering example:
-
- ```odin
tbl := init(&Table{})
padding(tbl, 0, 1)
row(tbl, "A_LONG_ENUM", "= 54,", "// A comment about A_LONG_ENUM")
@@ -17,19 +14,14 @@
}
io.write_byte(stdio_writer(), '\n')
}
- ```
- This outputs:
- ```
+This outputs:
+
A_LONG_ENUM = 54, // A comment about A_LONG_ENUM
AN_EVEN_LONGER_ENUM = 1, // A comment about AN_EVEN_LONGER_ENUM
- ```
-
- ---
- ASCII rendering example:
+**ASCII rendering example:**
- ```odin
tbl := init(&Table{})
defer destroy(tbl)
@@ -69,10 +61,9 @@
write_ascii_table(stdio_writer(), tbl)
write_markdown_table(stdio_writer(), tbl)
- ```
- This outputs:
- ```
+This outputs:
+
+-----------------------------------------------+
| This is a table caption and it is very long |
+------------------+-----------------+----------+
@@ -82,19 +73,15 @@
| 000000005 | 6.283185 | |
| a | bbb | c |
+------------------+-----------------+----------+
- ```
- and
+and
- ```
| AAAAAAAAA | B | C |
|:-----------------|:---------------:|---------:|
| 123 | foo | |
| 000000005 | 6.283185 | |
| a | bbb | c |
- ```
- respectively.
+respectively.
*/
-
package text_table