author    Colin Davidson <colrdavidson@gmail.com>  2024-09-24 02:32:06 -0700
committer Colin Davidson <colrdavidson@gmail.com>  2024-09-24 02:32:06 -0700
commit    f3ab14b8ccb45d0fef8a96937635bdf0943ce7d6 (patch)
tree      1309d7c797117463996a84522ef3d1c9713a286c /core/encoding
parent    99938c7d4fb26d43a07dd4b8f4f00ab87e67e73f (diff)
parent    f7d74ff3a8596efef67d151ffb758ed085e94be0 (diff)
Merge branch 'master' into macharena
Diffstat (limited to 'core/encoding')
-rw-r--r--  core/encoding/ansi/doc.odin          |   6
-rw-r--r--  core/encoding/cbor/unmarshal.odin    |  38
-rw-r--r--  core/encoding/csv/doc.odin           |  96
-rw-r--r--  core/encoding/csv/example.odin       |  88
-rw-r--r--  core/encoding/csv/reader.odin        |   4
-rw-r--r--  core/encoding/endian/doc.odin        |  25
-rw-r--r--  core/encoding/entity/entity.odin     |  26
-rw-r--r--  core/encoding/entity/generated.odin  |   2
-rw-r--r--  core/encoding/hxa/doc.odin           | 172
-rw-r--r--  core/encoding/json/unmarshal.odin    |  25
-rw-r--r--  core/encoding/uuid/doc.odin          |   9
-rw-r--r--  core/encoding/uuid/writing.odin      |  35
-rw-r--r--  core/encoding/varint/doc.odin        |  13
-rw-r--r--  core/encoding/varint/leb128.odin     |   4
-rw-r--r--  core/encoding/xml/doc.odin           |  23
-rw-r--r--  core/encoding/xml/xml_reader.odin    |  28
16 files changed, 313 insertions, 281 deletions
diff --git a/core/encoding/ansi/doc.odin b/core/encoding/ansi/doc.odin
index a0945c581..966e6be00 100644
--- a/core/encoding/ansi/doc.odin
+++ b/core/encoding/ansi/doc.odin
@@ -13,8 +13,8 @@ If your terminal supports 24-bit true color mode, you can also do this:
fmt.println(ansi.CSI + ansi.FG_COLOR_24_BIT + ";0;255;255" + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
For more information, see:
- 1. https://en.wikipedia.org/wiki/ANSI_escape_code
- 2. https://www.vt100.net/docs/vt102-ug/chapter5.html
- 3. https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+- [[ https://en.wikipedia.org/wiki/ANSI_escape_code ]]
+- [[ https://www.vt100.net/docs/vt102-ug/chapter5.html ]]
+- [[ https://invisible-island.net/xterm/ctlseqs/ctlseqs.html ]]
*/
package ansi
diff --git a/core/encoding/cbor/unmarshal.odin b/core/encoding/cbor/unmarshal.odin
index c54660839..bf27171f4 100644
--- a/core/encoding/cbor/unmarshal.odin
+++ b/core/encoding/cbor/unmarshal.odin
@@ -675,10 +675,6 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
return
case reflect.Type_Info_Map:
- if !reflect.is_string(t.key) {
- return _unsupported(v, hdr)
- }
-
raw_map := (^mem.Raw_Map)(v.data)
if raw_map.allocator.procedure == nil {
raw_map.allocator = context.allocator
@@ -695,43 +691,31 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
new_len := uintptr(min(scap, runtime.map_len(raw_map^)+length))
runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
}
-
- // Temporary memory to unmarshal keys into before inserting them into the map.
+
+ // Temporary memory to unmarshal values into before inserting them into the map.
elem_backing := mem.alloc_bytes_non_zeroed(t.value.size, t.value.align, context.temp_allocator) or_return
defer delete(elem_backing, context.temp_allocator)
-
map_backing_value := any{raw_data(elem_backing), t.value.id}
- for idx := 0; unknown || idx < length; idx += 1 {
- // Decode key, keys can only be strings.
- key: string
- if keyv, kerr := decode_key(d, v); unknown && kerr == .Break {
- break
- } else if kerr != nil {
- err = kerr
- return
- } else {
- key = keyv
- }
+ // Temporary memory to unmarshal keys into.
+ key_backing := mem.alloc_bytes_non_zeroed(t.key.size, t.key.align, context.temp_allocator) or_return
+ defer delete(key_backing, context.temp_allocator)
+ key_backing_value := any{raw_data(key_backing), t.key.id}
+ for idx := 0; unknown || idx < length; idx += 1 {
if unknown || idx > scap {
// Reserve space for new element so we can return allocator errors.
new_len := uintptr(runtime.map_len(raw_map^)+1)
runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
}
+ mem.zero_slice(key_backing)
+ _unmarshal_value(d, key_backing_value, _decode_header(r) or_return) or_return
+
mem.zero_slice(elem_backing)
_unmarshal_value(d, map_backing_value, _decode_header(r) or_return) or_return
- key_ptr := rawptr(&key)
- key_cstr: cstring
- if reflect.is_cstring(t.key) {
- assert_safe_for_cstring(key)
- key_cstr = cstring(raw_data(key))
- key_ptr = &key_cstr
- }
-
- set_ptr := runtime.__dynamic_map_set_without_hash(raw_map, t.map_info, key_ptr, map_backing_value.data)
+ set_ptr := runtime.__dynamic_map_set_without_hash(raw_map, t.map_info, key_backing_value.data, map_backing_value.data)
// We already reserved space for it, so this shouldn't fail.
assert(set_ptr != nil)
}
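With the string-key guard removed above, map keys are unmarshaled through the same temporary-backing path as values, so maps keyed by non-string types can round-trip. A minimal sketch of what this hunk enables; the exact `cbor.marshal`/`cbor.unmarshal` signatures are assumptions, not shown in this diff:

	package main

	import "core:fmt"
	import "core:encoding/cbor"

	main :: proc() {
		// A map keyed by a non-string type, which the removed guard used to reject.
		scores := map[int]string{1 = "gold", 2 = "silver"}
		defer delete(scores)

		data, merr := cbor.marshal(scores) // assumed entry point
		if merr != nil { fmt.eprintln("marshal error:", merr); return }
		defer delete(data)

		decoded: map[int]string
		defer delete(decoded)
		if uerr := cbor.unmarshal(string(data), &decoded); uerr != nil { // assumed entry point
			fmt.eprintln("unmarshal error:", uerr)
			return
		}
		fmt.println(decoded)
	}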
diff --git a/core/encoding/csv/doc.odin b/core/encoding/csv/doc.odin
new file mode 100644
index 000000000..bfeadafd6
--- /dev/null
+++ b/core/encoding/csv/doc.odin
@@ -0,0 +1,96 @@
+/*
+package csv reads and writes comma-separated values (CSV) files.
+This package supports the format described in [[ RFC 4180; https://tools.ietf.org/html/rfc4180.html ]]
+
+Example:
+ package main
+
+ import "core:fmt"
+ import "core:encoding/csv"
+ import "core:os"
+
+ // Requires keeping the entire CSV file in memory at once
+ iterate_csv_from_string :: proc(filename: string) {
+ r: csv.Reader
+ r.trim_leading_space = true
+ r.reuse_record = true // Without it you have to delete(record)
+ r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+ defer csv.reader_destroy(&r)
+
+ csv_data, ok := os.read_entire_file(filename)
+ if ok {
+ csv.reader_init_with_string(&r, string(csv_data))
+ } else {
+ fmt.printfln("Unable to open file: %v", filename)
+ return
+ }
+ defer delete(csv_data)
+
+ for r, i, err in csv.iterator_next(&r) {
+ if err != nil { /* Do something with error */ }
+ for f, j in r {
+ fmt.printfln("Record %v, field %v: %q", i, j, f)
+ }
+ }
+ }
+
+ // Reads the CSV as it's processed (with a small buffer)
+ iterate_csv_from_stream :: proc(filename: string) {
+ fmt.printfln("Hellope from %v", filename)
+ r: csv.Reader
+ r.trim_leading_space = true
+ r.reuse_record = true // Without it you have to delete(record)
+ r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+ defer csv.reader_destroy(&r)
+
+ handle, err := os.open(filename)
+ if err != nil {
+ fmt.eprintfln("Error opening file: %v", filename)
+ return
+ }
+ defer os.close(handle)
+ csv.reader_init(&r, os.stream_from_handle(handle))
+
+ for r, i in csv.iterator_next(&r) {
+ for f, j in r {
+ fmt.printfln("Record %v, field %v: %q", i, j, f)
+ }
+ }
+ fmt.printfln("Error: %v", csv.iterator_last_error(r))
+ }
+
+ // Read all records at once
+ read_csv_from_string :: proc(filename: string) {
+ r: csv.Reader
+ r.trim_leading_space = true
+ r.reuse_record = true // Without it you have to delete(record)
+ r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+ defer csv.reader_destroy(&r)
+
+ csv_data, ok := os.read_entire_file(filename)
+ if ok {
+ csv.reader_init_with_string(&r, string(csv_data))
+ } else {
+ fmt.printfln("Unable to open file: %v", filename)
+ return
+ }
+ defer delete(csv_data)
+
+ records, err := csv.read_all(&r)
+ if err != nil { /* Do something with CSV parse error */ }
+
+ defer {
+ for rec in records {
+ delete(rec)
+ }
+ delete(records)
+ }
+
+ for r, i in records {
+ for f, j in r {
+ fmt.printfln("Record %v, field %v: %q", i, j, f)
+ }
+ }
+ }
+*/
+package encoding_csv
diff --git a/core/encoding/csv/example.odin b/core/encoding/csv/example.odin
deleted file mode 100644
index d791eb33b..000000000
--- a/core/encoding/csv/example.odin
+++ /dev/null
@@ -1,88 +0,0 @@
-//+build ignore
-package encoding_csv
-
-import "core:fmt"
-import "core:encoding/csv"
-import "core:os"
-
-// Requires keeping the entire CSV file in memory at once
-iterate_csv_from_string :: proc(filename: string) {
- r: csv.Reader
- r.trim_leading_space = true
- r.reuse_record = true // Without it you have to delete(record)
- r.reuse_record_buffer = true // Without it you have to each of the fields within it
- defer csv.reader_destroy(&r)
-
- if csv_data, ok := os.read_entire_file(filename); ok {
- csv.reader_init_with_string(&r, string(csv_data))
- defer delete(csv_data)
- } else {
- fmt.printfln("Unable to open file: %v", filename)
- return
- }
-
- for r, i, err in csv.iterator_next(&r) {
- if err != nil { /* Do something with error */ }
- for f, j in r {
- fmt.printfln("Record %v, field %v: %q", i, j, f)
- }
- }
-}
-
-// Reads the CSV as it's processed (with a small buffer)
-iterate_csv_from_stream :: proc(filename: string) {
- fmt.printfln("Hellope from %v", filename)
- r: csv.Reader
- r.trim_leading_space = true
- r.reuse_record = true // Without it you have to delete(record)
- r.reuse_record_buffer = true // Without it you have to each of the fields within it
- defer csv.reader_destroy(&r)
-
- handle, err := os.open(filename)
- if err != nil {
- fmt.eprintfln("Error opening file: %v", filename)
- return
- }
- defer os.close(handle)
- csv.reader_init(&r, os.stream_from_handle(handle))
-
- for r, i in csv.iterator_next(&r) {
- for f, j in r {
- fmt.printfln("Record %v, field %v: %q", i, j, f)
- }
- }
- fmt.printfln("Error: %v", csv.iterator_last_error(r))
-}
-
-// Read all records at once
-read_csv_from_string :: proc(filename: string) {
- r: csv.Reader
- r.trim_leading_space = true
- r.reuse_record = true // Without it you have to delete(record)
- r.reuse_record_buffer = true // Without it you have to each of the fields within it
- defer csv.reader_destroy(&r)
-
- if csv_data, ok := os.read_entire_file(filename); ok {
- csv.reader_init_with_string(&r, string(csv_data))
- defer delete(csv_data)
- } else {
- fmt.printfln("Unable to open file: %v", filename)
- return
- }
-
- records, err := csv.read_all(&r)
- if err != nil { /* Do something with CSV parse error */ }
-
- defer {
- for rec in records {
- delete(rec)
- }
- delete(records)
- }
-
- for r, i in records {
- for f, j in r {
- fmt.printfln("Record %v, field %v: %q", i, j, f)
- }
- }
-}
\ No newline at end of file
diff --git a/core/encoding/csv/reader.odin b/core/encoding/csv/reader.odin
index ebc7b39a0..5348624d5 100644
--- a/core/encoding/csv/reader.odin
+++ b/core/encoding/csv/reader.odin
@@ -1,5 +1,5 @@
// package csv reads and writes comma-separated values (CSV) files.
-// This package supports the format described in RFC 4180 <https://tools.ietf.org/html/rfc4180.html>
+// This package supports the format described in [[ RFC 4180; https://tools.ietf.org/html/rfc4180.html ]]
package encoding_csv
import "core:bufio"
@@ -484,4 +484,4 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
r.fields_per_record = len(dst)
}
return dst[:], err
-}
\ No newline at end of file
+}
diff --git a/core/encoding/endian/doc.odin b/core/encoding/endian/doc.odin
index 8ebefd0a4..0b43e3097 100644
--- a/core/encoding/endian/doc.odin
+++ b/core/encoding/endian/doc.odin
@@ -2,22 +2,23 @@
Package endian implements a simple translation between bytes and numbers with
specific endian encodings.
- buf: [100]u8
- put_u16(buf[:], .Little, 16) or_return
+Example:
+ buf: [100]u8
+ put_u16(buf[:], .Little, 16) or_return
- You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
- put_u16(buf[:], 16) or_return
+ // You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
+ put_u16(buf[:], 16) or_return
- The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
- each of their fields, or use a header field which specifies it for the entire file.
+ // The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
+ // each of their fields, or use a header field which specifies it for the entire file.
- e.g. a file which specifies it at the top for all fields could do this:
- file_order := .Little if buf[0] == 0 else .Big
- field := get_u16(buf[1:], file_order) or_return
+ // e.g. a file which specifies it at the top for all fields could do this:
+ file_order := .Little if buf[0] == 0 else .Big
+ field := get_u16(buf[1:], file_order) or_return
- If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
- be that your future self or someone else.
+ // If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
+ // be that your future self or someone else.
- field := get_u16(buf[:], .Big) or_return
+ field := get_u16(buf[:], .Big) or_return
*/
package encoding_endian
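To complement the reworked doc block, here is a self-contained round-trip sketch built from the calls the example above already uses (`put_u16`/`get_u16` with an explicit `.Little`/`.Big` byte order); the boolean return is an assumption inferred from the `or_return` usage shown:

	package main

	import "core:fmt"
	import "core:encoding/endian"

	main :: proc() {
		buf: [2]u8

		// Write a u16 with an explicit byte order, as the doc recommends.
		if !endian.put_u16(buf[:], .Little, 0xBEEF) {
			fmt.eprintln("buffer too small")
			return
		}

		// Read it back with the same explicit byte order.
		v, ok := endian.get_u16(buf[:], .Little)
		assert(ok && v == 0xBEEF)
		fmt.printfln("bytes: %v -> value: %v", buf, v)
	}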
diff --git a/core/encoding/entity/entity.odin b/core/encoding/entity/entity.odin
index f5208ad6f..d2f1d46b2 100644
--- a/core/encoding/entity/entity.odin
+++ b/core/encoding/entity/entity.odin
@@ -1,23 +1,25 @@
-package encoding_unicode_entity
/*
- A unicode entity encoder/decoder
-
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
Made available under Odin's BSD-3 license.
+ List of contributors:
+ Jeroen van Rijn: Initial implementation.
+*/
+
+/*
+ A unicode entity encoder/decoder.
+
This code has several procedures to map unicode runes to/from different textual encodings.
- SGML/XML/HTML entity
- -- &#<decimal>;
- -- &#x<hexadecimal>;
- -- &<entity name>; (If the lookup tables are compiled in).
- Reference: https://www.w3.org/2003/entities/2007xml/unicode.xml
+ - &#<decimal>;
+ - &#x<hexadecimal>;
+ - &<entity name>; (If the lookup tables are compiled in).
+ Reference: [[ https://www.w3.org/2003/entities/2007xml/unicode.xml ]]
- URL encode / decode %hex entity
- Reference: https://datatracker.ietf.org/doc/html/rfc3986/#section-2.1
-
- List of contributors:
- Jeroen van Rijn: Initial implementation.
+ Reference: [[ https://datatracker.ietf.org/doc/html/rfc3986/#section-2.1 ]]
*/
+package encoding_unicode_entity
import "core:unicode/utf8"
import "core:unicode"
@@ -353,4 +355,4 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
}
return false, .None
-}
\ No newline at end of file
+}
diff --git a/core/encoding/entity/generated.odin b/core/encoding/entity/generated.odin
index 0c4742149..52027ae03 100644
--- a/core/encoding/entity/generated.odin
+++ b/core/encoding/entity/generated.odin
@@ -42,7 +42,7 @@ XML_NAME_TO_RUNE_MAX_LENGTH :: 31
Input:
entity_name - a string, like "copy" that describes a user-encoded Unicode entity as used in XML.
- Output:
+ Returns:
"decoded" - The decoded rune if found by name, or -1 otherwise.
"ok" - true if found, false if not.
diff --git a/core/encoding/hxa/doc.odin b/core/encoding/hxa/doc.odin
index 230d6ea66..b696bef7e 100644
--- a/core/encoding/hxa/doc.odin
+++ b/core/encoding/hxa/doc.odin
@@ -1,83 +1,89 @@
-// Implementation of the HxA 3D asset format
-// HxA is a interchangeable graphics asset format.
-// Designed by Eskil Steenberg. @quelsolaar / eskil 'at' obsession 'dot' se / www.quelsolaar.com
-//
-// Author of this Odin package: Ginger Bill
-//
-// Following comment is copied from the original C-implementation
-// ---------
-// -Does the world need another Graphics file format?
-// Unfortunately, Yes. All existing formats are either too large and complicated to be implemented from
-// scratch, or don't have some basic features needed in modern computer graphics.
-// -Who is this format for?
-// For people who want a capable open Graphics format that can be implemented from scratch in
-// a few hours. It is ideal for graphics researchers, game developers or other people who
-// wants to build custom graphics pipelines. Given how easy it is to parse and write, it
-// should be easy to write utilities that process assets to preform tasks like: generating
-// normals, light-maps, tangent spaces, Error detection, GPU optimization, LOD generation,
-// and UV mapping.
-// -Why store images in the format when there are so many good image formats already?
-// Yes there are, but only for 2D RGB/RGBA images. A lot of computer graphics rendering rely
-// on 1D, 3D, cube, multilayer, multi channel, floating point bitmap buffers. There almost no
-// formats for this kind of data. Also 3D files that reference separate image files rely on
-// file paths, and this often creates issues when the assets are moved. By including the
-// texture data in the files directly the assets become self contained.
-// -Why doesn't the format support <insert whatever>?
-// Because the entire point is to make a format that can be implemented. Features like NURBSs,
-// Construction history, or BSP trees would make the format too large to serve its purpose.
-// The facilities of the formats to store meta data should make the format flexible enough
-// for most uses. Adding HxA support should be something anyone can do in a days work.
-//
-// Structure:
-// ----------
-// HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
-// a few basic structures, and depending on how they are used they mean different things. This means
-// that you can implement a tool that loads the entire file, modifies the parts it cares about and
-// leaves the rest intact. It is also possible to write a tool that makes all data in the file
-// editable without the need to understand its use. It is also possible for anyone to use the format
-// to store data axillary data. Anyone who wants to store data not covered by a convention can submit
-// a convention to extend the format. There should never be a convention for storing the same data in
-// two differed ways.
-// The data is story in a number of nodes that are stored in an array. Each node stores an array of
-// meta data. Meta data can describe anything you want, and a lot of conventions will use meta data
-// to store additional information, for things like transforms, lights, shaders and animation.
-// Data for Vertices, Corners, Faces, and Pixels are stored in named layer stacks. Each stack consists
-// of a number of named layers. All layers in the stack have the same number of elements. Each layer
-// describes one property of the primitive. Each layer can have multiple channels and each layer can
-// store data of a different type.
-//
-// HaX stores 3 kinds of nodes
-// - Pixel data.
-// - Polygon geometry data.
-// - Meta data only.
-//
-// Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
-// Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
-// layers to store things like color. The length of the layer stack is determined by the type and
-// dimensions stored in the
-//
-// Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
-// vertex data stores things like verities, blend shapes, weight maps, and vertex colors. The first
-// layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
-// of the vertices. The corner stack describes data per corner or edge of the polygons. It can be used
-// for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
-// integer layer named "index" describing the vertices used to form polygons. The last value in each
-// polygon has a negative - 1 index to indicate the end of the polygon.
-//
-// Example:
-// A quad and a tri with the vertex index:
-// [0, 1, 2, 3] [1, 4, 2]
-// is stored:
-// [0, 1, 2, -4, 1, 4, -3]
-// The face stack stores values per face. the length of the face stack has to match the number of
-// negative values in the index layer in the corner stack. The face stack can be used to store things
-// like material index.
-//
-// Storage
-// -------
-// All data is stored in little endian byte order with no padding. The layout mirrors the structs
-// defined below with a few exceptions. All names are stored as a 8-bit unsigned integer indicating
-// the length of the name followed by that many characters. Termination is not stored in the file.
-// Text strings stored in meta data are stored the same way as names, but instead of a 8-bit unsigned
-// integer a 32-bit unsigned integer is used.
-package encoding_hxa
\ No newline at end of file
+/*
+Implementation of the HxA 3D asset format
+HxA is an interchangeable graphics asset format.
+Designed by Eskil Steenberg. @quelsolaar / eskil 'at' obsession 'dot' se / www.quelsolaar.com
+
+Author of this Odin package: Ginger Bill
+
+Following comment is copied from the original C-implementation
+---------
+- Does the world need another Graphics file format?
+Unfortunately, Yes. All existing formats are either too large and complicated to be implemented from
+scratch, or don't have some basic features needed in modern computer graphics.
+
+- Who is this format for?
+For people who want a capable open Graphics format that can be implemented from scratch in
+a few hours. It is ideal for graphics researchers, game developers or other people who
+want to build custom graphics pipelines. Given how easy it is to parse and write, it
+should be easy to write utilities that process assets to perform tasks like: generating
+normals, light-maps, tangent spaces, error detection, GPU optimization, LOD generation,
+and UV mapping.
+
+- Why store images in the format when there are so many good image formats already?
+Yes there are, but only for 2D RGB/RGBA images. A lot of computer graphics rendering relies
+on 1D, 3D, cube, multilayer, multi-channel, floating-point bitmap buffers. There are almost no
+formats for this kind of data. Also 3D files that reference separate image files rely on
+file paths, and this often creates issues when the assets are moved. By including the
+texture data in the files directly the assets become self contained.
+
+- Why doesn't the format support <insert whatever>?
+Because the entire point is to make a format that can be implemented. Features like NURBSs,
+Construction history, or BSP trees would make the format too large to serve its purpose.
+The facilities of the formats to store meta data should make the format flexible enough
+for most uses. Adding HxA support should be something anyone can do in a days work.
+
+Structure:
+----------
+HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
+a few basic structures, and depending on how they are used they mean different things. This means
+that you can implement a tool that loads the entire file, modifies the parts it cares about and
+leaves the rest intact. It is also possible to write a tool that makes all data in the file
+editable without the need to understand its use. It is also possible for anyone to use the format
+to store auxiliary data. Anyone who wants to store data not covered by a convention can submit
+a convention to extend the format. There should never be a convention for storing the same data in
+two different ways.
+
+The data is stored in a number of nodes that are kept in an array. Each node stores an array of
+meta data. Meta data can describe anything you want, and a lot of conventions will use meta data
+to store additional information, for things like transforms, lights, shaders and animation.
+Data for Vertices, Corners, Faces, and Pixels are stored in named layer stacks. Each stack consists
+of a number of named layers. All layers in the stack have the same number of elements. Each layer
+describes one property of the primitive. Each layer can have multiple channels and each layer can
+store data of a different type.
+
+HxA stores 3 kinds of nodes:
+- Pixel data.
+- Polygon geometry data.
+- Meta data only.
+
+Pixel nodes store pixels in a layer stack. A layer may store things like Albedo, Roughness,
+Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
+layers to store things like color. The length of the layer stack is determined by the type and
+dimensions stored in the node.
+
+Geometry data is stored in 3 separate layer stacks for: vertex data, corner data, and face data. The
+vertex data stores things like vertices, blend shapes, weight maps, and vertex colors. The first
+layer in a vertex stack has to be a 3-channel layer named "position" describing the base position
+of the vertices. The corner stack describes data per corner or edge of the polygons. It can be used
+for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1-channel
+integer layer named "index" describing the vertices used to form polygons. The last value in each
+polygon is stored as -(index + 1) to mark the end of the polygon.
+
+For Example:
+ A quad and a tri with the vertex index:
+ [0, 1, 2, 3] [1, 4, 2]
+ is stored:
+ [0, 1, 2, -4, 1, 4, -3]
+
+The face stack stores values per face. The length of the face stack has to match the number of
+negative values in the index layer in the corner stack. The face stack can be used to store things
+like material index.
+
+Storage:
+-------
+All data is stored in little endian byte order with no padding. The layout mirrors the structs
+defined below with a few exceptions. All names are stored as a 8-bit unsigned integer indicating
+the length of the name followed by that many characters. Termination is not stored in the file.
+Text strings stored in meta data are stored the same way as names, but instead of a 8-bit unsigned
+integer a 32-bit unsigned integer is used.
+*/
+package encoding_hxa
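The corner-index convention is the subtlest part of the format, so here is a short, dependency-free sketch that splits an "index" layer into polygons using the -(index + 1) end marker from the example above:

	package main

	import "core:fmt"

	// Split a corner "index" layer into polygons. The last corner of each
	// polygon is stored as -(index + 1), per the convention described above.
	split_polygons :: proc(indices: []i32, allocator := context.allocator) -> [][]i32 {
		polys := make([dynamic][]i32, allocator)
		start := 0
		for v, i in indices {
			if v < 0 {
				poly := make([]i32, i - start + 1, allocator)
				copy(poly, indices[start:i])
				poly[len(poly) - 1] = -v - 1 // recover the real vertex index
				append(&polys, poly)
				start = i + 1
			}
		}
		return polys[:]
	}

	main :: proc() {
		// The quad + tri example from the doc: [0, 1, 2, 3] and [1, 4, 2].
		stored := []i32{0, 1, 2, -4, 1, 4, -3}
		for poly in split_polygons(stored) {
			fmt.println(poly) // [0, 1, 2, 3], then [1, 4, 2]
		}
	}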
diff --git a/core/encoding/json/unmarshal.odin b/core/encoding/json/unmarshal.odin
index 127bce650..738e20c68 100644
--- a/core/encoding/json/unmarshal.odin
+++ b/core/encoding/json/unmarshal.odin
@@ -116,7 +116,30 @@ assign_int :: proc(val: any, i: $T) -> bool {
case int: dst = int (i)
case uint: dst = uint (i)
case uintptr: dst = uintptr(i)
- case: return false
+ case:
+ ti := type_info_of(v.id)
+ if _, ok := ti.variant.(runtime.Type_Info_Bit_Set); ok {
+ do_byte_swap := !reflect.bit_set_is_big_endian(v)
+ switch ti.size * 8 {
+ case 0: // no-op.
+ case 8:
+ x := (^u8)(v.data)
+ x^ = u8(i)
+ case 16:
+ x := (^u16)(v.data)
+ x^ = do_byte_swap ? intrinsics.byte_swap(u16(i)) : u16(i)
+ case 32:
+ x := (^u32)(v.data)
+ x^ = do_byte_swap ? intrinsics.byte_swap(u32(i)) : u32(i)
+ case 64:
+ x := (^u64)(v.data)
+ x^ = do_byte_swap ? intrinsics.byte_swap(u64(i)) : u64(i)
+ case:
+ panic("unknown bit_size size")
+ }
+ return true
+ }
+ return false
}
return true
}
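A hedged sketch of what the new branch enables: unmarshaling a JSON integer directly into a `bit_set` field. `json.unmarshal` taking a byte slice and a pointer is an assumption about the package's public API, and the printed set assumes the little-endian layout handled above:

	package main

	import "core:fmt"
	import "core:encoding/json"

	Flag :: enum u8 { Read, Write, Exec }

	Permissions :: struct {
		flags: bit_set[Flag; u8],
	}

	main :: proc() {
		// 5 == 0b101 sets bits 0 and 2, i.e. {.Read, .Exec}.
		data := `{"flags": 5}`

		p: Permissions
		if err := json.unmarshal(transmute([]u8)data, &p); err != nil {
			fmt.eprintln("unmarshal error:", err)
			return
		}
		fmt.println(p.flags) // expected: bit_set[Flag]{.Read, .Exec}
	}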
diff --git a/core/encoding/uuid/doc.odin b/core/encoding/uuid/doc.odin
index 6fa375b72..f910c33d8 100644
--- a/core/encoding/uuid/doc.odin
+++ b/core/encoding/uuid/doc.odin
@@ -21,8 +21,9 @@ cryptographically-secure, per RFC 9562's suggestion.
- Version 6 without either a clock or node argument.
- Version 7 in all cases.
-Here's an example of how to set up one:
-
+Example:
+ package main
+
import "core:crypto"
import "core:encoding/uuid"
@@ -40,7 +41,7 @@ Here's an example of how to set up one:
For more information on the specifications, see here:
-- https://www.rfc-editor.org/rfc/rfc4122.html
-- https://www.rfc-editor.org/rfc/rfc9562.html
+- [[ https://www.rfc-editor.org/rfc/rfc4122.html ]]
+- [[ https://www.rfc-editor.org/rfc/rfc9562.html ]]
*/
package uuid
diff --git a/core/encoding/uuid/writing.odin b/core/encoding/uuid/writing.odin
index 499cba72b..7acaa3cd7 100644
--- a/core/encoding/uuid/writing.odin
+++ b/core/encoding/uuid/writing.odin
@@ -11,7 +11,7 @@ Write a UUID in the 8-4-4-4-12 format.
This procedure performs error checking with every byte written.
If you can guarantee beforehand that your stream has enough space to hold the
-UUID (32 bytes), then it is better to use `unsafe_write` instead as that will
+UUID (36 bytes), then it is better to use `unsafe_write` instead as that will
be faster.
Inputs:
@@ -22,7 +22,7 @@ Returns:
- error: An `io` error, if one occurred, otherwise `nil`.
*/
write :: proc(w: io.Writer, id: Identifier) -> (error: io.Error) #no_bounds_check {
- write_octet :: proc (w: io.Writer, octet: u8) -> io.Error #no_bounds_check {
+ write_octet :: proc(w: io.Writer, octet: u8) -> io.Error #no_bounds_check {
high_nibble := octet >> 4
low_nibble := octet & 0xF
@@ -31,15 +31,15 @@ write :: proc(w: io.Writer, id: Identifier) -> (error: io.Error) #no_bounds_chec
return nil
}
- for index in 0 ..< 4 { write_octet(w, id[index]) or_return }
+ for index in 0 ..< 4 {write_octet(w, id[index]) or_return}
io.write_byte(w, '-') or_return
- for index in 4 ..< 6 { write_octet(w, id[index]) or_return }
+ for index in 4 ..< 6 {write_octet(w, id[index]) or_return}
io.write_byte(w, '-') or_return
- for index in 6 ..< 8 { write_octet(w, id[index]) or_return }
+ for index in 6 ..< 8 {write_octet(w, id[index]) or_return}
io.write_byte(w, '-') or_return
- for index in 8 ..< 10 { write_octet(w, id[index]) or_return }
+ for index in 8 ..< 10 {write_octet(w, id[index]) or_return}
io.write_byte(w, '-') or_return
- for index in 10 ..< 16 { write_octet(w, id[index]) or_return }
+ for index in 10 ..< 16 {write_octet(w, id[index]) or_return}
return nil
}
@@ -54,7 +54,7 @@ Inputs:
- id: The identifier to convert.
*/
unsafe_write :: proc(w: io.Writer, id: Identifier) #no_bounds_check {
- write_octet :: proc (w: io.Writer, octet: u8) #no_bounds_check {
+ write_octet :: proc(w: io.Writer, octet: u8) #no_bounds_check {
high_nibble := octet >> 4
low_nibble := octet & 0xF
@@ -62,15 +62,15 @@ unsafe_write :: proc(w: io.Writer, id: Identifier) #no_bounds_check {
io.write_byte(w, strconv.digits[low_nibble])
}
- for index in 0 ..< 4 { write_octet(w, id[index]) }
+ for index in 0 ..< 4 {write_octet(w, id[index])}
io.write_byte(w, '-')
- for index in 4 ..< 6 { write_octet(w, id[index]) }
+ for index in 4 ..< 6 {write_octet(w, id[index])}
io.write_byte(w, '-')
- for index in 6 ..< 8 { write_octet(w, id[index]) }
+ for index in 6 ..< 8 {write_octet(w, id[index])}
io.write_byte(w, '-')
- for index in 8 ..< 10 { write_octet(w, id[index]) }
+ for index in 8 ..< 10 {write_octet(w, id[index])}
io.write_byte(w, '-')
- for index in 10 ..< 16 { write_octet(w, id[index]) }
+ for index in 10 ..< 16 {write_octet(w, id[index])}
}
/*
@@ -106,7 +106,7 @@ Convert a UUID to a string in the 8-4-4-4-12 format.
Inputs:
- id: The identifier to convert.
-- buffer: A byte buffer to store the result. Must be at least 32 bytes large.
+- buffer: A byte buffer to store the result. Must be at least 36 bytes large.
- loc: The caller location for debugging purposes (default: #caller_location)
Returns:
@@ -119,7 +119,11 @@ to_string_buffer :: proc(
) -> (
str: string,
) {
- assert(len(buffer) >= EXPECTED_LENGTH, "The buffer provided is not at least 32 bytes large.", loc)
+ assert(
+ len(buffer) >= EXPECTED_LENGTH,
+ "The buffer provided is not at least 36 bytes large.",
+ loc,
+ )
builder := strings.builder_from_bytes(buffer)
unsafe_write(strings.to_writer(&builder), id)
return strings.to_string(builder)
@@ -129,3 +133,4 @@ to_string :: proc {
to_string_allocated,
to_string_buffer,
}
+
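The 8-4-4-4-12 form is 36 characters (32 hex digits plus 4 hyphens), which is what the corrected sizes above reflect. A minimal usage sketch; `generate_v4`, `crypto.random_generator`, and the `to_string_buffer` argument order are assumptions based on the package docs rather than this diff:

	package main

	import "core:crypto"
	import "core:fmt"
	import "core:encoding/uuid"

	main :: proc() {
		// Version 4 requires a cryptographically-secure RNG, per the package docs.
		context.random_generator = crypto.random_generator()

		id := uuid.generate_v4()

		// 36 bytes: 32 hex digits + 4 hyphens.
		buf: [36]u8
		fmt.println(uuid.to_string_buffer(id, buf[:]))
	}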
diff --git a/core/encoding/varint/doc.odin b/core/encoding/varint/doc.odin
index c0a09873c..a00cfed15 100644
--- a/core/encoding/varint/doc.odin
+++ b/core/encoding/varint/doc.odin
@@ -1,10 +1,11 @@
/*
- Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
+Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
- Author of this Odin package: Jeroen van Rijn
+Author of this Odin package: Jeroen van Rijn
+
+Example:
+ package main
- Example:
- ```odin
import "core:encoding/varint"
import "core:fmt"
@@ -22,7 +23,5 @@
assert(decoded_val == value && decode_size == encode_size && decode_err == .None)
fmt.printf("Decoded as %v, using %v byte%v\n", decoded_val, decode_size, "" if decode_size == 1 else "s")
}
- ```
-
*/
-package encoding_varint
\ No newline at end of file
+package encoding_varint
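A complete round-trip sketch in the spirit of the doc's example; the `encode_uleb128`/`decode_uleb128` names and return shapes are assumptions inferred from the `decode_err == .None` line shown above:

	package main

	import "core:fmt"
	import "core:encoding/varint"

	main :: proc() {
		buf: [19]u8 // ceil(128 / 7) bytes is enough for any u128
		value: u128 = 0xDEAD_BEEF

		encode_size, encode_err := varint.encode_uleb128(buf[:], value)
		assert(encode_err == .None)

		decoded, decode_size, decode_err := varint.decode_uleb128(buf[:encode_size])
		assert(decode_err == .None && decoded == value && decode_size == encode_size)

		fmt.printfln("encoded %v into %v byte(s)", value, encode_size)
	}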
diff --git a/core/encoding/varint/leb128.odin b/core/encoding/varint/leb128.odin
index ca6513f04..606c57ba7 100644
--- a/core/encoding/varint/leb128.odin
+++ b/core/encoding/varint/leb128.odin
@@ -6,8 +6,6 @@
Jeroen van Rijn: Initial implementation.
*/
-// package varint implements variable length integer encoding and decoding using
-// the LEB128 format as used by DWARF debug info, Android .dex and other file formats.
package encoding_varint
// In theory we should use the bigint package. In practice, varints bigger than this indicate a corrupted file.
@@ -160,4 +158,4 @@ encode_ileb128 :: proc(buf: []u8, val: i128) -> (size: int, err: Error) {
buf[size - 1] = u8(low)
}
return
-}
\ No newline at end of file
+}
diff --git a/core/encoding/xml/doc.odin b/core/encoding/xml/doc.odin
new file mode 100644
index 000000000..10d9f78be
--- /dev/null
+++ b/core/encoding/xml/doc.odin
@@ -0,0 +1,23 @@
+/*
+XML 1.0 / 1.1 parser
+
+A from-scratch XML implementation, loosely modelled on the [[ spec; https://www.w3.org/TR/2006/REC-xml11-20060816 ]].
+
+Features:
+- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents in common current usage.
+- Simple to understand and use. Small.
+
+Caveats:
+- We do NOT support HTML in this package, as that may or may not be valid XML.
+ If it works, great. If it doesn't, that's not considered a bug.
+
+- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it to UTF-8 first. Also, our condolences.
+- <[!ELEMENT and <[!ATTLIST are not supported, and will be either ignored or return an error depending on the parser options.
+
+MAYBE:
+- XML writer?
+- Serialize/deserialize Odin types?
+
+For a full example, see: [[ core/encoding/xml/example; https://github.com/odin-lang/Odin/tree/master/core/encoding/xml/example ]]
+*/
+package encoding_xml
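A short loading sketch to accompany the new doc. `load_from_file`, `destroy`, and the `Document`/`Element` field names are assumptions about the package's public API; none of them appear in this diff:

	package main

	import "core:fmt"
	import "core:encoding/xml"

	main :: proc() {
		// Hypothetical input file; options and error handler are left at their defaults.
		doc, err := xml.load_from_file("example.xml")
		if err != .None {
			fmt.eprintln("parse error:", err)
			return
		}
		defer xml.destroy(doc)

		// Element 0 is conventionally the document root.
		fmt.printfln("root element: <%v>", doc.elements[0].ident)
	}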
diff --git a/core/encoding/xml/xml_reader.odin b/core/encoding/xml/xml_reader.odin
index b9656900f..b8c8b13a4 100644
--- a/core/encoding/xml/xml_reader.odin
+++ b/core/encoding/xml/xml_reader.odin
@@ -1,29 +1,11 @@
/*
- XML 1.0 / 1.1 parser
+ 2021-2022 Jeroen van Rijn <nom@duclavier.com>.
+ available under Odin's BSD-3 license.
- 2021-2022 Jeroen van Rijn <nom@duclavier.com>.
- available under Odin's BSD-3 license.
-
- from-scratch XML implementation, loosely modelled on the [spec](https://www.w3.org/TR/2006/REC-xml11-20060816).
-
-Features:
-- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents in common current usage.
-- Simple to understand and use. Small.
-
-Caveats:
-- We do NOT support HTML in this package, as that may or may not be valid XML.
- If it works, great. If it doesn't, that's not considered a bug.
-
-- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it to UTF-8 first. Also, our condolences.
-- <[!ELEMENT and <[!ATTLIST are not supported, and will be either ignored or return an error depending on the parser options.
-
-MAYBE:
-- XML writer?
-- Serialize/deserialize Odin types?
-
-List of contributors:
-- Jeroen van Rijn: Initial implementation.
+ List of contributors:
+ - Jeroen van Rijn: Initial implementation.
*/
+
package encoding_xml
// An XML 1.0 / 1.1 parser