aboutsummaryrefslogtreecommitdiff
path: root/core/encoding
diff options
context:
space:
mode:
authorLaytan Laats <laytanlaats@hotmail.com>2024-09-03 19:59:04 +0200
committerLaytan Laats <laytanlaats@hotmail.com>2024-09-03 19:59:04 +0200
commit288312a8126d71fae26c9d62a8cd342d830e1c5f (patch)
treeaa56a083e0222975888a24cf8d755b7d0a4f1bc0 /core/encoding
parent0e6109e171d24b3bb17289219ae3b482c24f2460 (diff)
core: improve package doc comments for the documentation generator
Diffstat (limited to 'core/encoding')
-rw-r--r--core/encoding/ansi/doc.odin6
-rw-r--r--core/encoding/csv/doc.odin96
-rw-r--r--core/encoding/csv/example.odin90
-rw-r--r--core/encoding/csv/reader.odin4
-rw-r--r--core/encoding/endian/doc.odin25
-rw-r--r--core/encoding/entity/entity.odin26
-rw-r--r--core/encoding/hxa/doc.odin172
-rw-r--r--core/encoding/uuid/doc.odin9
-rw-r--r--core/encoding/varint/doc.odin13
-rw-r--r--core/encoding/varint/leb128.odin4
-rw-r--r--core/encoding/xml/doc.odin23
-rw-r--r--core/encoding/xml/xml_reader.odin28
12 files changed, 257 insertions, 239 deletions
diff --git a/core/encoding/ansi/doc.odin b/core/encoding/ansi/doc.odin
index a0945c581..966e6be00 100644
--- a/core/encoding/ansi/doc.odin
+++ b/core/encoding/ansi/doc.odin
@@ -13,8 +13,8 @@ If your terminal supports 24-bit true color mode, you can also do this:
fmt.println(ansi.CSI + ansi.FG_COLOR_24_BIT + ";0;255;255" + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
For more information, see:
- 1. https://en.wikipedia.org/wiki/ANSI_escape_code
- 2. https://www.vt100.net/docs/vt102-ug/chapter5.html
- 3. https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+- [[ https://en.wikipedia.org/wiki/ANSI_escape_code ]]
+- [[ https://www.vt100.net/docs/vt102-ug/chapter5.html ]]
+- [[ https://invisible-island.net/xterm/ctlseqs/ctlseqs.html ]]
*/
package ansi
diff --git a/core/encoding/csv/doc.odin b/core/encoding/csv/doc.odin
new file mode 100644
index 000000000..bfeadafd6
--- /dev/null
+++ b/core/encoding/csv/doc.odin
@@ -0,0 +1,96 @@
+/*
+package csv reads and writes comma-separated values (CSV) files.
+This package supports the format described in [[ RFC 4180; https://tools.ietf.org/html/rfc4180.html ]]
+
+Example:
+ package main
+
+ import "core:fmt"
+ import "core:encoding/csv"
+ import "core:os"
+
+ // Requires keeping the entire CSV file in memory at once
+ iterate_csv_from_string :: proc(filename: string) {
+ r: csv.Reader
+ r.trim_leading_space = true
+ r.reuse_record = true // Without it you have to delete(record)
+ r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+ defer csv.reader_destroy(&r)
+
+ csv_data, ok := os.read_entire_file(filename)
+ if ok {
+ csv.reader_init_with_string(&r, string(csv_data))
+ } else {
+ fmt.printfln("Unable to open file: %v", filename)
+ return
+ }
+ defer delete(csv_data)
+
+ for r, i, err in csv.iterator_next(&r) {
+ if err != nil { /* Do something with error */ }
+ for f, j in r {
+ fmt.printfln("Record %v, field %v: %q", i, j, f)
+ }
+ }
+ }
+
+ // Reads the CSV as it's processed (with a small buffer)
+ iterate_csv_from_stream :: proc(filename: string) {
+ fmt.printfln("Hellope from %v", filename)
+ r: csv.Reader
+ r.trim_leading_space = true
+ r.reuse_record = true // Without it you have to delete(record)
+ r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+ defer csv.reader_destroy(&r)
+
+ handle, err := os.open(filename)
+ if err != nil {
+ fmt.eprintfln("Error opening file: %v", filename)
+ return
+ }
+ defer os.close(handle)
+ csv.reader_init(&r, os.stream_from_handle(handle))
+
+ for r, i in csv.iterator_next(&r) {
+ for f, j in r {
+ fmt.printfln("Record %v, field %v: %q", i, j, f)
+ }
+ }
+ fmt.printfln("Error: %v", csv.iterator_last_error(r))
+ }
+
+ // Read all records at once
+ read_csv_from_string :: proc(filename: string) {
+ r: csv.Reader
+ r.trim_leading_space = true
+ r.reuse_record = true // Without it you have to delete(record)
+ r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+ defer csv.reader_destroy(&r)
+
+ csv_data, ok := os.read_entire_file(filename)
+ if ok {
+ csv.reader_init_with_string(&r, string(csv_data))
+ } else {
+ fmt.printfln("Unable to open file: %v", filename)
+ return
+ }
+ defer delete(csv_data)
+
+ records, err := csv.read_all(&r)
+ if err != nil { /* Do something with CSV parse error */ }
+
+ defer {
+ for rec in records {
+ delete(rec)
+ }
+ delete(records)
+ }
+
+ for r, i in records {
+ for f, j in r {
+ fmt.printfln("Record %v, field %v: %q", i, j, f)
+ }
+ }
+ }
+*/
+package encoding_csv
diff --git a/core/encoding/csv/example.odin b/core/encoding/csv/example.odin
deleted file mode 100644
index f7c368636..000000000
--- a/core/encoding/csv/example.odin
+++ /dev/null
@@ -1,90 +0,0 @@
-//+build ignore
-package encoding_csv
-
-import "core:fmt"
-import "core:encoding/csv"
-import "core:os"
-
-// Requires keeping the entire CSV file in memory at once
-iterate_csv_from_string :: proc(filename: string) {
- r: csv.Reader
- r.trim_leading_space = true
- r.reuse_record = true // Without it you have to delete(record)
- r.reuse_record_buffer = true // Without it you have to each of the fields within it
- defer csv.reader_destroy(&r)
-
- csv_data, ok := os.read_entire_file(filename)
- if ok {
- csv.reader_init_with_string(&r, string(csv_data))
- } else {
- fmt.printfln("Unable to open file: %v", filename)
- return
- }
- defer delete(csv_data)
-
- for r, i, err in csv.iterator_next(&r) {
- if err != nil { /* Do something with error */ }
- for f, j in r {
- fmt.printfln("Record %v, field %v: %q", i, j, f)
- }
- }
-}
-
-// Reads the CSV as it's processed (with a small buffer)
-iterate_csv_from_stream :: proc(filename: string) {
- fmt.printfln("Hellope from %v", filename)
- r: csv.Reader
- r.trim_leading_space = true
- r.reuse_record = true // Without it you have to delete(record)
- r.reuse_record_buffer = true // Without it you have to each of the fields within it
- defer csv.reader_destroy(&r)
-
- handle, err := os.open(filename)
- if err != nil {
- fmt.eprintfln("Error opening file: %v", filename)
- return
- }
- defer os.close(handle)
- csv.reader_init(&r, os.stream_from_handle(handle))
-
- for r, i in csv.iterator_next(&r) {
- for f, j in r {
- fmt.printfln("Record %v, field %v: %q", i, j, f)
- }
- }
- fmt.printfln("Error: %v", csv.iterator_last_error(r))
-}
-
-// Read all records at once
-read_csv_from_string :: proc(filename: string) {
- r: csv.Reader
- r.trim_leading_space = true
- r.reuse_record = true // Without it you have to delete(record)
- r.reuse_record_buffer = true // Without it you have to each of the fields within it
- defer csv.reader_destroy(&r)
-
- csv_data, ok := os.read_entire_file(filename)
- if ok {
- csv.reader_init_with_string(&r, string(csv_data))
- } else {
- fmt.printfln("Unable to open file: %v", filename)
- return
- }
- defer delete(csv_data)
-
- records, err := csv.read_all(&r)
- if err != nil { /* Do something with CSV parse error */ }
-
- defer {
- for rec in records {
- delete(rec)
- }
- delete(records)
- }
-
- for r, i in records {
- for f, j in r {
- fmt.printfln("Record %v, field %v: %q", i, j, f)
- }
- }
-} \ No newline at end of file
diff --git a/core/encoding/csv/reader.odin b/core/encoding/csv/reader.odin
index ebc7b39a0..5348624d5 100644
--- a/core/encoding/csv/reader.odin
+++ b/core/encoding/csv/reader.odin
@@ -1,5 +1,5 @@
// package csv reads and writes comma-separated values (CSV) files.
-// This package supports the format described in RFC 4180 <https://tools.ietf.org/html/rfc4180.html>
+// This package supports the format described in [[ RFC 4180; https://tools.ietf.org/html/rfc4180.html ]]
package encoding_csv
import "core:bufio"
@@ -484,4 +484,4 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
r.fields_per_record = len(dst)
}
return dst[:], err
-} \ No newline at end of file
+}
diff --git a/core/encoding/endian/doc.odin b/core/encoding/endian/doc.odin
index 8ebefd0a4..0b43e3097 100644
--- a/core/encoding/endian/doc.odin
+++ b/core/encoding/endian/doc.odin
@@ -2,22 +2,23 @@
Package endian implements a simple translation between bytes and numbers with
specific endian encodings.
- buf: [100]u8
- put_u16(buf[:], .Little, 16) or_return
+Example:
+ buf: [100]u8
+ put_u16(buf[:], .Little, 16) or_return
- You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
- put_u16(buf[:], 16) or_return
+ // You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
+ put_u16(buf[:], 16) or_return
- The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
- each of their fields, or use a header field which specifies it for the entire file.
+ // The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
+ // each of their fields, or use a header field which specifies it for the entire file.
- e.g. a file which specifies it at the top for all fields could do this:
- file_order := .Little if buf[0] == 0 else .Big
- field := get_u16(buf[1:], file_order) or_return
+ // e.g. a file which specifies it at the top for all fields could do this:
+ file_order := .Little if buf[0] == 0 else .Big
+ field := get_u16(buf[1:], file_order) or_return
- If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
- be that your future self or someone else.
+ // If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
+ // be that your future self or someone else.
- field := get_u16(buf[:], .Big) or_return
+ field := get_u16(buf[:], .Big) or_return
*/
package encoding_endian
diff --git a/core/encoding/entity/entity.odin b/core/encoding/entity/entity.odin
index f5208ad6f..d2f1d46b2 100644
--- a/core/encoding/entity/entity.odin
+++ b/core/encoding/entity/entity.odin
@@ -1,23 +1,25 @@
-package encoding_unicode_entity
/*
- A unicode entity encoder/decoder
-
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
Made available under Odin's BSD-3 license.
+ List of contributors:
+ Jeroen van Rijn: Initial implementation.
+*/
+
+/*
+ A unicode entity encoder/decoder.
+
This code has several procedures to map unicode runes to/from different textual encodings.
- SGML/XML/HTML entity
- -- &#<decimal>;
- -- &#x<hexadecimal>;
- -- &<entity name>; (If the lookup tables are compiled in).
- Reference: https://www.w3.org/2003/entities/2007xml/unicode.xml
+ - &#<decimal>;
+ - &#x<hexadecimal>;
+ - &<entity name>; (If the lookup tables are compiled in).
+ Reference: [[ https://www.w3.org/2003/entities/2007xml/unicode.xml ]]
- URL encode / decode %hex entity
- Reference: https://datatracker.ietf.org/doc/html/rfc3986/#section-2.1
-
- List of contributors:
- Jeroen van Rijn: Initial implementation.
+ Reference: [[ https://datatracker.ietf.org/doc/html/rfc3986/#section-2.1 ]]
*/
+package encoding_unicode_entity
import "core:unicode/utf8"
import "core:unicode"
@@ -353,4 +355,4 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
}
return false, .None
-} \ No newline at end of file
+}
diff --git a/core/encoding/hxa/doc.odin b/core/encoding/hxa/doc.odin
index 230d6ea66..b696bef7e 100644
--- a/core/encoding/hxa/doc.odin
+++ b/core/encoding/hxa/doc.odin
@@ -1,83 +1,89 @@
-// Implementation of the HxA 3D asset format
-// HxA is a interchangeable graphics asset format.
-// Designed by Eskil Steenberg. @quelsolaar / eskil 'at' obsession 'dot' se / www.quelsolaar.com
-//
-// Author of this Odin package: Ginger Bill
-//
-// Following comment is copied from the original C-implementation
-// ---------
-// -Does the world need another Graphics file format?
-// Unfortunately, Yes. All existing formats are either too large and complicated to be implemented from
-// scratch, or don't have some basic features needed in modern computer graphics.
-// -Who is this format for?
-// For people who want a capable open Graphics format that can be implemented from scratch in
-// a few hours. It is ideal for graphics researchers, game developers or other people who
-// wants to build custom graphics pipelines. Given how easy it is to parse and write, it
-// should be easy to write utilities that process assets to preform tasks like: generating
-// normals, light-maps, tangent spaces, Error detection, GPU optimization, LOD generation,
-// and UV mapping.
-// -Why store images in the format when there are so many good image formats already?
-// Yes there are, but only for 2D RGB/RGBA images. A lot of computer graphics rendering rely
-// on 1D, 3D, cube, multilayer, multi channel, floating point bitmap buffers. There almost no
-// formats for this kind of data. Also 3D files that reference separate image files rely on
-// file paths, and this often creates issues when the assets are moved. By including the
-// texture data in the files directly the assets become self contained.
-// -Why doesn't the format support <insert whatever>?
-// Because the entire point is to make a format that can be implemented. Features like NURBSs,
-// Construction history, or BSP trees would make the format too large to serve its purpose.
-// The facilities of the formats to store meta data should make the format flexible enough
-// for most uses. Adding HxA support should be something anyone can do in a days work.
-//
-// Structure:
-// ----------
-// HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
-// a few basic structures, and depending on how they are used they mean different things. This means
-// that you can implement a tool that loads the entire file, modifies the parts it cares about and
-// leaves the rest intact. It is also possible to write a tool that makes all data in the file
-// editable without the need to understand its use. It is also possible for anyone to use the format
-// to store data axillary data. Anyone who wants to store data not covered by a convention can submit
-// a convention to extend the format. There should never be a convention for storing the same data in
-// two differed ways.
-// The data is story in a number of nodes that are stored in an array. Each node stores an array of
-// meta data. Meta data can describe anything you want, and a lot of conventions will use meta data
-// to store additional information, for things like transforms, lights, shaders and animation.
-// Data for Vertices, Corners, Faces, and Pixels are stored in named layer stacks. Each stack consists
-// of a number of named layers. All layers in the stack have the same number of elements. Each layer
-// describes one property of the primitive. Each layer can have multiple channels and each layer can
-// store data of a different type.
-//
-// HaX stores 3 kinds of nodes
-// - Pixel data.
-// - Polygon geometry data.
-// - Meta data only.
-//
-// Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
-// Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
-// layers to store things like color. The length of the layer stack is determined by the type and
-// dimensions stored in the
-//
-// Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
-// vertex data stores things like verities, blend shapes, weight maps, and vertex colors. The first
-// layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
-// of the vertices. The corner stack describes data per corner or edge of the polygons. It can be used
-// for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
-// integer layer named "index" describing the vertices used to form polygons. The last value in each
-// polygon has a negative - 1 index to indicate the end of the polygon.
-//
-// Example:
-// A quad and a tri with the vertex index:
-// [0, 1, 2, 3] [1, 4, 2]
-// is stored:
-// [0, 1, 2, -4, 1, 4, -3]
-// The face stack stores values per face. the length of the face stack has to match the number of
-// negative values in the index layer in the corner stack. The face stack can be used to store things
-// like material index.
-//
-// Storage
-// -------
-// All data is stored in little endian byte order with no padding. The layout mirrors the structs
-// defined below with a few exceptions. All names are stored as a 8-bit unsigned integer indicating
-// the length of the name followed by that many characters. Termination is not stored in the file.
-// Text strings stored in meta data are stored the same way as names, but instead of a 8-bit unsigned
-// integer a 32-bit unsigned integer is used.
-package encoding_hxa \ No newline at end of file
+/*
+Implementation of the HxA 3D asset format
+HxA is an interchangeable graphics asset format.
+Designed by Eskil Steenberg. @quelsolaar / eskil 'at' obsession 'dot' se / www.quelsolaar.com
+
+Author of this Odin package: Ginger Bill
+
+Following comment is copied from the original C-implementation
+---------
+- Does the world need another Graphics file format?
+Unfortunately, Yes. All existing formats are either too large and complicated to be implemented from
+scratch, or don't have some basic features needed in modern computer graphics.
+
+- Who is this format for?
+For people who want a capable open Graphics format that can be implemented from scratch in
+a few hours. It is ideal for graphics researchers, game developers or other people who
+wants to build custom graphics pipelines. Given how easy it is to parse and write, it
+should be easy to write utilities that process assets to perform tasks like: generating
+normals, light-maps, tangent spaces, Error detection, GPU optimization, LOD generation,
+and UV mapping.
+
+- Why store images in the format when there are so many good image formats already?
+Yes there are, but only for 2D RGB/RGBA images. A lot of computer graphics rendering relies
+on 1D, 3D, cube, multilayer, multi channel, floating point bitmap buffers. There are almost no
+formats for this kind of data. Also 3D files that reference separate image files rely on
+file paths, and this often creates issues when the assets are moved. By including the
+texture data in the files directly the assets become self contained.
+
+- Why doesn't the format support <insert whatever>?
+Because the entire point is to make a format that can be implemented. Features like NURBSs,
+Construction history, or BSP trees would make the format too large to serve its purpose.
+The facilities of the formats to store meta data should make the format flexible enough
+for most uses. Adding HxA support should be something anyone can do in a days work.
+
+Structure:
+----------
+HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
+a few basic structures, and depending on how they are used they mean different things. This means
+that you can implement a tool that loads the entire file, modifies the parts it cares about and
+leaves the rest intact. It is also possible to write a tool that makes all data in the file
+editable without the need to understand its use. It is also possible for anyone to use the format
+to store auxiliary data. Anyone who wants to store data not covered by a convention can submit
+a convention to extend the format. There should never be a convention for storing the same data in
+two different ways.
+
+The data is stored in a number of nodes that are stored in an array. Each node stores an array of
+meta data. Meta data can describe anything you want, and a lot of conventions will use meta data
+to store additional information, for things like transforms, lights, shaders and animation.
+Data for Vertices, Corners, Faces, and Pixels are stored in named layer stacks. Each stack consists
+of a number of named layers. All layers in the stack have the same number of elements. Each layer
+describes one property of the primitive. Each layer can have multiple channels and each layer can
+store data of a different type.
+
+HxA stores 3 kinds of nodes
+- Pixel data.
+- Polygon geometry data.
+- Meta data only.
+
+Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
+Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
+layers to store things like color.
+The length of the layer stack is determined by the type and dimensions stored in the node.
+Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
+vertex data stores things like vertices, blend shapes, weight maps, and vertex colors. The first
+layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
+of the vertices. The corner stack describes data per corner or edge of the polygons. It can be used
+for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
+integer layer named "index" describing the vertices used to form polygons. The last value in each
+polygon has a negative - 1 index to indicate the end of the polygon.
+
+For Example:
+ A quad and a tri with the vertex index:
+ [0, 1, 2, 3] [1, 4, 2]
+ is stored:
+ [0, 1, 2, -4, 1, 4, -3]
+
+The face stack stores values per face. The length of the face stack has to match the number of
+negative values in the index layer in the corner stack. The face stack can be used to store things
+like material index.
+
+Storage:
+-------
+All data is stored in little endian byte order with no padding. The layout mirrors the structs
+defined below with a few exceptions. All names are stored as a 8-bit unsigned integer indicating
+the length of the name followed by that many characters. Termination is not stored in the file.
+Text strings stored in meta data are stored the same way as names, but instead of a 8-bit unsigned
+integer a 32-bit unsigned integer is used.
+*/
+package encoding_hxa
diff --git a/core/encoding/uuid/doc.odin b/core/encoding/uuid/doc.odin
index 6fa375b72..f910c33d8 100644
--- a/core/encoding/uuid/doc.odin
+++ b/core/encoding/uuid/doc.odin
@@ -21,8 +21,9 @@ cryptographically-secure, per RFC 9562's suggestion.
- Version 6 without either a clock or node argument.
- Version 7 in all cases.
-Here's an example of how to set up one:
-
+Example:
+ package main
+
import "core:crypto"
import "core:encoding/uuid"
@@ -40,7 +41,7 @@ Here's an example of how to set up one:
For more information on the specifications, see here:
-- https://www.rfc-editor.org/rfc/rfc4122.html
-- https://www.rfc-editor.org/rfc/rfc9562.html
+- [[ https://www.rfc-editor.org/rfc/rfc4122.html ]]
+- [[ https://www.rfc-editor.org/rfc/rfc9562.html ]]
*/
package uuid
diff --git a/core/encoding/varint/doc.odin b/core/encoding/varint/doc.odin
index c0a09873c..a00cfed15 100644
--- a/core/encoding/varint/doc.odin
+++ b/core/encoding/varint/doc.odin
@@ -1,10 +1,11 @@
/*
- Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
+Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
- Author of this Odin package: Jeroen van Rijn
+Author of this Odin package: Jeroen van Rijn
+
+Example:
+ package main
- Example:
- ```odin
import "core:encoding/varint"
import "core:fmt"
@@ -22,7 +23,5 @@
assert(decoded_val == value && decode_size == encode_size && decode_err == .None)
fmt.printf("Decoded as %v, using %v byte%v\n", decoded_val, decode_size, "" if decode_size == 1 else "s")
}
- ```
-
*/
-package encoding_varint \ No newline at end of file
+package encoding_varint
diff --git a/core/encoding/varint/leb128.odin b/core/encoding/varint/leb128.odin
index ca6513f04..606c57ba7 100644
--- a/core/encoding/varint/leb128.odin
+++ b/core/encoding/varint/leb128.odin
@@ -6,8 +6,6 @@
Jeroen van Rijn: Initial implementation.
*/
-// package varint implements variable length integer encoding and decoding using
-// the LEB128 format as used by DWARF debug info, Android .dex and other file formats.
package encoding_varint
// In theory we should use the bigint package. In practice, varints bigger than this indicate a corrupted file.
@@ -160,4 +158,4 @@ encode_ileb128 :: proc(buf: []u8, val: i128) -> (size: int, err: Error) {
buf[size - 1] = u8(low)
}
return
-} \ No newline at end of file
+}
diff --git a/core/encoding/xml/doc.odin b/core/encoding/xml/doc.odin
new file mode 100644
index 000000000..10d9f78be
--- /dev/null
+++ b/core/encoding/xml/doc.odin
@@ -0,0 +1,23 @@
+/*
+XML 1.0 / 1.1 parser
+
+A from-scratch XML implementation, loosely modelled on the [[ spec; https://www.w3.org/TR/2006/REC-xml11-20060816 ]].
+
+Features:
+- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents in common current usage.
+- Simple to understand and use. Small.
+
+Caveats:
+- We do NOT support HTML in this package, as that may or may not be valid XML.
+ If it works, great. If it doesn't, that's not considered a bug.
+
+- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it to UTF-8 first. Also, our condolences.
+- <[!ELEMENT and <[!ATTLIST are not supported, and will be either ignored or return an error depending on the parser options.
+
+MAYBE:
+- XML writer?
+- Serialize/deserialize Odin types?
+
+For a full example, see: [[ core/encoding/xml/example; https://github.com/odin-lang/Odin/tree/master/core/encoding/xml/example ]]
+*/
+package encoding_xml
diff --git a/core/encoding/xml/xml_reader.odin b/core/encoding/xml/xml_reader.odin
index b9656900f..b8c8b13a4 100644
--- a/core/encoding/xml/xml_reader.odin
+++ b/core/encoding/xml/xml_reader.odin
@@ -1,29 +1,11 @@
/*
- XML 1.0 / 1.1 parser
+ 2021-2022 Jeroen van Rijn <nom@duclavier.com>.
+ available under Odin's BSD-3 license.
- 2021-2022 Jeroen van Rijn <nom@duclavier.com>.
- available under Odin's BSD-3 license.
-
- from-scratch XML implementation, loosely modelled on the [spec](https://www.w3.org/TR/2006/REC-xml11-20060816).
-
-Features:
-- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents in common current usage.
-- Simple to understand and use. Small.
-
-Caveats:
-- We do NOT support HTML in this package, as that may or may not be valid XML.
- If it works, great. If it doesn't, that's not considered a bug.
-
-- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it to UTF-8 first. Also, our condolences.
-- <[!ELEMENT and <[!ATTLIST are not supported, and will be either ignored or return an error depending on the parser options.
-
-MAYBE:
-- XML writer?
-- Serialize/deserialize Odin types?
-
-List of contributors:
-- Jeroen van Rijn: Initial implementation.
+ List of contributors:
+ - Jeroen van Rijn: Initial implementation.
*/
+
package encoding_xml
// An XML 1.0 / 1.1 parser