author    gingerBill <bill@gingerbill.org>  2021-08-31 22:21:13 +0100
committer gingerBill <bill@gingerbill.org>  2021-08-31 22:21:13 +0100
commit    251da264ed6e0f039931683c7b0d4b97e88c8d99 (patch)
tree      c7a9a088477d2452c2cf850458c62d994a211df6 /core/encoding
parent    b176af27427a6c39448a71a8023e4a9877f0a51c (diff)
Remove unneeded semicolons from the core library
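Odin made statement-terminating semicolons optional, so this commit is purely mechanical: each hunk below deletes a trailing ';' and leaves the code otherwise untouched. Every change has the same shape as this pair from base32.odin:

-PADDING :: '=';
+PADDING :: '='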
Diffstat (limited to 'core/encoding')
 core/encoding/base32/base32.odin  | 130
 core/encoding/base64/base64.odin  |  70
 core/encoding/csv/reader.odin     | 278
 core/encoding/csv/writer.odin     |  64
 core/encoding/hxa/hxa.odin        |  92
 core/encoding/hxa/read.odin       | 198
 core/encoding/hxa/write.odin      | 192
 core/encoding/json/marshal.odin   | 310
 core/encoding/json/parser.odin    | 406
 core/encoding/json/tokenizer.odin | 304
 core/encoding/json/types.odin     |  26
 core/encoding/json/validator.odin |  74
12 files changed, 1072 insertions, 1072 deletions
diff --git a/core/encoding/base32/base32.odin b/core/encoding/base32/base32.odin
index 4e0e948e3..7ab35afd0 100644
--- a/core/encoding/base32/base32.odin
+++ b/core/encoding/base32/base32.odin
@@ -12,9 +12,9 @@ ENC_TABLE := [32]byte {
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z', '2', '3', '4', '5', '6', '7',
-};
+}
-PADDING :: '=';
+PADDING :: '='
DEC_TABLE := [?]u8 {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -31,118 +31,118 @@ DEC_TABLE := [?]u8 {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
+}
encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> string {
- out_length := (len(data) + 4) / 5 * 8;
- out := make([]byte, out_length);
- _encode(out, data);
- return string(out);
+ out_length := (len(data) + 4) / 5 * 8
+ out := make([]byte, out_length)
+ _encode(out, data)
+ return string(out)
}
@private
_encode :: proc(out, data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) {
- out := out;
- data := data;
+ out := out
+ data := data
for len(data) > 0 {
- carry: byte;
+ carry: byte
switch len(data) {
case:
- out[7] = ENC_TABLE[data[4] & 0x1f];
- carry = data[4] >> 5;
- fallthrough;
+ out[7] = ENC_TABLE[data[4] & 0x1f]
+ carry = data[4] >> 5
+ fallthrough
case 4:
- out[6] = ENC_TABLE[carry | (data[3] << 3) & 0x1f];
- out[5] = ENC_TABLE[(data[3] >> 2) & 0x1f];
- carry = data[3] >> 7;
- fallthrough;
+ out[6] = ENC_TABLE[carry | (data[3] << 3) & 0x1f]
+ out[5] = ENC_TABLE[(data[3] >> 2) & 0x1f]
+ carry = data[3] >> 7
+ fallthrough
case 3:
- out[4] = ENC_TABLE[carry | (data[2] << 1) & 0x1f];
- carry = (data[2] >> 4) & 0x1f;
- fallthrough;
+ out[4] = ENC_TABLE[carry | (data[2] << 1) & 0x1f]
+ carry = (data[2] >> 4) & 0x1f
+ fallthrough
case 2:
- out[3] = ENC_TABLE[carry | (data[1] << 4) & 0x1f];
- out[2] = ENC_TABLE[(data[1] >> 1) & 0x1f];
- carry = (data[1] >> 6) & 0x1f;
- fallthrough;
+ out[3] = ENC_TABLE[carry | (data[1] << 4) & 0x1f]
+ out[2] = ENC_TABLE[(data[1] >> 1) & 0x1f]
+ carry = (data[1] >> 6) & 0x1f
+ fallthrough
case 1:
- out[1] = ENC_TABLE[carry | (data[0] << 2) & 0x1f];
- out[0] = ENC_TABLE[data[0] >> 3];
+ out[1] = ENC_TABLE[carry | (data[0] << 2) & 0x1f]
+ out[0] = ENC_TABLE[data[0] >> 3]
}
if len(data) < 5 {
- out[7] = byte(PADDING);
+ out[7] = byte(PADDING)
if len(data) < 4 {
- out[6] = byte(PADDING);
- out[5] = byte(PADDING);
+ out[6] = byte(PADDING)
+ out[5] = byte(PADDING)
if len(data) < 3 {
- out[4] = byte(PADDING);
+ out[4] = byte(PADDING)
if len(data) < 2 {
- out[3] = byte(PADDING);
- out[2] = byte(PADDING);
+ out[3] = byte(PADDING)
+ out[2] = byte(PADDING)
}
}
}
- break;
+ break
}
- data = data[5:];
- out = out[8:];
+ data = data[5:]
+ out = out[8:]
}
}
decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check {
if len(data) == 0 {
- return nil;
+ return nil
}
- outi := 0;
- data := data;
+ outi := 0
+ data := data
- out := make([]byte, len(data) / 8 * 5, allocator);
- end := false;
+ out := make([]byte, len(data) / 8 * 5, allocator)
+ end := false
for len(data) > 0 && !end {
- dbuf : [8]byte;
- dlen := 8;
+ dbuf : [8]byte
+ dlen := 8
for j := 0; j < 8; {
if len(data) == 0 {
- dlen, end = j, true;
- break;
+ dlen, end = j, true
+ break
}
- input := data[0];
- data = data[1:];
+ input := data[0]
+ data = data[1:]
if input == byte(PADDING) && j >= 2 && len(data) < 8 {
- assert(!(len(data) + j < 8 - 1), "Corrupted input");
+ assert(!(len(data) + j < 8 - 1), "Corrupted input")
for k := 0; k < 8-1-j; k +=1 {
- assert(len(data) < k || data[k] == byte(PADDING), "Corrupted input");
+ assert(len(data) < k || data[k] == byte(PADDING), "Corrupted input")
}
- dlen, end = j, true;
- assert(dlen != 1 && dlen != 3 && dlen != 6, "Corrupted input");
- break;
+ dlen, end = j, true
+ assert(dlen != 1 && dlen != 3 && dlen != 6, "Corrupted input")
+ break
}
- dbuf[j] = DEC_TABLE[input];
- assert(dbuf[j] != 0xff, "Corrupted input");
- j += 1;
+ dbuf[j] = DEC_TABLE[input]
+ assert(dbuf[j] != 0xff, "Corrupted input")
+ j += 1
}
switch dlen {
case 8:
- out[outi + 4] = dbuf[6] << 5 | dbuf[7];
- fallthrough;
+ out[outi + 4] = dbuf[6] << 5 | dbuf[7]
+ fallthrough
case 7:
- out[outi + 3] = dbuf[4] << 7 | dbuf[5] << 2 | dbuf[6] >> 3;
- fallthrough;
+ out[outi + 3] = dbuf[4] << 7 | dbuf[5] << 2 | dbuf[6] >> 3
+ fallthrough
case 5:
- out[outi + 2] = dbuf[3] << 4 | dbuf[4] >> 1;
- fallthrough;
+ out[outi + 2] = dbuf[3] << 4 | dbuf[4] >> 1
+ fallthrough
case 4:
- out[outi + 1] = dbuf[1] << 6 | dbuf[2] << 1 | dbuf[3] >> 4;
- fallthrough;
+ out[outi + 1] = dbuf[1] << 6 | dbuf[2] << 1 | dbuf[3] >> 4
+ fallthrough
case 2:
- out[outi + 0] = dbuf[0] << 3 | dbuf[1] >> 2;
+ out[outi + 0] = dbuf[0] << 3 | dbuf[1] >> 2
}
- outi += 5;
+ outi += 5
}
- return out;
+ return out
}
diff --git a/core/encoding/base64/base64.odin b/core/encoding/base64/base64.odin
index 7c17a2860..ca509326b 100644
--- a/core/encoding/base64/base64.odin
+++ b/core/encoding/base64/base64.odin
@@ -16,9 +16,9 @@ ENC_TABLE := [64]byte {
'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9', '+', '/',
-};
+}
-PADDING :: '=';
+PADDING :: '='
DEC_TABLE := [128]int {
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -37,61 +37,61 @@ DEC_TABLE := [128]int {
33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, -1, -1, -1, -1, -1,
-};
+}
encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> string #no_bounds_check {
- length := len(data);
+ length := len(data)
if length == 0 {
- return "";
+ return ""
}
- out_length := ((4 * length / 3) + 3) &~ 3;
- out := make([]byte, out_length, allocator);
+ out_length := ((4 * length / 3) + 3) &~ 3
+ out := make([]byte, out_length, allocator)
- c0, c1, c2, block: int;
+ c0, c1, c2, block: int
for i, d := 0, 0; i < length; i, d = i + 3, d + 4 {
- c0, c1, c2 = int(data[i]), -1, -1;
+ c0, c1, c2 = int(data[i]), -1, -1
if i + 1 < length { c1 = int(data[i + 1]); }
if i + 2 < length { c2 = int(data[i + 2]); }
- block = (c0 << 16) | (max(c1, 0) << 8) | max(c2, 0);
+ block = (c0 << 16) | (max(c1, 0) << 8) | max(c2, 0)
- out[d] = ENC_TBL[block >> 18 & 63];
- out[d + 1] = ENC_TBL[block >> 12 & 63];
- out[d + 2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63];
- out[d + 3] = c2 == -1 ? PADDING : ENC_TBL[block & 63];
+ out[d] = ENC_TBL[block >> 18 & 63]
+ out[d + 1] = ENC_TBL[block >> 12 & 63]
+ out[d + 2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63]
+ out[d + 3] = c2 == -1 ? PADDING : ENC_TBL[block & 63]
}
- return string(out);
+ return string(out)
}
decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check {
- length := len(data);
+ length := len(data)
if length == 0 {
- return nil;
+ return nil
}
- pad_count := data[length - 1] == PADDING ? (data[length - 2] == PADDING ? 2 : 1) : 0;
- out_length := ((length * 6) >> 3) - pad_count;
- out := make([]byte, out_length, allocator);
+ pad_count := data[length - 1] == PADDING ? (data[length - 2] == PADDING ? 2 : 1) : 0
+ out_length := ((length * 6) >> 3) - pad_count
+ out := make([]byte, out_length, allocator)
- c0, c1, c2, c3: int;
- b0, b1, b2: int;
+ c0, c1, c2, c3: int
+ b0, b1, b2: int
for i, j := 0, 0; i < length; i, j = i + 4, j + 3 {
- c0 = DEC_TBL[data[i]];
- c1 = DEC_TBL[data[i + 1]];
- c2 = DEC_TBL[data[i + 2]];
- c3 = DEC_TBL[data[i + 3]];
-
- b0 = (c0 << 2) | (c1 >> 4);
- b1 = (c1 << 4) | (c2 >> 2);
- b2 = (c2 << 6) | c3;
-
- out[j] = byte(b0);
- out[j + 1] = byte(b1);
- out[j + 2] = byte(b2);
+ c0 = DEC_TBL[data[i]]
+ c1 = DEC_TBL[data[i + 1]]
+ c2 = DEC_TBL[data[i + 2]]
+ c3 = DEC_TBL[data[i + 3]]
+
+ b0 = (c0 << 2) | (c1 >> 4)
+ b1 = (c1 << 4) | (c2 >> 2)
+ b2 = (c2 << 6) | c3
+
+ out[j] = byte(b0)
+ out[j + 1] = byte(b1)
+ out[j + 2] = byte(b2)
}
- return out;
+ return out
}
diff --git a/core/encoding/csv/reader.odin b/core/encoding/csv/reader.odin
index 4c28ea9f3..aecb73d7b 100644
--- a/core/encoding/csv/reader.odin
+++ b/core/encoding/csv/reader.odin
@@ -68,7 +68,7 @@ reader_error_kind_string := [Reader_Error_Kind]string{
.Quote = "extra or missing \" in quoted field",
.Field_Count = "wrong field count",
.Invalid_Delim = "invalid delimiter",
-};
+}
Reader_Error :: struct {
kind: Reader_Error_Kind,
@@ -83,35 +83,35 @@ Error :: union {
io.Error,
}
-DEFAULT_RECORD_BUFFER_CAPACITY :: 256;
+DEFAULT_RECORD_BUFFER_CAPACITY :: 256
// reader_init initializes a new Reader from r
reader_init :: proc(reader: ^Reader, r: io.Reader, buffer_allocator := context.allocator) {
- reader.comma = ',';
-
- context.allocator = buffer_allocator;
- reserve(&reader.record_buffer, DEFAULT_RECORD_BUFFER_CAPACITY);
- reserve(&reader.raw_buffer, 0);
- reserve(&reader.field_indices, 0);
- reserve(&reader.last_record, 0);
- bufio.reader_init(&reader.r, r);
+ reader.comma = ','
+
+ context.allocator = buffer_allocator
+ reserve(&reader.record_buffer, DEFAULT_RECORD_BUFFER_CAPACITY)
+ reserve(&reader.raw_buffer, 0)
+ reserve(&reader.field_indices, 0)
+ reserve(&reader.last_record, 0)
+ bufio.reader_init(&reader.r, r)
}
// reader_init_with_string initializes a new Reader from s
reader_init_with_string :: proc(reader: ^Reader, s: string, buffer_allocator := context.allocator) {
- strings.reader_init(&reader.sr, s);
- r, _ := io.to_reader(strings.reader_to_stream(&reader.sr));
- reader_init(reader, r, buffer_allocator);
+ strings.reader_init(&reader.sr, s)
+ r, _ := io.to_reader(strings.reader_to_stream(&reader.sr))
+ reader_init(reader, r, buffer_allocator)
}
// reader_destroy destroys a Reader
reader_destroy :: proc(r: ^Reader) {
- delete(r.raw_buffer);
- delete(r.record_buffer);
- delete(r.field_indices);
- delete(r.last_record);
- bufio.reader_destroy(&r.r);
+ delete(r.raw_buffer)
+ delete(r.record_buffer)
+ delete(r.field_indices)
+ delete(r.last_record)
+ bufio.reader_destroy(&r.r)
}
// read reads a single record (a slice of fields) from r
@@ -119,21 +119,21 @@ reader_destroy :: proc(r: ^Reader) {
// All \r\n sequences are normalized to \n, including multi-line field
read :: proc(r: ^Reader, allocator := context.allocator) -> (record: []string, err: Error) {
if r.reuse_record {
- record, err = _read_record(r, &r.last_record, allocator);
- resize(&r.last_record, len(record));
- copy(r.last_record[:], record);
+ record, err = _read_record(r, &r.last_record, allocator)
+ resize(&r.last_record, len(record))
+ copy(r.last_record[:], record)
} else {
- record, err = _read_record(r, nil, allocator);
+ record, err = _read_record(r, nil, allocator)
}
- return;
+ return
}
// is_io_error checks where an Error is a specific io.Error kind
is_io_error :: proc(err: Error, io_err: io.Error) -> bool {
if v, ok := err.(io.Error); ok {
- return v == io_err;
+ return v == io_err
}
- return false;
+ return false
}
@@ -141,97 +141,97 @@ is_io_error :: proc(err: Error, io_err: io.Error) -> bool {
// Each record is a slice of fields.
// read_all is defined to read until an EOF, and does not treat, and does not treat EOF as an error
read_all :: proc(r: ^Reader, allocator := context.allocator) -> ([][]string, Error) {
- context.allocator = allocator;
- records: [dynamic][]string;
+ context.allocator = allocator
+ records: [dynamic][]string
for {
- record, rerr := _read_record(r, nil, allocator);
+ record, rerr := _read_record(r, nil, allocator)
if is_io_error(rerr, .EOF) {
- return records[:], nil;
+ return records[:], nil
}
if rerr != nil {
- return nil, rerr;
+ return nil, rerr
}
- append(&records, record);
+ append(&records, record)
}
}
// read reads a single record (a slice of fields) from the provided input.
read_from_string :: proc(input: string, record_allocator := context.allocator, buffer_allocator := context.allocator) -> (record: []string, n: int, err: Error) {
- ir: strings.Reader;
- strings.reader_init(&ir, input);
- input_reader, _ := io.to_reader(strings.reader_to_stream(&ir));
-
- r: Reader;
- reader_init(&r, input_reader, buffer_allocator);
- defer reader_destroy(&r);
- record, err = read(&r, record_allocator);
- n = int(r.r.r);
- return;
+ ir: strings.Reader
+ strings.reader_init(&ir, input)
+ input_reader, _ := io.to_reader(strings.reader_to_stream(&ir))
+
+ r: Reader
+ reader_init(&r, input_reader, buffer_allocator)
+ defer reader_destroy(&r)
+ record, err = read(&r, record_allocator)
+ n = int(r.r.r)
+ return
}
// read_all reads all the remaining records from the provided input.
read_all_from_string :: proc(input: string, records_allocator := context.allocator, buffer_allocator := context.allocator) -> ([][]string, Error) {
- ir: strings.Reader;
- strings.reader_init(&ir, input);
- input_reader, _ := io.to_reader(strings.reader_to_stream(&ir));
-
- r: Reader;
- reader_init(&r, input_reader, buffer_allocator);
- defer reader_destroy(&r);
- return read_all(&r, records_allocator);
+ ir: strings.Reader
+ strings.reader_init(&ir, input)
+ input_reader, _ := io.to_reader(strings.reader_to_stream(&ir))
+
+ r: Reader
+ reader_init(&r, input_reader, buffer_allocator)
+ defer reader_destroy(&r)
+ return read_all(&r, records_allocator)
}
@private
is_valid_delim :: proc(r: rune) -> bool {
switch r {
case 0, '"', '\r', '\n', utf8.RUNE_ERROR:
- return false;
+ return false
}
- return utf8.valid_rune(r);
+ return utf8.valid_rune(r)
}
@private
_read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.allocator) -> ([]string, Error) {
read_line :: proc(r: ^Reader) -> ([]byte, io.Error) {
- line, err := bufio.reader_read_slice(&r.r, '\n');
+ line, err := bufio.reader_read_slice(&r.r, '\n')
if err == .Buffer_Full {
- clear(&r.raw_buffer);
- append(&r.raw_buffer, ..line);
+ clear(&r.raw_buffer)
+ append(&r.raw_buffer, ..line)
for err == .Buffer_Full {
- line, err = bufio.reader_read_slice(&r.r, '\n');
- append(&r.raw_buffer, ..line);
+ line, err = bufio.reader_read_slice(&r.r, '\n')
+ append(&r.raw_buffer, ..line)
}
- line = r.raw_buffer[:];
+ line = r.raw_buffer[:]
}
if len(line) > 0 && err == .EOF {
- err = nil;
+ err = nil
if line[len(line)-1] == '\r' {
- line = line[:len(line)-1];
+ line = line[:len(line)-1]
}
}
- r.line_count += 1;
+ r.line_count += 1
// normalize \r\n to \n
- n := len(line);
+ n := len(line)
for n >= 2 && string(line[n-2:]) == "\r\n" {
- line[n-2] = '\n';
- line = line[:n-1];
+ line[n-2] = '\n'
+ line = line[:n-1]
}
- return line, err;
+ return line, err
}
length_newline :: proc(b: []byte) -> int {
if len(b) > 0 && b[len(b)-1] == '\n' {
- return 1;
+ return 1
}
- return 0;
+ return 0
}
next_rune :: proc(b: []byte) -> rune {
- r, _ := utf8.decode_rune(b);
- return r;
+ r, _ := utf8.decode_rune(b)
+ return r
}
if r.comma == r.comment ||
@@ -240,152 +240,152 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
err := Reader_Error{
kind = .Invalid_Delim,
line = r.line_count,
- };
- return nil, err;
+ }
+ return nil, err
}
- line, full_line: []byte;
- err_read: io.Error;
+ line, full_line: []byte
+ err_read: io.Error
for err_read == nil {
- line, err_read = read_line(r);
+ line, err_read = read_line(r)
if r.comment != 0 && next_rune(line) == r.comment {
- line = nil;
- continue;
+ line = nil
+ continue
}
if err_read == nil && len(line) == length_newline(line) {
- line = nil;
- continue;
+ line = nil
+ continue
}
- full_line = line;
- break;
+ full_line = line
+ break
}
if is_io_error(err_read, .EOF) {
- return nil, err_read;
+ return nil, err_read
}
- err: Error;
- quote_len :: len(`"`);
- comma_len := utf8.rune_size(r.comma);
- record_line := r.line_count;
- clear(&r.record_buffer);
- clear(&r.field_indices);
+ err: Error
+ quote_len :: len(`"`)
+ comma_len := utf8.rune_size(r.comma)
+ record_line := r.line_count
+ clear(&r.record_buffer)
+ clear(&r.field_indices)
parse_field: for {
if r.trim_leading_space {
- line = bytes.trim_left_space(line);
+ line = bytes.trim_left_space(line)
}
if len(line) == 0 || line[0] != '"' {
- i := bytes.index_rune(line, r.comma);
- field := line;
+ i := bytes.index_rune(line, r.comma)
+ field := line
if i >= 0 {
- field = field[:i];
+ field = field[:i]
} else {
- field = field[:len(field) - length_newline(field)];
+ field = field[:len(field) - length_newline(field)]
}
if !r.lazy_quotes {
if j := bytes.index_byte(field, '"'); j >= 0 {
- column := utf8.rune_count(full_line[:len(full_line) - len(line[j:])]);
+ column := utf8.rune_count(full_line[:len(full_line) - len(line[j:])])
err = Reader_Error{
kind = .Bare_Quote,
start_line = record_line,
line = r.line_count,
column = column,
- };
- break parse_field;
+ }
+ break parse_field
}
}
- append(&r.record_buffer, ..field);
- append(&r.field_indices, len(r.record_buffer));
+ append(&r.record_buffer, ..field)
+ append(&r.field_indices, len(r.record_buffer))
if i >= 0 {
- line = line[i+comma_len:];
- continue parse_field;
+ line = line[i+comma_len:]
+ continue parse_field
}
- break parse_field;
+ break parse_field
} else {
- line = line[quote_len:];
+ line = line[quote_len:]
for {
- i := bytes.index_byte(line, '"');
+ i := bytes.index_byte(line, '"')
switch {
case i >= 0:
- append(&r.record_buffer, ..line[:i]);
- line = line[i+quote_len:];
+ append(&r.record_buffer, ..line[:i])
+ line = line[i+quote_len:]
switch ch := next_rune(line); {
case ch == '"':
// append quote
- append(&r.record_buffer, '"');
- line = line[quote_len:];
+ append(&r.record_buffer, '"')
+ line = line[quote_len:]
case ch == r.comma:
// end of field
- line = line[comma_len:];
- append(&r.field_indices, len(r.record_buffer));
- continue parse_field;
+ line = line[comma_len:]
+ append(&r.field_indices, len(r.record_buffer))
+ continue parse_field
case length_newline(line) == len(line):
// end of line
- append(&r.field_indices, len(r.record_buffer));
- break parse_field;
+ append(&r.field_indices, len(r.record_buffer))
+ break parse_field
case r.lazy_quotes:
// bare quote
- append(&r.record_buffer, '"');
+ append(&r.record_buffer, '"')
case:
// invalid non-escaped quote
- column := utf8.rune_count(full_line[:len(full_line) - len(line) - quote_len]);
+ column := utf8.rune_count(full_line[:len(full_line) - len(line) - quote_len])
err = Reader_Error{
kind = .Quote,
start_line = record_line,
line = r.line_count,
column = column,
- };
- break parse_field;
+ }
+ break parse_field
}
case len(line) > 0:
- append(&r.record_buffer, ..line);
+ append(&r.record_buffer, ..line)
if err_read != nil {
- break parse_field;
+ break parse_field
}
- line, err_read = read_line(r);
+ line, err_read = read_line(r)
if is_io_error(err_read, .EOF) {
- err_read = nil;
+ err_read = nil
}
- full_line = line;
+ full_line = line
case:
if !r.lazy_quotes && err_read == nil {
- column := utf8.rune_count(full_line);
+ column := utf8.rune_count(full_line)
err = Reader_Error{
kind = .Quote,
start_line = record_line,
line = r.line_count,
column = column,
- };
- break parse_field;
+ }
+ break parse_field
}
- append(&r.field_indices, len(r.record_buffer));
- break parse_field;
+ append(&r.field_indices, len(r.record_buffer))
+ break parse_field
}
}
}
}
if err == nil && err_read != nil {
- err = err_read;
+ err = err_read
}
- context.allocator = allocator;
- dst := dst;
- str := string(r.record_buffer[:]);
+ context.allocator = allocator
+ dst := dst
+ str := string(r.record_buffer[:])
if dst == nil {
// use local variable
- dst = &([dynamic]string){};
+ dst = &([dynamic]string){}
}
- clear(dst);
- resize(dst, len(r.field_indices));
- pre_idx: int;
+ clear(dst)
+ resize(dst, len(r.field_indices))
+ pre_idx: int
for idx, i in r.field_indices {
- field := str[pre_idx:idx];
+ field := str[pre_idx:idx]
if !r.reuse_record_buffer {
- field = strings.clone(field);
+ field = strings.clone(field)
}
- dst[i] = field;
- pre_idx = idx;
+ dst[i] = field
+ pre_idx = idx
}
if r.fields_per_record > 0 {
@@ -396,11 +396,11 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
line = r.line_count,
expected = r.fields_per_record,
got = len(dst),
- };
+ }
}
} else if r.fields_per_record == 0 {
- r.fields_per_record = len(dst);
+ r.fields_per_record = len(dst)
}
- return dst[:], err;
+ return dst[:], err
}
diff --git a/core/encoding/csv/writer.odin b/core/encoding/csv/writer.odin
index 2176e6781..3a0038916 100644
--- a/core/encoding/csv/writer.odin
+++ b/core/encoding/csv/writer.odin
@@ -17,8 +17,8 @@ Writer :: struct {
// writer_init initializes a Writer that writes to w
writer_init :: proc(writer: ^Writer, w: io.Writer) {
- writer.comma = ',';
- writer.w = w;
+ writer.comma = ','
+ writer.w = w
}
// write writes a single CSV records to w with any of the necessarily quoting.
@@ -26,101 +26,101 @@ writer_init :: proc(writer: ^Writer, w: io.Writer) {
//
// If the underlying io.Writer requires flushing, make sure to call io.flush
write :: proc(w: ^Writer, record: []string) -> io.Error {
- CHAR_SET :: "\n\r\"";
+ CHAR_SET :: "\n\r\""
field_needs_quoting :: proc(w: ^Writer, field: string) -> bool {
switch {
case field == "":
// No need to quote empty strings
- return false;
+ return false
case field == `\.`:
// Postgres is weird
- return true;
+ return true
case w.comma < utf8.RUNE_SELF:
// ASCII optimization
for i in 0..<len(field) {
switch field[i] {
case '\n', '\r', '"', byte(w.comma):
- return true;
+ return true
}
}
case:
if strings.contains_rune(field, w.comma) >= 0 {
- return true;
+ return true
}
if strings.contains_any(field, CHAR_SET) {
- return true;
+ return true
}
}
// Leading spaces need quoting
- r, _ := utf8.decode_rune_in_string(field);
- return strings.is_space(r);
+ r, _ := utf8.decode_rune_in_string(field)
+ return strings.is_space(r)
}
if !is_valid_delim(w.comma) {
- return .No_Progress; // TODO(bill): Is this a good error?
+ return .No_Progress // TODO(bill): Is this a good error?
}
for _, field_idx in record {
// NOTE(bill): declared like this so that the field can be modified later if necessary
- field := record[field_idx];
+ field := record[field_idx]
if field_idx > 0 {
- io.write_rune(w.w, w.comma) or_return;
+ io.write_rune(w.w, w.comma) or_return
}
if !field_needs_quoting(w, field) {
- io.write_string(w.w, field) or_return;
- continue;
+ io.write_string(w.w, field) or_return
+ continue
}
- io.write_byte(w.w, '"') or_return;
+ io.write_byte(w.w, '"') or_return
for len(field) > 0 {
- i := strings.index_any(field, CHAR_SET);
+ i := strings.index_any(field, CHAR_SET)
if i < 0 {
- i = len(field);
+ i = len(field)
}
- io.write_string(w.w, field[:i]) or_return;
- field = field[i:];
+ io.write_string(w.w, field[:i]) or_return
+ field = field[i:]
if len(field) > 0 {
switch field[0] {
case '\r':
if !w.use_crlf {
- io.write_byte(w.w, '\r') or_return;
+ io.write_byte(w.w, '\r') or_return
}
case '\n':
if w.use_crlf {
- io.write_string(w.w, "\r\n") or_return;
+ io.write_string(w.w, "\r\n") or_return
} else {
- io.write_byte(w.w, '\n') or_return;
+ io.write_byte(w.w, '\n') or_return
}
case '"':
- io.write_string(w.w, `""`) or_return;
+ io.write_string(w.w, `""`) or_return
}
- field = field[1:];
+ field = field[1:]
}
}
- io.write_byte(w.w, '"') or_return;
+ io.write_byte(w.w, '"') or_return
}
if w.use_crlf {
- _, err := io.write_string(w.w, "\r\n");
- return err;
+ _, err := io.write_string(w.w, "\r\n")
+ return err
}
- return io.write_byte(w.w, '\n');
+ return io.write_byte(w.w, '\n')
}
// write_all writes multiple CSV records to w using write, and then flushes (if necessary).
write_all :: proc(w: ^Writer, records: [][]string) -> io.Error {
for record in records {
- write(w, record) or_return;
+ write(w, record) or_return
}
- return writer_flush(w);
+ return writer_flush(w)
}
// writer_flush flushes the underlying io.Writer.
// If the underlying io.Writer does not support flush, nil is returned.
writer_flush :: proc(w: ^Writer) -> io.Error {
- return io.flush(auto_cast w.w);
+ return io.flush(auto_cast w.w)
}
diff --git a/core/encoding/hxa/hxa.odin b/core/encoding/hxa/hxa.odin
index 31113f907..f47661bad 100644
--- a/core/encoding/hxa/hxa.odin
+++ b/core/encoding/hxa/hxa.odin
@@ -2,10 +2,10 @@ package encoding_hxa
import "core:mem"
-LATEST_VERSION :: 3;
-VERSION_API :: "0.3";
+LATEST_VERSION :: 3
+VERSION_API :: "0.3"
-MAGIC_NUMBER :: 'H'<<0 | 'x'<<8 | 'A'<<16 | '\x00'<<24;
+MAGIC_NUMBER :: 'H'<<0 | 'x'<<8 | 'A'<<16 | '\x00'<<24
Header :: struct #packed {
magic_number: u32le,
@@ -48,7 +48,7 @@ Meta_Value_Type :: enum u8 {
Text = 3,
Binary = 4,
Meta = 5,
-};
+}
Meta :: struct {
name: string, // name of the meta data value (maximum length is 255)
@@ -74,7 +74,7 @@ Layer :: struct {
}
// Layers stacks are arrays of layers where all the layers have the same number of entries (polygons, edges, vertices or pixels)
-Layer_Stack :: distinct []Layer;
+Layer_Stack :: distinct []Layer
Node_Geometry :: struct {
vertex_count: u32le, // number of vertices
@@ -92,7 +92,7 @@ Node_Image :: struct {
image_stack: Layer_Stack,
}
-Node_Index :: distinct u32le;
+Node_Index :: distinct u32le
// A file consists of an array of nodes, All nodes have meta data. Geometry nodes have geometry, image nodes have pixels
Node :: struct {
@@ -114,15 +114,15 @@ If you use HxA for something not covered by the conventions but need a conventio
/* Hard conventions */
/* ---------------- */
-CONVENTION_HARD_BASE_VERTEX_LAYER_NAME :: "vertex";
-CONVENTION_HARD_BASE_VERTEX_LAYER_ID :: 0;
-CONVENTION_HARD_BASE_VERTEX_LAYER_COMPONENTS :: 3;
-CONVENTION_HARD_BASE_CORNER_LAYER_NAME :: "reference";
-CONVENTION_HARD_BASE_CORNER_LAYER_ID :: 0;
-CONVENTION_HARD_BASE_CORNER_LAYER_COMPONENTS :: 1;
-CONVENTION_HARD_BASE_CORNER_LAYER_TYPE :: Layer_Data_Type.Int32;
-CONVENTION_HARD_EDGE_NEIGHBOUR_LAYER_NAME :: "neighbour";
-CONVENTION_HARD_EDGE_NEIGHBOUR_LAYER_TYPE :: Layer_Data_Type.Int32;
+CONVENTION_HARD_BASE_VERTEX_LAYER_NAME :: "vertex"
+CONVENTION_HARD_BASE_VERTEX_LAYER_ID :: 0
+CONVENTION_HARD_BASE_VERTEX_LAYER_COMPONENTS :: 3
+CONVENTION_HARD_BASE_CORNER_LAYER_NAME :: "reference"
+CONVENTION_HARD_BASE_CORNER_LAYER_ID :: 0
+CONVENTION_HARD_BASE_CORNER_LAYER_COMPONENTS :: 1
+CONVENTION_HARD_BASE_CORNER_LAYER_TYPE :: Layer_Data_Type.Int32
+CONVENTION_HARD_EDGE_NEIGHBOUR_LAYER_NAME :: "neighbour"
+CONVENTION_HARD_EDGE_NEIGHBOUR_LAYER_TYPE :: Layer_Data_Type.Int32
@@ -131,63 +131,63 @@ CONVENTION_HARD_EDGE_NEIGHBOUR_LAYER_TYPE :: Layer_Data_Type.Int32;
/* geometry layers */
-CONVENTION_SOFT_LAYER_SEQUENCE0 :: "sequence";
-CONVENTION_SOFT_LAYER_NAME_UV0 :: "uv";
-CONVENTION_SOFT_LAYER_NORMALS :: "normal";
-CONVENTION_SOFT_LAYER_BINORMAL :: "binormal";
-CONVENTION_SOFT_LAYER_TANGENT :: "tangent";
-CONVENTION_SOFT_LAYER_COLOR :: "color";
-CONVENTION_SOFT_LAYER_CREASES :: "creases";
-CONVENTION_SOFT_LAYER_SELECTION :: "select";
-CONVENTION_SOFT_LAYER_SKIN_WEIGHT :: "skining_weight";
-CONVENTION_SOFT_LAYER_SKIN_REFERENCE :: "skining_reference";
-CONVENTION_SOFT_LAYER_BLENDSHAPE :: "blendshape";
-CONVENTION_SOFT_LAYER_ADD_BLENDSHAPE :: "addblendshape";
-CONVENTION_SOFT_LAYER_MATERIAL_ID :: "material";
+CONVENTION_SOFT_LAYER_SEQUENCE0 :: "sequence"
+CONVENTION_SOFT_LAYER_NAME_UV0 :: "uv"
+CONVENTION_SOFT_LAYER_NORMALS :: "normal"
+CONVENTION_SOFT_LAYER_BINORMAL :: "binormal"
+CONVENTION_SOFT_LAYER_TANGENT :: "tangent"
+CONVENTION_SOFT_LAYER_COLOR :: "color"
+CONVENTION_SOFT_LAYER_CREASES :: "creases"
+CONVENTION_SOFT_LAYER_SELECTION :: "select"
+CONVENTION_SOFT_LAYER_SKIN_WEIGHT :: "skining_weight"
+CONVENTION_SOFT_LAYER_SKIN_REFERENCE :: "skining_reference"
+CONVENTION_SOFT_LAYER_BLENDSHAPE :: "blendshape"
+CONVENTION_SOFT_LAYER_ADD_BLENDSHAPE :: "addblendshape"
+CONVENTION_SOFT_LAYER_MATERIAL_ID :: "material"
/* Image layers */
-CONVENTION_SOFT_ALBEDO :: "albedo";
-CONVENTION_SOFT_LIGHT :: "light";
-CONVENTION_SOFT_DISPLACEMENT :: "displacement";
-CONVENTION_SOFT_DISTORTION :: "distortion";
-CONVENTION_SOFT_AMBIENT_OCCLUSION :: "ambient_occlusion";
+CONVENTION_SOFT_ALBEDO :: "albedo"
+CONVENTION_SOFT_LIGHT :: "light"
+CONVENTION_SOFT_DISPLACEMENT :: "displacement"
+CONVENTION_SOFT_DISTORTION :: "distortion"
+CONVENTION_SOFT_AMBIENT_OCCLUSION :: "ambient_occlusion"
/* tags layers */
-CONVENTION_SOFT_NAME :: "name";
-CONVENTION_SOFT_TRANSFORM :: "transform";
+CONVENTION_SOFT_NAME :: "name"
+CONVENTION_SOFT_TRANSFORM :: "transform"
/* destroy procedures */
meta_destroy :: proc(meta: Meta, allocator := context.allocator) {
if nested, ok := meta.value.([]Meta); ok {
for m in nested {
- meta_destroy(m);
+ meta_destroy(m)
}
- delete(nested, allocator);
+ delete(nested, allocator)
}
}
nodes_destroy :: proc(nodes: []Node, allocator := context.allocator) {
for node in nodes {
for meta in node.meta_data {
- meta_destroy(meta);
+ meta_destroy(meta)
}
- delete(node.meta_data, allocator);
+ delete(node.meta_data, allocator)
switch n in node.content {
case Node_Geometry:
- delete(n.corner_stack, allocator);
- delete(n.edge_stack, allocator);
- delete(n.face_stack, allocator);
+ delete(n.corner_stack, allocator)
+ delete(n.edge_stack, allocator)
+ delete(n.face_stack, allocator)
case Node_Image:
- delete(n.image_stack, allocator);
+ delete(n.image_stack, allocator)
}
}
- delete(nodes, allocator);
+ delete(nodes, allocator)
}
file_destroy :: proc(file: File) {
- nodes_destroy(file.nodes, file.allocator);
- delete(file.backing, file.allocator);
+ nodes_destroy(file.nodes, file.allocator)
+ delete(file.backing, file.allocator)
}
diff --git a/core/encoding/hxa/read.odin b/core/encoding/hxa/read.odin
index 0cf58dce0..ef7edc8b7 100644
--- a/core/encoding/hxa/read.odin
+++ b/core/encoding/hxa/read.odin
@@ -12,20 +12,20 @@ Read_Error :: enum {
}
read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
- context.allocator = allocator;
+ context.allocator = allocator
- data, ok := os.read_entire_file(filename);
+ data, ok := os.read_entire_file(filename)
if !ok {
- err = .Unable_To_Read_File;
- return;
+ err = .Unable_To_Read_File
+ return
}
defer if !ok {
- delete(data);
+ delete(data)
} else {
- file.backing = data;
+ file.backing = data
}
- file, err = read(data, filename, print_error, allocator);
- return;
+ file, err = read(data, filename, print_error, allocator)
+ return
}
read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
@@ -34,182 +34,182 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
data: []byte,
offset: int,
print_error: bool,
- };
+ }
read_value :: proc(r: ^Reader, $T: typeid) -> (value: T, err: Read_Error) {
- remaining := len(r.data) - r.offset;
+ remaining := len(r.data) - r.offset
if remaining < size_of(T) {
- err = .Short_Read;
- return;
+ err = .Short_Read
+ return
}
- ptr := raw_data(r.data[r.offset:]);
- value = (^T)(ptr)^;
- r.offset += size_of(T);
- return;
+ ptr := raw_data(r.data[r.offset:])
+ value = (^T)(ptr)^
+ r.offset += size_of(T)
+ return
}
read_array :: proc(r: ^Reader, $T: typeid, count: int) -> (value: []T, err: Read_Error) {
- remaining := len(r.data) - r.offset;
+ remaining := len(r.data) - r.offset
if remaining < size_of(T)*count {
- err = .Short_Read;
- return;
+ err = .Short_Read
+ return
}
- ptr := raw_data(r.data[r.offset:]);
+ ptr := raw_data(r.data[r.offset:])
- value = mem.slice_ptr((^T)(ptr), count);
- r.offset += size_of(T)*count;
- return;
+ value = mem.slice_ptr((^T)(ptr), count)
+ r.offset += size_of(T)*count
+ return
}
read_string :: proc(r: ^Reader, count: int) -> (string, Read_Error) {
- buf, err := read_array(r, byte, count);
- return string(buf), err;
+ buf, err := read_array(r, byte, count)
+ return string(buf), err
}
read_name :: proc(r: ^Reader) -> (value: string, err: Read_Error) {
- len := read_value(r, u8) or_return;
- data := read_array(r, byte, int(len)) or_return;
- return string(data[:len]), nil;
+ len := read_value(r, u8) or_return
+ data := read_array(r, byte, int(len)) or_return
+ return string(data[:len]), nil
}
read_meta :: proc(r: ^Reader, capacity: u32le) -> (meta_data: []Meta, err: Read_Error) {
- meta_data = make([]Meta, int(capacity));
- count := 0;
- defer meta_data = meta_data[:count];
+ meta_data = make([]Meta, int(capacity))
+ count := 0
+ defer meta_data = meta_data[:count]
for m in &meta_data {
- m.name = read_name(r) or_return;
+ m.name = read_name(r) or_return
- type := read_value(r, Meta_Value_Type) or_return;
+ type := read_value(r, Meta_Value_Type) or_return
if type > max(Meta_Value_Type) {
if r.print_error {
- fmt.eprintf("HxA Error: file '%s' has meta value type %d. Maximum value is ", r.filename, u8(type), u8(max(Meta_Value_Type)));
+ fmt.eprintf("HxA Error: file '%s' has meta value type %d. Maximum value is ", r.filename, u8(type), u8(max(Meta_Value_Type)))
}
- err = .Invalid_Data;
- return;
+ err = .Invalid_Data
+ return
}
- array_length := read_value(r, u32le) or_return;
+ array_length := read_value(r, u32le) or_return
switch type {
- case .Int64: m.value = read_array(r, i64le, int(array_length)) or_return;
- case .Double: m.value = read_array(r, f64le, int(array_length)) or_return;
- case .Node: m.value = read_array(r, Node_Index, int(array_length)) or_return;
- case .Text: m.value = read_string(r, int(array_length)) or_return;
- case .Binary: m.value = read_array(r, byte, int(array_length)) or_return;
- case .Meta: m.value = read_meta(r, array_length) or_return;
+ case .Int64: m.value = read_array(r, i64le, int(array_length)) or_return
+ case .Double: m.value = read_array(r, f64le, int(array_length)) or_return
+ case .Node: m.value = read_array(r, Node_Index, int(array_length)) or_return
+ case .Text: m.value = read_string(r, int(array_length)) or_return
+ case .Binary: m.value = read_array(r, byte, int(array_length)) or_return
+ case .Meta: m.value = read_meta(r, array_length) or_return
}
- count += 1;
+ count += 1
}
- return;
+ return
}
read_layer_stack :: proc(r: ^Reader, capacity: u32le) -> (layers: Layer_Stack, err: Read_Error) {
- stack_count := read_value(r, u32le) or_return;
- layer_count := 0;
- layers = make(Layer_Stack, stack_count);
- defer layers = layers[:layer_count];
+ stack_count := read_value(r, u32le) or_return
+ layer_count := 0
+ layers = make(Layer_Stack, stack_count)
+ defer layers = layers[:layer_count]
for layer in &layers {
- layer.name = read_name(r) or_return;
- layer.components = read_value(r, u8) or_return;
- type := read_value(r, Layer_Data_Type) or_return;
+ layer.name = read_name(r) or_return
+ layer.components = read_value(r, u8) or_return
+ type := read_value(r, Layer_Data_Type) or_return
if type > max(type) {
if r.print_error {
- fmt.eprintf("HxA Error: file '%s' has layer data type %d. Maximum value is ", r.filename, u8(type), u8(max(Layer_Data_Type)));
+ fmt.eprintf("HxA Error: file '%s' has layer data type %d. Maximum value is ", r.filename, u8(type), u8(max(Layer_Data_Type)))
}
- err = .Invalid_Data;
- return;
+ err = .Invalid_Data
+ return
}
- data_len := int(layer.components) * int(capacity);
+ data_len := int(layer.components) * int(capacity)
switch type {
- case .Uint8: layer.data = read_array(r, u8, data_len) or_return;
- case .Int32: layer.data = read_array(r, i32le, data_len) or_return;
- case .Float: layer.data = read_array(r, f32le, data_len) or_return;
- case .Double: layer.data = read_array(r, f64le, data_len) or_return;
+ case .Uint8: layer.data = read_array(r, u8, data_len) or_return
+ case .Int32: layer.data = read_array(r, i32le, data_len) or_return
+ case .Float: layer.data = read_array(r, f32le, data_len) or_return
+ case .Double: layer.data = read_array(r, f64le, data_len) or_return
}
- layer_count += 1;
+ layer_count += 1
}
- return;
+ return
}
if len(data) < size_of(Header) {
- return;
+ return
}
- context.allocator = allocator;
+ context.allocator = allocator
- header := cast(^Header)raw_data(data);
- assert(header.magic_number == MAGIC_NUMBER);
+ header := cast(^Header)raw_data(data)
+ assert(header.magic_number == MAGIC_NUMBER)
r := &Reader{
filename = filename,
data = data[:],
offset = size_of(Header),
print_error = print_error,
- };
+ }
- node_count := 0;
- file.nodes = make([]Node, header.internal_node_count);
+ node_count := 0
+ file.nodes = make([]Node, header.internal_node_count)
defer if err != nil {
- nodes_destroy(file.nodes);
- file.nodes = nil;
+ nodes_destroy(file.nodes)
+ file.nodes = nil
}
- defer file.nodes = file.nodes[:node_count];
+ defer file.nodes = file.nodes[:node_count]
for node_idx in 0..<header.internal_node_count {
- node := &file.nodes[node_count];
- type := read_value(r, Node_Type) or_return;
+ node := &file.nodes[node_count]
+ type := read_value(r, Node_Type) or_return
if type > max(Node_Type) {
if r.print_error {
- fmt.eprintf("HxA Error: file '%s' has node type %d. Maximum value is ", r.filename, u8(type), u8(max(Node_Type)));
+ fmt.eprintf("HxA Error: file '%s' has node type %d. Maximum value is ", r.filename, u8(type), u8(max(Node_Type)))
}
- err = .Invalid_Data;
- return;
+ err = .Invalid_Data
+ return
}
- node_count += 1;
+ node_count += 1
- node.meta_data = read_meta(r, read_value(r, u32le) or_return) or_return;
+ node.meta_data = read_meta(r, read_value(r, u32le) or_return) or_return
switch type {
case .Meta_Only:
// Okay
case .Geometry:
- g: Node_Geometry;
+ g: Node_Geometry
- g.vertex_count = read_value(r, u32le) or_return;
- g.vertex_stack = read_layer_stack(r, g.vertex_count) or_return;
- g.edge_corner_count = read_value(r, u32le) or_return;
- g.corner_stack = read_layer_stack(r, g.edge_corner_count) or_return;
+ g.vertex_count = read_value(r, u32le) or_return
+ g.vertex_stack = read_layer_stack(r, g.vertex_count) or_return
+ g.edge_corner_count = read_value(r, u32le) or_return
+ g.corner_stack = read_layer_stack(r, g.edge_corner_count) or_return
if header.version > 2 {
- g.edge_stack = read_layer_stack(r, g.edge_corner_count) or_return;
+ g.edge_stack = read_layer_stack(r, g.edge_corner_count) or_return
}
- g.face_count = read_value(r, u32le) or_return;
- g.face_stack = read_layer_stack(r, g.face_count) or_return;
+ g.face_count = read_value(r, u32le) or_return
+ g.face_stack = read_layer_stack(r, g.face_count) or_return
- node.content = g;
+ node.content = g
case .Image:
- img: Node_Image;
+ img: Node_Image
- img.type = read_value(r, Image_Type) or_return;
- dimensions := int(img.type);
+ img.type = read_value(r, Image_Type) or_return
+ dimensions := int(img.type)
if img.type == .Image_Cube {
- dimensions = 2;
+ dimensions = 2
}
- img.resolution = {1, 1, 1};
+ img.resolution = {1, 1, 1}
for d in 0..<dimensions {
- img.resolution[d] = read_value(r, u32le) or_return;
+ img.resolution[d] = read_value(r, u32le) or_return
}
- size := img.resolution[0]*img.resolution[1]*img.resolution[2];
+ size := img.resolution[0]*img.resolution[1]*img.resolution[2]
if img.type == .Image_Cube {
- size *= 6;
+ size *= 6
}
- img.image_stack = read_layer_stack(r, size) or_return;
+ img.image_stack = read_layer_stack(r, size) or_return
- node.content = img;
+ node.content = img
}
}
- return;
+ return
}
diff --git a/core/encoding/hxa/write.odin b/core/encoding/hxa/write.odin
index 4391e700a..e774018b2 100644
--- a/core/encoding/hxa/write.odin
+++ b/core/encoding/hxa/write.odin
@@ -10,36 +10,36 @@ Write_Error :: enum {
}
write_to_file :: proc(filepath: string, file: File) -> (err: Write_Error) {
- required := required_write_size(file);
- buf, alloc_err := make([]byte, required);
+ required := required_write_size(file)
+ buf, alloc_err := make([]byte, required)
if alloc_err == .Out_Of_Memory {
- return .Failed_File_Write;
+ return .Failed_File_Write
}
- defer delete(buf);
+ defer delete(buf)
- write_internal(&Writer{data = buf}, file);
+ write_internal(&Writer{data = buf}, file)
if !os.write_entire_file(filepath, buf) {
- err =.Failed_File_Write;
+ err =.Failed_File_Write
}
- return;
+ return
}
write :: proc(buf: []byte, file: File) -> (n: int, err: Write_Error) {
- required := required_write_size(file);
+ required := required_write_size(file)
if len(buf) < required {
- err = .Buffer_Too_Small;
- return;
+ err = .Buffer_Too_Small
+ return
}
- n = required;
- write_internal(&Writer{data = buf}, file);
- return;
+ n = required
+ write_internal(&Writer{data = buf}, file)
+ return
}
required_write_size :: proc(file: File) -> (n: int) {
- writer := &Writer{dummy_pass = true};
- write_internal(writer, file);
- n = writer.offset;
- return;
+ writer := &Writer{dummy_pass = true}
+ write_internal(writer, file)
+ n = writer.offset
+ return
}
@@ -48,146 +48,146 @@ Writer :: struct {
data: []byte,
offset: int,
dummy_pass: bool,
-};
+}
@(private)
write_internal :: proc(w: ^Writer, file: File) {
write_value :: proc(w: ^Writer, value: $T) {
if !w.dummy_pass {
- remaining := len(w.data) - w.offset;
- assert(size_of(T) <= remaining);
- ptr := raw_data(w.data[w.offset:]);
- (^T)(ptr)^ = value;
+ remaining := len(w.data) - w.offset
+ assert(size_of(T) <= remaining)
+ ptr := raw_data(w.data[w.offset:])
+ (^T)(ptr)^ = value
}
- w.offset += size_of(T);
+ w.offset += size_of(T)
}
write_array :: proc(w: ^Writer, array: []$T) {
if !w.dummy_pass {
- remaining := len(w.data) - w.offset;
- assert(size_of(T)*len(array) <= remaining);
- ptr := raw_data(w.data[w.offset:]);
- dst := mem.slice_ptr((^T)(ptr), len(array));
- copy(dst, array);
+ remaining := len(w.data) - w.offset
+ assert(size_of(T)*len(array) <= remaining)
+ ptr := raw_data(w.data[w.offset:])
+ dst := mem.slice_ptr((^T)(ptr), len(array))
+ copy(dst, array)
}
- w.offset += size_of(T)*len(array);
+ w.offset += size_of(T)*len(array)
}
write_string :: proc(w: ^Writer, str: string) {
if !w.dummy_pass {
- remaining := len(w.data) - w.offset;
- assert(size_of(byte)*len(str) <= remaining);
- ptr := raw_data(w.data[w.offset:]);
- dst := mem.slice_ptr((^byte)(ptr), len(str));
- copy(dst, str);
+ remaining := len(w.data) - w.offset
+ assert(size_of(byte)*len(str) <= remaining)
+ ptr := raw_data(w.data[w.offset:])
+ dst := mem.slice_ptr((^byte)(ptr), len(str))
+ copy(dst, str)
}
- w.offset += size_of(byte)*len(str);
+ w.offset += size_of(byte)*len(str)
}
write_metadata :: proc(w: ^Writer, meta_data: []Meta) {
for m in meta_data {
- name_len := max(len(m.name), 255);
- write_value(w, u8(name_len));
- write_string(w, m.name[:name_len]);
+ name_len := max(len(m.name), 255)
+ write_value(w, u8(name_len))
+ write_string(w, m.name[:name_len])
- meta_data_type: Meta_Value_Type;
- length: u32le = 0;
+ meta_data_type: Meta_Value_Type
+ length: u32le = 0
switch v in m.value {
case []i64le:
- meta_data_type = .Int64;
- length = u32le(len(v));
+ meta_data_type = .Int64
+ length = u32le(len(v))
case []f64le:
- meta_data_type = .Double;
- length = u32le(len(v));
+ meta_data_type = .Double
+ length = u32le(len(v))
case []Node_Index:
- meta_data_type = .Node;
- length = u32le(len(v));
+ meta_data_type = .Node
+ length = u32le(len(v))
case string:
- meta_data_type = .Text;
- length = u32le(len(v));
+ meta_data_type = .Text
+ length = u32le(len(v))
case []byte:
- meta_data_type = .Binary;
- length = u32le(len(v));
+ meta_data_type = .Binary
+ length = u32le(len(v))
case []Meta:
- meta_data_type = .Meta;
- length = u32le(len(v));
+ meta_data_type = .Meta
+ length = u32le(len(v))
}
- write_value(w, meta_data_type);
- write_value(w, length);
+ write_value(w, meta_data_type)
+ write_value(w, length)
switch v in m.value {
- case []i64le: write_array(w, v);
- case []f64le: write_array(w, v);
- case []Node_Index: write_array(w, v);
- case string: write_string(w, v);
- case []byte: write_array(w, v);
- case []Meta: write_metadata(w, v);
+ case []i64le: write_array(w, v)
+ case []f64le: write_array(w, v)
+ case []Node_Index: write_array(w, v)
+ case string: write_string(w, v)
+ case []byte: write_array(w, v)
+ case []Meta: write_metadata(w, v)
}
}
- return;
+ return
}
write_layer_stack :: proc(w: ^Writer, layers: Layer_Stack) {
- write_value(w, u32(len(layers)));
+ write_value(w, u32(len(layers)))
for layer in layers {
- name_len := max(len(layer.name), 255);
- write_value(w, u8(name_len));
- write_string(w, layer .name[:name_len]);
+ name_len := max(len(layer.name), 255)
+ write_value(w, u8(name_len))
+ write_string(w, layer .name[:name_len])
- write_value(w, layer.components);
+ write_value(w, layer.components)
- layer_data_type: Layer_Data_Type;
+ layer_data_type: Layer_Data_Type
switch v in layer.data {
- case []u8: layer_data_type = .Uint8;
- case []i32le: layer_data_type = .Int32;
- case []f32le: layer_data_type = .Float;
- case []f64le: layer_data_type = .Double;
+ case []u8: layer_data_type = .Uint8
+ case []i32le: layer_data_type = .Int32
+ case []f32le: layer_data_type = .Float
+ case []f64le: layer_data_type = .Double
}
- write_value(w, layer_data_type);
+ write_value(w, layer_data_type)
switch v in layer.data {
- case []u8: write_array(w, v);
- case []i32le: write_array(w, v);
- case []f32le: write_array(w, v);
- case []f64le: write_array(w, v);
+ case []u8: write_array(w, v)
+ case []i32le: write_array(w, v)
+ case []f32le: write_array(w, v)
+ case []f64le: write_array(w, v)
}
}
- return;
+ return
}
write_value(w, &Header{
magic_number = MAGIC_NUMBER,
version = LATEST_VERSION,
internal_node_count = u32le(len(file.nodes)),
- });
+ })
for node in file.nodes {
- node_type: Node_Type;
+ node_type: Node_Type
switch content in node.content {
- case Node_Geometry: node_type = .Geometry;
- case Node_Image: node_type = .Image;
+ case Node_Geometry: node_type = .Geometry
+ case Node_Image: node_type = .Image
}
- write_value(w, node_type);
+ write_value(w, node_type)
- write_value(w, u32(len(node.meta_data)));
- write_metadata(w, node.meta_data);
+ write_value(w, u32(len(node.meta_data)))
+ write_metadata(w, node.meta_data)
switch content in node.content {
case Node_Geometry:
- write_value(w, content.vertex_count);
- write_layer_stack(w, content.vertex_stack);
- write_value(w, content.edge_corner_count);
- write_layer_stack(w, content.corner_stack);
- write_layer_stack(w, content.edge_stack);
- write_value(w, content.face_count);
- write_layer_stack(w, content.face_stack);
+ write_value(w, content.vertex_count)
+ write_layer_stack(w, content.vertex_stack)
+ write_value(w, content.edge_corner_count)
+ write_layer_stack(w, content.corner_stack)
+ write_layer_stack(w, content.edge_stack)
+ write_value(w, content.face_count)
+ write_layer_stack(w, content.face_stack)
case Node_Image:
- write_value(w, content.type);
- dimensions := int(content.type);
+ write_value(w, content.type)
+ dimensions := int(content.type)
if content.type == .Image_Cube {
- dimensions = 2;
+ dimensions = 2
}
for d in 0..<dimensions {
- write_value(w, content.resolution[d]);
+ write_value(w, content.resolution[d])
}
- write_layer_stack(w, content.image_stack);
+ write_layer_stack(w, content.image_stack)
}
}
}
diff --git a/core/encoding/json/marshal.odin b/core/encoding/json/marshal.odin
index d9a674d33..cea3c3df6 100644
--- a/core/encoding/json/marshal.odin
+++ b/core/encoding/json/marshal.odin
@@ -13,305 +13,305 @@ Marshal_Error :: enum {
}
marshal :: proc(v: any, allocator := context.allocator) -> ([]byte, Marshal_Error) {
- b: strings.Builder;
- strings.init_builder(&b, allocator);
+ b: strings.Builder
+ strings.init_builder(&b, allocator)
- err := marshal_arg(&b, v);
+ err := marshal_arg(&b, v)
if err != .None {
- strings.destroy_builder(&b);
- return nil, err;
+ strings.destroy_builder(&b)
+ return nil, err
}
if len(b.buf) == 0 {
- strings.destroy_builder(&b);
- return nil, err;
+ strings.destroy_builder(&b)
+ return nil, err
}
- return b.buf[:], err;
+ return b.buf[:], err
}
marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
if v == nil {
- strings.write_string(b, "null");
- return .None;
+ strings.write_string(b, "null")
+ return .None
}
- ti := runtime.type_info_base(type_info_of(v.id));
- a := any{v.data, ti.id};
+ ti := runtime.type_info_base(type_info_of(v.id))
+ a := any{v.data, ti.id}
switch info in ti.variant {
case runtime.Type_Info_Named:
- unreachable();
+ unreachable()
case runtime.Type_Info_Integer:
- buf: [21]byte;
- u: u64;
+ buf: [21]byte
+ u: u64
switch i in a {
- case i8: u = u64(i);
- case i16: u = u64(i);
- case i32: u = u64(i);
- case i64: u = u64(i);
- case int: u = u64(i);
- case u8: u = u64(i);
- case u16: u = u64(i);
- case u32: u = u64(i);
- case u64: u = u64(i);
- case uint: u = u64(i);
- case uintptr: u = u64(i);
-
- case i16le: u = u64(i);
- case i32le: u = u64(i);
- case i64le: u = u64(i);
- case u16le: u = u64(i);
- case u32le: u = u64(i);
- case u64le: u = u64(i);
-
- case i16be: u = u64(i);
- case i32be: u = u64(i);
- case i64be: u = u64(i);
- case u16be: u = u64(i);
- case u32be: u = u64(i);
- case u64be: u = u64(i);
+ case i8: u = u64(i)
+ case i16: u = u64(i)
+ case i32: u = u64(i)
+ case i64: u = u64(i)
+ case int: u = u64(i)
+ case u8: u = u64(i)
+ case u16: u = u64(i)
+ case u32: u = u64(i)
+ case u64: u = u64(i)
+ case uint: u = u64(i)
+ case uintptr: u = u64(i)
+
+ case i16le: u = u64(i)
+ case i32le: u = u64(i)
+ case i64le: u = u64(i)
+ case u16le: u = u64(i)
+ case u32le: u = u64(i)
+ case u64le: u = u64(i)
+
+ case i16be: u = u64(i)
+ case i32be: u = u64(i)
+ case i64be: u = u64(i)
+ case u16be: u = u64(i)
+ case u32be: u = u64(i)
+ case u64be: u = u64(i)
}
- s := strconv.append_bits(buf[:], u, 10, info.signed, 8*ti.size, "0123456789", nil);
- strings.write_string(b, s);
+ s := strconv.append_bits(buf[:], u, 10, info.signed, 8*ti.size, "0123456789", nil)
+ strings.write_string(b, s)
case runtime.Type_Info_Rune:
- r := a.(rune);
- strings.write_byte(b, '"');
- strings.write_escaped_rune(b, r, '"', true);
- strings.write_byte(b, '"');
+ r := a.(rune)
+ strings.write_byte(b, '"')
+ strings.write_escaped_rune(b, r, '"', true)
+ strings.write_byte(b, '"')
case runtime.Type_Info_Float:
- val: f64;
+ val: f64
switch f in a {
- case f16: val = f64(f);
- case f32: val = f64(f);
- case f64: val = f64(f);
+ case f16: val = f64(f)
+ case f32: val = f64(f)
+ case f64: val = f64(f)
}
- buf: [386]byte;
+ buf: [386]byte
- str := strconv.append_float(buf[1:], val, 'f', 2*ti.size, 8*ti.size);
- s := buf[:len(str)+1];
+ str := strconv.append_float(buf[1:], val, 'f', 2*ti.size, 8*ti.size)
+ s := buf[:len(str)+1]
if s[1] == '+' || s[1] == '-' {
- s = s[1:];
+ s = s[1:]
} else {
- s[0] = '+';
+ s[0] = '+'
}
if s[0] == '+' {
- s = s[1:];
+ s = s[1:]
}
- strings.write_string(b, string(s));
+ strings.write_string(b, string(s))
case runtime.Type_Info_Complex:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Quaternion:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_String:
switch s in a {
- case string: strings.write_quoted_string(b, s);
- case cstring: strings.write_quoted_string(b, string(s));
+ case string: strings.write_quoted_string(b, s)
+ case cstring: strings.write_quoted_string(b, string(s))
}
case runtime.Type_Info_Boolean:
- val: bool;
+ val: bool
switch b in a {
- case bool: val = bool(b);
- case b8: val = bool(b);
- case b16: val = bool(b);
- case b32: val = bool(b);
- case b64: val = bool(b);
+ case bool: val = bool(b)
+ case b8: val = bool(b)
+ case b16: val = bool(b)
+ case b32: val = bool(b)
+ case b64: val = bool(b)
}
- strings.write_string(b, val ? "true" : "false");
+ strings.write_string(b, val ? "true" : "false")
case runtime.Type_Info_Any:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Type_Id:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Pointer:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Multi_Pointer:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Procedure:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Tuple:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Enumerated_Array:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Simd_Vector:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Relative_Pointer:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Relative_Slice:
- return .Unsupported_Type;
+ return .Unsupported_Type
case runtime.Type_Info_Array:
- strings.write_byte(b, '[');
+ strings.write_byte(b, '[')
for i in 0..<info.count {
if i > 0 { strings.write_string(b, ", "); }
- data := uintptr(v.data) + uintptr(i*info.elem_size);
- marshal_arg(b, any{rawptr(data), info.elem.id});
+ data := uintptr(v.data) + uintptr(i*info.elem_size)
+ marshal_arg(b, any{rawptr(data), info.elem.id})
}
- strings.write_byte(b, ']');
+ strings.write_byte(b, ']')
case runtime.Type_Info_Dynamic_Array:
- strings.write_byte(b, '[');
- array := cast(^mem.Raw_Dynamic_Array)v.data;
+ strings.write_byte(b, '[')
+ array := cast(^mem.Raw_Dynamic_Array)v.data
for i in 0..<array.len {
if i > 0 { strings.write_string(b, ", "); }
- data := uintptr(array.data) + uintptr(i*info.elem_size);
- marshal_arg(b, any{rawptr(data), info.elem.id});
+ data := uintptr(array.data) + uintptr(i*info.elem_size)
+ marshal_arg(b, any{rawptr(data), info.elem.id})
}
- strings.write_byte(b, ']');
+ strings.write_byte(b, ']')
case runtime.Type_Info_Slice:
- strings.write_byte(b, '[');
- slice := cast(^mem.Raw_Slice)v.data;
+ strings.write_byte(b, '[')
+ slice := cast(^mem.Raw_Slice)v.data
for i in 0..<slice.len {
if i > 0 { strings.write_string(b, ", "); }
- data := uintptr(slice.data) + uintptr(i*info.elem_size);
- marshal_arg(b, any{rawptr(data), info.elem.id});
+ data := uintptr(slice.data) + uintptr(i*info.elem_size)
+ marshal_arg(b, any{rawptr(data), info.elem.id})
}
- strings.write_byte(b, ']');
+ strings.write_byte(b, ']')
case runtime.Type_Info_Map:
- m := (^mem.Raw_Map)(v.data);
+ m := (^mem.Raw_Map)(v.data)
- strings.write_byte(b, '{');
+ strings.write_byte(b, '{')
if m != nil {
if info.generated_struct == nil {
- return .Unsupported_Type;
+ return .Unsupported_Type
}
- entries := &m.entries;
- gs := runtime.type_info_base(info.generated_struct).variant.(runtime.Type_Info_Struct);
- ed := runtime.type_info_base(gs.types[1]).variant.(runtime.Type_Info_Dynamic_Array);
- entry_type := ed.elem.variant.(runtime.Type_Info_Struct);
- entry_size := ed.elem_size;
+ entries := &m.entries
+ gs := runtime.type_info_base(info.generated_struct).variant.(runtime.Type_Info_Struct)
+ ed := runtime.type_info_base(gs.types[1]).variant.(runtime.Type_Info_Dynamic_Array)
+ entry_type := ed.elem.variant.(runtime.Type_Info_Struct)
+ entry_size := ed.elem_size
for i in 0..<entries.len {
if i > 0 { strings.write_string(b, ", "); }
- data := uintptr(entries.data) + uintptr(i*entry_size);
- key := rawptr(data + entry_type.offsets[2]);
- value := rawptr(data + entry_type.offsets[3]);
+ data := uintptr(entries.data) + uintptr(i*entry_size)
+ key := rawptr(data + entry_type.offsets[2])
+ value := rawptr(data + entry_type.offsets[3])
- marshal_arg(b, any{key, info.key.id});
- strings.write_string(b, ": ");
- marshal_arg(b, any{value, info.value.id});
+ marshal_arg(b, any{key, info.key.id})
+ strings.write_string(b, ": ")
+ marshal_arg(b, any{value, info.value.id})
}
}
- strings.write_byte(b, '}');
+ strings.write_byte(b, '}')
case runtime.Type_Info_Struct:
- strings.write_byte(b, '{');
+ strings.write_byte(b, '{')
for name, i in info.names {
if i > 0 { strings.write_string(b, ", "); }
- strings.write_quoted_string(b, name);
- strings.write_string(b, ": ");
+ strings.write_quoted_string(b, name)
+ strings.write_string(b, ": ")
- id := info.types[i].id;
- data := rawptr(uintptr(v.data) + info.offsets[i]);
- marshal_arg(b, any{data, id});
+ id := info.types[i].id
+ data := rawptr(uintptr(v.data) + info.offsets[i])
+ marshal_arg(b, any{data, id})
}
- strings.write_byte(b, '}');
+ strings.write_byte(b, '}')
case runtime.Type_Info_Union:
- tag_ptr := uintptr(v.data) + info.tag_offset;
- tag_any := any{rawptr(tag_ptr), info.tag_type.id};
+ tag_ptr := uintptr(v.data) + info.tag_offset
+ tag_any := any{rawptr(tag_ptr), info.tag_type.id}
- tag: i64 = -1;
+ tag: i64 = -1
switch i in tag_any {
- case u8: tag = i64(i);
- case i8: tag = i64(i);
- case u16: tag = i64(i);
- case i16: tag = i64(i);
- case u32: tag = i64(i);
- case i32: tag = i64(i);
- case u64: tag = i64(i);
- case i64: tag = i64(i);
- case: panic("Invalid union tag type");
+ case u8: tag = i64(i)
+ case i8: tag = i64(i)
+ case u16: tag = i64(i)
+ case i16: tag = i64(i)
+ case u32: tag = i64(i)
+ case i32: tag = i64(i)
+ case u64: tag = i64(i)
+ case i64: tag = i64(i)
+ case: panic("Invalid union tag type")
}
if v.data == nil || tag == 0 {
- strings.write_string(b, "null");
+ strings.write_string(b, "null")
} else {
- id := info.variants[tag-1].id;
- marshal_arg(b, any{v.data, id});
+ id := info.variants[tag-1].id
+ marshal_arg(b, any{v.data, id})
}
case runtime.Type_Info_Enum:
- return marshal_arg(b, any{v.data, info.base.id});
+ return marshal_arg(b, any{v.data, info.base.id})
case runtime.Type_Info_Bit_Set:
is_bit_set_different_endian_to_platform :: proc(ti: ^runtime.Type_Info) -> bool {
if ti == nil {
- return false;
+ return false
}
- t := runtime.type_info_base(ti);
+ t := runtime.type_info_base(ti)
#partial switch info in t.variant {
case runtime.Type_Info_Integer:
switch info.endianness {
- case .Platform: return false;
- case .Little: return ODIN_ENDIAN != "little";
- case .Big: return ODIN_ENDIAN != "big";
+ case .Platform: return false
+ case .Little: return ODIN_ENDIAN != "little"
+ case .Big: return ODIN_ENDIAN != "big"
}
}
- return false;
+ return false
}
- bit_data: u64;
- bit_size := u64(8*ti.size);
+ bit_data: u64
+ bit_size := u64(8*ti.size)
- do_byte_swap := is_bit_set_different_endian_to_platform(info.underlying);
+ do_byte_swap := is_bit_set_different_endian_to_platform(info.underlying)
switch bit_size {
- case 0: bit_data = 0;
+ case 0: bit_data = 0
case 8:
- x := (^u8)(v.data)^;
- bit_data = u64(x);
+ x := (^u8)(v.data)^
+ bit_data = u64(x)
case 16:
- x := (^u16)(v.data)^;
+ x := (^u16)(v.data)^
if do_byte_swap {
- x = bits.byte_swap(x);
+ x = bits.byte_swap(x)
}
- bit_data = u64(x);
+ bit_data = u64(x)
case 32:
- x := (^u32)(v.data)^;
+ x := (^u32)(v.data)^
if do_byte_swap {
- x = bits.byte_swap(x);
+ x = bits.byte_swap(x)
}
- bit_data = u64(x);
+ bit_data = u64(x)
case 64:
- x := (^u64)(v.data)^;
+ x := (^u64)(v.data)^
if do_byte_swap {
- x = bits.byte_swap(x);
+ x = bits.byte_swap(x)
}
- bit_data = u64(x);
- case: panic("unknown bit_size size");
+ bit_data = u64(x)
+ case: panic("unknown bit_size size")
}
- strings.write_u64(b, bit_data);
+ strings.write_u64(b, bit_data)
- return .Unsupported_Type;
+ return .Unsupported_Type
}
- return .None;
+ return .None
}
diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin
index 8fafdcda4..b71d90b96 100644
--- a/core/encoding/json/parser.odin
+++ b/core/encoding/json/parser.odin
@@ -15,235 +15,235 @@ Parser :: struct {
}
make_parser :: proc(data: []byte, spec := Specification.JSON, parse_integers := false, allocator := context.allocator) -> Parser {
- p: Parser;
- p.tok = make_tokenizer(data, spec, parse_integers);
- p.spec = spec;
- p.allocator = allocator;
- assert(p.allocator.procedure != nil);
- advance_token(&p);
- return p;
+ p: Parser
+ p.tok = make_tokenizer(data, spec, parse_integers)
+ p.spec = spec
+ p.allocator = allocator
+ assert(p.allocator.procedure != nil)
+ advance_token(&p)
+ return p
}
parse :: proc(data: []byte, spec := Specification.JSON, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
- context.allocator = allocator;
- p := make_parser(data, spec, parse_integers, allocator);
+ context.allocator = allocator
+ p := make_parser(data, spec, parse_integers, allocator)
if p.spec == Specification.JSON5 {
- return parse_value(&p);
+ return parse_value(&p)
}
- return parse_object(&p);
+ return parse_object(&p)
}
token_end_pos :: proc(tok: Token) -> Pos {
- end := tok.pos;
- end.offset += len(tok.text);
- return end;
+ end := tok.pos
+ end.offset += len(tok.text)
+ return end
}
advance_token :: proc(p: ^Parser) -> (Token, Error) {
- err: Error;
- p.prev_token = p.curr_token;
- p.curr_token, err = get_token(&p.tok);
- return p.prev_token, err;
+ err: Error
+ p.prev_token = p.curr_token
+ p.curr_token, err = get_token(&p.tok)
+ return p.prev_token, err
}
allow_token :: proc(p: ^Parser, kind: Token_Kind) -> bool {
if p.curr_token.kind == kind {
- advance_token(p);
- return true;
+ advance_token(p)
+ return true
}
- return false;
+ return false
}
expect_token :: proc(p: ^Parser, kind: Token_Kind) -> Error {
- prev := p.curr_token;
- advance_token(p);
+ prev := p.curr_token
+ advance_token(p)
if prev.kind == kind {
- return .None;
+ return .None
}
- return .Unexpected_Token;
+ return .Unexpected_Token
}
parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
- token := p.curr_token;
+ token := p.curr_token
#partial switch token.kind {
case .Null:
- value = Null{};
- advance_token(p);
- return;
+ value = Null{}
+ advance_token(p)
+ return
case .False:
- value = Boolean(false);
- advance_token(p);
- return;
+ value = Boolean(false)
+ advance_token(p)
+ return
case .True:
- value = Boolean(true);
- advance_token(p);
- return;
+ value = Boolean(true)
+ advance_token(p)
+ return
case .Integer:
- i, _ := strconv.parse_i64(token.text);
- value = Integer(i);
- advance_token(p);
- return;
+ i, _ := strconv.parse_i64(token.text)
+ value = Integer(i)
+ advance_token(p)
+ return
case .Float:
- f, _ := strconv.parse_f64(token.text);
- value = Float(f);
- advance_token(p);
- return;
+ f, _ := strconv.parse_f64(token.text)
+ value = Float(f)
+ advance_token(p)
+ return
case .String:
- value = String(unquote_string(token, p.spec, p.allocator));
- advance_token(p);
- return;
+ value = String(unquote_string(token, p.spec,
p.allocator)) + advance_token(p) + return case .Open_Brace: - return parse_object(p); + return parse_object(p) case .Open_Bracket: - return parse_array(p); + return parse_array(p) case: if p.spec == Specification.JSON5 { #partial switch token.kind { case .Infinity: - inf: u64 = 0x7ff0000000000000; + inf: u64 = 0x7ff0000000000000 if token.text[0] == '-' { - inf = 0xfff0000000000000; + inf = 0xfff0000000000000 } - value = transmute(f64)inf; - advance_token(p); - return; + value = transmute(f64)inf + advance_token(p) + return case .NaN: - nan: u64 = 0x7ff7ffffffffffff; + nan: u64 = 0x7ff7ffffffffffff if token.text[0] == '-' { - nan = 0xfff7ffffffffffff; + nan = 0xfff7ffffffffffff } - value = transmute(f64)nan; - advance_token(p); - return; + value = transmute(f64)nan + advance_token(p) + return } } } - err = .Unexpected_Token; - advance_token(p); - return; + err = .Unexpected_Token + advance_token(p) + return } parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) { - expect_token(p, .Open_Bracket) or_return; + expect_token(p, .Open_Bracket) or_return - array: Array; - array.allocator = p.allocator; + array: Array + array.allocator = p.allocator defer if err != .None { for elem in array { - destroy_value(elem); + destroy_value(elem) } - delete(array); + delete(array) } for p.curr_token.kind != .Close_Bracket { - elem := parse_value(p) or_return; - append(&array, elem); + elem := parse_value(p) or_return + append(&array, elem) // Disallow trailing commas for the time being if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } - expect_token(p, .Close_Bracket) or_return; - value = array; - return; + expect_token(p, .Close_Bracket) or_return + value = array + return } clone_string :: proc(s: string, allocator: mem.Allocator) -> string { - n := len(s); - b := make([]byte, n+1, allocator); - copy(b, s); - b[n] = 0; - return string(b[:n]); + n := len(s) + b := make([]byte, n+1, allocator) + copy(b, s) + b[n] = 0 + return string(b[:n]) } parse_object_key :: proc(p: ^Parser) -> (key: string, err: Error) { - tok := p.curr_token; + tok := p.curr_token if p.spec == Specification.JSON5 { if tok.kind == .String { - expect_token(p, .String); - key = unquote_string(tok, p.spec, p.allocator); - return; + expect_token(p, .String) + key = unquote_string(tok, p.spec, p.allocator) + return } else if tok.kind == .Ident { - expect_token(p, .Ident); - key = clone_string(tok.text, p.allocator); - return; + expect_token(p, .Ident) + key = clone_string(tok.text, p.allocator) + return } } if tok_err := expect_token(p, .String); tok_err != .None { - err = .Expected_String_For_Object_Key; - return; + err = .Expected_String_For_Object_Key + return } - key = unquote_string(tok, p.spec, p.allocator); - return; + key = unquote_string(tok, p.spec, p.allocator) + return } parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) { - expect_token(p, .Open_Brace) or_return; + expect_token(p, .Open_Brace) or_return - obj: Object; - obj.allocator = p.allocator; + obj: Object + obj.allocator = p.allocator defer if err != .None { for key, elem in obj { - delete(key, p.allocator); - destroy_value(elem); + delete(key, p.allocator) + destroy_value(elem) } - delete(obj); + delete(obj) } for p.curr_token.kind != .Close_Brace { - key: string; - key, err = parse_object_key(p); + key: string + key, err = parse_object_key(p) if err != .None { - delete(key, p.allocator); - return; + delete(key, p.allocator) + return } if colon_err := expect_token(p, .Colon); colon_err != .None { - err = 
.Expected_Colon_After_Key; - return; + err = .Expected_Colon_After_Key + return } - elem := parse_value(p) or_return; + elem := parse_value(p) or_return if key in obj { - err = .Duplicate_Object_Key; - delete(key, p.allocator); - return; + err = .Duplicate_Object_Key + delete(key, p.allocator) + return } - obj[key] = elem; + obj[key] = elem if p.spec == Specification.JSON5 { // Allow trailing commas if allow_token(p, .Comma) { - continue; + continue } } else { // Disallow trailing commas if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } } - expect_token(p, .Close_Brace) or_return; - value = obj; - return; + expect_token(p, .Close_Brace) or_return + value = obj + return } @@ -251,177 +251,177 @@ parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) { unquote_string :: proc(token: Token, spec: Specification, allocator := context.allocator) -> string { get_u2_rune :: proc(s: string) -> rune { if len(s) < 4 || s[0] != '\\' || s[1] != 'x' { - return -1; + return -1 } - r: rune; + r: rune for c in s[2:4] { - x: rune; + x: rune switch c { - case '0'..='9': x = c - '0'; - case 'a'..='f': x = c - 'a' + 10; - case 'A'..='F': x = c - 'A' + 10; - case: return -1; + case '0'..='9': x = c - '0' + case 'a'..='f': x = c - 'a' + 10 + case 'A'..='F': x = c - 'A' + 10 + case: return -1 } - r = r*16 + x; + r = r*16 + x } - return r; + return r } get_u4_rune :: proc(s: string) -> rune { if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1; + return -1 } - r: rune; + r: rune for c in s[2:6] { - x: rune; + x: rune switch c { - case '0'..='9': x = c - '0'; - case 'a'..='f': x = c - 'a' + 10; - case 'A'..='F': x = c - 'A' + 10; - case: return -1; + case '0'..='9': x = c - '0' + case 'a'..='f': x = c - 'a' + 10 + case 'A'..='F': x = c - 'A' + 10 + case: return -1 } - r = r*16 + x; + r = r*16 + x } - return r; + return r } if token.kind != .String { - return ""; + return "" } - s := token.text; + s := token.text if len(s) <= 2 { - return ""; + return "" } - quote := s[0]; + quote := s[0] if s[0] != s[len(s)-1] { // Invalid string - return ""; + return "" } - s = s[1:len(s)-1]; + s = s[1:len(s)-1] - i := 0; + i := 0 for i < len(s) { - c := s[i]; + c := s[i] if c == '\\' || c == quote || c < ' ' { - break; + break } if c < utf8.RUNE_SELF { - i += 1; - continue; + i += 1 + continue } - r, w := utf8.decode_rune_in_string(s); + r, w := utf8.decode_rune_in_string(s) if r == utf8.RUNE_ERROR && w == 1 { - break; + break } - i += w; + i += w } if i == len(s) { - return clone_string(s, allocator); + return clone_string(s, allocator) } - b := make([]byte, len(s) + 2*utf8.UTF_MAX, allocator); - w := copy(b, s[0:i]); + b := make([]byte, len(s) + 2*utf8.UTF_MAX, allocator) + w := copy(b, s[0:i]) loop: for i < len(s) { - c := s[i]; + c := s[i] switch { case c == '\\': - i += 1; + i += 1 if i >= len(s) { - break loop; + break loop } switch s[i] { - case: break loop; + case: break loop case '"', '\'', '\\', '/': - b[w] = s[i]; - i += 1; - w += 1; + b[w] = s[i] + i += 1 + w += 1 case 'b': - b[w] = '\b'; - i += 1; - w += 1; + b[w] = '\b' + i += 1 + w += 1 case 'f': - b[w] = '\f'; - i += 1; - w += 1; + b[w] = '\f' + i += 1 + w += 1 case 'r': - b[w] = '\r'; - i += 1; - w += 1; + b[w] = '\r' + i += 1 + w += 1 case 't': - b[w] = '\t'; - i += 1; - w += 1; + b[w] = '\t' + i += 1 + w += 1 case 'n': - b[w] = '\n'; - i += 1; - w += 1; + b[w] = '\n' + i += 1 + w += 1 case 'u': - i -= 1; // Include the \u in the check for sanity sake - r := get_u4_rune(s[i:]); + i -= 1 // Include the \u in the check 
for sanity sake + r := get_u4_rune(s[i:]) if r < 0 { - break loop; + break loop } - i += 6; + i += 6 - buf, buf_width := utf8.encode_rune(r); - copy(b[w:], buf[:buf_width]); - w += buf_width; + buf, buf_width := utf8.encode_rune(r) + copy(b[w:], buf[:buf_width]) + w += buf_width case '0': if spec == Specification.JSON5 { - b[w] = '\x00'; - i += 1; - w += 1; + b[w] = '\x00' + i += 1 + w += 1 } else { - break loop; + break loop } case 'v': if spec == Specification.JSON5 { - b[w] = '\v'; - i += 1; - w += 1; + b[w] = '\v' + i += 1 + w += 1 } else { - break loop; + break loop } case 'x': if spec == Specification.JSON5 { - i -= 1; // Include the \x in the check for sanity sake - r := get_u2_rune(s[i:]); + i -= 1 // Include the \x in the check for sanity sake + r := get_u2_rune(s[i:]) if r < 0 { - break loop; + break loop } - i += 4; + i += 4 - buf, buf_width := utf8.encode_rune(r); - copy(b[w:], buf[:buf_width]); - w += buf_width; + buf, buf_width := utf8.encode_rune(r) + copy(b[w:], buf[:buf_width]) + w += buf_width } else { - break loop; + break loop } } case c == quote, c < ' ': - break loop; + break loop case c < utf8.RUNE_SELF: - b[w] = c; - i += 1; - w += 1; + b[w] = c + i += 1 + w += 1 case: - r, width := utf8.decode_rune_in_string(s[i:]); - i += width; + r, width := utf8.decode_rune_in_string(s[i:]) + i += width - buf, buf_width := utf8.encode_rune(r); - assert(buf_width <= width); - copy(b[w:], buf[:buf_width]); - w += buf_width; + buf, buf_width := utf8.encode_rune(r) + assert(buf_width <= width) + copy(b[w:], buf[:buf_width]) + w += buf_width } } - return string(b[:w]); + return string(b[:w]) } diff --git a/core/encoding/json/tokenizer.odin b/core/encoding/json/tokenizer.odin index 7bd2c5283..69ae81bd1 100644 --- a/core/encoding/json/tokenizer.odin +++ b/core/encoding/json/tokenizer.odin @@ -54,22 +54,22 @@ Tokenizer :: struct { make_tokenizer :: proc(data: []byte, spec := Specification.JSON, parse_integers := false) -> Tokenizer { - t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers}; - next_rune(&t); + t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers} + next_rune(&t) if t.r == utf8.RUNE_BOM { - next_rune(&t); + next_rune(&t) } - return t; + return t } next_rune :: proc(t: ^Tokenizer) -> rune #no_bounds_check { if t.offset >= len(t.data) { - return utf8.RUNE_EOF; + return utf8.RUNE_EOF } - t.offset += t.w; - t.r, t.w = utf8.decode_rune(t.data[t.offset:]); - t.pos.column = t.offset - t.curr_line_offset; - return t.r; + t.offset += t.w + t.r, t.w = utf8.decode_rune(t.data[t.offset:]) + t.pos.column = t.offset - t.curr_line_offset + return t.r } @@ -79,19 +79,19 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { if '0' <= t.r && t.r <= '9' { // Okay } else { - return; + return } - next_rune(t); + next_rune(t) } } skip_hex_digits :: proc(t: ^Tokenizer) { for t.offset < len(t.data) { - next_rune(t); + next_rune(t) switch t.r { case '0'..='9', 'a'..='f', 'A'..='F': // Okay case: - return; + return } } } @@ -99,56 +99,56 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { scan_espace :: proc(t: ^Tokenizer) -> bool { switch t.r { case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f': - next_rune(t); - return true; + next_rune(t) + return true case 'u': // Expect 4 hexadecimal digits for i := 0; i < 4; i += 1 { - r := next_rune(t); + r := next_rune(t) switch r { case '0'..='9', 'a'..='f', 'A'..='F': // Okay case: - return false; + return false } } - return true; + return true case: // 
Ignore the next rune regardless - next_rune(t); + next_rune(t) } - return false; + return false } skip_whitespace :: proc(t: ^Tokenizer) -> rune { loop: for t.offset < len(t.data) { switch t.r { case ' ', '\t', '\v', '\f', '\r': - next_rune(t); + next_rune(t) case '\n': - t.line += 1; - t.curr_line_offset = t.offset; - t.pos.column = 1; - next_rune(t); + t.line += 1 + t.curr_line_offset = t.offset + t.pos.column = 1 + next_rune(t) case: if t.spec == .JSON5 { switch t.r { case 0x2028, 0x2029, 0xFEFF: - next_rune(t); - continue loop; + next_rune(t) + continue loop } } - break loop; + break loop } } - return t.r; + return t.r } skip_to_next_line :: proc(t: ^Tokenizer) { for t.offset < len(t.data) { - r := next_rune(t); + r := next_rune(t) if r == '\n' { - return; + return } } } @@ -157,53 +157,53 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { for t.offset < len(t.data) { switch next_rune(t) { case 'A'..='Z', 'a'..='z', '0'..='9', '_': - continue; + continue } - return; + return } } - skip_whitespace(t); + skip_whitespace(t) - token.pos = t.pos; + token.pos = t.pos - token.kind = .Invalid; + token.kind = .Invalid - curr_rune := t.r; - next_rune(t); + curr_rune := t.r + next_rune(t) block: switch curr_rune { case utf8.RUNE_ERROR: - err = .Illegal_Character; + err = .Illegal_Character case utf8.RUNE_EOF, '\x00': - token.kind = .EOF; - err = .EOF; + token.kind = .EOF + err = .EOF case 'A'..='Z', 'a'..='z', '_': - token.kind = .Ident; + token.kind = .Ident - skip_alphanum(t); + skip_alphanum(t) switch str := string(t.data[token.offset:t.offset]); str { - case "null": token.kind = .Null; - case "false": token.kind = .False; - case "true": token.kind = .True; + case "null": token.kind = .Null + case "false": token.kind = .False + case "true": token.kind = .True case: if t.spec == .JSON5 { switch str { - case "Infinity": token.kind = .Infinity; - case "NaN": token.kind = .NaN; + case "Infinity": token.kind = .Infinity + case "NaN": token.kind = .NaN } } } case '+': - err = .Illegal_Character; + err = .Illegal_Character if t.spec != .JSON5 { - break; + break } - fallthrough; + fallthrough case '-': switch t.r { @@ -211,281 +211,281 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { // Okay case: // Illegal use of +/- - err = .Illegal_Character; + err = .Illegal_Character if t.spec == .JSON5 { if t.r == 'I' || t.r == 'N' { - skip_alphanum(t); + skip_alphanum(t) } switch string(t.data[token.offset:t.offset]) { - case "-Infinity": token.kind = .Infinity; - case "-NaN": token.kind = .NaN; + case "-Infinity": token.kind = .Infinity + case "-NaN": token.kind = .NaN } } - break block; + break block } - fallthrough; + fallthrough case '0'..='9': - token.kind = t.parse_integers ? .Integer : .Float; + token.kind = t.parse_integers ? .Integer : .Float if t.spec == .JSON5 { // Hexadecimal Numbers if curr_rune == '0' && (t.r == 'x' || t.r == 'X') { - next_rune(t); - skip_hex_digits(t); - break; + next_rune(t) + skip_hex_digits(t) + break } } - skip_digits(t); + skip_digits(t) if t.r == '.' 
{ - token.kind = .Float; - next_rune(t); - skip_digits(t); + token.kind = .Float + next_rune(t) + skip_digits(t) } if t.r == 'e' || t.r == 'E' { switch r := next_rune(t); r { case '+', '-': - next_rune(t); + next_rune(t) } - skip_digits(t); + skip_digits(t) } - str := string(t.data[token.offset:t.offset]); + str := string(t.data[token.offset:t.offset]) if !is_valid_number(str, t.spec) { - err = .Invalid_Number; + err = .Invalid_Number } case '.': - err = .Illegal_Character; + err = .Illegal_Character if t.spec == .JSON5 { // Allow leading decimal point - skip_digits(t); + skip_digits(t) if t.r == 'e' || t.r == 'E' { switch r := next_rune(t); r { case '+', '-': - next_rune(t); + next_rune(t) } - skip_digits(t); + skip_digits(t) } - str := string(t.data[token.offset:t.offset]); + str := string(t.data[token.offset:t.offset]) if !is_valid_number(str, t.spec) { - err = .Invalid_Number; + err = .Invalid_Number } } case '\'': - err = .Illegal_Character; + err = .Illegal_Character if t.spec != .JSON5 { - break; + break } - fallthrough; + fallthrough case '"': - token.kind = .String; - quote := curr_rune; + token.kind = .String + quote := curr_rune for t.offset < len(t.data) { - r := t.r; + r := t.r if r == '\n' || r < 0 { - err = .String_Not_Terminated; - break; + err = .String_Not_Terminated + break } - next_rune(t); + next_rune(t) if r == quote { - break; + break } if r == '\\' { - scan_espace(t); + scan_espace(t) } } - str := string(t.data[token.offset : t.offset]); + str := string(t.data[token.offset : t.offset]) if !is_valid_string_literal(str, t.spec) { - err = .Invalid_String; + err = .Invalid_String } - case ',': token.kind = .Comma; - case ':': token.kind = .Colon; - case '{': token.kind = .Open_Brace; - case '}': token.kind = .Close_Brace; - case '[': token.kind = .Open_Bracket; - case ']': token.kind = .Close_Bracket; + case ',': token.kind = .Comma + case ':': token.kind = .Colon + case '{': token.kind = .Open_Brace + case '}': token.kind = .Close_Brace + case '[': token.kind = .Open_Bracket + case ']': token.kind = .Close_Bracket case '/': - err = .Illegal_Character; + err = .Illegal_Character if t.spec == .JSON5 { switch t.r { case '/': // Single-line comments - skip_to_next_line(t); - return get_token(t); + skip_to_next_line(t) + return get_token(t) case '*': // None-nested multi-line comments for t.offset < len(t.data) { - next_rune(t); + next_rune(t) if t.r == '*' { - next_rune(t); + next_rune(t) if t.r == '/' { - next_rune(t); - return get_token(t); + next_rune(t) + return get_token(t) } } } - err = .EOF; + err = .EOF } } - case: err = .Illegal_Character; + case: err = .Illegal_Character } - token.text = string(t.data[token.offset : t.offset]); + token.text = string(t.data[token.offset : t.offset]) - return; + return } is_valid_number :: proc(str: string, spec: Specification) -> bool { - s := str; + s := str if s == "" { - return false; + return false } if s[0] == '-' { - s = s[1:]; + s = s[1:] if s == "" { - return false; + return false } } else if spec == .JSON5 { if s[0] == '+' { // Allow positive sign - s = s[1:]; + s = s[1:] if s == "" { - return false; + return false } } } switch s[0] { case '0': - s = s[1:]; + s = s[1:] case '1'..='9': - s = s[1:]; + s = s[1:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:]; + s = s[1:] } case '.': if spec == .JSON5 { // Allow leading decimal point - s = s[1:]; + s = s[1:] } else { - return false; + return false } case: - return false; + return false } if spec == .JSON5 { if len(s) == 1 && s[0] == '.' 
{ // Allow trailing decimal point - return true; + return true } } if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:]; + s = s[2:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:]; + s = s[1:] } } if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:]; + s = s[1:] switch s[0] { case '+', '-': - s = s[1:]; + s = s[1:] if s == "" { - return false; + return false } } for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:]; + s = s[1:] } } // The string should be empty now to be valid - return s == ""; + return s == "" } is_valid_string_literal :: proc(str: string, spec: Specification) -> bool { - s := str; + s := str if len(s) < 2 { - return false; + return false } - quote := s[0]; + quote := s[0] if s[0] != s[len(s)-1] { - return false; + return false } if s[0] != '"' || s[len(s)-1] != '"' { if spec == .JSON5 { if s[0] != '\'' || s[len(s)-1] != '\'' { - return false; + return false } } else { - return false; + return false } } - s = s[1 : len(s)-1]; + s = s[1 : len(s)-1] - i := 0; + i := 0 for i < len(s) { - c := s[i]; + c := s[i] switch { case c == '\\': - i += 1; + i += 1 if i >= len(s) { - return false; + return false } switch s[i] { case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f': - i += 1; + i += 1 case 'u': if i >= len(s) { - return false; + return false } - hex := s[i+1:]; + hex := s[i+1:] if len(hex) < 4 { - return false; + return false } - hex = hex[:4]; - i += 5; + hex = hex[:4] + i += 5 for j := 0; j < 4; j += 1 { - c2 := hex[j]; + c2 := hex[j] switch c2 { case '0'..='9', 'a'..='z', 'A'..='Z': // Okay case: - return false; + return false } } - case: return false; + case: return false } case c == quote, c < ' ': - return false; + return false case c < utf8.RUNE_SELF: - i += 1; + i += 1 case: - r, width := utf8.decode_rune_in_string(s[i:]); + r, width := utf8.decode_rune_in_string(s[i:]) if r == utf8.RUNE_ERROR && width == 1 { - return false; + return false } - i += width; + i += width } } if i == len(s) { - return true; + return true } - return true; + return true } diff --git a/core/encoding/json/types.odin b/core/encoding/json/types.odin index 10b88d87d..27bbae432 100644 --- a/core/encoding/json/types.odin +++ b/core/encoding/json/types.odin @@ -6,13 +6,13 @@ Specification :: enum { // MJSON, // http://bitsquid.blogspot.com/2009/09/json-configuration-data.html } -Null :: distinct rawptr; -Integer :: i64; -Float :: f64; -Boolean :: bool; -String :: string; -Array :: distinct [dynamic]Value; -Object :: distinct map[string]Value; +Null :: distinct rawptr +Integer :: i64 +Float :: f64 +Boolean :: bool +String :: string +Array :: distinct [dynamic]Value +Object :: distinct map[string]Value Value :: union { Null, @@ -50,17 +50,17 @@ destroy_value :: proc(value: Value) { #partial switch v in value { case Object: for key, elem in v { - delete(key); - destroy_value(elem); + delete(key) + destroy_value(elem) } - delete(v); + delete(v) case Array: for elem in v { - destroy_value(elem); + destroy_value(elem) } - delete(v); + delete(v) case String: - delete(v); + delete(v) } } diff --git a/core/encoding/json/validator.odin b/core/encoding/json/validator.odin index 1d2f7a3ed..3f180c722 100644 --- a/core/encoding/json/validator.odin +++ b/core/encoding/json/validator.odin @@ -4,119 +4,119 @@ import "core:mem" // NOTE(bill): is_valid will not check for duplicate keys is_valid :: proc(data: []byte, spec := Specification.JSON, parse_integers := false) -> bool { - p := make_parser(data, spec, parse_integers, mem.nil_allocator()); + p := make_parser(data, 
spec, parse_integers, mem.nil_allocator()) if p.spec == Specification.JSON5 { - return validate_value(&p); + return validate_value(&p) } - return validate_object(&p); + return validate_object(&p) } validate_object_key :: proc(p: ^Parser) -> bool { - tok := p.curr_token; + tok := p.curr_token if p.spec == Specification.JSON5 { if tok.kind == .String { - expect_token(p, .String); - return true; + expect_token(p, .String) + return true } else if tok.kind == .Ident { - expect_token(p, .Ident); - return true; + expect_token(p, .Ident) + return true } } - err := expect_token(p, .String); - return err == Error.None; + err := expect_token(p, .String) + return err == Error.None } validate_object :: proc(p: ^Parser) -> bool { if err := expect_token(p, .Open_Brace); err != Error.None { - return false; + return false } for p.curr_token.kind != .Close_Brace { if !validate_object_key(p) { - return false; + return false } if colon_err := expect_token(p, .Colon); colon_err != Error.None { - return false; + return false } if !validate_value(p) { - return false; + return false } if p.spec == Specification.JSON5 { // Allow trailing commas if allow_token(p, .Comma) { - continue; + continue } } else { // Disallow trailing commas if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } } if err := expect_token(p, .Close_Brace); err != Error.None { - return false; + return false } - return true; + return true } validate_array :: proc(p: ^Parser) -> bool { if err := expect_token(p, .Open_Bracket); err != Error.None { - return false; + return false } for p.curr_token.kind != .Close_Bracket { if !validate_value(p) { - return false; + return false } // Disallow trailing commas for the time being if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } if err := expect_token(p, .Close_Bracket); err != Error.None { - return false; + return false } - return true; + return true } validate_value :: proc(p: ^Parser) -> bool { - token := p.curr_token; + token := p.curr_token #partial switch token.kind { case .Null, .False, .True: - advance_token(p); - return true; + advance_token(p) + return true case .Integer, .Float: - advance_token(p); - return true; + advance_token(p) + return true case .String: - advance_token(p); - return is_valid_string_literal(token.text, p.spec); + advance_token(p) + return is_valid_string_literal(token.text, p.spec) case .Open_Brace: - return validate_object(p); + return validate_object(p) case .Open_Bracket: - return validate_array(p); + return validate_array(p) case: if p.spec == Specification.JSON5 { #partial switch token.kind { case .Infinity, .NaN: - advance_token(p); - return true; + advance_token(p) + return true } } } - return false; + return false } |
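For reference, this is roughly how the core:encoding/json API touched by this commit reads in the new semicolon-free style. This is a minimal sketch and not part of the commit: the package name, the input literal, and the control flow are invented for illustration; only the signatures of is_valid, parse, destroy_value, and the Object/Value types are taken from the diff above, and it assumes an Odin toolchain from around this commit.

package json_demo

import "core:encoding/json"
import "core:fmt"

main :: proc() {
	text := `{"name": "Odin", "tags": ["fast", "simple"]}`
	data := transmute([]byte)text

	// is_valid parses into mem.nil_allocator(), so nothing survives the check
	if !json.is_valid(data) {
		fmt.println("invalid JSON")
		return
	}

	// parse allocates the resulting Value tree with context.allocator by default
	value, err := json.parse(data)
	if err != .None {
		fmt.println("parse error:", err)
		return
	}
	// destroy_value frees nested Objects, Arrays, and Strings recursively
	defer json.destroy_value(value)

	// A top-level JSON document parsed with Specification.JSON is an Object;
	// the assertion panics on any other variant, which is fine for a sketch
	obj := value.(json.Object)
	fmt.println(obj["name"])
}

Running is_valid before parse is redundant (parse reports the same errors); it is shown here only to exercise both entry points from validator.odin and parser.odin.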