| author | gingerBill <bill@gingerbill.org> | 2021-08-31 22:21:13 +0100 |
|---|---|---|
| committer | gingerBill <bill@gingerbill.org> | 2021-08-31 22:21:13 +0100 |
| commit | 251da264ed6e0f039931683c7b0d4b97e88c8d99 | |
| tree | c7a9a088477d2452c2cf850458c62d994a211df6 | /core/encoding/json |
| parent | b176af27427a6c39448a71a8023e4a9877f0a51c | |
Remove unneeded semicolons from the core library
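The change is purely syntactic: Odin's terminating semicolons are optional, and this commit strips the redundant ones from the core library. As a minimal illustration, here is the `clone_string` helper from `parser.odin` in this diff, with the pre-commit statements quoted in a comment and the committed form as live code; the package and import lines are added here only so the excerpt stands on its own.

```odin
package json_semicolon_example

import "core:mem"

// Excerpt from core/encoding/json/parser.odin as committed.
// Before this change every statement ended in a semicolon:
//
//     n := len(s);
//     b := make([]byte, n+1, allocator);
//     copy(b, s);
//     b[n] = 0;
//     return string(b[:n]);
//
// After it, the same body reads:
clone_string :: proc(s: string, allocator: mem.Allocator) -> string {
	n := len(s)
	b := make([]byte, n+1, allocator)
	copy(b, s)
	b[n] = 0
	return string(b[:n])
}
```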
Diffstat (limited to 'core/encoding/json')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | core/encoding/json/marshal.odin | 310 |
| -rw-r--r-- | core/encoding/json/parser.odin | 406 |
| -rw-r--r-- | core/encoding/json/tokenizer.odin | 304 |
| -rw-r--r-- | core/encoding/json/types.odin | 26 |
| -rw-r--r-- | core/encoding/json/validator.odin | 74 |
5 files changed, 560 insertions, 560 deletions
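The diffstat covers the whole of `core:encoding/json`: marshaller, parser, tokenizer, value types, and validator. The following sketch exercises the parsing side using only the signatures visible in this diff (`parse`, `destroy_value`, and the `Value` union with its `Object` variant from types.odin); the sample document, variable names, and error handling are illustrative assumptions, not part of the commit.

```odin
package json_parse_example

import "core:fmt"
import "core:encoding/json"

main :: proc() {
	// Sample input; json.parse takes a []byte per parser.odin below.
	doc := `{"name": "odin", "answer": 42}`

	value, err := json.parse(transmute([]byte)doc)
	if err != .None {
		fmt.println("parse error:", err)
		return
	}
	defer json.destroy_value(value)

	// With the default Specification.JSON the top-level value is an Object,
	// a distinct map[string]Value per types.odin below.
	obj, is_obj := value.(json.Object)
	if !is_obj {
		return
	}
	fmt.println(obj["name"], obj["answer"])
}
```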
diff --git a/core/encoding/json/marshal.odin b/core/encoding/json/marshal.odin index d9a674d33..cea3c3df6 100644 --- a/core/encoding/json/marshal.odin +++ b/core/encoding/json/marshal.odin @@ -13,305 +13,305 @@ Marshal_Error :: enum { } marshal :: proc(v: any, allocator := context.allocator) -> ([]byte, Marshal_Error) { - b: strings.Builder; - strings.init_builder(&b, allocator); + b: strings.Builder + strings.init_builder(&b, allocator) - err := marshal_arg(&b, v); + err := marshal_arg(&b, v) if err != .None { - strings.destroy_builder(&b); - return nil, err; + strings.destroy_builder(&b) + return nil, err } if len(b.buf) == 0 { - strings.destroy_builder(&b); - return nil, err; + strings.destroy_builder(&b) + return nil, err } - return b.buf[:], err; + return b.buf[:], err } marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error { if v == nil { - strings.write_string(b, "null"); - return .None; + strings.write_string(b, "null") + return .None } - ti := runtime.type_info_base(type_info_of(v.id)); - a := any{v.data, ti.id}; + ti := runtime.type_info_base(type_info_of(v.id)) + a := any{v.data, ti.id} switch info in ti.variant { case runtime.Type_Info_Named: - unreachable(); + unreachable() case runtime.Type_Info_Integer: - buf: [21]byte; - u: u64; + buf: [21]byte + u: u64 switch i in a { - case i8: u = u64(i); - case i16: u = u64(i); - case i32: u = u64(i); - case i64: u = u64(i); - case int: u = u64(i); - case u8: u = u64(i); - case u16: u = u64(i); - case u32: u = u64(i); - case u64: u = u64(i); - case uint: u = u64(i); - case uintptr: u = u64(i); - - case i16le: u = u64(i); - case i32le: u = u64(i); - case i64le: u = u64(i); - case u16le: u = u64(i); - case u32le: u = u64(i); - case u64le: u = u64(i); - - case i16be: u = u64(i); - case i32be: u = u64(i); - case i64be: u = u64(i); - case u16be: u = u64(i); - case u32be: u = u64(i); - case u64be: u = u64(i); + case i8: u = u64(i) + case i16: u = u64(i) + case i32: u = u64(i) + case i64: u = u64(i) + case int: u = u64(i) + case u8: u = u64(i) + case u16: u = u64(i) + case u32: u = u64(i) + case u64: u = u64(i) + case uint: u = u64(i) + case uintptr: u = u64(i) + + case i16le: u = u64(i) + case i32le: u = u64(i) + case i64le: u = u64(i) + case u16le: u = u64(i) + case u32le: u = u64(i) + case u64le: u = u64(i) + + case i16be: u = u64(i) + case i32be: u = u64(i) + case i64be: u = u64(i) + case u16be: u = u64(i) + case u32be: u = u64(i) + case u64be: u = u64(i) } - s := strconv.append_bits(buf[:], u, 10, info.signed, 8*ti.size, "0123456789", nil); - strings.write_string(b, s); + s := strconv.append_bits(buf[:], u, 10, info.signed, 8*ti.size, "0123456789", nil) + strings.write_string(b, s) case runtime.Type_Info_Rune: - r := a.(rune); - strings.write_byte(b, '"'); - strings.write_escaped_rune(b, r, '"', true); - strings.write_byte(b, '"'); + r := a.(rune) + strings.write_byte(b, '"') + strings.write_escaped_rune(b, r, '"', true) + strings.write_byte(b, '"') case runtime.Type_Info_Float: - val: f64; + val: f64 switch f in a { - case f16: val = f64(f); - case f32: val = f64(f); - case f64: val = f64(f); + case f16: val = f64(f) + case f32: val = f64(f) + case f64: val = f64(f) } - buf: [386]byte; + buf: [386]byte - str := strconv.append_float(buf[1:], val, 'f', 2*ti.size, 8*ti.size); - s := buf[:len(str)+1]; + str := strconv.append_float(buf[1:], val, 'f', 2*ti.size, 8*ti.size) + s := buf[:len(str)+1] if s[1] == '+' || s[1] == '-' { - s = s[1:]; + s = s[1:] } else { - s[0] = '+'; + s[0] = '+' } if s[0] == '+' { - s = s[1:]; + s = s[1:] 
} - strings.write_string(b, string(s)); + strings.write_string(b, string(s)) case runtime.Type_Info_Complex: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Quaternion: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_String: switch s in a { - case string: strings.write_quoted_string(b, s); - case cstring: strings.write_quoted_string(b, string(s)); + case string: strings.write_quoted_string(b, s) + case cstring: strings.write_quoted_string(b, string(s)) } case runtime.Type_Info_Boolean: - val: bool; + val: bool switch b in a { - case bool: val = bool(b); - case b8: val = bool(b); - case b16: val = bool(b); - case b32: val = bool(b); - case b64: val = bool(b); + case bool: val = bool(b) + case b8: val = bool(b) + case b16: val = bool(b) + case b32: val = bool(b) + case b64: val = bool(b) } - strings.write_string(b, val ? "true" : "false"); + strings.write_string(b, val ? "true" : "false") case runtime.Type_Info_Any: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Type_Id: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Pointer: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Multi_Pointer: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Procedure: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Tuple: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Enumerated_Array: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Simd_Vector: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Relative_Pointer: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Relative_Slice: - return .Unsupported_Type; + return .Unsupported_Type case runtime.Type_Info_Array: - strings.write_byte(b, '['); + strings.write_byte(b, '[') for i in 0..<info.count { if i > 0 { strings.write_string(b, ", "); } - data := uintptr(v.data) + uintptr(i*info.elem_size); - marshal_arg(b, any{rawptr(data), info.elem.id}); + data := uintptr(v.data) + uintptr(i*info.elem_size) + marshal_arg(b, any{rawptr(data), info.elem.id}) } - strings.write_byte(b, ']'); + strings.write_byte(b, ']') case runtime.Type_Info_Dynamic_Array: - strings.write_byte(b, '['); - array := cast(^mem.Raw_Dynamic_Array)v.data; + strings.write_byte(b, '[') + array := cast(^mem.Raw_Dynamic_Array)v.data for i in 0..<array.len { if i > 0 { strings.write_string(b, ", "); } - data := uintptr(array.data) + uintptr(i*info.elem_size); - marshal_arg(b, any{rawptr(data), info.elem.id}); + data := uintptr(array.data) + uintptr(i*info.elem_size) + marshal_arg(b, any{rawptr(data), info.elem.id}) } - strings.write_byte(b, ']'); + strings.write_byte(b, ']') case runtime.Type_Info_Slice: - strings.write_byte(b, '['); - slice := cast(^mem.Raw_Slice)v.data; + strings.write_byte(b, '[') + slice := cast(^mem.Raw_Slice)v.data for i in 0..<slice.len { if i > 0 { strings.write_string(b, ", "); } - data := uintptr(slice.data) + uintptr(i*info.elem_size); - marshal_arg(b, any{rawptr(data), info.elem.id}); + data := uintptr(slice.data) + uintptr(i*info.elem_size) + marshal_arg(b, any{rawptr(data), info.elem.id}) } - strings.write_byte(b, ']'); + strings.write_byte(b, ']') case runtime.Type_Info_Map: - m := (^mem.Raw_Map)(v.data); + m := (^mem.Raw_Map)(v.data) - strings.write_byte(b, '{'); + strings.write_byte(b, '{') if m != nil { if info.generated_struct == nil { - return 
.Unsupported_Type; + return .Unsupported_Type } - entries := &m.entries; - gs := runtime.type_info_base(info.generated_struct).variant.(runtime.Type_Info_Struct); - ed := runtime.type_info_base(gs.types[1]).variant.(runtime.Type_Info_Dynamic_Array); - entry_type := ed.elem.variant.(runtime.Type_Info_Struct); - entry_size := ed.elem_size; + entries := &m.entries + gs := runtime.type_info_base(info.generated_struct).variant.(runtime.Type_Info_Struct) + ed := runtime.type_info_base(gs.types[1]).variant.(runtime.Type_Info_Dynamic_Array) + entry_type := ed.elem.variant.(runtime.Type_Info_Struct) + entry_size := ed.elem_size for i in 0..<entries.len { if i > 0 { strings.write_string(b, ", "); } - data := uintptr(entries.data) + uintptr(i*entry_size); - key := rawptr(data + entry_type.offsets[2]); - value := rawptr(data + entry_type.offsets[3]); + data := uintptr(entries.data) + uintptr(i*entry_size) + key := rawptr(data + entry_type.offsets[2]) + value := rawptr(data + entry_type.offsets[3]) - marshal_arg(b, any{key, info.key.id}); - strings.write_string(b, ": "); - marshal_arg(b, any{value, info.value.id}); + marshal_arg(b, any{key, info.key.id}) + strings.write_string(b, ": ") + marshal_arg(b, any{value, info.value.id}) } } - strings.write_byte(b, '}'); + strings.write_byte(b, '}') case runtime.Type_Info_Struct: - strings.write_byte(b, '{'); + strings.write_byte(b, '{') for name, i in info.names { if i > 0 { strings.write_string(b, ", "); } - strings.write_quoted_string(b, name); - strings.write_string(b, ": "); + strings.write_quoted_string(b, name) + strings.write_string(b, ": ") - id := info.types[i].id; - data := rawptr(uintptr(v.data) + info.offsets[i]); - marshal_arg(b, any{data, id}); + id := info.types[i].id + data := rawptr(uintptr(v.data) + info.offsets[i]) + marshal_arg(b, any{data, id}) } - strings.write_byte(b, '}'); + strings.write_byte(b, '}') case runtime.Type_Info_Union: - tag_ptr := uintptr(v.data) + info.tag_offset; - tag_any := any{rawptr(tag_ptr), info.tag_type.id}; + tag_ptr := uintptr(v.data) + info.tag_offset + tag_any := any{rawptr(tag_ptr), info.tag_type.id} - tag: i64 = -1; + tag: i64 = -1 switch i in tag_any { - case u8: tag = i64(i); - case i8: tag = i64(i); - case u16: tag = i64(i); - case i16: tag = i64(i); - case u32: tag = i64(i); - case i32: tag = i64(i); - case u64: tag = i64(i); - case i64: tag = i64(i); - case: panic("Invalid union tag type"); + case u8: tag = i64(i) + case i8: tag = i64(i) + case u16: tag = i64(i) + case i16: tag = i64(i) + case u32: tag = i64(i) + case i32: tag = i64(i) + case u64: tag = i64(i) + case i64: tag = i64(i) + case: panic("Invalid union tag type") } if v.data == nil || tag == 0 { - strings.write_string(b, "null"); + strings.write_string(b, "null") } else { - id := info.variants[tag-1].id; - marshal_arg(b, any{v.data, id}); + id := info.variants[tag-1].id + marshal_arg(b, any{v.data, id}) } case runtime.Type_Info_Enum: - return marshal_arg(b, any{v.data, info.base.id}); + return marshal_arg(b, any{v.data, info.base.id}) case runtime.Type_Info_Bit_Set: is_bit_set_different_endian_to_platform :: proc(ti: ^runtime.Type_Info) -> bool { if ti == nil { - return false; + return false } - t := runtime.type_info_base(ti); + t := runtime.type_info_base(ti) #partial switch info in t.variant { case runtime.Type_Info_Integer: switch info.endianness { - case .Platform: return false; - case .Little: return ODIN_ENDIAN != "little"; - case .Big: return ODIN_ENDIAN != "big"; + case .Platform: return false + case .Little: return ODIN_ENDIAN != 
"little" + case .Big: return ODIN_ENDIAN != "big" } } - return false; + return false } - bit_data: u64; - bit_size := u64(8*ti.size); + bit_data: u64 + bit_size := u64(8*ti.size) - do_byte_swap := is_bit_set_different_endian_to_platform(info.underlying); + do_byte_swap := is_bit_set_different_endian_to_platform(info.underlying) switch bit_size { - case 0: bit_data = 0; + case 0: bit_data = 0 case 8: - x := (^u8)(v.data)^; - bit_data = u64(x); + x := (^u8)(v.data)^ + bit_data = u64(x) case 16: - x := (^u16)(v.data)^; + x := (^u16)(v.data)^ if do_byte_swap { - x = bits.byte_swap(x); + x = bits.byte_swap(x) } - bit_data = u64(x); + bit_data = u64(x) case 32: - x := (^u32)(v.data)^; + x := (^u32)(v.data)^ if do_byte_swap { - x = bits.byte_swap(x); + x = bits.byte_swap(x) } - bit_data = u64(x); + bit_data = u64(x) case 64: - x := (^u64)(v.data)^; + x := (^u64)(v.data)^ if do_byte_swap { - x = bits.byte_swap(x); + x = bits.byte_swap(x) } - bit_data = u64(x); - case: panic("unknown bit_size size"); + bit_data = u64(x) + case: panic("unknown bit_size size") } - strings.write_u64(b, bit_data); + strings.write_u64(b, bit_data) - return .Unsupported_Type; + return .Unsupported_Type } - return .None; + return .None } diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin index 8fafdcda4..b71d90b96 100644 --- a/core/encoding/json/parser.odin +++ b/core/encoding/json/parser.odin @@ -15,235 +15,235 @@ Parser :: struct { } make_parser :: proc(data: []byte, spec := Specification.JSON, parse_integers := false, allocator := context.allocator) -> Parser { - p: Parser; - p.tok = make_tokenizer(data, spec, parse_integers); - p.spec = spec; - p.allocator = allocator; - assert(p.allocator.procedure != nil); - advance_token(&p); - return p; + p: Parser + p.tok = make_tokenizer(data, spec, parse_integers) + p.spec = spec + p.allocator = allocator + assert(p.allocator.procedure != nil) + advance_token(&p) + return p } parse :: proc(data: []byte, spec := Specification.JSON, parse_integers := false, allocator := context.allocator) -> (Value, Error) { - context.allocator = allocator; - p := make_parser(data, spec, parse_integers, allocator); + context.allocator = allocator + p := make_parser(data, spec, parse_integers, allocator) if p.spec == Specification.JSON5 { - return parse_value(&p); + return parse_value(&p) } - return parse_object(&p); + return parse_object(&p) } token_end_pos :: proc(tok: Token) -> Pos { - end := tok.pos; - end.offset += len(tok.text); - return end; + end := tok.pos + end.offset += len(tok.text) + return end } advance_token :: proc(p: ^Parser) -> (Token, Error) { - err: Error; - p.prev_token = p.curr_token; - p.curr_token, err = get_token(&p.tok); - return p.prev_token, err; + err: Error + p.prev_token = p.curr_token + p.curr_token, err = get_token(&p.tok) + return p.prev_token, err } allow_token :: proc(p: ^Parser, kind: Token_Kind) -> bool { if p.curr_token.kind == kind { - advance_token(p); - return true; + advance_token(p) + return true } - return false; + return false } expect_token :: proc(p: ^Parser, kind: Token_Kind) -> Error { - prev := p.curr_token; - advance_token(p); + prev := p.curr_token + advance_token(p) if prev.kind == kind { - return .None; + return .None } - return .Unexpected_Token; + return .Unexpected_Token } parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) { - token := p.curr_token; + token := p.curr_token #partial switch token.kind { case .Null: - value = Null{}; - advance_token(p); - return; + value = Null{} + advance_token(p) + return 
case .False: - value = Boolean(false); - advance_token(p); - return; + value = Boolean(false) + advance_token(p) + return case .True: - value = Boolean(true); - advance_token(p); - return; + value = Boolean(true) + advance_token(p) + return case .Integer: - i, _ := strconv.parse_i64(token.text); - value = Integer(i); - advance_token(p); - return; + i, _ := strconv.parse_i64(token.text) + value = Integer(i) + advance_token(p) + return case .Float: - f, _ := strconv.parse_f64(token.text); - value = Float(f); - advance_token(p); - return; + f, _ := strconv.parse_f64(token.text) + value = Float(f) + advance_token(p) + return case .String: - value = String(unquote_string(token, p.spec, p.allocator)); - advance_token(p); - return; + value = String(unquote_string(token, p.spec, p.allocator)) + advance_token(p) + return case .Open_Brace: - return parse_object(p); + return parse_object(p) case .Open_Bracket: - return parse_array(p); + return parse_array(p) case: if p.spec == Specification.JSON5 { #partial switch token.kind { case .Infinity: - inf: u64 = 0x7ff0000000000000; + inf: u64 = 0x7ff0000000000000 if token.text[0] == '-' { - inf = 0xfff0000000000000; + inf = 0xfff0000000000000 } - value = transmute(f64)inf; - advance_token(p); - return; + value = transmute(f64)inf + advance_token(p) + return case .NaN: - nan: u64 = 0x7ff7ffffffffffff; + nan: u64 = 0x7ff7ffffffffffff if token.text[0] == '-' { - nan = 0xfff7ffffffffffff; + nan = 0xfff7ffffffffffff } - value = transmute(f64)nan; - advance_token(p); - return; + value = transmute(f64)nan + advance_token(p) + return } } } - err = .Unexpected_Token; - advance_token(p); - return; + err = .Unexpected_Token + advance_token(p) + return } parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) { - expect_token(p, .Open_Bracket) or_return; + expect_token(p, .Open_Bracket) or_return - array: Array; - array.allocator = p.allocator; + array: Array + array.allocator = p.allocator defer if err != .None { for elem in array { - destroy_value(elem); + destroy_value(elem) } - delete(array); + delete(array) } for p.curr_token.kind != .Close_Bracket { - elem := parse_value(p) or_return; - append(&array, elem); + elem := parse_value(p) or_return + append(&array, elem) // Disallow trailing commas for the time being if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } - expect_token(p, .Close_Bracket) or_return; - value = array; - return; + expect_token(p, .Close_Bracket) or_return + value = array + return } clone_string :: proc(s: string, allocator: mem.Allocator) -> string { - n := len(s); - b := make([]byte, n+1, allocator); - copy(b, s); - b[n] = 0; - return string(b[:n]); + n := len(s) + b := make([]byte, n+1, allocator) + copy(b, s) + b[n] = 0 + return string(b[:n]) } parse_object_key :: proc(p: ^Parser) -> (key: string, err: Error) { - tok := p.curr_token; + tok := p.curr_token if p.spec == Specification.JSON5 { if tok.kind == .String { - expect_token(p, .String); - key = unquote_string(tok, p.spec, p.allocator); - return; + expect_token(p, .String) + key = unquote_string(tok, p.spec, p.allocator) + return } else if tok.kind == .Ident { - expect_token(p, .Ident); - key = clone_string(tok.text, p.allocator); - return; + expect_token(p, .Ident) + key = clone_string(tok.text, p.allocator) + return } } if tok_err := expect_token(p, .String); tok_err != .None { - err = .Expected_String_For_Object_Key; - return; + err = .Expected_String_For_Object_Key + return } - key = unquote_string(tok, p.spec, p.allocator); - return; + key = 
unquote_string(tok, p.spec, p.allocator) + return } parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) { - expect_token(p, .Open_Brace) or_return; + expect_token(p, .Open_Brace) or_return - obj: Object; - obj.allocator = p.allocator; + obj: Object + obj.allocator = p.allocator defer if err != .None { for key, elem in obj { - delete(key, p.allocator); - destroy_value(elem); + delete(key, p.allocator) + destroy_value(elem) } - delete(obj); + delete(obj) } for p.curr_token.kind != .Close_Brace { - key: string; - key, err = parse_object_key(p); + key: string + key, err = parse_object_key(p) if err != .None { - delete(key, p.allocator); - return; + delete(key, p.allocator) + return } if colon_err := expect_token(p, .Colon); colon_err != .None { - err = .Expected_Colon_After_Key; - return; + err = .Expected_Colon_After_Key + return } - elem := parse_value(p) or_return; + elem := parse_value(p) or_return if key in obj { - err = .Duplicate_Object_Key; - delete(key, p.allocator); - return; + err = .Duplicate_Object_Key + delete(key, p.allocator) + return } - obj[key] = elem; + obj[key] = elem if p.spec == Specification.JSON5 { // Allow trailing commas if allow_token(p, .Comma) { - continue; + continue } } else { // Disallow trailing commas if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } } - expect_token(p, .Close_Brace) or_return; - value = obj; - return; + expect_token(p, .Close_Brace) or_return + value = obj + return } @@ -251,177 +251,177 @@ parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) { unquote_string :: proc(token: Token, spec: Specification, allocator := context.allocator) -> string { get_u2_rune :: proc(s: string) -> rune { if len(s) < 4 || s[0] != '\\' || s[1] != 'x' { - return -1; + return -1 } - r: rune; + r: rune for c in s[2:4] { - x: rune; + x: rune switch c { - case '0'..='9': x = c - '0'; - case 'a'..='f': x = c - 'a' + 10; - case 'A'..='F': x = c - 'A' + 10; - case: return -1; + case '0'..='9': x = c - '0' + case 'a'..='f': x = c - 'a' + 10 + case 'A'..='F': x = c - 'A' + 10 + case: return -1 } - r = r*16 + x; + r = r*16 + x } - return r; + return r } get_u4_rune :: proc(s: string) -> rune { if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1; + return -1 } - r: rune; + r: rune for c in s[2:6] { - x: rune; + x: rune switch c { - case '0'..='9': x = c - '0'; - case 'a'..='f': x = c - 'a' + 10; - case 'A'..='F': x = c - 'A' + 10; - case: return -1; + case '0'..='9': x = c - '0' + case 'a'..='f': x = c - 'a' + 10 + case 'A'..='F': x = c - 'A' + 10 + case: return -1 } - r = r*16 + x; + r = r*16 + x } - return r; + return r } if token.kind != .String { - return ""; + return "" } - s := token.text; + s := token.text if len(s) <= 2 { - return ""; + return "" } - quote := s[0]; + quote := s[0] if s[0] != s[len(s)-1] { // Invalid string - return ""; + return "" } - s = s[1:len(s)-1]; + s = s[1:len(s)-1] - i := 0; + i := 0 for i < len(s) { - c := s[i]; + c := s[i] if c == '\\' || c == quote || c < ' ' { - break; + break } if c < utf8.RUNE_SELF { - i += 1; - continue; + i += 1 + continue } - r, w := utf8.decode_rune_in_string(s); + r, w := utf8.decode_rune_in_string(s) if r == utf8.RUNE_ERROR && w == 1 { - break; + break } - i += w; + i += w } if i == len(s) { - return clone_string(s, allocator); + return clone_string(s, allocator) } - b := make([]byte, len(s) + 2*utf8.UTF_MAX, allocator); - w := copy(b, s[0:i]); + b := make([]byte, len(s) + 2*utf8.UTF_MAX, allocator) + w := copy(b, s[0:i]) loop: for i < len(s) { - c := 
s[i]; + c := s[i] switch { case c == '\\': - i += 1; + i += 1 if i >= len(s) { - break loop; + break loop } switch s[i] { - case: break loop; + case: break loop case '"', '\'', '\\', '/': - b[w] = s[i]; - i += 1; - w += 1; + b[w] = s[i] + i += 1 + w += 1 case 'b': - b[w] = '\b'; - i += 1; - w += 1; + b[w] = '\b' + i += 1 + w += 1 case 'f': - b[w] = '\f'; - i += 1; - w += 1; + b[w] = '\f' + i += 1 + w += 1 case 'r': - b[w] = '\r'; - i += 1; - w += 1; + b[w] = '\r' + i += 1 + w += 1 case 't': - b[w] = '\t'; - i += 1; - w += 1; + b[w] = '\t' + i += 1 + w += 1 case 'n': - b[w] = '\n'; - i += 1; - w += 1; + b[w] = '\n' + i += 1 + w += 1 case 'u': - i -= 1; // Include the \u in the check for sanity sake - r := get_u4_rune(s[i:]); + i -= 1 // Include the \u in the check for sanity sake + r := get_u4_rune(s[i:]) if r < 0 { - break loop; + break loop } - i += 6; + i += 6 - buf, buf_width := utf8.encode_rune(r); - copy(b[w:], buf[:buf_width]); - w += buf_width; + buf, buf_width := utf8.encode_rune(r) + copy(b[w:], buf[:buf_width]) + w += buf_width case '0': if spec == Specification.JSON5 { - b[w] = '\x00'; - i += 1; - w += 1; + b[w] = '\x00' + i += 1 + w += 1 } else { - break loop; + break loop } case 'v': if spec == Specification.JSON5 { - b[w] = '\v'; - i += 1; - w += 1; + b[w] = '\v' + i += 1 + w += 1 } else { - break loop; + break loop } case 'x': if spec == Specification.JSON5 { - i -= 1; // Include the \x in the check for sanity sake - r := get_u2_rune(s[i:]); + i -= 1 // Include the \x in the check for sanity sake + r := get_u2_rune(s[i:]) if r < 0 { - break loop; + break loop } - i += 4; + i += 4 - buf, buf_width := utf8.encode_rune(r); - copy(b[w:], buf[:buf_width]); - w += buf_width; + buf, buf_width := utf8.encode_rune(r) + copy(b[w:], buf[:buf_width]) + w += buf_width } else { - break loop; + break loop } } case c == quote, c < ' ': - break loop; + break loop case c < utf8.RUNE_SELF: - b[w] = c; - i += 1; - w += 1; + b[w] = c + i += 1 + w += 1 case: - r, width := utf8.decode_rune_in_string(s[i:]); - i += width; + r, width := utf8.decode_rune_in_string(s[i:]) + i += width - buf, buf_width := utf8.encode_rune(r); - assert(buf_width <= width); - copy(b[w:], buf[:buf_width]); - w += buf_width; + buf, buf_width := utf8.encode_rune(r) + assert(buf_width <= width) + copy(b[w:], buf[:buf_width]) + w += buf_width } } - return string(b[:w]); + return string(b[:w]) } diff --git a/core/encoding/json/tokenizer.odin b/core/encoding/json/tokenizer.odin index 7bd2c5283..69ae81bd1 100644 --- a/core/encoding/json/tokenizer.odin +++ b/core/encoding/json/tokenizer.odin @@ -54,22 +54,22 @@ Tokenizer :: struct { make_tokenizer :: proc(data: []byte, spec := Specification.JSON, parse_integers := false) -> Tokenizer { - t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers}; - next_rune(&t); + t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers} + next_rune(&t) if t.r == utf8.RUNE_BOM { - next_rune(&t); + next_rune(&t) } - return t; + return t } next_rune :: proc(t: ^Tokenizer) -> rune #no_bounds_check { if t.offset >= len(t.data) { - return utf8.RUNE_EOF; + return utf8.RUNE_EOF } - t.offset += t.w; - t.r, t.w = utf8.decode_rune(t.data[t.offset:]); - t.pos.column = t.offset - t.curr_line_offset; - return t.r; + t.offset += t.w + t.r, t.w = utf8.decode_rune(t.data[t.offset:]) + t.pos.column = t.offset - t.curr_line_offset + return t.r } @@ -79,19 +79,19 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { if '0' <= t.r && t.r 
<= '9' { // Okay } else { - return; + return } - next_rune(t); + next_rune(t) } } skip_hex_digits :: proc(t: ^Tokenizer) { for t.offset < len(t.data) { - next_rune(t); + next_rune(t) switch t.r { case '0'..='9', 'a'..='f', 'A'..='F': // Okay case: - return; + return } } } @@ -99,56 +99,56 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { scan_espace :: proc(t: ^Tokenizer) -> bool { switch t.r { case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f': - next_rune(t); - return true; + next_rune(t) + return true case 'u': // Expect 4 hexadecimal digits for i := 0; i < 4; i += 1 { - r := next_rune(t); + r := next_rune(t) switch r { case '0'..='9', 'a'..='f', 'A'..='F': // Okay case: - return false; + return false } } - return true; + return true case: // Ignore the next rune regardless - next_rune(t); + next_rune(t) } - return false; + return false } skip_whitespace :: proc(t: ^Tokenizer) -> rune { loop: for t.offset < len(t.data) { switch t.r { case ' ', '\t', '\v', '\f', '\r': - next_rune(t); + next_rune(t) case '\n': - t.line += 1; - t.curr_line_offset = t.offset; - t.pos.column = 1; - next_rune(t); + t.line += 1 + t.curr_line_offset = t.offset + t.pos.column = 1 + next_rune(t) case: if t.spec == .JSON5 { switch t.r { case 0x2028, 0x2029, 0xFEFF: - next_rune(t); - continue loop; + next_rune(t) + continue loop } } - break loop; + break loop } } - return t.r; + return t.r } skip_to_next_line :: proc(t: ^Tokenizer) { for t.offset < len(t.data) { - r := next_rune(t); + r := next_rune(t) if r == '\n' { - return; + return } } } @@ -157,53 +157,53 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { for t.offset < len(t.data) { switch next_rune(t) { case 'A'..='Z', 'a'..='z', '0'..='9', '_': - continue; + continue } - return; + return } } - skip_whitespace(t); + skip_whitespace(t) - token.pos = t.pos; + token.pos = t.pos - token.kind = .Invalid; + token.kind = .Invalid - curr_rune := t.r; - next_rune(t); + curr_rune := t.r + next_rune(t) block: switch curr_rune { case utf8.RUNE_ERROR: - err = .Illegal_Character; + err = .Illegal_Character case utf8.RUNE_EOF, '\x00': - token.kind = .EOF; - err = .EOF; + token.kind = .EOF + err = .EOF case 'A'..='Z', 'a'..='z', '_': - token.kind = .Ident; + token.kind = .Ident - skip_alphanum(t); + skip_alphanum(t) switch str := string(t.data[token.offset:t.offset]); str { - case "null": token.kind = .Null; - case "false": token.kind = .False; - case "true": token.kind = .True; + case "null": token.kind = .Null + case "false": token.kind = .False + case "true": token.kind = .True case: if t.spec == .JSON5 { switch str { - case "Infinity": token.kind = .Infinity; - case "NaN": token.kind = .NaN; + case "Infinity": token.kind = .Infinity + case "NaN": token.kind = .NaN } } } case '+': - err = .Illegal_Character; + err = .Illegal_Character if t.spec != .JSON5 { - break; + break } - fallthrough; + fallthrough case '-': switch t.r { @@ -211,281 +211,281 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) { // Okay case: // Illegal use of +/- - err = .Illegal_Character; + err = .Illegal_Character if t.spec == .JSON5 { if t.r == 'I' || t.r == 'N' { - skip_alphanum(t); + skip_alphanum(t) } switch string(t.data[token.offset:t.offset]) { - case "-Infinity": token.kind = .Infinity; - case "-NaN": token.kind = .NaN; + case "-Infinity": token.kind = .Infinity + case "-NaN": token.kind = .NaN } } - break block; + break block } - fallthrough; + fallthrough case '0'..='9': - token.kind = t.parse_integers ? 
.Integer : .Float; + token.kind = t.parse_integers ? .Integer : .Float if t.spec == .JSON5 { // Hexadecimal Numbers if curr_rune == '0' && (t.r == 'x' || t.r == 'X') { - next_rune(t); - skip_hex_digits(t); - break; + next_rune(t) + skip_hex_digits(t) + break } } - skip_digits(t); + skip_digits(t) if t.r == '.' { - token.kind = .Float; - next_rune(t); - skip_digits(t); + token.kind = .Float + next_rune(t) + skip_digits(t) } if t.r == 'e' || t.r == 'E' { switch r := next_rune(t); r { case '+', '-': - next_rune(t); + next_rune(t) } - skip_digits(t); + skip_digits(t) } - str := string(t.data[token.offset:t.offset]); + str := string(t.data[token.offset:t.offset]) if !is_valid_number(str, t.spec) { - err = .Invalid_Number; + err = .Invalid_Number } case '.': - err = .Illegal_Character; + err = .Illegal_Character if t.spec == .JSON5 { // Allow leading decimal point - skip_digits(t); + skip_digits(t) if t.r == 'e' || t.r == 'E' { switch r := next_rune(t); r { case '+', '-': - next_rune(t); + next_rune(t) } - skip_digits(t); + skip_digits(t) } - str := string(t.data[token.offset:t.offset]); + str := string(t.data[token.offset:t.offset]) if !is_valid_number(str, t.spec) { - err = .Invalid_Number; + err = .Invalid_Number } } case '\'': - err = .Illegal_Character; + err = .Illegal_Character if t.spec != .JSON5 { - break; + break } - fallthrough; + fallthrough case '"': - token.kind = .String; - quote := curr_rune; + token.kind = .String + quote := curr_rune for t.offset < len(t.data) { - r := t.r; + r := t.r if r == '\n' || r < 0 { - err = .String_Not_Terminated; - break; + err = .String_Not_Terminated + break } - next_rune(t); + next_rune(t) if r == quote { - break; + break } if r == '\\' { - scan_espace(t); + scan_espace(t) } } - str := string(t.data[token.offset : t.offset]); + str := string(t.data[token.offset : t.offset]) if !is_valid_string_literal(str, t.spec) { - err = .Invalid_String; + err = .Invalid_String } - case ',': token.kind = .Comma; - case ':': token.kind = .Colon; - case '{': token.kind = .Open_Brace; - case '}': token.kind = .Close_Brace; - case '[': token.kind = .Open_Bracket; - case ']': token.kind = .Close_Bracket; + case ',': token.kind = .Comma + case ':': token.kind = .Colon + case '{': token.kind = .Open_Brace + case '}': token.kind = .Close_Brace + case '[': token.kind = .Open_Bracket + case ']': token.kind = .Close_Bracket case '/': - err = .Illegal_Character; + err = .Illegal_Character if t.spec == .JSON5 { switch t.r { case '/': // Single-line comments - skip_to_next_line(t); - return get_token(t); + skip_to_next_line(t) + return get_token(t) case '*': // None-nested multi-line comments for t.offset < len(t.data) { - next_rune(t); + next_rune(t) if t.r == '*' { - next_rune(t); + next_rune(t) if t.r == '/' { - next_rune(t); - return get_token(t); + next_rune(t) + return get_token(t) } } } - err = .EOF; + err = .EOF } } - case: err = .Illegal_Character; + case: err = .Illegal_Character } - token.text = string(t.data[token.offset : t.offset]); + token.text = string(t.data[token.offset : t.offset]) - return; + return } is_valid_number :: proc(str: string, spec: Specification) -> bool { - s := str; + s := str if s == "" { - return false; + return false } if s[0] == '-' { - s = s[1:]; + s = s[1:] if s == "" { - return false; + return false } } else if spec == .JSON5 { if s[0] == '+' { // Allow positive sign - s = s[1:]; + s = s[1:] if s == "" { - return false; + return false } } } switch s[0] { case '0': - s = s[1:]; + s = s[1:] case '1'..='9': - s = s[1:]; + s = s[1:] for 
len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:]; + s = s[1:] } case '.': if spec == .JSON5 { // Allow leading decimal point - s = s[1:]; + s = s[1:] } else { - return false; + return false } case: - return false; + return false } if spec == .JSON5 { if len(s) == 1 && s[0] == '.' { // Allow trailing decimal point - return true; + return true } } if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:]; + s = s[2:] for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:]; + s = s[1:] } } if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:]; + s = s[1:] switch s[0] { case '+', '-': - s = s[1:]; + s = s[1:] if s == "" { - return false; + return false } } for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:]; + s = s[1:] } } // The string should be empty now to be valid - return s == ""; + return s == "" } is_valid_string_literal :: proc(str: string, spec: Specification) -> bool { - s := str; + s := str if len(s) < 2 { - return false; + return false } - quote := s[0]; + quote := s[0] if s[0] != s[len(s)-1] { - return false; + return false } if s[0] != '"' || s[len(s)-1] != '"' { if spec == .JSON5 { if s[0] != '\'' || s[len(s)-1] != '\'' { - return false; + return false } } else { - return false; + return false } } - s = s[1 : len(s)-1]; + s = s[1 : len(s)-1] - i := 0; + i := 0 for i < len(s) { - c := s[i]; + c := s[i] switch { case c == '\\': - i += 1; + i += 1 if i >= len(s) { - return false; + return false } switch s[i] { case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f': - i += 1; + i += 1 case 'u': if i >= len(s) { - return false; + return false } - hex := s[i+1:]; + hex := s[i+1:] if len(hex) < 4 { - return false; + return false } - hex = hex[:4]; - i += 5; + hex = hex[:4] + i += 5 for j := 0; j < 4; j += 1 { - c2 := hex[j]; + c2 := hex[j] switch c2 { case '0'..='9', 'a'..='z', 'A'..='Z': // Okay case: - return false; + return false } } - case: return false; + case: return false } case c == quote, c < ' ': - return false; + return false case c < utf8.RUNE_SELF: - i += 1; + i += 1 case: - r, width := utf8.decode_rune_in_string(s[i:]); + r, width := utf8.decode_rune_in_string(s[i:]) if r == utf8.RUNE_ERROR && width == 1 { - return false; + return false } - i += width; + i += width } } if i == len(s) { - return true; + return true } - return true; + return true } diff --git a/core/encoding/json/types.odin b/core/encoding/json/types.odin index 10b88d87d..27bbae432 100644 --- a/core/encoding/json/types.odin +++ b/core/encoding/json/types.odin @@ -6,13 +6,13 @@ Specification :: enum { // MJSON, // http://bitsquid.blogspot.com/2009/09/json-configuration-data.html } -Null :: distinct rawptr; -Integer :: i64; -Float :: f64; -Boolean :: bool; -String :: string; -Array :: distinct [dynamic]Value; -Object :: distinct map[string]Value; +Null :: distinct rawptr +Integer :: i64 +Float :: f64 +Boolean :: bool +String :: string +Array :: distinct [dynamic]Value +Object :: distinct map[string]Value Value :: union { Null, @@ -50,17 +50,17 @@ destroy_value :: proc(value: Value) { #partial switch v in value { case Object: for key, elem in v { - delete(key); - destroy_value(elem); + delete(key) + destroy_value(elem) } - delete(v); + delete(v) case Array: for elem in v { - destroy_value(elem); + destroy_value(elem) } - delete(v); + delete(v) case String: - delete(v); + delete(v) } } diff --git a/core/encoding/json/validator.odin b/core/encoding/json/validator.odin index 1d2f7a3ed..3f180c722 100644 --- a/core/encoding/json/validator.odin +++ 
b/core/encoding/json/validator.odin @@ -4,119 +4,119 @@ import "core:mem" // NOTE(bill): is_valid will not check for duplicate keys is_valid :: proc(data: []byte, spec := Specification.JSON, parse_integers := false) -> bool { - p := make_parser(data, spec, parse_integers, mem.nil_allocator()); + p := make_parser(data, spec, parse_integers, mem.nil_allocator()) if p.spec == Specification.JSON5 { - return validate_value(&p); + return validate_value(&p) } - return validate_object(&p); + return validate_object(&p) } validate_object_key :: proc(p: ^Parser) -> bool { - tok := p.curr_token; + tok := p.curr_token if p.spec == Specification.JSON5 { if tok.kind == .String { - expect_token(p, .String); - return true; + expect_token(p, .String) + return true } else if tok.kind == .Ident { - expect_token(p, .Ident); - return true; + expect_token(p, .Ident) + return true } } - err := expect_token(p, .String); - return err == Error.None; + err := expect_token(p, .String) + return err == Error.None } validate_object :: proc(p: ^Parser) -> bool { if err := expect_token(p, .Open_Brace); err != Error.None { - return false; + return false } for p.curr_token.kind != .Close_Brace { if !validate_object_key(p) { - return false; + return false } if colon_err := expect_token(p, .Colon); colon_err != Error.None { - return false; + return false } if !validate_value(p) { - return false; + return false } if p.spec == Specification.JSON5 { // Allow trailing commas if allow_token(p, .Comma) { - continue; + continue } } else { // Disallow trailing commas if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } } if err := expect_token(p, .Close_Brace); err != Error.None { - return false; + return false } - return true; + return true } validate_array :: proc(p: ^Parser) -> bool { if err := expect_token(p, .Open_Bracket); err != Error.None { - return false; + return false } for p.curr_token.kind != .Close_Bracket { if !validate_value(p) { - return false; + return false } // Disallow trailing commas for the time being if allow_token(p, .Comma) { - continue; + continue } else { - break; + break } } if err := expect_token(p, .Close_Bracket); err != Error.None { - return false; + return false } - return true; + return true } validate_value :: proc(p: ^Parser) -> bool { - token := p.curr_token; + token := p.curr_token #partial switch token.kind { case .Null, .False, .True: - advance_token(p); - return true; + advance_token(p) + return true case .Integer, .Float: - advance_token(p); - return true; + advance_token(p) + return true case .String: - advance_token(p); - return is_valid_string_literal(token.text, p.spec); + advance_token(p) + return is_valid_string_literal(token.text, p.spec) case .Open_Brace: - return validate_object(p); + return validate_object(p) case .Open_Bracket: - return validate_array(p); + return validate_array(p) case: if p.spec == Specification.JSON5 { #partial switch token.kind { case .Infinity, .NaN: - advance_token(p); - return true; + advance_token(p) + return true } } } - return false; + return false } |
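To round out the files touched by this commit, here is a similarly hedged sketch of the marshalling and validation entry points whose signatures appear in marshal.odin and validator.odin above; the `Point` struct, the sample input, and the error handling are illustrative assumptions rather than part of the change.

```odin
package json_marshal_example

import "core:fmt"
import "core:encoding/json"

// Hypothetical value to serialise; not part of the commit.
Point :: struct {
	x: int,
	y: int,
}

main :: proc() {
	// is_valid runs the parser against a nil allocator to check structure
	// without building a Value tree (validator.odin above).
	input := `{"x": 1, "y": 2}`
	fmt.println("valid:", json.is_valid(transmute([]byte)input))

	// marshal walks the value's type info and writes JSON text
	// (marshal.odin above); it returns the builder's buffer and an error.
	data, err := json.marshal(Point{x = 1, y = 2})
	if err != .None {
		fmt.println("marshal error:", err)
		return
	}
	defer delete(data)
	fmt.println(string(data))
}
```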