author    gingerBill <bill@gingerbill.org>  2021-06-14 11:15:25 +0100
committer gingerBill <bill@gingerbill.org>  2021-06-14 11:15:25 +0100
commit    86649e6b44877df3c5d0b81ed2f97aaa063a6f1d (patch)
tree      700029b1021d4702e4877dd0c0355d3443df3865 /core/encoding/json
parent    3ca887a60ae1e681fd441edfe17805df97b6d6a3 (diff)
Core library clean up: Make range expressions more consistent and replace uses of `..` with `..=`
Diffstat (limited to 'core/encoding/json')
-rw-r--r--  core/encoding/json/parser.odin    | 12 ++++++------
-rw-r--r--  core/encoding/json/tokenizer.odin | 16 ++++++++--------
2 files changed, 14 insertions(+), 14 deletions(-)
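
For reference, Odin's `..=` spells an inclusive range in a `case` clause; this commit moves the core library off the older inclusive `..` spelling. Below is a minimal standalone sketch of the hex-digit pattern the diff touches (illustrative only, not part of the commit; `hex_digit_value` is a made-up name):

package main

import "core:fmt"

// Illustrative sketch: map a single hex digit to its numeric value
// using the inclusive `..=` range syntax this commit standardizes on.
hex_digit_value :: proc(c: rune) -> (x: rune, ok: bool) {
	switch c {
	case '0'..='9': return c - '0', true;
	case 'a'..='f': return c - 'a' + 10, true;
	case 'A'..='F': return c - 'A' + 10, true;
	}
	return -1, false;
}

main :: proc() {
	v, ok := hex_digit_value('B');
	fmt.println(int(v), ok); // prints: 11 true
}
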
diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin
index 448a0e41f..54cf9b6ef 100644
--- a/core/encoding/json/parser.odin
+++ b/core/encoding/json/parser.odin
@@ -290,9 +290,9 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
for c in s[2:4] {
x: rune;
switch c {
- case '0'..'9': x = c - '0';
- case 'a'..'f': x = c - 'a' + 10;
- case 'A'..'F': x = c - 'A' + 10;
+ case '0'..='9': x = c - '0';
+ case 'a'..='f': x = c - 'a' + 10;
+ case 'A'..='F': x = c - 'A' + 10;
case: return -1;
}
r = r*16 + x;
@@ -308,9 +308,9 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
for c in s[2:6] {
x: rune;
switch c {
- case '0'..'9': x = c - '0';
- case 'a'..'f': x = c - 'a' + 10;
- case 'A'..'F': x = c - 'A' + 10;
+ case '0'..='9': x = c - '0';
+ case 'a'..='f': x = c - 'a' + 10;
+ case 'A'..='F': x = c - 'A' + 10;
case: return -1;
}
r = r*16 + x;
diff --git a/core/encoding/json/tokenizer.odin b/core/encoding/json/tokenizer.odin
index b3860d428..df12ce0b6 100644
--- a/core/encoding/json/tokenizer.odin
+++ b/core/encoding/json/tokenizer.odin
@@ -82,7 +82,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
for t.offset < len(t.data) {
next_rune(t);
switch t.r {
- case '0'..'9', 'a'..'f', 'A'..'F':
+ case '0'..='9', 'a'..='f', 'A'..='F':
// Okay
case:
return;
@@ -100,7 +100,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
for i := 0; i < 4; i += 1 {
r := next_rune(t);
switch r {
- case '0'..'9', 'a'..'f', 'A'..'F':
+ case '0'..='9', 'a'..='f', 'A'..='F':
// Okay
case:
return false;
@@ -149,7 +149,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
skip_alphanum :: proc(t: ^Tokenizer) {
for t.offset < len(t.data) {
switch next_rune(t) {
- case 'A'..'Z', 'a'..'z', '0'..'9', '_':
+ case 'A'..='Z', 'a'..='z', '0'..='9', '_':
continue;
}
@@ -173,7 +173,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
token.kind = .EOF;
err = .EOF;
- case 'A'..'Z', 'a'..'z', '_':
+ case 'A'..='Z', 'a'..='z', '_':
token.kind = .Ident;
skip_alphanum(t);
@@ -200,7 +200,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
case '-':
switch t.r {
- case '0'..'9':
+ case '0'..='9':
// Okay
case:
// Illegal use of +/-
@@ -219,7 +219,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
}
fallthrough;
- case '0'..'9':
+ case '0'..='9':
token.kind = t.parse_integers ? .Integer : .Float;
if t.spec == .JSON5 { // Hexadecimal Numbers
if curr_rune == '0' && (t.r == 'x' || t.r == 'X') {
@@ -361,7 +361,7 @@ is_valid_number :: proc(str: string, spec: Specification) -> bool {
switch s[0] {
case '0':
s = s[1:];
- case '1'..'9':
+ case '1'..='9':
s = s[1:];
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:];
@@ -453,7 +453,7 @@ is_valid_string_literal :: proc(str: string, spec: Specification) -> bool {
for j := 0; j < 4; j += 1 {
c2 := hex[j];
switch c2 {
- case '0'..'9', 'a'..'z', 'A'..'Z':
+ case '0'..='9', 'a'..='z', 'A'..='Z':
// Okay
case:
return false;