| | | |
|---|---|---|
| author | gingerBill <bill@gingerbill.org> | 2021-06-14 11:15:25 +0100 |
| committer | gingerBill <bill@gingerbill.org> | 2021-06-14 11:15:25 +0100 |
| commit | 86649e6b44877df3c5d0b81ed2f97aaa063a6f1d | |
| tree | 700029b1021d4702e4877dd0c0355d3443df3865 | /core/encoding |
| parent | 3ca887a60ae1e681fd441edfe17805df97b6d6a3 | |
Core library clean up: Make range expressions more consistent and replace uses of `..` with `..=`
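
For context, here is a minimal, standalone sketch (not part of the commit) of the range-operator semantics behind the clean-up: in Odin, `a ..= b` denotes an inclusive range while `a ..< b` denotes a half-open one, and this commit replaces the older inclusive spelling `..` with the explicit `..=` in `case` clauses like the ones in the diff below. The package and procedure names here (`range_example`, `hex_digit_value`) are hypothetical.

```odin
package range_example

import "core:fmt"

// Hypothetical helper (not from the commit) mirroring the hex_to_int and
// digit_value procedures touched by this diff. `a ..= b` is Odin's inclusive
// range, so each case below also matches its upper bound.
hex_digit_value :: proc(c: byte) -> int {
	switch c {
	case '0'..='9': return int(c - '0')
	case 'a'..='f': return int(c - 'a') + 10
	case 'A'..='F': return int(c - 'A') + 10
	}
	return -1 // not a hexadecimal digit
}

main :: proc() {
	fmt.println(hex_digit_value('b')) // prints 11
	fmt.println(hex_digit_value('G')) // prints -1

	// For contrast, `..<` is the half-open range: this loop prints 0, 1, 2.
	for i in 0..<3 {
		fmt.println(i)
	}
}
```

Both spellings of the inclusive bound match the same characters; as the diffstat below suggests (equal insertions and deletions), the change is a syntax-only substitution to make the intent explicit and consistent across the core library.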
Diffstat (limited to 'core/encoding')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | core/encoding/cel/cel.odin | 8 |
| -rw-r--r-- | core/encoding/cel/token.odin | 10 |
| -rw-r--r-- | core/encoding/json/parser.odin | 12 |
| -rw-r--r-- | core/encoding/json/tokenizer.odin | 16 |
4 files changed, 23 insertions, 23 deletions
diff --git a/core/encoding/cel/cel.odin b/core/encoding/cel/cel.odin
index f0cd49866..94f9281b3 100644
--- a/core/encoding/cel/cel.odin
+++ b/core/encoding/cel/cel.odin
@@ -201,9 +201,9 @@ next_token :: proc(p: ^Parser) -> Token {
 unquote_char :: proc(str: string, quote: byte) -> (r: rune, multiple_bytes: bool, tail_string: string, success: bool) {
 	hex_to_int :: proc(c: byte) -> int {
 		switch c {
-		case '0'..'9': return int(c-'0');
-		case 'a'..'f': return int(c-'a')+10;
-		case 'A'..'F': return int(c-'A')+10;
+		case '0'..='9': return int(c-'0');
+		case 'a'..='f': return int(c-'a')+10;
+		case 'A'..='F': return int(c-'A')+10;
 		}
 		return -1;
 	}
@@ -241,7 +241,7 @@ unquote_char :: proc(str: string, quote: byte) -> (r: rune, multiple_bytes: bool
 		case '"':  r = '"';
 		case '\'': r = '\'';
-		case '0'..'7':
+		case '0'..='7':
 			v := int(c-'0');
 			if len(s) < 2 {
 				return;
diff --git a/core/encoding/cel/token.odin b/core/encoding/cel/token.odin
index 7cd9e7c12..4d6b9473c 100644
--- a/core/encoding/cel/token.odin
+++ b/core/encoding/cel/token.odin
@@ -232,7 +232,7 @@ get_pos :: proc(t: ^Tokenizer) -> Pos {
 is_letter :: proc(r: rune) -> bool {
 	switch r {
-	case 'a'..'z', 'A'..'Z', '_':
+	case 'a'..='z', 'A'..='Z', '_':
 		return true;
 	}
 	return false;
@@ -240,7 +240,7 @@ is_letter :: proc(r: rune) -> bool {
 is_digit :: proc(r: rune) -> bool {
 	switch r {
-	case '0'..'9':
+	case '0'..='9':
 		return true;
 	}
 	return false;
@@ -273,9 +273,9 @@ scan_identifier :: proc(t: ^Tokenizer) -> string {
 digit_value :: proc(r: rune) -> int {
 	switch r {
-	case '0'..'9': return int(r - '0');
-	case 'a'..'f': return int(r - 'a' + 10);
-	case 'A'..'F': return int(r - 'A' + 10);
+	case '0'..='9': return int(r - '0');
+	case 'a'..='f': return int(r - 'a' + 10);
+	case 'A'..='F': return int(r - 'A' + 10);
 	}
 	return 16;
 }
diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin
index 448a0e41f..54cf9b6ef 100644
--- a/core/encoding/json/parser.odin
+++ b/core/encoding/json/parser.odin
@@ -290,9 +290,9 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
 			for c in s[2:4] {
 				x: rune;
 				switch c {
-				case '0'..'9': x = c - '0';
-				case 'a'..'f': x = c - 'a' + 10;
-				case 'A'..'F': x = c - 'A' + 10;
+				case '0'..='9': x = c - '0';
+				case 'a'..='f': x = c - 'a' + 10;
+				case 'A'..='F': x = c - 'A' + 10;
 				case: return -1;
 				}
 				r = r*16 + x;
@@ -308,9 +308,9 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
 			for c in s[2:6] {
 				x: rune;
 				switch c {
-				case '0'..'9': x = c - '0';
-				case 'a'..'f': x = c - 'a' + 10;
-				case 'A'..'F': x = c - 'A' + 10;
+				case '0'..='9': x = c - '0';
+				case 'a'..='f': x = c - 'a' + 10;
+				case 'A'..='F': x = c - 'A' + 10;
 				case: return -1;
 				}
 				r = r*16 + x;
diff --git a/core/encoding/json/tokenizer.odin b/core/encoding/json/tokenizer.odin
index b3860d428..df12ce0b6 100644
--- a/core/encoding/json/tokenizer.odin
+++ b/core/encoding/json/tokenizer.odin
@@ -82,7 +82,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 		for t.offset < len(t.data) {
 			next_rune(t);
 			switch t.r {
-			case '0'..'9', 'a'..'f', 'A'..'F':
+			case '0'..='9', 'a'..='f', 'A'..='F':
 				// Okay
 			case:
 				return;
@@ -100,7 +100,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 		for i := 0; i < 4; i += 1 {
 			r := next_rune(t);
 			switch r {
-			case '0'..'9', 'a'..'f', 'A'..'F':
+			case '0'..='9', 'a'..='f', 'A'..='F':
 				// Okay
 			case:
 				return false;
@@ -149,7 +149,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 	skip_alphanum :: proc(t: ^Tokenizer) {
 		for t.offset < len(t.data) {
 			switch next_rune(t) {
-			case 'A'..'Z', 'a'..'z', '0'..'9', '_':
+			case 'A'..='Z', 'a'..='z', '0'..='9', '_':
 				continue;
 			}
@@ -173,7 +173,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 		token.kind = .EOF;
 		err = .EOF;
-	case 'A'..'Z', 'a'..'z', '_':
+	case 'A'..='Z', 'a'..='z', '_':
 		token.kind = .Ident;
 		skip_alphanum(t);
@@ -200,7 +200,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 	case '-':
 		switch t.r {
-		case '0'..'9':
+		case '0'..='9':
 			// Okay
 		case:
 			// Illegal use of +/-
@@ -219,7 +219,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
 		}
 		fallthrough;
-	case '0'..'9':
+	case '0'..='9':
 		token.kind = t.parse_integers ? .Integer : .Float;
 		if t.spec == .JSON5 { // Hexadecimal Numbers
 			if curr_rune == '0' && (t.r == 'x' || t.r == 'X') {
@@ -361,7 +361,7 @@ is_valid_number :: proc(str: string, spec: Specification) -> bool {
 	switch s[0] {
 	case '0':
 		s = s[1:];
-	case '1'..'9':
+	case '1'..='9':
 		s = s[1:];
 		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
 			s = s[1:];
@@ -453,7 +453,7 @@ is_valid_string_literal :: proc(str: string, spec: Specification) -> bool {
 			for j := 0; j < 4; j += 1 {
 				c2 := hex[j];
 				switch c2 {
-				case '0'..'9', 'a'..'z', 'A'..'Z':
+				case '0'..='9', 'a'..='z', 'A'..='Z':
 					// Okay
 				case:
 					return false;