aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeroen van Rijn <Kelimion@users.noreply.github.com>2021-08-08 02:19:32 +0200
committerJeroen van Rijn <Kelimion@users.noreply.github.com>2021-08-08 02:19:32 +0200
commitebd034fff9fc6bf787311c0ef58bcd5a877c0741 (patch)
treeea97836f0908bbde16cf24dd54536b50e001428d
parent000bda841946c28bac9a94dd73651a4a1e1062f3 (diff)
CEL: Fix ISE.
-rw-r--r--core/encoding/cel/token.odin104
1 file changed, 52 insertions, 52 deletions
diff --git a/core/encoding/cel/token.odin b/core/encoding/cel/token.odin
index 4d6b9473c..47649b913 100644
--- a/core/encoding/cel/token.odin
+++ b/core/encoding/cel/token.odin
@@ -3,7 +3,7 @@ package cel
import "core:fmt"
import "core:unicode/utf8"
-using Kind :: enum {
+Kind :: enum {
Illegal,
EOF,
Comment,
@@ -95,11 +95,11 @@ Tokenizer :: struct {
keywords := map[string]Kind{
- "true" = True,
- "false" = False,
- "nil" = Nil,
- "and" = And,
- "or" = Or,
+ "true" = .True,
+ "false" = .False,
+ "nil" = .Nil,
+ "and" = .And,
+ "or" = .Or,
};
kind_to_string := [len(Kind)]string{
@@ -138,17 +138,17 @@ kind_to_string := [len(Kind)]string{
precedence :: proc(op: Kind) -> int {
#partial switch op {
- case Question:
+ case .Question:
return 1;
- case Or:
+ case .Or:
return 2;
- case And:
+ case .And:
return 3;
- case Eq, NotEq, Lt, Gt, LtEq, GtEq:
+ case .Eq, .NotEq, .Lt, .Gt, .LtEq, .GtEq:
return 4;
- case Add, Sub:
+ case .Add, .Sub:
return 5;
- case Mul, Quo, Rem:
+ case .Mul, .Quo, .Rem:
return 6;
}
return 0;
@@ -159,12 +159,12 @@ token_lookup :: proc(ident: string) -> Kind {
if tok, is_keyword := keywords[ident]; is_keyword {
return tok;
}
- return Ident;
+ return .Ident;
}
-is_literal :: proc(tok: Kind) -> bool { return _literal_start < tok && tok < _literal_end; }
-is_operator :: proc(tok: Kind) -> bool { return _operator_start < tok && tok < _operator_end; }
-is_keyword :: proc(tok: Kind) -> bool { return _keyword_start < tok && tok < _keyword_end; }
+is_literal :: proc(tok: Kind) -> bool { return ._literal_start < tok && tok < ._literal_end; }
+is_operator :: proc(tok: Kind) -> bool { return ._operator_start < tok && tok < ._operator_end; }
+is_keyword :: proc(tok: Kind) -> bool { return ._keyword_start < tok && tok < ._keyword_end; }
tokenizer_init :: proc(t: ^Tokenizer, src: []byte, file := "") {
@@ -289,7 +289,7 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (Kind, string) {
scan_exponent :: proc(t: ^Tokenizer, tok: Kind, offset: int) -> (kind: Kind, text: string) {
kind = tok;
if t.curr_rune == 'e' || t.curr_rune == 'E' {
- kind = Float;
+ kind = .Float;
advance_to_next_rune(t);
if t.curr_rune == '-' || t.curr_rune == '+' {
advance_to_next_rune(t);
@@ -306,7 +306,7 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (Kind, string) {
scan_fraction :: proc(t: ^Tokenizer, tok: Kind, offset: int) -> (kind: Kind, text: string) {
kind = tok;
if t.curr_rune == '.' {
- kind = Float;
+ kind = .Float;
advance_to_next_rune(t);
scan_mantissa(t, 10);
}
@@ -315,11 +315,11 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (Kind, string) {
}
offset := t.offset;
- tok := Integer;
+ tok := Kind.Integer;
if seen_decimal_point {
offset -= 1;
- tok = Float;
+ tok = .Float;
scan_mantissa(t, 10);
return scan_exponent(t, tok, offset);
}
@@ -378,7 +378,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
case is_letter(r):
insert_semi = true;
lit = scan_identifier(t);
- tok = Ident;
+ tok = .Ident;
if len(lit) > 1 {
tok = token_lookup(lit);
}
@@ -393,18 +393,18 @@ scan :: proc(t: ^Tokenizer) -> Token {
case -1:
if t.insert_semi {
t.insert_semi = false;
- return Token{Semicolon, pos, "\n"};
+ return Token{.Semicolon, pos, "\n"};
}
- return Token{EOF, pos, "\n"};
+ return Token{.EOF, pos, "\n"};
case '\n':
t.insert_semi = false;
- return Token{Semicolon, pos, "\n"};
+ return Token{.Semicolon, pos, "\n"};
case '"':
insert_semi = true;
quote := r;
- tok = String;
+ tok = .String;
for {
this_r := t.curr_rune;
if this_r == '\n' || r < 0 {
@@ -430,70 +430,70 @@ scan :: proc(t: ^Tokenizer) -> Token {
}
if t.insert_semi {
t.insert_semi = false;
- return Token{Semicolon, pos, "\n"};
+ return Token{.Semicolon, pos, "\n"};
}
// Recursive!
return scan(t);
- case '?': tok = Question;
- case ':': tok = Colon;
- case '@': tok = At;
+ case '?': tok = .Question;
+ case ':': tok = .Colon;
+ case '@': tok = .At;
case ';':
- tok = Semicolon;
+ tok = .Semicolon;
lit = ";";
- case ',': tok = Comma;
+ case ',': tok = .Comma;
case '(':
- tok = Open_Paren;
+ tok = .Open_Paren;
case ')':
insert_semi = true;
- tok = Close_Paren;
+ tok = .Close_Paren;
case '[':
- tok = Open_Bracket;
+ tok = .Open_Bracket;
case ']':
insert_semi = true;
- tok = Close_Bracket;
+ tok = .Close_Bracket;
case '{':
- tok = Open_Brace;
+ tok = .Open_Brace;
case '}':
insert_semi = true;
- tok = Close_Brace;
+ tok = .Close_Brace;
- case '+': tok = Add;
- case '-': tok = Sub;
- case '*': tok = Mul;
- case '/': tok = Quo;
- case '%': tok = Rem;
+ case '+': tok = .Add;
+ case '-': tok = .Sub;
+ case '*': tok = .Mul;
+ case '/': tok = .Quo;
+ case '%': tok = .Rem;
case '!':
- tok = Not;
+ tok = .Not;
if t.curr_rune == '=' {
advance_to_next_rune(t);
- tok = NotEq;
+ tok = .NotEq;
}
case '=':
- tok = Assign;
+ tok = .Assign;
if t.curr_rune == '=' {
advance_to_next_rune(t);
- tok = Eq;
+ tok = .Eq;
}
case '<':
- tok = Lt;
+ tok = .Lt;
if t.curr_rune == '=' {
advance_to_next_rune(t);
- tok = LtEq;
+ tok = .LtEq;
}
case '>':
- tok = Gt;
+ tok = .Gt;
if t.curr_rune == '=' {
advance_to_next_rune(t);
- tok = GtEq;
+ tok = .GtEq;
}
case '.':
@@ -501,7 +501,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
insert_semi = true;
tok, lit = scan_number(t, true);
} else {
- tok = Period;
+ tok = .Period;
}
case:
@@ -509,7 +509,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
token_error(t, "Illegal character '%r'", r);
}
insert_semi = t.insert_semi;
- tok = Illegal;
+ tok = .Illegal;
}
}