| author | Ginger Bill <bill@gingerbill.org> | 2017-02-10 16:12:14 +0000 |
|---|---|---|
| committer | Ginger Bill <bill@gingerbill.org> | 2017-02-10 16:12:14 +0000 |
| commit | 73d6a55f5c96459d30eca5747d1458bcf6e9fec4 | |
| tree | 6351ed11cfbcf08c9a52498bbf8662083886fe89 /src | |
| parent | f18ae89931526df578956e63dfab288920b59873 | |
Remove need for `type` keyword
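In user-facing terms (inferred from the parser and checker changes below, so treat the exact spellings as an approximation rather than documented syntax): a type match statement is now recognized purely by its `name in expr` assignment, instead of by a leading `type` keyword. A sketch in Odin, where `v` and `entity` are hypothetical names:

```odin
// Before this commit (old form, reconstructed from the removed parser path):
//     match type v in entity { ... }
//
// After: the `in` assignment alone marks the statement as a type match.
match v in entity {
case int:
	// Within this clause the checker binds `v` with the matched case type
	// (a pointer to it when matching through a union pointer).
case string:
default:
}
```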
Diffstat (limited to 'src')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | src/check_expr.c | 9 |
| -rw-r--r-- | src/check_stmt.c | 30 |
| -rw-r--r-- | src/ir.c | 10 |
| -rw-r--r-- | src/parser.c | 87 |
| -rw-r--r-- | src/tokenizer.c | 205 |
5 files changed, 158 insertions, 183 deletions
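The `type` keyword's other role, marking a helper type in declarations and type positions, moves to a `#type` tag (see the new `Token_Hash` handling in `parse_operand` and `parse_type_or_ident` below). The tokenizer also reserves `$` (`Token_Dollar`) and `?` (`Token_Question`) as operator tokens, though nothing in this diff consumes them yet. A hedged sketch of the new surface syntax, with illustrative names:

```odin
// Before: `x : type = ...` and `case Token_type` drove helper types.
// After: the #type tag does the same job, freeing `type` as an identifier.
Handler :: #type proc(int) -> bool; // helper type via the new #type tag
type := 10;                         // `type` may now be used as a plain name
```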
```diff
diff --git a/src/check_expr.c b/src/check_expr.c
index 3a4f57ea8..d3bc5895c 100644
--- a/src/check_expr.c
+++ b/src/check_expr.c
@@ -4766,11 +4766,12 @@ ExprKind check__expr_base(Checker *c, Operand *o, AstNode *node, Type *type_hint) {
 		o->expr = node;
 	case_end;
 
 	case_ast_node(te, TagExpr, node);
-		// TODO(bill): Tag expressions
-		error_node(node, "Tag expressions are not supported yet");
-		kind = check_expr_base(c, o, te->expr, type_hint);
+		String name = te->name.string;
+		error_node(node, "Unknown tag expression, #%.*s", LIT(name));
+		if (te->expr) {
+			kind = check_expr_base(c, o, te->expr, type_hint);
+		}
 		o->expr = node;
 	case_end;
diff --git a/src/check_stmt.c b/src/check_stmt.c
index 9155947fc..9067d1037 100644
--- a/src/check_stmt.c
+++ b/src/check_stmt.c
@@ -942,7 +942,25 @@ void check_stmt_internal(Checker *c, AstNode *node, u32 flags) {
 		bool is_union_ptr = false;
 		bool is_any = false;
-		check_expr(c, &x, ms->tag);
+		if (ms->tag->kind != AstNode_AssignStmt) {
+			error_node(ms->tag, "Expected an `in` assignment for this type match statement");
+			break;
+		}
+
+		ast_node(as, AssignStmt, ms->tag);
+		Token as_token = ast_node_token(ms->tag);
+		if (as->lhs.count != 1) {
+			syntax_error(as_token, "Expected 1 name before `in`");
+			break;
+		}
+		if (as->rhs.count != 1) {
+			syntax_error(as_token, "Expected 1 expression after `in`");
+			break;
+		}
+		AstNode *lhs = as->lhs.e[0];
+		AstNode *rhs = as->rhs.e[0];
+
+		check_expr(c, &x, rhs);
 		check_assignment(c, &x, NULL, str_lit("type match expression"));
 		if (!check_valid_type_match_type(x.type, &is_union_ptr, &is_any)) {
 			gbString str = type_to_string(x.type);
@@ -980,7 +998,9 @@ void check_stmt_internal(Checker *c, AstNode *node, u32 flags) {
 			}
 		}
 
-		if (ms->var->kind != AstNode_Ident) {
+
+		if (unparen_expr(lhs)->kind != AstNode_Ident) {
+			error_node(rhs, "Expected an identifier, got `%.*s`", LIT(ast_node_strings[rhs->kind]));
 			break;
 		}
@@ -1056,10 +1076,10 @@ void check_stmt_internal(Checker *c, AstNode *node, u32 flags) {
 				tt = make_type_pointer(c->allocator, case_type);
 				add_type_info_type(c, tt);
 			}
-			Entity *tag_var = make_entity_variable(c->allocator, c->context.scope, ms->var->Ident, tt, true);
+			Entity *tag_var = make_entity_variable(c->allocator, c->context.scope, lhs->Ident, tt, true);
 			tag_var->flags |= EntityFlag_Used;
-			add_entity(c, c->context.scope, ms->var, tag_var);
-			add_entity_use(c, ms->var, tag_var);
+			add_entity(c, c->context.scope, lhs, tag_var);
+			add_entity_use(c, lhs, tag_var);
 		}
 		check_stmt_list(c, cc->stmts, mod_flags);
 		check_close_scope(c);
diff --git a/src/ir.c b/src/ir.c
--- a/src/ir.c
+++ b/src/ir.c
@@ -5255,7 +5255,13 @@ void ir_build_stmt_internal(irProcedure *proc, AstNode *node) {
 	ir_emit_comment(proc, str_lit("TypeMatchStmt"));
 	gbAllocator allocator = proc->module->allocator;
 
-	irValue *parent = ir_build_expr(proc, ms->tag);
+	ast_node(as, AssignStmt, ms->tag);
+	GB_ASSERT(as->lhs.count == 1);
+	GB_ASSERT(as->rhs.count == 1);
+	AstNode *lhs = as->lhs.e[0];
+	AstNode *rhs = as->rhs.e[0];
+
+	irValue *parent = ir_build_expr(proc, rhs);
 	bool is_union_ptr = false;
 	bool is_any = false;
 	GB_ASSERT(check_valid_type_match_type(ir_type(parent), &is_union_ptr, &is_any));
@@ -5276,7 +5282,7 @@ void ir_build_stmt_internal(irProcedure *proc, AstNode *node) {
 	ast_node(body, BlockStmt, ms->body);
 
-	String tag_var_name = ms->var->Ident.string;
+	String tag_var_name = lhs->Ident.string;
 
 	AstNodeArray default_stmts = {0};
diff --git a/src/parser.c b/src/parser.c
index 7531412ef..3179509e7 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -250,7 +250,6 @@ AST_NODE_KIND(_ComplexStmtBegin, "", i32) \
 	AST_NODE_KIND(TypeMatchStmt, "type match statement", struct { \
 		Token token;  \
 		AstNode *tag; \
-		AstNode *var; \
 		AstNode *body; \
 	}) \
 	AST_NODE_KIND(DeferStmt, "defer statement", struct { Token token; AstNode *stmt; }) \
@@ -893,11 +892,10 @@ AstNode *ast_match_stmt(AstFile *f, Token token, AstNode *init, AstNode *tag, AstNode *body) {
 }
 
-AstNode *ast_type_match_stmt(AstFile *f, Token token, AstNode *tag, AstNode *var, AstNode *body) {
+AstNode *ast_type_match_stmt(AstFile *f, Token token, AstNode *tag, AstNode *body) {
 	AstNode *result = make_ast_node(f, AstNode_TypeMatchStmt);
 	result->TypeMatchStmt.token = token;
 	result->TypeMatchStmt.tag = tag;
-	result->TypeMatchStmt.var = var;
 	result->TypeMatchStmt.body = body;
 	return result;
 }
@@ -1756,6 +1754,7 @@ AstNode *parse_operand(AstFile *f, bool lhs) {
 		} else if (str_eq(name.string, str_lit("file"))) { return ast_basic_directive(f, token, name.string);
 		} else if (str_eq(name.string, str_lit("line"))) { return ast_basic_directive(f, token, name.string);
 		} else if (str_eq(name.string, str_lit("procedure"))) { return ast_basic_directive(f, token, name.string);
+		} else if (str_eq(name.string, str_lit("type"))) { return ast_helper_type(f, token, parse_type(f));
 		} else {
 			operand = ast_tag_expr(f, token, name, parse_expr(f, false));
 		}
@@ -2180,9 +2179,7 @@ AstNode *parse_value_decl(AstFile *f, AstNodeArray lhs) {
 	bool is_mutable = true;
 
 	if (allow_token(f, Token_Colon)) {
-		if (!allow_token(f, Token_type)) {
-			type = parse_type_attempt(f);
-		}
+		type = parse_type_attempt(f);
 	} else if (f->curr_token.kind != Token_Eq && f->curr_token.kind != Token_Semicolon) {
 		syntax_error(f->curr_token, "Expected a type separator `:` or `=`");
 	}
@@ -2528,10 +2525,16 @@ AstNode *parse_type_or_ident(AstFile *f) {
 		return e;
 	}
 
-	case Token_type: {
-		Token token = expect_token(f, Token_type);
-		AstNode *type = parse_type(f);
-		return ast_helper_type(f, token, type);
+	case Token_Hash: {
+		Token hash_token = expect_token(f, Token_Hash);
+		Token name = expect_token(f, Token_Ident);
+		String tag = name.string;
+		if (str_eq(tag, str_lit("type"))) {
+			AstNode *type = parse_type(f);
+			return ast_helper_type(f, hash_token, type);
+		}
+		syntax_error(name, "Expected `type` after #");
+		return ast_bad_expr(f, hash_token, f->curr_token);
 	}
 
 	case Token_Pointer: {
@@ -2941,8 +2944,7 @@ AstNode *parse_for_stmt(AstFile *f) {
 		f->expr_level = -1;
 		if (f->curr_token.kind != Token_Semicolon) {
 			cond = parse_simple_stmt(f, true);
-			if (cond->kind == AstNode_AssignStmt &&
-			    cond->AssignStmt.op.kind == Token_in) {
+			if (cond->kind == AstNode_AssignStmt && cond->AssignStmt.op.kind == Token_in) {
 				is_range = true;
 			}
 		}
@@ -3034,6 +3036,7 @@
 #endif
 }
 
+
 AstNode *parse_case_clause(AstFile *f) {
 	Token token = f->curr_token;
 	AstNodeArray list = make_ast_node_array(f);
@@ -3066,6 +3069,7 @@ AstNode *parse_type_case_clause(AstFile *f) {
 }
 
 
+
 AstNode *parse_match_stmt(AstFile *f) {
 	if (f->curr_proc == NULL) {
 		syntax_error(f->curr_token, "You cannot use a match statement in the file scope");
@@ -3077,37 +3081,16 @@ AstNode *parse_match_stmt(AstFile *f) {
 	AstNode *tag = NULL;
 	AstNode *body = NULL;
 	Token open, close;
+	bool is_type_match = false;
 
-	if (allow_token(f, Token_type)) {
+	if (f->curr_token.kind != Token_OpenBrace) {
 		isize prev_level = f->expr_level;
 		f->expr_level = -1;
-		AstNode *var = parse_ident(f);
-		expect_token_after(f, Token_in, "match type name");
-		tag = parse_simple_stmt(f, false);
-
-		f->expr_level = prev_level;
-
-		open = expect_token(f, Token_OpenBrace);
-
-		AstNodeArray list = make_ast_node_array(f);
-
-		while (f->curr_token.kind == Token_case ||
-		       f->curr_token.kind == Token_default) {
-			array_add(&list, parse_type_case_clause(f));
-		}
-
-		close = expect_token(f, Token_CloseBrace);
-		body = ast_block_stmt(f, list, open, close);
-
-		tag = convert_stmt_to_expr(f, tag, str_lit("type match expression"));
-		return ast_type_match_stmt(f, token, tag, var, body);
-	} else {
-		if (f->curr_token.kind != Token_OpenBrace) {
-			isize prev_level = f->expr_level;
-			f->expr_level = -1;
-			if (f->curr_token.kind != Token_Semicolon) {
-				tag = parse_simple_stmt(f, false);
-			}
+		tag = parse_simple_stmt(f, true);
+		if (tag->kind == AstNode_AssignStmt && tag->AssignStmt.op.kind == Token_in) {
+			is_type_match = true;
+		} else {
 			if (allow_token(f, Token_Semicolon)) {
 				init = tag;
 				tag = NULL;
@@ -3115,28 +3098,33 @@ AstNode *parse_match_stmt(AstFile *f) {
 				tag = parse_simple_stmt(f, false);
 			}
 		}
-
-			f->expr_level = prev_level;
-		}
+		f->expr_level = prev_level;
+	}
 
+	open = expect_token(f, Token_OpenBrace);
+	AstNodeArray list = make_ast_node_array(f);
 
-		open = expect_token(f, Token_OpenBrace);
-		AstNodeArray list = make_ast_node_array(f);
-
-		while (f->curr_token.kind == Token_case ||
-		       f->curr_token.kind == Token_default) {
+	while (f->curr_token.kind == Token_case ||
+	       f->curr_token.kind == Token_default) {
+		if (is_type_match) {
+			array_add(&list, parse_type_case_clause(f));
+		} else {
 			array_add(&list, parse_case_clause(f));
 		}
+	}
 
-		close = expect_token(f, Token_CloseBrace);
+	close = expect_token(f, Token_CloseBrace);
 
-		body = ast_block_stmt(f, list, open, close);
+	body = ast_block_stmt(f, list, open, close);
 
+	if (!is_type_match) {
 		tag = convert_stmt_to_expr(f, tag, str_lit("match expression"));
 		return ast_match_stmt(f, token, init, tag, body);
+	} else {
+		return ast_type_match_stmt(f, token, tag, body);
 	}
 }
 
-
 AstNode *parse_defer_stmt(AstFile *f) {
 	if (f->curr_proc == NULL) {
 		syntax_error(f->curr_token, "You cannot use a defer statement in the file scope");
@@ -3328,6 +3316,7 @@ AstNode *parse_stmt(AstFile *f) {
 		Token hash_token = expect_token(f, Token_Hash);
 		Token name = expect_token(f, Token_Ident);
 		String tag = name.string;
+
 		if (str_eq(tag, str_lit("import"))) {
 			AstNode *cond = NULL;
 			Token import_name = {0};
diff --git a/src/tokenizer.c b/src/tokenizer.c
index 308224157..92256f021 100644
--- a/src/tokenizer.c
+++ b/src/tokenizer.c
@@ -12,23 +12,24 @@
 	TOKEN_KIND(Token__LiteralBegin, "_LiteralBegin"), \
 	TOKEN_KIND(Token__LiteralEnd,   "_LiteralEnd"), \
 \
 	TOKEN_KIND(Token__OperatorBegin, "_OperatorBegin"), \
-	TOKEN_KIND(Token_Eq,      "="), \
-	TOKEN_KIND(Token_Not,     "!"), \
-	TOKEN_KIND(Token_Hash,    "#"), \
-	TOKEN_KIND(Token_At,      "@"), \
-	TOKEN_KIND(Token_Pointer, "^"), \
-	/* TOKEN_KIND(Token_Maybe, "?"), */ \
-	TOKEN_KIND(Token_Add,     "+"), \
-	TOKEN_KIND(Token_Sub,     "-"), \
-	TOKEN_KIND(Token_Mul,     "*"), \
-	TOKEN_KIND(Token_Quo,     "/"), \
-	TOKEN_KIND(Token_Mod,     "%"), \
-	TOKEN_KIND(Token_And,     "&"), \
-	TOKEN_KIND(Token_Or,      "|"), \
-	TOKEN_KIND(Token_Xor,     "~"), \
-	TOKEN_KIND(Token_AndNot,  "&~"), \
-	TOKEN_KIND(Token_Shl,     "<<"), \
-	TOKEN_KIND(Token_Shr,     ">>"), \
+	TOKEN_KIND(Token_Eq,       "="), \
+	TOKEN_KIND(Token_Not,      "!"), \
+	TOKEN_KIND(Token_Hash,     "#"), \
+	TOKEN_KIND(Token_At,       "@"), \
+	TOKEN_KIND(Token_Dollar,   "$"), \
+	TOKEN_KIND(Token_Pointer,  "^"), \
+	TOKEN_KIND(Token_Question, "?"), \
+	TOKEN_KIND(Token_Add,      "+"), \
+	TOKEN_KIND(Token_Sub,      "-"), \
+	TOKEN_KIND(Token_Mul,      "*"), \
+	TOKEN_KIND(Token_Quo,      "/"), \
+	TOKEN_KIND(Token_Mod,      "%"), \
+	TOKEN_KIND(Token_And,      "&"), \
+	TOKEN_KIND(Token_Or,       "|"), \
+	TOKEN_KIND(Token_Xor,      "~"), \
+	TOKEN_KIND(Token_AndNot,   "&~"), \
+	TOKEN_KIND(Token_Shl,      "<<"), \
+	TOKEN_KIND(Token_Shr,      ">>"), \
 \
 	/*TOKEN_KIND(Token_as,        "as"), */\
 	/*TOKEN_KIND(Token_transmute, "transmute"), */\
@@ -83,44 +84,44 @@ TOKEN_KIND(Token__OperatorEnd, "_OperatorEnd"), \
 \
 	TOKEN_KIND(Token__KeywordBegin, "_KeywordBegin"), \
 	/* TODO(bill): Of these keywords are not used but "reserved", why not remove them? */ \
-	TOKEN_KIND(Token_when,          "when"), \
-	TOKEN_KIND(Token_if,            "if"), \
-	TOKEN_KIND(Token_else,          "else"), \
-	TOKEN_KIND(Token_for,           "for"), \
-	TOKEN_KIND(Token_in,            "in"), \
-	TOKEN_KIND(Token_break,         "break"), \
-	TOKEN_KIND(Token_continue,      "continue"), \
-	TOKEN_KIND(Token_fallthrough,   "fallthrough"), \
-	TOKEN_KIND(Token_match,         "match"), \
-	TOKEN_KIND(Token_type,          "type"), \
-	TOKEN_KIND(Token_default,       "default"), \
-	TOKEN_KIND(Token_case,          "case"), \
-	TOKEN_KIND(Token_defer,         "defer"), \
-	TOKEN_KIND(Token_return,        "return"), \
-	TOKEN_KIND(Token_give,          "give"), \
-	TOKEN_KIND(Token_proc,          "proc"), \
-	TOKEN_KIND(Token_macro,         "macro"), \
-	TOKEN_KIND(Token_struct,        "struct"), \
-	TOKEN_KIND(Token_union,         "union"), \
-	TOKEN_KIND(Token_raw_union,     "raw_union"), \
-	TOKEN_KIND(Token_enum,          "enum"), \
-	TOKEN_KIND(Token_vector,        "vector"), \
-	TOKEN_KIND(Token_map,           "map"), \
-	TOKEN_KIND(Token_static,        "static"), \
-	TOKEN_KIND(Token_dynamic,       "dynamic"), \
-	TOKEN_KIND(Token_using,         "using"), \
-	TOKEN_KIND(Token_no_alias,      "no_alias"), \
-	/* TOKEN_KIND(Token_mutable,    "mutable"), */\
+	TOKEN_KIND(Token_when,           "when"), \
+	TOKEN_KIND(Token_if,             "if"), \
+	TOKEN_KIND(Token_else,           "else"), \
+	TOKEN_KIND(Token_for,            "for"), \
+	TOKEN_KIND(Token_in,             "in"), \
+	TOKEN_KIND(Token_break,          "break"), \
+	TOKEN_KIND(Token_continue,       "continue"), \
+	TOKEN_KIND(Token_fallthrough,    "fallthrough"), \
+	TOKEN_KIND(Token_match,          "match"), \
+	/* TOKEN_KIND(Token_type,        "type"), */ \
+	TOKEN_KIND(Token_default,        "default"), \
+	TOKEN_KIND(Token_case,           "case"), \
+	TOKEN_KIND(Token_defer,          "defer"), \
+	TOKEN_KIND(Token_return,         "return"), \
+	TOKEN_KIND(Token_give,           "give"), \
+	TOKEN_KIND(Token_proc,           "proc"), \
+	TOKEN_KIND(Token_macro,          "macro"), \
+	TOKEN_KIND(Token_struct,         "struct"), \
+	TOKEN_KIND(Token_union,          "union"), \
+	TOKEN_KIND(Token_raw_union,      "raw_union"), \
+	TOKEN_KIND(Token_enum,           "enum"), \
+	TOKEN_KIND(Token_vector,         "vector"), \
+	TOKEN_KIND(Token_map,            "map"), \
+	TOKEN_KIND(Token_static,         "static"), \
+	TOKEN_KIND(Token_dynamic,        "dynamic"), \
+	TOKEN_KIND(Token_using,          "using"), \
+	TOKEN_KIND(Token_no_alias,       "no_alias"), \
+	/* TOKEN_KIND(Token_mutable,     "mutable"), */ \
 	/* TOKEN_KIND(Token_immutable,  "immutable"), */\
-	TOKEN_KIND(Token_thread_local,  "thread_local"), \
-	TOKEN_KIND(Token_cast,          "cast"), \
-	TOKEN_KIND(Token_transmute,     "transmute"), \
-	TOKEN_KIND(Token_down_cast,     "down_cast"), \
-	TOKEN_KIND(Token_union_cast,    "union_cast"), \
-	TOKEN_KIND(Token_context,       "context"), \
-	TOKEN_KIND(Token_push_context,  "push_context"), \
-	TOKEN_KIND(Token_push_allocator, "push_allocator"), \
-	TOKEN_KIND(Token_asm,           "asm"), \
+	TOKEN_KIND(Token_thread_local,   "thread_local"), \
+	TOKEN_KIND(Token_cast,           "cast"), \
+	TOKEN_KIND(Token_transmute,      "transmute"), \
+	TOKEN_KIND(Token_down_cast,      "down_cast"), \
+	TOKEN_KIND(Token_union_cast,     "union_cast"), \
+	TOKEN_KIND(Token_context,        "context"), \
+	TOKEN_KIND(Token_push_context,   "push_context"), \
+	TOKEN_KIND(Token_push_allocator, "push_allocator"), \
+	TOKEN_KIND(Token_asm,            "asm"), \
 	TOKEN_KIND(Token__KeywordEnd, "_KeywordEnd"), \
 \
 	TOKEN_KIND(Token_Count, "")
@@ -480,7 +481,6 @@ gb_inline void scan_mantissa(Tokenizer *t, i32 base, bool allow_underscore) {
 	}
 }
-
 
 Token scan_number_to_token(Tokenizer *t, bool seen_decimal_point) {
 	Token token = {0};
 	token.kind = Token_Integer;
@@ -736,20 +736,10 @@ Token tokenizer_get_token(Tokenizer *t) {
 		// NOTE(bill): All keywords are > 1
 		if (token.string.len > 1) {
-			/* if (str_eq(token.string, token_strings[Token_as])) {
-				token.kind = Token_as;
-			} else if (str_eq(token.string, token_strings[Token_transmute])) {
-				token.kind = Token_transmute;
-			} else if (str_eq(token.string, token_strings[Token_down_cast])) {
-				token.kind = Token_down_cast;
-			} else if (str_eq(token.string, token_strings[Token_union_cast])) {
-				token.kind = Token_union_cast;
-			} else */{
-				for (i32 k = Token__KeywordBegin+1; k < Token__KeywordEnd; k++) {
-					if (str_eq(token.string, token_strings[k])) {
-						token.kind = cast(TokenKind)k;
-						break;
-					}
+			for (i32 k = Token__KeywordBegin+1; k < Token__KeywordEnd; k++) {
+				if (str_eq(token.string, token_strings[k])) {
+					token.kind = cast(TokenKind)k;
+					break;
 				}
 			}
 		}
@@ -863,57 +853,28 @@ Token tokenizer_get_token(Tokenizer *t) {
 		} break;
 
-		case '#':
-			token.kind = Token_Hash;
-			break;
-		case '@':
-			token.kind = Token_At;
-			break;
-		case '^':
-			token.kind = Token_Pointer;
-			break;
-		// case '?':
-		// 	token.kind = Token_Maybe;
-		// 	break;
-		case ';':
-			token.kind = Token_Semicolon;
-			break;
-		case ',':
-			token.kind = Token_Comma;
-			break;
-		case ':':
-			token.kind = Token_Colon;
-			break;
-		case '(':
-			token.kind = Token_OpenParen;
-			break;
-		case ')':
-			token.kind = Token_CloseParen;
-			break;
-		case '[':
-			token.kind = Token_OpenBracket;
-			break;
-		case ']':
-			token.kind = Token_CloseBracket;
-			break;
-		case '{':
-			token.kind = Token_OpenBrace;
-			break;
-		case '}':
-			token.kind = Token_CloseBrace;
-			break;
-
-		case '*': token.kind = token_kind_variant2(t, Token_Mul, Token_MulEq); break;
-		case '%': token.kind = token_kind_variant2(t, Token_Mod, Token_ModEq); break;
-		case '=': token.kind = token_kind_variant2(t, Token_Eq,  Token_CmpEq); break;
-		case '~': token.kind = token_kind_variant2(t, Token_Xor, Token_XorEq); break;
-		case '!': token.kind = token_kind_variant2(t, Token_Not, Token_NotEq); break;
-		case '+':
-			token.kind = token_kind_variant3(t, Token_Add, Token_AddEq, '+', Token_Increment);
-			break;
-		case '-':
-			token.kind = token_kind_variant4(t, Token_Sub, Token_SubEq, '-', Token_Decrement, '>', Token_ArrowRight);
-			break;
+		case '#': token.kind = Token_Hash;         break;
+		case '@': token.kind = Token_At;           break;
+		case '$': token.kind = Token_Dollar;       break;
+		case '?': token.kind = Token_Question;     break;
+		case '^': token.kind = Token_Pointer;      break;
+		case ';': token.kind = Token_Semicolon;    break;
+		case ',': token.kind = Token_Comma;        break;
+		case ':': token.kind = Token_Colon;        break;
+		case '(': token.kind = Token_OpenParen;    break;
+		case ')': token.kind = Token_CloseParen;   break;
+		case '[': token.kind = Token_OpenBracket;  break;
+		case ']': token.kind = Token_CloseBracket; break;
+		case '{': token.kind = Token_OpenBrace;    break;
+		case '}': token.kind = Token_CloseBrace;   break;
+
+		case '*': token.kind = token_kind_variant2(t, Token_Mul, Token_MulEq); break;
+		case '%': token.kind = token_kind_variant2(t, Token_Mod, Token_ModEq); break;
+		case '=': token.kind = token_kind_variant2(t, Token_Eq,  Token_CmpEq); break;
+		case '~': token.kind = token_kind_variant2(t, Token_Xor, Token_XorEq); break;
+		case '!': token.kind = token_kind_variant2(t, Token_Not, Token_NotEq); break;
+		case '+': token.kind = token_kind_variant3(t, Token_Add, Token_AddEq, '+', Token_Increment); break;
+		case '-': token.kind = token_kind_variant4(t, Token_Sub, Token_SubEq, '-', Token_Decrement, '>', Token_ArrowRight); break;
 
 		case '/': {
 			if (t->curr_rune == '/') {
 				while (t->curr_rune != '\n' && t->curr_rune != GB_RUNE_EOF) {
@@ -953,9 +914,7 @@ Token tokenizer_get_token(Tokenizer *t) {
 			token.kind = token_kind_dub_eq(t, '<', Token_Lt, Token_LtEq, Token_Shl, Token_ShlEq);
 		} break;
 
-		case '>':
-			token.kind = token_kind_dub_eq(t, '>', Token_Gt, Token_GtEq, Token_Shr, Token_ShrEq);
-			break;
+		case '>': token.kind = token_kind_dub_eq(t, '>', Token_Gt, Token_GtEq, Token_Shr, Token_ShrEq); break;
 
 		case '&':
 			token.kind = Token_And;
```
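Taken together, `parse_match_stmt` now distinguishes the match forms by shape alone rather than by keyword. A rough sketch of the three accepted shapes, with illustrative names (`x`, `next`, `v`, `u` are hypothetical):

```odin
match x {               // tag expression only: a value match
case 1:
}
match x := next(); x {  // init statement + tag, separated by `;`
case 1:
}
match v in u {          // `in` assignment: dispatched as a type match
case int:
}
```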