path: root/src/tokenizer.cpp
author     gingerBill <ginger.bill.22@gmail.com>  2016-07-23 11:41:11 +0100
committer  gingerBill <ginger.bill.22@gmail.com>  2016-07-23 11:41:11 +0100
commit     3fe7fc344d7d17a571a01e531db4a0e5ff057c9f (patch)
tree       ca617e7e190fd9cf2989bdd3a12e8bff37004f26 /src/tokenizer.cpp
parent     f8fd6fce0b9aabd9562ac8d0dda712154b829f26 (diff)
Compound literals and Warnings
Diffstat (limited to 'src/tokenizer.cpp')
-rw-r--r--  src/tokenizer.cpp  62
1 file changed, 55 insertions, 7 deletions
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 4edf8ab1d..fd3683d91 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -209,18 +209,64 @@ char const *TOKEN_STRINGS[] = {
};
+struct TokenPos {
+ String file;
+ isize line, column;
+};
+
+b32 token_pos_are_equal(TokenPos a, TokenPos b) {
+ if (a.line == b.line) {
+ if (a.column == b.column) {
+ return are_strings_equal(a.file, b.file);
+ }
+ }
+ return false;
+
+}
+
// NOTE(bill): Text is UTF-8, thus why u8 and not char
-typedef struct Token Token;
struct Token {
TokenKind kind;
String string;
- isize line, column;
+ TokenPos pos;
};
-
Token empty_token = {Token_Invalid};
+struct ErrorCollector {
+ TokenPos prev;
+ isize count;
+};
+
+void error(ErrorCollector *ec, Token token, char *fmt, ...) {
+ // NOTE(bill): Duplicate error, skip it
+ if (!token_pos_are_equal(ec->prev, token.pos)) {
+ ec->prev = token.pos;
+
+ va_list va;
+ va_start(va, fmt);
+ gb_printf_err("%.*s(%td:%td) Error: %s\n",
+ LIT(token.pos.file), token.pos.line, token.pos.column,
+ gb_bprintf_va(fmt, va));
+ va_end(va);
+
+ }
+ ec->count++;
+}
+
+void warning(Token token, char *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ gb_printf_err("%.*s(%td:%td) Warning: %s\n",
+ LIT(token.pos.file), token.pos.line, token.pos.column,
+ gb_bprintf_va(fmt, va));
+ va_end(va);
+}
+
+
+
+
char const *token_kind_to_string(TokenKind kind) {
return TOKEN_STRINGS[kind];
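
For context on the hunk above: the new error() only suppresses a report when the token's position exactly matches the previous one recorded in the ErrorCollector, yet ec->count is incremented on every call, so repeated errors at the same token print once but are all counted. A minimal standalone sketch of that behaviour (plain C++ I/O in place of the gb_* helpers, and a simplified TokenPos without the String file field; both are assumptions made here for brevity, not part of the commit):

    #include <cstdarg>
    #include <cstdio>

    // Simplified stand-ins for the types used in the diff (assumed shapes).
    struct TokenPos { long line, column; };
    struct ErrorCollector { TokenPos prev; long count; };

    static bool token_pos_are_equal(TokenPos a, TokenPos b) {
        return a.line == b.line && a.column == b.column;
    }

    // Mirrors error(): skip printing when the position matches the previous
    // error, but still bump the error count.
    static void report_error(ErrorCollector *ec, TokenPos pos, char const *fmt, ...) {
        if (!token_pos_are_equal(ec->prev, pos)) {
            ec->prev = pos;
            va_list va;
            va_start(va, fmt);
            std::fprintf(stderr, "(%ld:%ld) Error: ", pos.line, pos.column);
            std::vfprintf(stderr, fmt, va);
            std::fprintf(stderr, "\n");
            va_end(va);
        }
        ec->count++;
    }

    int main() {
        ErrorCollector ec = {};
        TokenPos p = {12, 4};
        report_error(&ec, p, "unexpected token");               // printed
        report_error(&ec, p, "unexpected token");               // suppressed, same position
        std::fprintf(stderr, "total errors: %ld\n", ec.count);  // prints 2
        return 0;
    }

Running this prints the message once but reports a total of two errors, matching the dedup-on-position, count-everything behaviour in the diff.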
@@ -432,8 +478,9 @@ Token scan_number_to_token(Tokenizer *t, b32 seen_decimal_point) {
u8 *start_curr = t->curr;
token.kind = Token_Integer;
token.string = make_string(start_curr, 1);
- token.line = t->line_count;
- token.column = t->curr-t->line+1;
+ token.pos.file = t->fullpath;
+ token.pos.line = t->line_count;
+ token.pos.column = t->curr-t->line+1;
if (seen_decimal_point) {
start_curr--;
@@ -595,8 +642,9 @@ Token tokenizer_get_token(Tokenizer *t) {
tokenizer_skip_whitespace(t);
token.string = make_string(t->curr, 1);
- token.line = t->line_count;
- token.column = t->curr - t->line + 1;
+ token.pos.file = t->fullpath;
+ token.pos.line = t->line_count;
+ token.pos.column = t->curr - t->line + 1;
curr_rune = t->curr_rune;
if (rune_is_letter(curr_rune)) {
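
A side note on the column arithmetic repeated in the last two hunks: t->line appears to point at the first byte of the current line and t->curr at the byte being scanned, so t->curr - t->line + 1 gives a 1-based byte column rather than a rune count (the text is UTF-8, per the NOTE above). A tiny sketch of that arithmetic with assumed stand-in pointers:

    #include <cstddef>
    #include <cstdio>

    int main() {
        char const *line = "x := foo(bar);";        // assumed: start of the current line
        char const *curr = line + 5;                // assumed: tokenizer sits on 'f'
        std::ptrdiff_t column = (curr - line) + 1;  // 1-based byte column, as in the diff
        std::printf("column = %td\n", column);      // prints 6
        return 0;
    }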