aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorgingerBill <gingerBill@users.noreply.github.com>2023-08-07 11:02:01 +0100
committerGitHub <noreply@github.com>2023-08-07 11:02:01 +0100
commit77e5854a16ea9396752d784510169c5856f044ae (patch)
tree61f1a2a43bd1ff52a1d7fa02bb4660e524221edf /src
parentcb5c8219898445a5501a95107c0200ea68b89a39 (diff)
parent2a42dab108ea1c70962815cc714c0b4d3e42a719 (diff)
Merge branch 'master' into stdlib-parser-fixes
Diffstat (limited to 'src')
-rw-r--r--src/array.cpp11
-rw-r--r--src/build_settings.cpp44
-rw-r--r--src/check_builtin.cpp7
-rw-r--r--src/check_decl.cpp167
-rw-r--r--src/check_expr.cpp334
-rw-r--r--src/check_stmt.cpp17
-rw-r--r--src/check_type.cpp14
-rw-r--r--src/checker.cpp167
-rw-r--r--src/checker.hpp5
-rw-r--r--src/docs_format.cpp50
-rw-r--r--src/docs_writer.cpp8
-rw-r--r--src/entity.cpp19
-rw-r--r--src/error.cpp4
-rw-r--r--src/exact_value.cpp7
-rw-r--r--src/linker.cpp460
-rw-r--r--src/llvm_backend.hpp13
-rw-r--r--src/llvm_backend_const.cpp136
-rw-r--r--src/llvm_backend_debug.cpp17
-rw-r--r--src/llvm_backend_expr.cpp44
-rw-r--r--src/llvm_backend_general.cpp242
-rw-r--r--src/llvm_backend_proc.cpp21
-rw-r--r--src/llvm_backend_stmt.cpp5
-rw-r--r--src/llvm_backend_type.cpp13
-rw-r--r--src/llvm_backend_utility.cpp7
-rw-r--r--src/main.cpp599
-rw-r--r--src/parser.cpp173
-rw-r--r--src/parser.hpp3
-rw-r--r--src/tilde.cpp813
-rw-r--r--src/tilde.hpp373
-rw-r--r--src/tilde/tb.h1101
-rw-r--r--src/tilde/tb.libbin0 -> 4173944 bytes
-rw-r--r--src/tilde/tb_arena.h76
-rw-r--r--src/tilde/tb_coff.h330
-rw-r--r--src/tilde/tb_elf.h170
-rw-r--r--src/tilde/tb_formats.h132
-rw-r--r--src/tilde/tb_x64.h90
-rw-r--r--src/tilde_builtin.cpp443
-rw-r--r--src/tilde_const.cpp1040
-rw-r--r--src/tilde_debug.cpp482
-rw-r--r--src/tilde_expr.cpp3871
-rw-r--r--src/tilde_proc.cpp1307
-rw-r--r--src/tilde_stmt.cpp2614
-rw-r--r--src/tilde_type_info.cpp983
-rw-r--r--src/tokenizer.cpp4
-rw-r--r--src/types.cpp197
45 files changed, 15414 insertions, 1199 deletions
diff --git a/src/array.cpp b/src/array.cpp
index d8e25d25d..5d602cebc 100644
--- a/src/array.cpp
+++ b/src/array.cpp
@@ -168,6 +168,17 @@ gb_internal gb_inline Slice<T> slice(Slice<T> const &array, isize lo, isize hi)
}
return out;
}
+template <typename T>
+gb_internal gb_inline Slice<T> slice(Array<T> const &array, isize lo, isize hi) {
+ GB_ASSERT(0 <= lo && lo <= hi && hi <= array.count);
+ Slice<T> out = {};
+ isize len = hi-lo;
+ if (len > 0) {
+ out.data = array.data+lo;
+ out.count = len;
+ }
+ return out;
+}
template <typename T>
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index 866631f9a..ef68a399b 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -216,6 +216,43 @@ enum BuildPath : u8 {
BuildPathCOUNT,
};
+enum VetFlags : u64 {
+ VetFlag_NONE = 0,
+ VetFlag_Unused = 1u<<0, // 1
+ VetFlag_Shadowing = 1u<<1, // 2
+ VetFlag_UsingStmt = 1u<<2, // 4
+ VetFlag_UsingParam = 1u<<3, // 8
+ VetFlag_Style = 1u<<4, // 16
+ VetFlag_Semicolon = 1u<<5, // 32
+
+ VetFlag_Extra = 1u<<16,
+
+ VetFlag_All = VetFlag_Unused|VetFlag_Shadowing|VetFlag_UsingStmt, // excluding extra
+
+ VetFlag_Using = VetFlag_UsingStmt|VetFlag_UsingParam,
+};
+
+u64 get_vet_flag_from_name(String const &name) {
+ if (name == "unused") {
+ return VetFlag_Unused;
+ } else if (name == "shadowing") {
+ return VetFlag_Shadowing;
+ } else if (name == "using-stmt") {
+ return VetFlag_UsingStmt;
+ } else if (name == "using-param") {
+ return VetFlag_UsingParam;
+ } else if (name == "style") {
+ return VetFlag_Style;
+ } else if (name == "semicolon") {
+ return VetFlag_Semicolon;
+ } else if (name == "extra") {
+ return VetFlag_Extra;
+ }
+ return VetFlag_NONE;
+}
+
+
+
// This stores the information for the specify architecture of this build
struct BuildContext {
// Constants
@@ -255,6 +292,8 @@ struct BuildContext {
String resource_filepath;
String pdb_filepath;
+ u64 vet_flags;
+
bool has_resource;
String link_flags;
String extra_linker_flags;
@@ -280,15 +319,12 @@ struct BuildContext {
bool no_entry_point;
bool no_thread_local;
bool use_lld;
- bool vet;
- bool vet_extra;
bool cross_compiling;
bool different_os;
bool keep_object_files;
bool disallow_do;
bool strict_style;
- bool strict_style_init_only;
bool ignore_warnings;
bool warnings_as_errors;
@@ -318,6 +354,8 @@ struct BuildContext {
isize max_error_count;
+ bool tilde_backend;
+
u32 cmd_doc_flags;
Array<String> extra_packages;
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 269a0ec48..2e65c5750 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -1406,7 +1406,7 @@ gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *o
}
return false;
} else if (name == "load_or") {
- warning(call, "'#load_or' is deprecated in favour of '#load(path) or_else default'");
+ error(call, "'#load_or' has now been removed in favour of '#load(path) or_else default'");
if (ce->args.count != 2) {
if (ce->args.count == 0) {
@@ -1748,7 +1748,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
mode = Addressing_Constant;
value = exact_value_i64(at->EnumeratedArray.count);
type = t_untyped_integer;
- } else if ((is_type_slice(op_type) || is_type_relative_slice(op_type)) && id == BuiltinProc_len) {
+ } else if (is_type_slice(op_type) && id == BuiltinProc_len) {
mode = Addressing_Value;
} else if (is_type_dynamic_array(op_type)) {
mode = Addressing_Value;
@@ -3692,6 +3692,9 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
case Type_SimdVector:
operand->type = alloc_type_multi_pointer(base_array_type(base));
break;
+ case Type_Matrix:
+ operand->type = alloc_type_multi_pointer(base->Matrix.elem);
+ break;
}
}
break;
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index b651e33e6..587d749b4 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -7,13 +7,15 @@ gb_internal Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *o
e->type == t_invalid) {
if (operand->mode == Addressing_Builtin) {
+ ERROR_BLOCK();
gbString expr_str = expr_to_string(operand->expr);
- // TODO(bill): is this a good enough error message?
error(operand->expr,
- "Cannot assign built-in procedure '%s' in %.*s",
- expr_str,
- LIT(context_name));
+ "Cannot assign built-in procedure '%s' in %.*s",
+ expr_str,
+ LIT(context_name));
+
+ error_line("\tBuilt-in procedures are implemented by the compiler and might not be actually instantiated procedure\n");
operand->mode = Addressing_Invalid;
@@ -159,9 +161,8 @@ gb_internal void check_init_constant(CheckerContext *ctx, Entity *e, Operand *op
}
if (operand->mode != Addressing_Constant) {
- // TODO(bill): better error
gbString str = expr_to_string(operand->expr);
- error(operand->expr, "'%s' is not a constant", str);
+ error(operand->expr, "'%s' is not a compile-time known constant", str);
gb_string_free(str);
if (e->type == nullptr) {
e->type = t_invalid;
@@ -354,31 +355,7 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr,
// using decl
if (decl->is_using) {
- warning(init_expr, "'using' an enum declaration is not allowed, prefer using implicit selector expressions e.g. '.A'");
- #if 1
- // NOTE(bill): Must be an enum declaration
- if (te->kind == Ast_EnumType) {
- Scope *parent = e->scope;
- if (parent->flags&ScopeFlag_File) {
- // NOTE(bill): Use package scope
- parent = parent->parent;
- }
-
- Type *t = base_type(e->type);
- if (t->kind == Type_Enum) {
- for (Entity *f : t->Enum.fields) {
- if (f->kind != Entity_Constant) {
- continue;
- }
- String name = f->token.string;
- if (is_blank_ident(name)) {
- continue;
- }
- add_entity(ctx, parent, nullptr, f);
- }
- }
- }
- #endif
+ error(init_expr, "'using' an enum declaration is not allowed, prefer using implicit selector expressions e.g. '.A'");
}
}
@@ -757,6 +734,66 @@ gb_internal String handle_link_name(CheckerContext *ctx, Token token, String lin
return link_name;
}
+gb_internal void check_objc_methods(CheckerContext *ctx, Entity *e, AttributeContext const &ac) {
+ if (!(ac.objc_name.len || ac.objc_is_class_method || ac.objc_type)) {
+ return;
+ }
+ if (ac.objc_name.len == 0 && ac.objc_is_class_method) {
+ error(e->token, "@(objc_name) is required with @(objc_is_class_method)");
+ } else if (ac.objc_type == nullptr) {
+ error(e->token, "@(objc_name) requires that @(objc_type) to be set");
+ } else if (ac.objc_name.len == 0 && ac.objc_type) {
+ error(e->token, "@(objc_name) is required with @(objc_type)");
+ } else {
+ Type *t = ac.objc_type;
+ if (t->kind == Type_Named) {
+ Entity *tn = t->Named.type_name;
+
+ GB_ASSERT(tn->kind == Entity_TypeName);
+
+ if (tn->scope != e->scope) {
+ error(e->token, "@(objc_name) attribute may only be applied to procedures and types within the same scope");
+ } else {
+ mutex_lock(&global_type_name_objc_metadata_mutex);
+ defer (mutex_unlock(&global_type_name_objc_metadata_mutex));
+
+ if (!tn->TypeName.objc_metadata) {
+ tn->TypeName.objc_metadata = create_type_name_obj_c_metadata();
+ }
+ auto *md = tn->TypeName.objc_metadata;
+ mutex_lock(md->mutex);
+ defer (mutex_unlock(md->mutex));
+
+ if (!ac.objc_is_class_method) {
+ bool ok = true;
+ for (TypeNameObjCMetadataEntry const &entry : md->value_entries) {
+ if (entry.name == ac.objc_name) {
+ error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
+ ok = false;
+ break;
+ }
+ }
+ if (ok) {
+ array_add(&md->value_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
+ }
+ } else {
+ bool ok = true;
+ for (TypeNameObjCMetadataEntry const &entry : md->type_entries) {
+ if (entry.name == ac.objc_name) {
+ error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
+ ok = false;
+ break;
+ }
+ }
+ if (ok) {
+ array_add(&md->type_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
+ }
+ }
+ }
+ }
+ }
+}
+
gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
GB_ASSERT(e->type == nullptr);
if (d->proc_lit->kind != Ast_ProcLit) {
@@ -840,62 +877,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
}
e->Procedure.optimization_mode = cast(ProcedureOptimizationMode)ac.optimization_mode;
- if (ac.objc_name.len || ac.objc_is_class_method || ac.objc_type) {
- if (ac.objc_name.len == 0 && ac.objc_is_class_method) {
- error(e->token, "@(objc_name) is required with @(objc_is_class_method)");
- } else if (ac.objc_type == nullptr) {
- error(e->token, "@(objc_name) requires that @(objc_type) to be set");
- } else if (ac.objc_name.len == 0 && ac.objc_type) {
- error(e->token, "@(objc_name) is required with @(objc_type)");
- } else {
- Type *t = ac.objc_type;
- if (t->kind == Type_Named) {
- Entity *tn = t->Named.type_name;
-
- GB_ASSERT(tn->kind == Entity_TypeName);
-
- if (tn->scope != e->scope) {
- error(e->token, "@(objc_name) attribute may only be applied to procedures and types within the same scope");
- } else {
- mutex_lock(&global_type_name_objc_metadata_mutex);
- defer (mutex_unlock(&global_type_name_objc_metadata_mutex));
-
- if (!tn->TypeName.objc_metadata) {
- tn->TypeName.objc_metadata = create_type_name_obj_c_metadata();
- }
- auto *md = tn->TypeName.objc_metadata;
- mutex_lock(md->mutex);
- defer (mutex_unlock(md->mutex));
-
- if (!ac.objc_is_class_method) {
- bool ok = true;
- for (TypeNameObjCMetadataEntry const &entry : md->value_entries) {
- if (entry.name == ac.objc_name) {
- error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
- ok = false;
- break;
- }
- }
- if (ok) {
- array_add(&md->value_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
- }
- } else {
- bool ok = true;
- for (TypeNameObjCMetadataEntry const &entry : md->type_entries) {
- if (entry.name == ac.objc_name) {
- error(e->token, "Previous declaration of @(objc_name=\"%.*s\")", LIT(ac.objc_name));
- ok = false;
- break;
- }
- }
- if (ok) {
- array_add(&md->type_entries, TypeNameObjCMetadataEntry{ac.objc_name, e});
- }
- }
- }
- }
- }
- }
+ check_objc_methods(ctx, e, ac);
if (ac.require_target_feature.len != 0 && ac.enable_target_feature.len != 0) {
error(e->token, "Attributes @(require_target_feature=...) and @(enable_target_feature=...) cannot be used together");
@@ -1059,7 +1041,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
auto *fp = &ctx->info->foreigns;
StringHashKey key = string_hash_string(name);
Entity **found = string_map_get(fp, key);
- if (found) {
+ if (found && e != *found) {
Entity *f = *found;
TokenPos pos = f->token.pos;
Type *this_type = base_type(e->type);
@@ -1241,7 +1223,7 @@ gb_internal void check_global_variable_decl(CheckerContext *ctx, Entity *&e, Ast
check_rtti_type_disallowed(e->token, e->type, "A variable declaration is using a type, %s, which has been disallowed");
}
-gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, DeclInfo *d) {
+gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *pg_entity, DeclInfo *d) {
GB_ASSERT(pg_entity->kind == Entity_ProcGroup);
auto *pge = &pg_entity->ProcGroup;
String proc_group_name = pg_entity->token.string;
@@ -1366,6 +1348,11 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity,
}
}
+ AttributeContext ac = {};
+ check_decl_attributes(ctx, d->attributes, proc_group_attribute, &ac);
+ check_objc_methods(ctx, pg_entity, ac);
+
+
}
gb_internal void check_entity_decl(CheckerContext *ctx, Entity *e, DeclInfo *d, Type *named_type) {
@@ -1626,7 +1613,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
}
check_close_scope(ctx);
- check_scope_usage(ctx->checker, ctx->scope);
+ check_scope_usage(ctx->checker, ctx->scope, check_vet_flags(body));
add_deps_from_child_to_parent(decl);
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index b662c231f..14a4eebc8 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -349,6 +349,10 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
return false;
}
+ if (base_entity->flags & EntityFlag_Disabled) {
+ return false;
+ }
+
String name = base_entity->token.string;
Type *src = base_type(base_entity->type);
@@ -462,7 +466,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
{
- // LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice
+ // LEAK NOTE(bill): This is technically a memory leak as it has to generate the type twice
bool prev_no_polymorphic_errors = nctx.no_polymorphic_errors;
defer (nctx.no_polymorphic_errors = prev_no_polymorphic_errors);
nctx.no_polymorphic_errors = false;
@@ -470,7 +474,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
// NOTE(bill): Reset scope from the failed procedure type
scope_reset(scope);
- // LEAK TODO(bill): Cloning this AST may be leaky
+ // LEAK NOTE(bill): Cloning this AST may be leaky but this is not really an issue due to arena-based allocation
Ast *cloned_proc_type_node = clone_ast(pt->node);
success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands);
if (!success) {
@@ -778,16 +782,6 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
}
}
- // ^T <- rawptr
-#if 0
- // TODO(bill): Should C-style (not C++) pointer cast be allowed?
- if (is_type_pointer(dst) && is_type_rawptr(src)) {
- return true;
- }
-#endif
-#if 1
-
-
// rawptr <- ^T
if (are_types_identical(type, t_rawptr) && is_type_pointer(src)) {
return 5;
@@ -808,7 +802,6 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
return 4;
}
}
-#endif
if (is_type_polymorphic(dst) && !is_type_polymorphic(src)) {
bool modify_type = !c->no_polymorphic_errors;
@@ -824,7 +817,6 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
}
}
- // TODO(bill): Determine which rule is a better on in practice
if (dst->Union.variants.count == 1) {
Type *vt = dst->Union.variants[0];
i64 score = check_distance_between_types(c, operand, vt);
@@ -864,8 +856,8 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
}
}
- if (is_type_relative_slice(dst)) {
- i64 score = check_distance_between_types(c, operand, dst->RelativeSlice.slice_type);
+ if (is_type_relative_multi_pointer(dst)) {
+ i64 score = check_distance_between_types(c, operand, dst->RelativeMultiPointer.pointer_type);
if (score >= 0) {
return score+2;
}
@@ -1013,8 +1005,8 @@ gb_internal AstPackage *get_package_of_type(Type *type) {
case Type_RelativePointer:
type = type->RelativePointer.pointer_type;
continue;
- case Type_RelativeSlice:
- type = type->RelativeSlice.slice_type;
+ case Type_RelativeMultiPointer:
+ type = type->RelativeMultiPointer.pointer_type;
continue;
}
return nullptr;
@@ -1093,7 +1085,7 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
// TODO(bill): is this a good enough error message?
error(operand->expr,
- "Cannot assign overloaded procedure '%s' to '%s' in %.*s",
+ "Cannot assign overloaded procedure group '%s' to '%s' in %.*s",
expr_str,
op_type_str,
LIT(context_name));
@@ -1120,7 +1112,6 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
switch (operand->mode) {
case Addressing_Builtin:
- // TODO(bill): Actually allow built in procedures to be passed around and thus be created on use
error(operand->expr,
"Cannot assign built-in procedure '%s' in %.*s",
expr_str,
@@ -1412,9 +1403,6 @@ gb_internal bool is_polymorphic_type_assignable(CheckerContext *c, Type *poly, T
return false;
case Type_Proc:
if (source->kind == Type_Proc) {
- // return check_is_assignable_to(c, &o, poly);
- // TODO(bill): Polymorphic type assignment
- #if 1
TypeProc *x = &poly->Proc;
TypeProc *y = &source->Proc;
if (x->calling_convention != y->calling_convention) {
@@ -1447,7 +1435,6 @@ gb_internal bool is_polymorphic_type_assignable(CheckerContext *c, Type *poly, T
}
return true;
- #endif
}
return false;
case Type_Map:
@@ -1699,7 +1686,6 @@ gb_internal bool check_unary_op(CheckerContext *c, Operand *o, Token op) {
gb_string_free(str);
return false;
}
- // TODO(bill): Handle errors correctly
Type *type = base_type(core_array_type(o->type));
gbString str = nullptr;
switch (op.kind) {
@@ -1743,7 +1729,6 @@ gb_internal bool check_unary_op(CheckerContext *c, Operand *o, Token op) {
gb_internal bool check_binary_op(CheckerContext *c, Operand *o, Token op) {
Type *main_type = o->type;
- // TODO(bill): Handle errors correctly
Type *type = base_type(core_array_type(main_type));
Type *ct = core_type(type);
@@ -2261,7 +2246,7 @@ gb_internal bool check_is_not_addressable(CheckerContext *c, Operand *o) {
}
gb_internal void check_old_for_or_switch_value_usage(Ast *expr) {
- if (!build_context.strict_style) {
+ if (!(build_context.strict_style || (check_vet_flags(expr) & VetFlag_Style))) {
return;
}
@@ -2351,7 +2336,7 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
o->type = alloc_type_pointer(o->type);
}
} else {
- if (build_context.strict_style && ast_node_expect(node, Ast_UnaryExpr)) {
+ if (ast_node_expect(node, Ast_UnaryExpr)) {
ast_node(ue, UnaryExpr, node);
check_old_for_or_switch_value_usage(ue->expr);
}
@@ -2775,8 +2760,6 @@ gb_internal void check_shift(CheckerContext *c, Operand *x, Operand *y, Ast *nod
gb_string_free(err_str);
}
- // TODO(bill): Should we support shifts for fixed arrays and #simd vectors?
-
if (!is_type_integer(x->type)) {
gbString err_str = expr_to_string(x->expr);
error(node, "Shift operand '%s' must be an integer", err_str);
@@ -3099,7 +3082,7 @@ gb_internal void check_cast(CheckerContext *c, Operand *x, Type *type) {
update_untyped_expr_type(c, x->expr, final_type, true);
}
- if (build_context.vet_extra) {
+ if (check_vet_flags(x->expr) & VetFlag_Extra) {
if (are_types_identical(x->type, type)) {
gbString str = type_to_string(type);
warning(x->expr, "Unneeded cast to the same type '%s'", str);
@@ -3171,7 +3154,7 @@ gb_internal bool check_transmute(CheckerContext *c, Ast *node, Operand *o, Type
return false;
}
- if (build_context.vet_extra) {
+ if (check_vet_flags(node) & VetFlag_Extra) {
if (are_types_identical(o->type, dst_t)) {
gbString str = type_to_string(dst_t);
warning(o->expr, "Unneeded transmute to the same type '%s'", str);
@@ -4437,7 +4420,6 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v
case_end;
default:
- // TODO(bill): Should this be a general fallback?
if (success_) *success_ = true;
if (finish_) *finish_ = true;
return empty_exact_value;
@@ -4793,8 +4775,6 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
}
if (entity == nullptr && selector->kind == Ast_Ident && is_type_array(type_deref(operand->type))) {
- // TODO(bill): Simd_Vector swizzling
-
String field_name = selector->Ident.token.string;
if (1 < field_name.len && field_name.len <= 4) {
u8 swizzles_xyzw[4] = {'x', 'y', 'z', 'w'};
@@ -5112,27 +5092,6 @@ gb_internal bool check_identifier_exists(Scope *s, Ast *node, bool nested = fals
return false;
}
-gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lhs, isize lhs_count, isize tuple_index, isize tuple_count) {
- if (lhs != nullptr && c->decl != nullptr) {
- for (isize j = 0; (tuple_index + j) < lhs_count && j < tuple_count; j++) {
- Entity *e = lhs[tuple_index + j];
- if (e != nullptr) {
- DeclInfo *decl = decl_info_of_entity(e);
- if (decl != nullptr) {
- rw_mutex_shared_lock(&decl->deps_mutex);
- rw_mutex_lock(&c->decl->deps_mutex);
- for (Entity *dep : decl->deps) {
- ptr_set_add(&c->decl->deps, dep);
- }
- rw_mutex_unlock(&c->decl->deps_mutex);
- rw_mutex_shared_unlock(&decl->deps_mutex);
- }
- }
- }
- }
- return tuple_count;
-}
-
gb_internal bool check_no_copy_assignment(Operand const &o, String const &context) {
if (o.type && is_type_no_copy(o.type)) {
Ast *expr = unparen_expr(o.expr);
@@ -5240,6 +5199,31 @@ enum UnpackFlag : u32 {
gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize lhs_count, Array<Operand> *operands, Slice<Ast *> const &rhs_arguments, UnpackFlags flags) {
+ auto const &add_dependencies_from_unpacking = [](CheckerContext *c, Entity **lhs, isize lhs_count, isize tuple_index, isize tuple_count) -> isize {
+ if (lhs == nullptr || c->decl == nullptr) {
+ return tuple_count;
+ }
+ for (isize j = 0; (tuple_index + j) < lhs_count && j < tuple_count; j++) {
+ Entity *e = lhs[tuple_index + j];
+ if (e == nullptr) {
+ continue;
+ }
+ DeclInfo *decl = decl_info_of_entity(e);
+ if (decl == nullptr) {
+ continue;
+ }
+ rw_mutex_shared_lock(&decl->deps_mutex);
+ rw_mutex_lock(&c->decl->deps_mutex);
+ for (Entity *dep : decl->deps) {
+ ptr_set_add(&c->decl->deps, dep);
+ }
+ rw_mutex_unlock(&c->decl->deps_mutex);
+ rw_mutex_shared_unlock(&decl->deps_mutex);
+ }
+ return tuple_count;
+ };
+
+
bool allow_ok = (flags & UnpackFlag_AllowOk) != 0;
bool is_variadic = (flags & UnpackFlag_IsVariadic) != 0;
bool allow_undef = (flags & UnpackFlag_AllowUndef) != 0;
@@ -5494,6 +5478,8 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
auto variadic_operands = slice(slice_from_array(positional_operands), positional_operand_count, positional_operands.count);
+ bool named_variadic_param = false;
+
if (named_operands.count != 0) {
GB_ASSERT(ce->split_args->named.count == named_operands.count);
for_array(i, ce->split_args->named) {
@@ -5519,6 +5505,9 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
err = CallArgumentError_ParameterNotFound;
continue;
}
+ if (pt->variadic && param_index == pt->variadic_index) {
+ named_variadic_param = true;
+ }
if (visited[param_index]) {
if (show_error) {
error(arg, "Duplicate parameter '%.*s' in procedure call", LIT(name));
@@ -5720,11 +5709,6 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
}
continue;
}
-
- if (param_is_variadic) {
- continue;
- }
-
score += eval_param_and_score(c, o, e->type, err, param_is_variadic, e, show_error);
}
}
@@ -6136,7 +6120,6 @@ gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c,
{
// NOTE(bill, 2019-07-13): This code is used to improve the type inference for procedure groups
// where the same positional parameter has the same type value (and ellipsis)
- bool proc_arg_count_all_equal = true;
isize proc_arg_count = -1;
for (Entity *p : procs) {
Type *pt = base_type(p->type);
@@ -6144,15 +6127,12 @@ gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c,
if (proc_arg_count < 0) {
proc_arg_count = pt->Proc.param_count;
} else {
- if (proc_arg_count != pt->Proc.param_count) {
- proc_arg_count_all_equal = false;
- break;
- }
+ proc_arg_count = gb_min(proc_arg_count, pt->Proc.param_count);
}
}
}
- if (proc_arg_count >= 0 && proc_arg_count_all_equal) {
+ if (proc_arg_count >= 0) {
lhs_count = proc_arg_count;
if (lhs_count > 0) {
lhs = gb_alloc_array(heap_allocator(), Entity *, lhs_count);
@@ -6258,14 +6238,18 @@ gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c,
}
isize index = i;
+ ValidIndexAndScore item = {};
+ item.score = data.score;
+
if (data.gen_entity != nullptr) {
array_add(&proc_entities, data.gen_entity);
index = proc_entities.count-1;
+
+ // prefer non-polymorphic procedures over polymorphic
+ item.score += assign_score_function(1);
}
- ValidIndexAndScore item = {};
item.index = index;
- item.score = data.score;
array_add(&valids, item);
}
}
@@ -6328,9 +6312,44 @@ gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c,
print_argument_types();
}
+ if (procs.count == 0) {
+ procs = proc_group_entities_cloned(c, *operand);
+ }
if (procs.count > 0) {
error_line("Did you mean to use one of the following:\n");
}
+ isize max_name_length = 0;
+ isize max_type_length = 0;
+ for (Entity *proc : procs) {
+ Type *t = base_type(proc->type);
+ if (t == t_invalid) continue;
+ String prefix = {};
+ String prefix_sep = {};
+ if (proc->pkg) {
+ prefix = proc->pkg->name;
+ prefix_sep = str_lit(".");
+ }
+ String name = proc->token.string;
+ max_name_length = gb_max(max_name_length, prefix.len + prefix_sep.len + name.len);
+
+ gbString pt;
+ if (t->Proc.node != nullptr) {
+ pt = expr_to_string(t->Proc.node);
+ } else {
+ pt = type_to_string(t);
+ }
+
+ max_type_length = gb_max(max_type_length, gb_string_length(pt));
+ gb_string_free(pt);
+ }
+
+ isize max_spaces = gb_max(max_name_length, max_type_length);
+ char *spaces = gb_alloc_array(temporary_allocator(), char, max_spaces+1);
+ for (isize i = 0; i < max_spaces; i++) {
+ spaces[i] = ' ';
+ }
+ spaces[max_spaces] = 0;
+
for (Entity *proc : procs) {
TokenPos pos = proc->token.pos;
Type *t = base_type(proc->type);
@@ -6350,12 +6369,23 @@ gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c,
prefix_sep = str_lit(".");
}
String name = proc->token.string;
+ isize len = prefix.len + prefix_sep.len + name.len;
+
+ int name_padding = cast(int)gb_max(max_name_length - len, 0);
+ int type_padding = cast(int)gb_max(max_type_length - gb_string_length(pt), 0);
char const *sep = "::";
if (proc->kind == Entity_Variable) {
sep = ":=";
}
- error_line("\t%.*s%.*s%.*s %s %s at %s\n", LIT(prefix), LIT(prefix_sep), LIT(name), sep, pt, token_pos_to_string(pos));
+ error_line("\t%.*s%.*s%.*s %.*s%s %s %.*sat %s\n",
+ LIT(prefix), LIT(prefix_sep), LIT(name),
+ name_padding, spaces,
+ sep,
+ pt,
+ type_padding, spaces,
+ token_pos_to_string(pos)
+ );
}
if (procs.count > 0) {
error_line("\n");
@@ -6369,8 +6399,8 @@ gb_internal CallArgumentData check_call_arguments_proc_group(CheckerContext *c,
error(operand->expr, "Ambiguous procedure group call '%s' that match with the given arguments", expr_name);
print_argument_types();
- for (isize i = 0; i < valids.count; i++) {
- Entity *proc = proc_entities[valids[i].index];
+ for (auto const &valid : valids) {
+ Entity *proc = proc_entities[valid.index];
GB_ASSERT(proc != nullptr);
TokenPos pos = proc->token.pos;
Type *t = base_type(proc->type); GB_ASSERT(t->kind == Type_Proc);
@@ -7107,7 +7137,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
i32 id = operand->builtin_id;
Entity *e = entity_of_node(operand->expr);
if (e != nullptr && e->token.string == "expand_to_tuple") {
- warning(operand->expr, "'expand_to_tuple' has been replaced with 'expand_values'");
+ error(operand->expr, "'expand_to_tuple' has been replaced with 'expand_values'");
}
if (!check_builtin_procedure(c, operand, call, id, type_hint)) {
operand->mode = Addressing_Invalid;
@@ -7128,6 +7158,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
c->decl->defer_used += 1;
}
}
+ add_entity_use(c, operand->expr, initial_entity);
}
if (operand->mode != Addressing_ProcGroup) {
@@ -7335,11 +7366,11 @@ gb_internal bool check_set_index_data(Operand *o, Type *t, bool indirection, i64
}
return true;
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
{
- Type *slice_type = base_type(t->RelativeSlice.slice_type);
- GB_ASSERT(slice_type->kind == Type_Slice);
- o->type = slice_type->Slice.elem;
+ Type *pointer_type = base_type(t->RelativeMultiPointer.pointer_type);
+ GB_ASSERT(pointer_type->kind == Type_MultiPointer);
+ o->type = pointer_type->MultiPointer.elem;
if (o->mode != Addressing_Constant) {
o->mode = Addressing_Variable;
}
@@ -9315,13 +9346,13 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast
ExprKind kind = check_expr_base(c, &x, se->expr, nullptr);
c->allow_arrow_right_selector_expr = allow_arrow_right_selector_expr;
- if (x.mode == Addressing_Invalid || x.type == t_invalid) {
+ if (x.mode == Addressing_Invalid || (x.type == t_invalid && x.mode != Addressing_ProcGroup)) {
o->mode = Addressing_Invalid;
o->type = t_invalid;
o->expr = node;
return kind;
}
- if (!is_type_proc(x.type)) {
+ if (!is_type_proc(x.type) && x.mode != Addressing_ProcGroup) {
gbString type_str = type_to_string(x.type);
error(se->call, "Selector call expressions expect a procedure type for the call, got '%s'", type_str);
gb_string_free(type_str);
@@ -9344,76 +9375,76 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast
first_arg->state_flags |= StateFlag_SelectorCallExpr;
}
- Type *pt = base_type(x.type);
- GB_ASSERT(pt->kind == Type_Proc);
- Type *first_type = nullptr;
- String first_arg_name = {};
- if (pt->Proc.param_count > 0) {
- Entity *f = pt->Proc.params->Tuple.variables[0];
- first_type = f->type;
- first_arg_name = f->token.string;
- }
- if (first_arg_name.len == 0) {
- first_arg_name = str_lit("_");
- }
+ if (e->kind != Entity_ProcGroup) {
+ Type *pt = base_type(x.type);
+ GB_ASSERT_MSG(pt->kind == Type_Proc, "%.*s %.*s %s", LIT(e->token.string), LIT(entity_strings[e->kind]), type_to_string(x.type));
+ Type *first_type = nullptr;
+ String first_arg_name = {};
+ if (pt->Proc.param_count > 0) {
+ Entity *f = pt->Proc.params->Tuple.variables[0];
+ first_type = f->type;
+ first_arg_name = f->token.string;
+ }
+ if (first_arg_name.len == 0) {
+ first_arg_name = str_lit("_");
+ }
- if (first_type == nullptr) {
- error(se->call, "Selector call expressions expect a procedure type for the call with at least 1 parameter");
- o->mode = Addressing_Invalid;
- o->type = t_invalid;
- o->expr = node;
- return Expr_Stmt;
- }
+ if (first_type == nullptr) {
+ error(se->call, "Selector call expressions expect a procedure type for the call with at least 1 parameter");
+ o->mode = Addressing_Invalid;
+ o->type = t_invalid;
+ o->expr = node;
+ return Expr_Stmt;
+ }
- Operand y = {};
- y.mode = first_arg->tav.mode;
- y.type = first_arg->tav.type;
- y.value = first_arg->tav.value;
+ Operand y = {};
+ y.mode = first_arg->tav.mode;
+ y.type = first_arg->tav.type;
+ y.value = first_arg->tav.value;
- if (check_is_assignable_to(c, &y, first_type)) {
- // Do nothing, it's valid
- } else {
- Operand z = y;
- z.type = type_deref(y.type);
- if (check_is_assignable_to(c, &z, first_type)) {
- // NOTE(bill): AST GENERATION HACK!
- Token op = {Token_Pointer};
- first_arg = ast_deref_expr(first_arg->file(), first_arg, op);
- } else if (y.mode == Addressing_Variable) {
- Operand w = y;
- w.type = alloc_type_pointer(y.type);
- if (check_is_assignable_to(c, &w, first_type)) {
+ if (check_is_assignable_to(c, &y, first_type)) {
+ // Do nothing, it's valid
+ } else {
+ Operand z = y;
+ z.type = type_deref(y.type);
+ if (check_is_assignable_to(c, &z, first_type)) {
// NOTE(bill): AST GENERATION HACK!
- Token op = {Token_And};
- first_arg = ast_unary_expr(first_arg->file(), op, first_arg);
+ Token op = {Token_Pointer};
+ first_arg = ast_deref_expr(first_arg->file(), first_arg, op);
+ } else if (y.mode == Addressing_Variable) {
+ Operand w = y;
+ w.type = alloc_type_pointer(y.type);
+ if (check_is_assignable_to(c, &w, first_type)) {
+ // NOTE(bill): AST GENERATION HACK!
+ Token op = {Token_And};
+ first_arg = ast_unary_expr(first_arg->file(), op, first_arg);
+ }
}
}
- }
- if (ce->args.count > 0) {
- bool fail = false;
- bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
- for (Ast *arg : ce->args) {
- bool mix = false;
- if (first_is_field_value) {
- mix = arg->kind != Ast_FieldValue;
- } else {
- mix = arg->kind == Ast_FieldValue;
+ if (ce->args.count > 0) {
+ bool fail = false;
+ bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
+ for (Ast *arg : ce->args) {
+ bool mix = false;
+ if (first_is_field_value) {
+ mix = arg->kind != Ast_FieldValue;
+ } else {
+ mix = arg->kind == Ast_FieldValue;
+ }
+ if (mix) {
+ fail = true;
+ break;
+ }
}
- if (mix) {
- fail = true;
- break;
+ if (!fail && first_is_field_value) {
+ Token op = {Token_Eq};
+ AstFile *f = first_arg->file();
+ first_arg = ast_field_value(f, ast_ident(f, make_token_ident(first_arg_name)), first_arg, op);
}
}
- if (!fail && first_is_field_value) {
- Token op = {Token_Eq};
- AstFile *f = first_arg->file();
- first_arg = ast_field_value(f, ast_ident(f, make_token_ident(first_arg_name)), first_arg, op);
- }
}
-
-
auto modified_args = slice_make<Ast *>(heap_allocator(), ce->args.count+1);
modified_args[0] = first_arg;
slice_copy(&modified_args, ce->args, 1);
@@ -9471,14 +9502,14 @@ gb_internal ExprKind check_index_expr(CheckerContext *c, Operand *o, Ast *node,
if (is_const) {
if (is_type_array(t)) {
- // OKay
+ // Okay
} else if (is_type_slice(t)) {
// Okay
} else if (is_type_enumerated_array(t)) {
// Okay
} else if (is_type_string(t)) {
// Okay
- } else if (is_type_relative_slice(t)) {
+ } else if (is_type_relative_multi_pointer(t)) {
// Okay
} else if (is_type_matrix(t)) {
// Okay
@@ -9616,17 +9647,9 @@ gb_internal ExprKind check_slice_expr(CheckerContext *c, Operand *o, Ast *node,
}
break;
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
valid = true;
- o->type = t->RelativeSlice.slice_type;
- if (o->mode != Addressing_Variable) {
- gbString str = expr_to_string(node);
- error(node, "Cannot relative slice '%s', as value is not addressable", str);
- gb_string_free(str);
- o->mode = Addressing_Invalid;
- o->expr = node;
- return kind;
- }
+ o->type = type_deref(o->type);
break;
case Type_EnumeratedArray:
@@ -9705,8 +9728,19 @@ gb_internal ExprKind check_slice_expr(CheckerContext *c, Operand *o, Ast *node,
x[i:n] -> []T
*/
o->type = alloc_type_slice(t->MultiPointer.elem);
+ } else if (t->kind == Type_RelativeMultiPointer && se->high != nullptr) {
+ /*
+ x[:] -> [^]T
+ x[i:] -> [^]T
+ x[:n] -> []T
+ x[i:n] -> []T
+ */
+ Type *pointer_type = base_type(t->RelativeMultiPointer.pointer_type);
+ GB_ASSERT(pointer_type->kind == Type_MultiPointer);
+ o->type = alloc_type_slice(pointer_type->MultiPointer.elem);
}
+
o->mode = Addressing_Value;
if (is_type_string(t) && max_count >= 0) {
@@ -9982,7 +10016,7 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast
Type *type = type_of_expr(ac->expr);
check_cast(c, o, type_hint);
if (is_type_typed(type) && are_types_identical(type, type_hint)) {
- if (build_context.vet_extra) {
+ if (check_vet_flags(node) & VetFlag_Extra) {
error(node, "Redundant 'auto_cast' applied to expression");
}
}
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index a15977b7d..fa5f8f428 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -384,7 +384,6 @@ gb_internal Type *check_assignment_variable(CheckerContext *ctx, Operand *lhs, O
}
if (e != nullptr) {
- // HACK TODO(bill): Should the entities be freed as it's technically a leak
rhs->mode = Addressing_Value;
rhs->type = e->type;
rhs->proc_group = nullptr;
@@ -394,7 +393,7 @@ gb_internal Type *check_assignment_variable(CheckerContext *ctx, Operand *lhs, O
ast_node(i, Ident, node);
e = scope_lookup(ctx->scope, i->token.string);
if (e != nullptr && e->kind == Entity_Variable) {
- used = (e->flags & EntityFlag_Used) != 0; // TODO(bill): Make backup just in case
+ used = (e->flags & EntityFlag_Used) != 0; // NOTE(bill): Make backup just in case
}
}
@@ -888,7 +887,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
check_open_scope(ctx, node);
defer (check_close_scope(ctx));
- check_label(ctx, ss->label, node); // TODO(bill): What should the label's "scope" be?
+ check_label(ctx, ss->label, node);
if (ss->init != nullptr) {
check_stmt(ctx, ss->init, 0);
@@ -1125,7 +1124,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
check_open_scope(ctx, node);
defer (check_close_scope(ctx));
- check_label(ctx, ss->label, node); // TODO(bill): What should the label's "scope" be?
+ check_label(ctx, ss->label, node);
if (ss->tag->kind != Ast_AssignStmt) {
error(ss->tag, "Expected an 'in' assignment for this type switch statement");
@@ -1960,7 +1959,7 @@ gb_internal void check_value_decl_stmt(CheckerContext *ctx, Ast *node, u32 mod_f
Token token = ast_token(node);
if (vd->type != nullptr && entity_count > 1) {
error(token, "'using' can only be applied to one variable of the same type");
- // TODO(bill): Should a 'continue' happen here?
+ // NOTE(bill): `using` will only be applied to a single declaration
}
for (isize entity_index = 0; entity_index < 1; entity_index++) {
@@ -2294,7 +2293,7 @@ gb_internal void check_for_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
mod_flags |= Stmt_BreakAllowed | Stmt_ContinueAllowed;
check_open_scope(ctx, node);
- check_label(ctx, fs->label, node); // TODO(bill): What should the label's "scope" be?
+ check_label(ctx, fs->label, node);
if (fs->init != nullptr) {
check_stmt(ctx, fs->init, 0);
@@ -2464,6 +2463,12 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
error(us->token, "Empty 'using' list");
return;
}
+ if (check_vet_flags(node) & VetFlag_UsingStmt) {
+ ERROR_BLOCK();
+ error(node, "'using' as a statement is not allowed when '-vet' or '-vet-using' is applied");
+ error_line("\t'using' is considered bad practice to use as a statement outside of immediate refactoring\n");
+ }
+
for (Ast *expr : us->list) {
expr = unparen_expr(expr);
Entity *e = nullptr;
diff --git a/src/check_type.cpp b/src/check_type.cpp
index a68f83ba9..cae3ba22e 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -1474,6 +1474,12 @@ gb_internal Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_para
Type *specialization = nullptr;
bool is_using = (p->flags&FieldFlag_using) != 0;
+ if ((check_vet_flags(param) & VetFlag_UsingParam) && is_using) {
+ ERROR_BLOCK();
+ error(param, "'using' on a procedure parameter is not allowed when '-vet' or '-vet-using-param' is applied");
+ error_line("\t'using' is considered bad practice to use as a statement/procedure parameter outside of immediate refactoring\n");
+
+ }
if (type_expr == nullptr) {
param_value = handle_parameter_value(ctx, nullptr, &type, default_value, true);
@@ -2772,16 +2778,16 @@ gb_internal bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, T
Type *relative_type = nullptr;
Type *base_type = check_type(ctx, rt->type);
- if (!is_type_pointer(base_type) && !is_type_slice(base_type)) {
- error(rt->type, "#relative types can only be a pointer or slice");
+ if (!is_type_pointer(base_type) && !is_type_multi_pointer(base_type)) {
+ error(rt->type, "#relative types can only be a pointer or multi-pointer");
relative_type = base_type;
} else if (base_integer == nullptr) {
relative_type = base_type;
} else {
if (is_type_pointer(base_type)) {
relative_type = alloc_type_relative_pointer(base_type, base_integer);
- } else if (is_type_slice(base_type)) {
- relative_type = alloc_type_relative_slice(base_type, base_integer);
+ } else if (is_type_multi_pointer(base_type)) {
+ relative_type = alloc_type_relative_multi_pointer(base_type, base_integer);
}
}
GB_ASSERT(relative_type != nullptr);
diff --git a/src/checker.cpp b/src/checker.cpp
index 396a04d7a..7fa7a9c36 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -521,6 +521,28 @@ GB_COMPARE_PROC(entity_variable_pos_cmp) {
}
+
+gb_internal u64 check_vet_flags(CheckerContext *c) {
+ AstFile *file = c->file;
+ if (file == nullptr &&
+ c->curr_proc_decl &&
+ c->curr_proc_decl->proc_lit) {
+ file = c->curr_proc_decl->proc_lit->file();
+ }
+ if (file && file->vet_flags_set) {
+ return file->vet_flags;
+ }
+ return build_context.vet_flags;
+}
+
+gb_internal u64 check_vet_flags(Ast *node) {
+ AstFile *file = node->file();
+ if (file && file->vet_flags_set) {
+ return file->vet_flags;
+ }
+ return build_context.vet_flags;
+}
+
enum VettedEntityKind {
VettedEntity_Invalid,
@@ -655,9 +677,9 @@ gb_internal bool check_vet_unused(Checker *c, Entity *e, VettedEntity *ve) {
return false;
}
-gb_internal void check_scope_usage(Checker *c, Scope *scope) {
- bool vet_unused = true;
- bool vet_shadowing = true;
+gb_internal void check_scope_usage(Checker *c, Scope *scope, u64 vet_flags) {
+ bool vet_unused = (vet_flags & VetFlag_Unused) != 0;
+ bool vet_shadowing = (vet_flags & (VetFlag_Shadowing|VetFlag_Using)) != 0;
Array<VettedEntity> vetted_entities = {};
array_init(&vetted_entities, heap_allocator());
@@ -691,15 +713,17 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
if (ve.kind == VettedEntity_Shadowed_And_Unused) {
error(e->token, "'%.*s' declared but not used, possibly shadows declaration at line %d", LIT(name), other->token.pos.line);
- } else if (build_context.vet) {
+ } else if (vet_flags) {
switch (ve.kind) {
case VettedEntity_Unused:
- error(e->token, "'%.*s' declared but not used", LIT(name));
+ if (vet_flags & VetFlag_Unused) {
+ error(e->token, "'%.*s' declared but not used", LIT(name));
+ }
break;
case VettedEntity_Shadowed:
- if (e->flags&EntityFlag_Using) {
+ if ((vet_flags & (VetFlag_Shadowing|VetFlag_Using)) != 0 && e->flags&EntityFlag_Using) {
error(e->token, "Declaration of '%.*s' from 'using' shadows declaration at line %d", LIT(name), other->token.pos.line);
- } else {
+ } else if ((vet_flags & (VetFlag_Shadowing)) != 0) {
error(e->token, "Declaration of '%.*s' shadows declaration at line %d", LIT(name), other->token.pos.line);
}
break;
@@ -726,7 +750,7 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
if (child->flags & (ScopeFlag_Proc|ScopeFlag_Type|ScopeFlag_File)) {
// Ignore these
} else {
- check_scope_usage(c, child);
+ check_scope_usage(c, child, vet_flags);
}
}
}
@@ -943,7 +967,6 @@ gb_internal void init_universal(void) {
add_global_bool_constant("true", true);
add_global_bool_constant("false", false);
- // TODO(bill): Set through flags in the compiler
add_global_string_constant("ODIN_VENDOR", bc->ODIN_VENDOR);
add_global_string_constant("ODIN_VERSION", bc->ODIN_VERSION);
add_global_string_constant("ODIN_ROOT", bc->ODIN_ROOT);
@@ -1046,6 +1069,7 @@ gb_internal void init_universal(void) {
add_global_bool_constant("ODIN_NO_RTTI", bc->no_rtti);
add_global_bool_constant("ODIN_VALGRIND_SUPPORT", bc->ODIN_VALGRIND_SUPPORT);
+ add_global_bool_constant("ODIN_TILDE", bc->tilde_backend);
add_global_constant("ODIN_COMPILE_TIMESTAMP", t_untyped_integer, exact_value_i64(odin_compile_timestamp()));
@@ -1114,6 +1138,7 @@ gb_internal void init_universal(void) {
t_u8_ptr = alloc_type_pointer(t_u8);
+ t_u8_multi_ptr = alloc_type_multi_pointer(t_u8);
t_int_ptr = alloc_type_pointer(t_int);
t_i64_ptr = alloc_type_pointer(t_i64);
t_f64_ptr = alloc_type_pointer(t_f64);
@@ -1453,7 +1478,6 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo
if (ctx->decl) {
mutex = &ctx->decl->type_and_value_mutex;
} else if (ctx->pkg) {
- // TODO(bill): is a per package mutex is a good idea here?
mutex = &ctx->pkg->type_and_value_mutex;
}
@@ -1581,30 +1605,28 @@ gb_internal void add_entity_use(CheckerContext *c, Ast *identifier, Entity *enti
if (entity == nullptr) {
return;
}
- if (identifier != nullptr) {
- if (identifier->kind != Ast_Ident) {
- return;
- }
- Ast *empty_ident = nullptr;
- entity->identifier.compare_exchange_strong(empty_ident, identifier);
-
- identifier->Ident.entity = entity;
-
- String dmsg = entity->deprecated_message;
- if (dmsg.len > 0) {
- warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg));
- }
- String wmsg = entity->warning_message;
- if (wmsg.len > 0) {
- warning(identifier, "%.*s: %.*s", LIT(entity->token.string), LIT(wmsg));
- }
- }
- entity->flags |= EntityFlag_Used;
add_declaration_dependency(c, entity);
+ entity->flags |= EntityFlag_Used;
if (entity_has_deferred_procedure(entity)) {
Entity *deferred = entity->Procedure.deferred_procedure.entity;
add_entity_use(c, nullptr, deferred);
}
+ if (identifier == nullptr || identifier->kind != Ast_Ident) {
+ return;
+ }
+ Ast *empty_ident = nullptr;
+ entity->identifier.compare_exchange_strong(empty_ident, identifier);
+
+ identifier->Ident.entity = entity;
+
+ String dmsg = entity->deprecated_message;
+ if (dmsg.len > 0) {
+ warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg));
+ }
+ String wmsg = entity->warning_message;
+ if (wmsg.len > 0) {
+ warning(identifier, "%.*s: %.*s", LIT(entity->token.string), LIT(wmsg));
+ }
}
@@ -1947,9 +1969,9 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) {
add_type_info_type_internal(c, bt->RelativePointer.base_integer);
break;
- case Type_RelativeSlice:
- add_type_info_type_internal(c, bt->RelativeSlice.slice_type);
- add_type_info_type_internal(c, bt->RelativeSlice.base_integer);
+ case Type_RelativeMultiPointer:
+ add_type_info_type_internal(c, bt->RelativeMultiPointer.pointer_type);
+ add_type_info_type_internal(c, bt->RelativeMultiPointer.base_integer);
break;
case Type_Matrix:
@@ -2188,9 +2210,9 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) {
add_min_dep_type_info(c, bt->RelativePointer.base_integer);
break;
- case Type_RelativeSlice:
- add_min_dep_type_info(c, bt->RelativeSlice.slice_type);
- add_min_dep_type_info(c, bt->RelativeSlice.base_integer);
+ case Type_RelativeMultiPointer:
+ add_min_dep_type_info(c, bt->RelativeMultiPointer.pointer_type);
+ add_min_dep_type_info(c, bt->RelativeMultiPointer.base_integer);
break;
case Type_Matrix:
@@ -2310,7 +2332,9 @@ gb_internal void generate_minimum_dependency_set(Checker *c, Entity *start) {
str_lit("memory_equal"),
str_lit("memory_compare"),
str_lit("memory_compare_zero"),
+ );
+ FORCE_ADD_RUNTIME_ENTITIES(!build_context.tilde_backend,
// Extended data type internal procedures
str_lit("umodti3"),
str_lit("udivti3"),
@@ -2556,9 +2580,6 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
}
}
- // TODO(bill): This could be multithreaded to improve performance
- // This means that the entity graph node set will have to be thread safe
-
TIME_SECTION("generate_entity_dependency_graph: Calculate edges for graph M - Part 2");
auto G = array_make<EntityGraphNode *>(allocator, 0, M.count);
@@ -2779,7 +2800,7 @@ gb_internal void init_core_type_info(Checker *c) {
t_type_info_bit_set = find_core_type(c, str_lit("Type_Info_Bit_Set"));
t_type_info_simd_vector = find_core_type(c, str_lit("Type_Info_Simd_Vector"));
t_type_info_relative_pointer = find_core_type(c, str_lit("Type_Info_Relative_Pointer"));
- t_type_info_relative_slice = find_core_type(c, str_lit("Type_Info_Relative_Slice"));
+ t_type_info_relative_multi_pointer = find_core_type(c, str_lit("Type_Info_Relative_Multi_Pointer"));
t_type_info_matrix = find_core_type(c, str_lit("Type_Info_Matrix"));
t_type_info_soa_pointer = find_core_type(c, str_lit("Type_Info_Soa_Pointer"));
@@ -2808,7 +2829,7 @@ gb_internal void init_core_type_info(Checker *c) {
t_type_info_bit_set_ptr = alloc_type_pointer(t_type_info_bit_set);
t_type_info_simd_vector_ptr = alloc_type_pointer(t_type_info_simd_vector);
t_type_info_relative_pointer_ptr = alloc_type_pointer(t_type_info_relative_pointer);
- t_type_info_relative_slice_ptr = alloc_type_pointer(t_type_info_relative_slice);
+ t_type_info_relative_multi_pointer_ptr = alloc_type_pointer(t_type_info_relative_multi_pointer);
t_type_info_matrix_ptr = alloc_type_pointer(t_type_info_matrix);
t_type_info_soa_pointer_ptr = alloc_type_pointer(t_type_info_soa_pointer);
}
@@ -2935,6 +2956,60 @@ gb_internal DECL_ATTRIBUTE_PROC(foreign_block_decl_attribute) {
return false;
}
+gb_internal DECL_ATTRIBUTE_PROC(proc_group_attribute) {
+ if (name == ATTRIBUTE_USER_TAG_NAME) {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind != ExactValue_String) {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
+ } else if (name == "objc_name") {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind == ExactValue_String) {
+ if (string_is_valid_identifier(ev.value_string)) {
+ ac->objc_name = ev.value_string;
+ } else {
+ error(elem, "Invalid identifier for '%.*s', got '%.*s'", LIT(name), LIT(ev.value_string));
+ }
+ } else {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
+ } else if (name == "objc_is_class_method") {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind == ExactValue_Bool) {
+ ac->objc_is_class_method = ev.value_bool;
+ } else {
+ error(elem, "Expected a boolean value for '%.*s'", LIT(name));
+ }
+ return true;
+ } else if (name == "objc_type") {
+ if (value == nullptr) {
+ error(elem, "Expected a type for '%.*s'", LIT(name));
+ } else {
+ Type *objc_type = check_type(c, value);
+ if (objc_type != nullptr) {
+ if (!has_type_got_objc_class_attribute(objc_type)) {
+ gbString t = type_to_string(objc_type);
+ error(value, "'%.*s' expected a named type with the attribute @(objc_class=<string>), got type %s", LIT(name), t);
+ gb_string_free(t);
+ } else {
+ ac->objc_type = objc_type;
+ }
+ }
+ }
+ return true;
+ } else if (name == "require_results") {
+ if (value != nullptr) {
+ error(elem, "Expected no value for '%.*s'", LIT(name));
+ }
+ ac->require_results = true;
+ return true;
+ }
+ return false;
+}
+
+
gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
if (name == ATTRIBUTE_USER_TAG_NAME) {
ExactValue ev = check_decl_attribute_value(c, value);
@@ -3007,7 +3082,7 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
check_expr(c, &o, value);
Entity *e = entity_of_node(o.expr);
if (e != nullptr && e->kind == Entity_Procedure) {
- warning(elem, "'%.*s' is deprecated, please use one of the following instead: 'deferred_none', 'deferred_in', 'deferred_out'", LIT(name));
+ error(elem, "'%.*s' is not allowed any more, please use one of the following instead: 'deferred_none', 'deferred_in', 'deferred_out'", LIT(name));
if (ac->deferred_procedure.entity != nullptr) {
error(elem, "Previous usage of a 'deferred_*' attribute");
}
@@ -4506,7 +4581,7 @@ gb_internal DECL_ATTRIBUTE_PROC(foreign_import_decl_attribute) {
if (value != nullptr) {
error(elem, "Expected no parameter for '%.*s'", LIT(name));
} else if (name == "force") {
- warning(elem, "'force' is deprecated and is identical to 'require'");
+ error(elem, "'force' was replaced with 'require'");
}
ac->require_declaration = true;
return true;
@@ -5904,7 +5979,11 @@ gb_internal void check_parsed_files(Checker *c) {
TIME_SECTION("check scope usage");
for (auto const &entry : c->info.files) {
AstFile *f = entry.value;
- check_scope_usage(c, f->scope);
+ u64 vet_flags = build_context.vet_flags;
+ if (f->vet_flags_set) {
+ vet_flags = f->vet_flags;
+ }
+ check_scope_usage(c, f->scope, vet_flags);
}
TIME_SECTION("add basic type information");
@@ -6022,7 +6101,7 @@ gb_internal void check_parsed_files(Checker *c) {
while (mpsc_dequeue(&c->info.intrinsics_entry_point_usage, &node)) {
if (c->info.entry_point == nullptr && node != nullptr) {
if (node->file()->pkg->kind != Package_Runtime) {
- warning(node, "usage of intrinsics.__entry_point will be a no-op");
+ error(node, "usage of intrinsics.__entry_point will be a no-op");
}
}
}
diff --git a/src/checker.hpp b/src/checker.hpp
index b06d0a8f9..bf956393c 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -387,8 +387,6 @@ struct CheckerInfo {
BlockingMutex foreign_mutex; // NOT recursive
StringMap<Entity *> foreigns;
- // NOTE(bill): These are actually MPSC queues
- // TODO(bill): Convert them to be MPSC queues
MPSCQueue<Entity *> definition_queue;
MPSCQueue<Entity *> entity_queue;
MPSCQueue<Entity *> required_global_variable_queue;
@@ -449,6 +447,9 @@ struct CheckerContext {
Ast *assignment_lhs_hint;
};
+gb_internal u64 check_vet_flags(CheckerContext *c);
+gb_internal u64 check_vet_flags(Ast *node);
+
struct Checker {
Parser * parser;
diff --git a/src/docs_format.cpp b/src/docs_format.cpp
index 34114f08e..d0bca214b 100644
--- a/src/docs_format.cpp
+++ b/src/docs_format.cpp
@@ -59,31 +59,31 @@ struct OdinDocPosition {
};
enum OdinDocTypeKind : u32 {
- OdinDocType_Invalid = 0,
- OdinDocType_Basic = 1,
- OdinDocType_Named = 2,
- OdinDocType_Generic = 3,
- OdinDocType_Pointer = 4,
- OdinDocType_Array = 5,
- OdinDocType_EnumeratedArray = 6,
- OdinDocType_Slice = 7,
- OdinDocType_DynamicArray = 8,
- OdinDocType_Map = 9,
- OdinDocType_Struct = 10,
- OdinDocType_Union = 11,
- OdinDocType_Enum = 12,
- OdinDocType_Tuple = 13,
- OdinDocType_Proc = 14,
- OdinDocType_BitSet = 15,
- OdinDocType_SimdVector = 16,
- OdinDocType_SOAStructFixed = 17,
- OdinDocType_SOAStructSlice = 18,
- OdinDocType_SOAStructDynamic = 19,
- OdinDocType_RelativePointer = 20,
- OdinDocType_RelativeSlice = 21,
- OdinDocType_MultiPointer = 22,
- OdinDocType_Matrix = 23,
- OdinDocType_SoaPointer = 24,
+ OdinDocType_Invalid = 0,
+ OdinDocType_Basic = 1,
+ OdinDocType_Named = 2,
+ OdinDocType_Generic = 3,
+ OdinDocType_Pointer = 4,
+ OdinDocType_Array = 5,
+ OdinDocType_EnumeratedArray = 6,
+ OdinDocType_Slice = 7,
+ OdinDocType_DynamicArray = 8,
+ OdinDocType_Map = 9,
+ OdinDocType_Struct = 10,
+ OdinDocType_Union = 11,
+ OdinDocType_Enum = 12,
+ OdinDocType_Tuple = 13,
+ OdinDocType_Proc = 14,
+ OdinDocType_BitSet = 15,
+ OdinDocType_SimdVector = 16,
+ OdinDocType_SOAStructFixed = 17,
+ OdinDocType_SOAStructSlice = 18,
+ OdinDocType_SOAStructDynamic = 19,
+ OdinDocType_RelativePointer = 20,
+ OdinDocType_RelativeMultiPointer = 21,
+ OdinDocType_MultiPointer = 22,
+ OdinDocType_Matrix = 23,
+ OdinDocType_SoaPointer = 24,
};
enum OdinDocTypeFlag_Basic : u32 {
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index 2dd2f338b..6b42d2e7a 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -771,12 +771,12 @@ gb_internal OdinDocTypeIndex odin_doc_type(OdinDocWriter *w, Type *type) {
doc_type.types = odin_write_slice(w, types, gb_count_of(types));
}
break;
- case Type_RelativeSlice:
- doc_type.kind = OdinDocType_RelativeSlice;
+ case Type_RelativeMultiPointer:
+ doc_type.kind = OdinDocType_RelativeMultiPointer;
{
OdinDocTypeIndex types[2] = {};
- types[0] = odin_doc_type(w, type->RelativeSlice.slice_type);
- types[1] = odin_doc_type(w, type->RelativeSlice.base_integer);
+ types[0] = odin_doc_type(w, type->RelativeMultiPointer.pointer_type);
+ types[1] = odin_doc_type(w, type->RelativeMultiPointer.base_integer);
doc_type.types = odin_write_slice(w, types, gb_count_of(types));
}
break;
diff --git a/src/entity.cpp b/src/entity.cpp
index 649dd900d..ce27da3f2 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -2,8 +2,6 @@ struct Scope;
struct Checker;
struct Type;
struct DeclInfo;
-struct lbModule;
-struct lbProcedure;
#define ENTITY_KINDS \
@@ -183,8 +181,14 @@ struct Entity {
Entity * aliased_of;
- lbModule * code_gen_module;
- lbProcedure *code_gen_procedure;
+ union {
+ struct lbModule *code_gen_module;
+ struct cgModule *cg_module;
+ };
+ union {
+ struct lbProcedure *code_gen_procedure;
+ struct cgProcedure *cg_procedure;
+ };
u64 order_in_src;
String deprecated_message;
@@ -287,7 +291,6 @@ gb_internal bool is_entity_kind_exported(EntityKind kind, bool allow_builtin = f
}
gb_internal bool is_entity_exported(Entity *e, bool allow_builtin = false) {
- // TODO(bill): Determine the actual exportation rules for imports of entities
GB_ASSERT(e != nullptr);
if (!is_entity_kind_exported(e->kind, allow_builtin)) {
return false;
@@ -401,7 +404,7 @@ gb_internal Entity *alloc_entity_array_elem(Scope *scope, Token token, Type *typ
return entity;
}
-gb_internal Entity *alloc_entity_procedure(Scope *scope, Token token, Type *signature_type, u64 tags) {
+gb_internal Entity *alloc_entity_procedure(Scope *scope, Token token, Type *signature_type, u64 tags=0) {
Entity *entity = alloc_entity(Entity_Procedure, scope, token, signature_type);
entity->Procedure.tags = tags;
return entity;
@@ -418,7 +421,7 @@ gb_internal Entity *alloc_entity_import_name(Scope *scope, Token token, Type *ty
entity->ImportName.path = path;
entity->ImportName.name = name;
entity->ImportName.scope = import_scope;
- entity->state = EntityState_Resolved; // TODO(bill): Is this correct?
+ entity->state = EntityState_Resolved;
return entity;
}
@@ -427,7 +430,7 @@ gb_internal Entity *alloc_entity_library_name(Scope *scope, Token token, Type *t
Entity *entity = alloc_entity(Entity_LibraryName, scope, token, type);
entity->LibraryName.paths = paths;
entity->LibraryName.name = name;
- entity->state = EntityState_Resolved; // TODO(bill): Is this correct?
+ entity->state = EntityState_Resolved;
return entity;
}
diff --git a/src/error.cpp b/src/error.cpp
index eb010eb36..6a039006b 100644
--- a/src/error.cpp
+++ b/src/error.cpp
@@ -411,7 +411,7 @@ gb_internal void error_line_va(char const *fmt, va_list va) {
gb_internal void error_no_newline_va(TokenPos const &pos, char const *fmt, va_list va) {
mutex_lock(&global_error_collector.mutex);
- global_error_collector.count++;
+ global_error_collector.count.fetch_add(1);
// NOTE(bill): Duplicate error, skip it
if (pos.line == 0) {
error_out_coloured("Error: ", TerminalStyle_Normal, TerminalColour_Red);
@@ -425,7 +425,7 @@ gb_internal void error_no_newline_va(TokenPos const &pos, char const *fmt, va_li
error_out_va(fmt, va);
}
mutex_unlock(&global_error_collector.mutex);
- if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT()) {
+ if (global_error_collector.count.load() > MAX_ERROR_COLLECTOR_COUNT()) {
gb_exit(1);
}
}
diff --git a/src/exact_value.cpp b/src/exact_value.cpp
index ff940aabb..cd499272f 100644
--- a/src/exact_value.cpp
+++ b/src/exact_value.cpp
@@ -26,8 +26,8 @@ enum ExactValueKind {
ExactValue_Complex = 5,
ExactValue_Quaternion = 6,
ExactValue_Pointer = 7,
- ExactValue_Compound = 8, // TODO(bill): Is this good enough?
- ExactValue_Procedure = 9, // TODO(bill): Is this good enough?
+ ExactValue_Compound = 8,
+ ExactValue_Procedure = 9,
ExactValue_Typeid = 10,
ExactValue_Count,
@@ -101,7 +101,6 @@ gb_internal ExactValue exact_value_bool(bool b) {
}
gb_internal ExactValue exact_value_string(String string) {
- // TODO(bill): Allow for numbers with underscores in them
ExactValue result = {ExactValue_String};
result.value_string = string;
return result;
@@ -702,7 +701,6 @@ gb_internal void match_exact_values(ExactValue *x, ExactValue *y) {
compiler_error("match_exact_values: How'd you get here? Invalid ExactValueKind %d", x->kind);
}
-// TODO(bill): Allow for pointer arithmetic? Or are pointer slices good enough?
gb_internal ExactValue exact_binary_operator_value(TokenKind op, ExactValue x, ExactValue y) {
match_exact_values(&x, &y);
@@ -943,7 +941,6 @@ gb_internal bool compare_exact_values(TokenKind op, ExactValue x, ExactValue y)
case ExactValue_String: {
String a = x.value_string;
String b = y.value_string;
- // TODO(bill): gb_memcompare is used because the strings are UTF-8
switch (op) {
case Token_CmpEq: return a == b;
case Token_NotEq: return a != b;
diff --git a/src/linker.cpp b/src/linker.cpp
new file mode 100644
index 000000000..c0fbf596f
--- /dev/null
+++ b/src/linker.cpp
@@ -0,0 +1,460 @@
+struct LinkerData {
+ BlockingMutex foreign_mutex;
+ PtrSet<Entity *> foreign_libraries_set;
+ Array<Entity *> foreign_libraries;
+
+ Array<String> output_object_paths;
+ Array<String> output_temp_paths;
+ String output_base;
+ String output_name;
+};
+
+gb_internal i32 system_exec_command_line_app(char const *name, char const *fmt, ...);
+
+gb_internal void linker_data_init(LinkerData *ld, CheckerInfo *info, String const &init_fullpath) {
+ gbAllocator ha = heap_allocator();
+ array_init(&ld->output_object_paths, ha);
+ array_init(&ld->output_temp_paths, ha);
+ array_init(&ld->foreign_libraries, ha, 0, 1024);
+ ptr_set_init(&ld->foreign_libraries_set, 1024);
+
+ if (build_context.out_filepath.len == 0) {
+ ld->output_name = remove_directory_from_path(init_fullpath);
+ ld->output_name = remove_extension_from_path(ld->output_name);
+ ld->output_name = string_trim_whitespace(ld->output_name);
+ if (ld->output_name.len == 0) {
+ ld->output_name = info->init_scope->pkg->name;
+ }
+ ld->output_base = ld->output_name;
+ } else {
+ ld->output_name = build_context.out_filepath;
+ ld->output_name = string_trim_whitespace(ld->output_name);
+ if (ld->output_name.len == 0) {
+ ld->output_name = info->init_scope->pkg->name;
+ }
+ isize pos = string_extension_position(ld->output_name);
+ if (pos < 0) {
+ ld->output_base = ld->output_name;
+ } else {
+ ld->output_base = substring(ld->output_name, 0, pos);
+ }
+ }
+
+ ld->output_base = path_to_full_path(ha, ld->output_base);
+
+}
+
+gb_internal i32 linker_stage(LinkerData *gen) {
+ i32 result = 0;
+ Timings *timings = &global_timings;
+
+ String output_filename = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_Output]);
+ debugf("Linking %.*s\n", LIT(output_filename));
+
+	// TODO(Jeroen): Make a `build_paths[BuildPath_Object]` to avoid `%.*s.o`.
+
+ if (is_arch_wasm()) {
+ timings_start_section(timings, str_lit("wasm-ld"));
+
+ #if defined(GB_SYSTEM_WINDOWS)
+ result = system_exec_command_line_app("wasm-ld",
+ "\"%.*s\\bin\\wasm-ld\" \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
+ LIT(build_context.ODIN_ROOT),
+ LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
+ #else
+ result = system_exec_command_line_app("wasm-ld",
+ "wasm-ld \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
+ LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
+ #endif
+ return result;
+ }
+
+ if (build_context.cross_compiling && selected_target_metrics->metrics == &target_essence_amd64) {
+#if defined(GB_SYSTEM_UNIX)
+ result = system_exec_command_line_app("linker", "x86_64-essence-gcc \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
+ LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
+#else
+ gb_printf_err("Linking for cross compilation for this platform is not yet supported (%.*s %.*s)\n",
+ LIT(target_os_names[build_context.metrics.os]),
+ LIT(target_arch_names[build_context.metrics.arch])
+ );
+#endif
+ } else if (build_context.cross_compiling && build_context.different_os) {
+ gb_printf_err("Linking for cross compilation for this platform is not yet supported (%.*s %.*s)\n",
+ LIT(target_os_names[build_context.metrics.os]),
+ LIT(target_arch_names[build_context.metrics.arch])
+ );
+ build_context.keep_object_files = true;
+ } else {
+ #if defined(GB_SYSTEM_WINDOWS)
+ bool is_windows = true;
+ #else
+ bool is_windows = false;
+ #endif
+ #if defined(GB_SYSTEM_OSX)
+ bool is_osx = true;
+ #else
+ bool is_osx = false;
+ #endif
+
+
+ if (is_windows) {
+ String section_name = str_lit("msvc-link");
+ if (build_context.use_lld) {
+ section_name = str_lit("lld-link");
+ }
+ timings_start_section(timings, section_name);
+
+ gbString lib_str = gb_string_make(heap_allocator(), "");
+ defer (gb_string_free(lib_str));
+
+ gbString link_settings = gb_string_make_reserve(heap_allocator(), 256);
+ defer (gb_string_free(link_settings));
+
+ // Add library search paths.
+ if (build_context.build_paths[BuildPath_VS_LIB].basename.len > 0) {
+ String path = {};
+ auto add_path = [&](String path) {
+ if (path[path.len-1] == '\\') {
+ path.len -= 1;
+ }
+ link_settings = gb_string_append_fmt(link_settings, " /LIBPATH:\"%.*s\"", LIT(path));
+ };
+ add_path(build_context.build_paths[BuildPath_Win_SDK_UM_Lib].basename);
+ add_path(build_context.build_paths[BuildPath_Win_SDK_UCRT_Lib].basename);
+ add_path(build_context.build_paths[BuildPath_VS_LIB].basename);
+ }
+
+
+ StringSet libs = {};
+ string_set_init(&libs, 64);
+ defer (string_set_destroy(&libs));
+
+ StringSet asm_files = {};
+ string_set_init(&asm_files, 64);
+ defer (string_set_destroy(&asm_files));
+
+ for (Entity *e : gen->foreign_libraries) {
+ GB_ASSERT(e->kind == Entity_LibraryName);
+ for_array(i, e->LibraryName.paths) {
+ String lib = string_trim_whitespace(e->LibraryName.paths[i]);
+ // IMPORTANT NOTE(bill): calling `string_to_lower` here is not an issue because
+					// we will never use these strings afterwards
+ string_to_lower(&lib);
+ if (lib.len == 0) {
+ continue;
+ }
+
+ if (has_asm_extension(lib)) {
+ if (!string_set_update(&asm_files, lib)) {
+ String asm_file = asm_files.entries[i].value;
+ String obj_file = concatenate_strings(permanent_allocator(), asm_file, str_lit(".obj"));
+
+ result = system_exec_command_line_app("nasm",
+ "\"%.*s\\bin\\nasm\\windows\\nasm.exe\" \"%.*s\" "
+ "-f win64 "
+ "-o \"%.*s\" "
+ "%.*s "
+ "",
+ LIT(build_context.ODIN_ROOT), LIT(asm_file),
+ LIT(obj_file),
+ LIT(build_context.extra_assembler_flags)
+ );
+
+ if (result) {
+ return result;
+ }
+ array_add(&gen->output_object_paths, obj_file);
+ }
+ } else {
+ if (!string_set_update(&libs, lib)) {
+ lib_str = gb_string_append_fmt(lib_str, " \"%.*s\"", LIT(lib));
+ }
+ }
+ }
+ }
+
+ for (Entity *e : gen->foreign_libraries) {
+ GB_ASSERT(e->kind == Entity_LibraryName);
+ if (e->LibraryName.extra_linker_flags.len != 0) {
+ lib_str = gb_string_append_fmt(lib_str, " %.*s", LIT(e->LibraryName.extra_linker_flags));
+ }
+ }
+
+ if (build_context.build_mode == BuildMode_DynamicLibrary) {
+ link_settings = gb_string_append_fmt(link_settings, " /DLL");
+ } else {
+ link_settings = gb_string_append_fmt(link_settings, " /ENTRY:mainCRTStartup");
+ }
+
+ if (build_context.pdb_filepath != "") {
+ String pdb_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_PDB]);
+ link_settings = gb_string_append_fmt(link_settings, " /PDB:%.*s", LIT(pdb_path));
+ }
+
+ if (build_context.no_crt) {
+ link_settings = gb_string_append_fmt(link_settings, " /nodefaultlib");
+ } else {
+ link_settings = gb_string_append_fmt(link_settings, " /defaultlib:libcmt");
+ }
+
+ if (build_context.ODIN_DEBUG) {
+ link_settings = gb_string_append_fmt(link_settings, " /DEBUG");
+ }
+
+ gbString object_files = gb_string_make(heap_allocator(), "");
+ defer (gb_string_free(object_files));
+ for (String const &object_path : gen->output_object_paths) {
+ object_files = gb_string_append_fmt(object_files, "\"%.*s\" ", LIT(object_path));
+ }
+
+ String vs_exe_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_VS_EXE]);
+ defer (gb_free(heap_allocator(), vs_exe_path.text));
+
+ String windows_sdk_bin_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_Win_SDK_Bin_Path]);
+ defer (gb_free(heap_allocator(), windows_sdk_bin_path.text));
+
+ char const *subsystem_str = build_context.use_subsystem_windows ? "WINDOWS" : "CONSOLE";
+ if (!build_context.use_lld) { // msvc
+ String res_path = {};
+ defer (gb_free(heap_allocator(), res_path.text));
+ if (build_context.has_resource) {
+ String temp_res_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_RES]);
+ res_path = concatenate3_strings(heap_allocator(), str_lit("\""), temp_res_path, str_lit("\""));
+ gb_free(heap_allocator(), temp_res_path.text);
+
+ String rc_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_RC]);
+ defer (gb_free(heap_allocator(), rc_path.text));
+
+ result = system_exec_command_line_app("msvc-link",
+ "\"%.*src.exe\" /nologo /fo \"%.*s\" \"%.*s\"",
+ LIT(windows_sdk_bin_path),
+ LIT(res_path),
+ LIT(rc_path)
+ );
+
+ if (result) {
+ return result;
+ }
+ }
+
+ switch (build_context.build_mode) {
+ case BuildMode_Executable:
+ link_settings = gb_string_append_fmt(link_settings, " /NOIMPLIB /NOEXP");
+ break;
+ }
+
+ result = system_exec_command_line_app("msvc-link",
+ "\"%.*slink.exe\" %s %.*s -OUT:\"%.*s\" %s "
+ "/nologo /incremental:no /opt:ref /subsystem:%s "
+ "%.*s "
+ "%.*s "
+ "%s "
+ "",
+ LIT(vs_exe_path), object_files, LIT(res_path), LIT(output_filename),
+ link_settings,
+ subsystem_str,
+ LIT(build_context.link_flags),
+ LIT(build_context.extra_linker_flags),
+ lib_str
+ );
+ if (result) {
+ return result;
+ }
+ } else { // lld
+ result = system_exec_command_line_app("msvc-lld-link",
+ "\"%.*s\\bin\\lld-link\" %s -OUT:\"%.*s\" %s "
+ "/nologo /incremental:no /opt:ref /subsystem:%s "
+ "%.*s "
+ "%.*s "
+ "%s "
+ "",
+ LIT(build_context.ODIN_ROOT), object_files, LIT(output_filename),
+ link_settings,
+ subsystem_str,
+ LIT(build_context.link_flags),
+ LIT(build_context.extra_linker_flags),
+ lib_str
+ );
+
+ if (result) {
+ return result;
+ }
+ }
+ } else {
+ timings_start_section(timings, str_lit("ld-link"));
+
+		// NOTE(vassvik): get cwd, used for local shared libs linking, since those have to be relative to the exe
+ char cwd[256];
+ #if !defined(GB_SYSTEM_WINDOWS)
+ getcwd(&cwd[0], 256);
+ #endif
+ //printf("%s\n", cwd);
+
+ // NOTE(vassvik): needs to add the root to the library search paths, so that the full filenames of the library
+ // files can be passed with -l:
+ gbString lib_str = gb_string_make(heap_allocator(), "-L/");
+ defer (gb_string_free(lib_str));
+
+ StringSet libs = {};
+ string_set_init(&libs, 64);
+ defer (string_set_destroy(&libs));
+
+ for (Entity *e : gen->foreign_libraries) {
+ GB_ASSERT(e->kind == Entity_LibraryName);
+ for (String lib : e->LibraryName.paths) {
+ lib = string_trim_whitespace(lib);
+ if (lib.len == 0) {
+ continue;
+ }
+ if (string_set_update(&libs, lib)) {
+ continue;
+ }
+
+ // NOTE(zangent): Sometimes, you have to use -framework on MacOS.
+ // This allows you to specify '-f' in a #foreign_system_library,
+ // without having to implement any new syntax specifically for MacOS.
+ if (build_context.metrics.os == TargetOs_darwin) {
+ if (string_ends_with(lib, str_lit(".framework"))) {
+ // framework thingie
+ String lib_name = lib;
+ lib_name = remove_extension_from_path(lib_name);
+ lib_str = gb_string_append_fmt(lib_str, " -framework %.*s ", LIT(lib_name));
+ } else if (string_ends_with(lib, str_lit(".a")) || string_ends_with(lib, str_lit(".o")) || string_ends_with(lib, str_lit(".dylib"))) {
+ // For:
+ // object
+ // dynamic lib
+ // static libs, absolute full path relative to the file in which the lib was imported from
+ lib_str = gb_string_append_fmt(lib_str, " %.*s ", LIT(lib));
+ } else {
+ // dynamic or static system lib, just link regularly searching system library paths
+ lib_str = gb_string_append_fmt(lib_str, " -l%.*s ", LIT(lib));
+ }
+ } else {
+ // NOTE(vassvik): static libraries (.a files) in linux can be linked to directly using the full path,
+ // since those are statically linked to at link time. shared libraries (.so) has to be
+ // available at runtime wherever the executable is run, so we make require those to be
+ // local to the executable (unless the system collection is used, in which case we search
+ // the system library paths for the library file).
+ if (string_ends_with(lib, str_lit(".a")) || string_ends_with(lib, str_lit(".o"))) {
+ // static libs and object files, absolute full path relative to the file in which the lib was imported from
+ lib_str = gb_string_append_fmt(lib_str, " -l:\"%.*s\" ", LIT(lib));
+ } else if (string_ends_with(lib, str_lit(".so"))) {
+ // dynamic lib, relative path to executable
+ // NOTE(vassvik): it is the user's responsibility to make sure the shared library files are visible
+ // at runtime to the executable
+ lib_str = gb_string_append_fmt(lib_str, " -l:\"%s/%.*s\" ", cwd, LIT(lib));
+ } else {
+ // dynamic or static system lib, just link regularly searching system library paths
+ lib_str = gb_string_append_fmt(lib_str, " -l%.*s ", LIT(lib));
+ }
+ }
+ }
+ }
+
+ for (Entity *e : gen->foreign_libraries) {
+ GB_ASSERT(e->kind == Entity_LibraryName);
+ if (e->LibraryName.extra_linker_flags.len != 0) {
+ lib_str = gb_string_append_fmt(lib_str, " %.*s", LIT(e->LibraryName.extra_linker_flags));
+ }
+ }
+
+ gbString object_files = gb_string_make(heap_allocator(), "");
+ defer (gb_string_free(object_files));
+ for (String object_path : gen->output_object_paths) {
+ object_files = gb_string_append_fmt(object_files, "\"%.*s\" ", LIT(object_path));
+ }
+
+ gbString link_settings = gb_string_make_reserve(heap_allocator(), 32);
+
+ if (build_context.no_crt) {
+ link_settings = gb_string_append_fmt(link_settings, "-nostdlib ");
+ }
+
+ // NOTE(dweiler): We use clang as a frontend for the linker as there are
+ // other runtime and compiler support libraries that need to be linked in
+ // very specific orders such as libgcc_s, ld-linux-so, unwind, etc.
+ // These are not always typically inside /lib, /lib64, or /usr versions
+ // of that, e.g libgcc.a is in /usr/lib/gcc/{version}, and can vary on
+ // the distribution of Linux even. The gcc or clang specs is the only
+ // reliable way to query this information to call ld directly.
+ if (build_context.build_mode == BuildMode_DynamicLibrary) {
+ // NOTE(dweiler): Let the frontend know we're building a shared library
+ // so it doesn't generate symbols which cannot be relocated.
+ link_settings = gb_string_appendc(link_settings, "-shared ");
+
+ // NOTE(dweiler): _odin_entry_point must be called at initialization
+ // time of the shared object, similarly, _odin_exit_point must be called
+ // at deinitialization. We can pass both -init and -fini to the linker by
+ // using a comma separated list of arguments to -Wl.
+ //
+ // This previously used ld but ld cannot actually build a shared library
+ // correctly this way since all the other dependencies provided implicitly
+ // by the compiler frontend are still needed and most of the command
+ // line arguments prepared previously are incompatible with ld.
+ if (build_context.metrics.os == TargetOs_darwin) {
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__odin_entry_point' ");
+ // NOTE(weshardee): __odin_exit_point should also be added, but -fini
+ // does not exist on MacOS
+ } else {
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'_odin_entry_point' ");
+ link_settings = gb_string_appendc(link_settings, "-Wl,-fini,'_odin_exit_point' ");
+ }
+
+ } else if (build_context.metrics.os != TargetOs_openbsd) {
+ // OpenBSD defaults to PIE executable. do not pass -no-pie for it.
+ link_settings = gb_string_appendc(link_settings, "-no-pie ");
+ }
+
+ gbString platform_lib_str = gb_string_make(heap_allocator(), "");
+ defer (gb_string_free(platform_lib_str));
+ if (build_context.metrics.os == TargetOs_darwin) {
+ platform_lib_str = gb_string_appendc(platform_lib_str, "-lSystem -lm -Wl,-syslibroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk -L/usr/local/lib");
+ } else {
+ platform_lib_str = gb_string_appendc(platform_lib_str, "-lc -lm");
+ }
+
+ if (build_context.metrics.os == TargetOs_darwin) {
+ // This sets a requirement of Mountain Lion and up, but the compiler doesn't work without this limit.
+ if (build_context.minimum_os_version_string.len) {
+ link_settings = gb_string_append_fmt(link_settings, " -mmacosx-version-min=%.*s ", LIT(build_context.minimum_os_version_string));
+ } else if (build_context.metrics.arch == TargetArch_arm64) {
+ link_settings = gb_string_appendc(link_settings, " -mmacosx-version-min=12.0.0 ");
+ } else {
+ link_settings = gb_string_appendc(link_settings, " -mmacosx-version-min=10.12.0 ");
+ }
+ // This points the linker to where the entry point is
+ link_settings = gb_string_appendc(link_settings, " -e _main ");
+ }
+
+ gbString link_command_line = gb_string_make(heap_allocator(), "clang -Wno-unused-command-line-argument ");
+ defer (gb_string_free(link_command_line));
+
+ link_command_line = gb_string_appendc(link_command_line, object_files);
+ link_command_line = gb_string_append_fmt(link_command_line, " -o \"%.*s\" ", LIT(output_filename));
+ link_command_line = gb_string_append_fmt(link_command_line, " %s ", platform_lib_str);
+ link_command_line = gb_string_append_fmt(link_command_line, " %s ", lib_str);
+ link_command_line = gb_string_append_fmt(link_command_line, " %.*s ", LIT(build_context.link_flags));
+ link_command_line = gb_string_append_fmt(link_command_line, " %.*s ", LIT(build_context.extra_linker_flags));
+ link_command_line = gb_string_append_fmt(link_command_line, " %s ", link_settings);
+
+ result = system_exec_command_line_app("ld-link", link_command_line);
+
+ if (result) {
+ return result;
+ }
+
+ if (is_osx && build_context.ODIN_DEBUG) {
+ // NOTE: macOS links DWARF symbols dynamically. Dsymutil will map the stubs in the exe
+ // to the symbols in the object file
+ result = system_exec_command_line_app("dsymutil", "dsymutil %.*s", LIT(output_filename));
+
+ if (result) {
+ return result;
+ }
+ }
+ }
+ }
+
+ return result;
+}
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index ce01485ff..3178ac9db 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -69,7 +69,6 @@ enum lbAddrKind {
lbAddr_SoaVariable,
lbAddr_RelativePointer,
- lbAddr_RelativeSlice,
lbAddr_Swizzle,
lbAddr_SwizzleLarge,
@@ -190,13 +189,9 @@ struct lbModule {
LLVMPassManagerRef function_pass_managers[lbFunctionPassManager_COUNT];
};
-struct lbGenerator {
+struct lbGenerator : LinkerData {
CheckerInfo *info;
- Array<String> output_object_paths;
- Array<String> output_temp_paths;
- String output_base;
- String output_name;
PtrMap<void *, lbModule *> modules; // key is `AstPackage *` (`void *` is used for future use)
PtrMap<LLVMContextRef, lbModule *> modules_through_ctx;
lbModule default_module;
@@ -204,10 +199,6 @@ struct lbGenerator {
RecursiveMutex anonymous_proc_lits_mutex;
PtrMap<Ast *, lbProcedure *> anonymous_proc_lits;
- BlockingMutex foreign_mutex;
- PtrSet<Entity *> foreign_libraries_set;
- Array<Entity *> foreign_libraries;
-
std::atomic<u32> global_array_index;
std::atomic<u32> global_generated_index;
@@ -346,7 +337,9 @@ struct lbProcedure {
};
+#ifndef ABI_PKG_NAME_SEPARATOR
#define ABI_PKG_NAME_SEPARATOR "."
+#endif
#if !ODIN_LLVM_MINIMUM_VERSION_14
diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp
index 2a121ff5d..5c390a370 100644
--- a/src/llvm_backend_const.cpp
+++ b/src/llvm_backend_const.cpp
@@ -567,7 +567,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo
}
} else if (is_type_array(type) && value.kind == ExactValue_String && !is_type_u8(core_array_type(type))) {
- if (is_type_rune_array(type) && value.kind == ExactValue_String) {
+ if (is_type_rune_array(type)) {
i64 count = type->Array.count;
Type *elem = type->Array.elem;
LLVMTypeRef et = lb_type(m, elem);
@@ -1036,86 +1036,84 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo
LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, value_count);
bool *visited = gb_alloc_array(temporary_allocator(), bool, value_count);
- if (cl->elems.count > 0) {
- if (cl->elems[0]->kind == Ast_FieldValue) {
- isize elem_count = cl->elems.count;
- for (isize i = 0; i < elem_count; i++) {
- ast_node(fv, FieldValue, cl->elems[i]);
- String name = fv->field->Ident.token.string;
+ if (cl->elems[0]->kind == Ast_FieldValue) {
+ isize elem_count = cl->elems.count;
+ for (isize i = 0; i < elem_count; i++) {
+ ast_node(fv, FieldValue, cl->elems[i]);
+ String name = fv->field->Ident.token.string;
- TypeAndValue tav = fv->value->tav;
- GB_ASSERT(tav.mode != Addressing_Invalid);
+ TypeAndValue tav = fv->value->tav;
+ GB_ASSERT(tav.mode != Addressing_Invalid);
- Selection sel = lookup_field(type, name, false);
- GB_ASSERT(!sel.indirect);
+ Selection sel = lookup_field(type, name, false);
+ GB_ASSERT(!sel.indirect);
- Entity *f = type->Struct.fields[sel.index[0]];
- i32 index = field_remapping[f->Variable.field_index];
- if (elem_type_can_be_constant(f->type)) {
- if (sel.index.count == 1) {
- values[index] = lb_const_value(m, f->type, tav.value, allow_local).value;
+ Entity *f = type->Struct.fields[sel.index[0]];
+ i32 index = field_remapping[f->Variable.field_index];
+ if (elem_type_can_be_constant(f->type)) {
+ if (sel.index.count == 1) {
+ values[index] = lb_const_value(m, f->type, tav.value, allow_local).value;
+ visited[index] = true;
+ } else {
+ if (!visited[index]) {
+ values[index] = lb_const_value(m, f->type, {}, false).value;
visited[index] = true;
- } else {
- if (!visited[index]) {
- values[index] = lb_const_value(m, f->type, {}, false).value;
- visited[index] = true;
- }
- unsigned idx_list_len = cast(unsigned)sel.index.count-1;
- unsigned *idx_list = gb_alloc_array(temporary_allocator(), unsigned, idx_list_len);
-
- if (lb_is_nested_possibly_constant(type, sel, fv->value)) {
- bool is_constant = true;
- Type *cv_type = f->type;
- for (isize j = 1; j < sel.index.count; j++) {
- i32 index = sel.index[j];
- Type *cvt = base_type(cv_type);
-
- if (cvt->kind == Type_Struct) {
- if (cvt->Struct.is_raw_union) {
- // sanity check which should have been caught by `lb_is_nested_possibly_constant`
- is_constant = false;
- break;
- }
- cv_type = cvt->Struct.fields[index]->type;
-
- if (is_type_struct(cvt)) {
- auto cv_field_remapping = lb_get_struct_remapping(m, cvt);
- unsigned remapped_index = cast(unsigned)cv_field_remapping[index];
- idx_list[j-1] = remapped_index;
- } else {
- idx_list[j-1] = cast(unsigned)index;
- }
- } else if (cvt->kind == Type_Array) {
- cv_type = cvt->Array.elem;
+ }
+ unsigned idx_list_len = cast(unsigned)sel.index.count-1;
+ unsigned *idx_list = gb_alloc_array(temporary_allocator(), unsigned, idx_list_len);
+
+ if (lb_is_nested_possibly_constant(type, sel, fv->value)) {
+ bool is_constant = true;
+ Type *cv_type = f->type;
+ for (isize j = 1; j < sel.index.count; j++) {
+ i32 index = sel.index[j];
+ Type *cvt = base_type(cv_type);
+
+ if (cvt->kind == Type_Struct) {
+ if (cvt->Struct.is_raw_union) {
+ // sanity check which should have been caught by `lb_is_nested_possibly_constant`
+ is_constant = false;
+ break;
+ }
+ cv_type = cvt->Struct.fields[index]->type;
- idx_list[j-1] = cast(unsigned)index;
+ if (is_type_struct(cvt)) {
+ auto cv_field_remapping = lb_get_struct_remapping(m, cvt);
+ unsigned remapped_index = cast(unsigned)cv_field_remapping[index];
+ idx_list[j-1] = remapped_index;
} else {
- GB_PANIC("UNKNOWN TYPE: %s", type_to_string(cv_type));
+ idx_list[j-1] = cast(unsigned)index;
}
+ } else if (cvt->kind == Type_Array) {
+ cv_type = cvt->Array.elem;
+
+ idx_list[j-1] = cast(unsigned)index;
+ } else {
+ GB_PANIC("UNKNOWN TYPE: %s", type_to_string(cv_type));
}
- if (is_constant) {
- LLVMValueRef elem_value = lb_const_value(m, tav.type, tav.value, allow_local).value;
- GB_ASSERT(LLVMIsConstant(elem_value));
- values[index] = LLVMConstInsertValue(values[index], elem_value, idx_list, idx_list_len);
- }
+ }
+ if (is_constant) {
+ LLVMValueRef elem_value = lb_const_value(m, tav.type, tav.value, allow_local).value;
+ GB_ASSERT(LLVMIsConstant(elem_value));
+ values[index] = LLVMConstInsertValue(values[index], elem_value, idx_list, idx_list_len);
}
}
}
}
- } else {
- for_array(i, cl->elems) {
- Entity *f = type->Struct.fields[i];
- TypeAndValue tav = cl->elems[i]->tav;
- ExactValue val = {};
- if (tav.mode != Addressing_Invalid) {
- val = tav.value;
- }
-
- i32 index = field_remapping[f->Variable.field_index];
- if (elem_type_can_be_constant(f->type)) {
- values[index] = lb_const_value(m, f->type, val, allow_local).value;
- visited[index] = true;
- }
+ }
+ } else {
+ for_array(i, cl->elems) {
+ Entity *f = type->Struct.fields[i];
+ TypeAndValue tav = cl->elems[i]->tav;
+ ExactValue val = {};
+ if (tav.mode != Addressing_Invalid) {
+ val = tav.value;
+ }
+
+ i32 index = field_remapping[f->Variable.field_index];
+ if (elem_type_can_be_constant(f->type)) {
+ values[index] = lb_const_value(m, f->type, val, allow_local).value;
+ visited[index] = true;
}
}
}
diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp
index b9c6c606e..e053c5b40 100644
--- a/src/llvm_backend_debug.cpp
+++ b/src/llvm_backend_debug.cpp
@@ -442,19 +442,12 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
gbString name = type_to_string(type, temporary_allocator());
return LLVMDIBuilderCreateTypedef(m->debug_builder, base_integer, name, gb_string_length(name), nullptr, 0, nullptr, cast(u32)(8*type_align_of(type)));
}
+ case Type_RelativeMultiPointer: {
+ LLVMMetadataRef base_integer = lb_debug_type(m, type->RelativeMultiPointer.base_integer);
+ gbString name = type_to_string(type, temporary_allocator());
+ return LLVMDIBuilderCreateTypedef(m->debug_builder, base_integer, name, gb_string_length(name), nullptr, 0, nullptr, cast(u32)(8*type_align_of(type)));
+ }
- case Type_RelativeSlice:
- {
- unsigned element_count = 0;
- LLVMMetadataRef elements[2] = {};
- Type *base_integer = type->RelativeSlice.base_integer;
- unsigned base_bits = cast(unsigned)(8*type_size_of(base_integer));
- elements[0] = lb_debug_struct_field(m, str_lit("data_offset"), base_integer, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("len"), base_integer, base_bits);
- gbString name = type_to_string(type, temporary_allocator());
- return LLVMDIBuilderCreateStructType(m->debug_builder, nullptr, name, gb_string_length(name), nullptr, 0, 2*base_bits, base_bits, LLVMDIFlagZero, nullptr, elements, element_count, 0, nullptr, "", 0);
- }
-
case Type_Matrix: {
LLVMMetadataRef subscripts[1] = {};
subscripts[0] = LLVMDIBuilderGetOrCreateSubrange(m->debug_builder,
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index c30170553..33768cc12 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -2863,7 +2863,6 @@ gb_internal lbValue lb_build_unary_and(lbProcedure *p, Ast *expr) {
ast_node(ue, UnaryExpr, expr);
auto tv = type_and_value_of_expr(expr);
-
Ast *ue_expr = unparen_expr(ue->expr);
if (ue_expr->kind == Ast_IndexExpr && tv.mode == Addressing_OptionalOkPtr && is_type_tuple(tv.type)) {
Type *tuple = tv.type;
@@ -3803,25 +3802,32 @@ gb_internal lbAddr lb_build_addr_index_expr(lbProcedure *p, Ast *expr) {
lbValue v = {};
LLVMValueRef indices[1] = {index.value};
- v.value = LLVMBuildGEP2(p->builder, lb_type(p->module, t->MultiPointer.elem), multi_ptr.value, indices, 1, "foo");
+ v.value = LLVMBuildGEP2(p->builder, lb_type(p->module, t->MultiPointer.elem), multi_ptr.value, indices, 1, "");
v.type = alloc_type_pointer(t->MultiPointer.elem);
return lb_addr(v);
}
- case Type_RelativeSlice: {
- lbAddr slice_addr = {};
+ case Type_RelativeMultiPointer: {
+ lbAddr rel_ptr_addr = {};
if (deref) {
- slice_addr = lb_addr(lb_build_expr(p, ie->expr));
+ lbValue rel_ptr_ptr = lb_build_expr(p, ie->expr);
+ rel_ptr_addr = lb_addr(rel_ptr_ptr);
} else {
- slice_addr = lb_build_addr(p, ie->expr);
+ rel_ptr_addr = lb_build_addr(p, ie->expr);
}
- lbValue slice = lb_addr_load(p, slice_addr);
+ lbValue rel_ptr = lb_relative_pointer_to_pointer(p, rel_ptr_addr);
- lbValue elem = lb_slice_elem(p, slice);
- lbValue index = lb_emit_conv(p, lb_build_expr(p, ie->index), t_int);
- lbValue len = lb_slice_len(p, slice);
- lb_emit_bounds_check(p, ast_token(ie->index), index, len);
- lbValue v = lb_emit_ptr_offset(p, elem, index);
+ lbValue index = lb_build_expr(p, ie->index);
+ index = lb_emit_conv(p, index, t_int);
+ lbValue v = {};
+
+ Type *pointer_type = base_type(t->RelativeMultiPointer.pointer_type);
+ GB_ASSERT(pointer_type->kind == Type_MultiPointer);
+ Type *elem = pointer_type->MultiPointer.elem;
+
+ LLVMValueRef indices[1] = {index.value};
+ v.value = LLVMBuildGEP2(p->builder, lb_type(p->module, elem), rel_ptr.value, indices, 1, "");
+ v.type = alloc_type_pointer(elem);
return lb_addr(v);
}
@@ -3925,8 +3931,11 @@ gb_internal lbAddr lb_build_addr_slice_expr(lbProcedure *p, Ast *expr) {
return slice;
}
- case Type_RelativeSlice:
- GB_PANIC("TODO(bill): Type_RelativeSlice should be handled above already on the lb_addr_load");
+ case Type_RelativePointer:
+ GB_PANIC("TODO(bill): Type_RelativePointer should be handled above already on the lb_addr_load");
+ break;
+ case Type_RelativeMultiPointer:
+ GB_PANIC("TODO(bill): Type_RelativeMultiPointer should be handled above already on the lb_addr_load");
break;
case Type_DynamicArray: {
@@ -3996,7 +4005,7 @@ gb_internal lbAddr lb_build_addr_slice_expr(lbProcedure *p, Ast *expr) {
}
case Type_Basic: {
- GB_ASSERT(type == t_string);
+ GB_ASSERT_MSG(type == t_string, "got %s", type_to_string(type));
lbValue len = lb_string_len(p, base);
if (high.value == nullptr) high = len;
@@ -4171,7 +4180,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
// HACK TODO(bill): THIS IS A MASSIVE HACK!!!!
if (is_type_union(ft) && !are_types_identical(fet, ft) && !is_type_untyped(fet)) {
- GB_ASSERT_MSG(union_variant_index(ft, fet) > 0, "%s", type_to_string(fet));
+ GB_ASSERT_MSG(union_variant_index(ft, fet) >= 0, "%s", type_to_string(fet));
lb_emit_store_union_variant(p, gep, field_expr, fet);
} else {
@@ -4519,8 +4528,9 @@ gb_internal lbAddr lb_build_addr_internal(lbProcedure *p, Ast *expr) {
Selection sel = lookup_field(type, selector, false);
GB_ASSERT(sel.entity != nullptr);
if (sel.pseudo_field) {
- GB_ASSERT(sel.entity->kind == Entity_Procedure);
+ GB_ASSERT(sel.entity->kind == Entity_Procedure || sel.entity->kind == Entity_ProcGroup);
Entity *e = entity_of_node(sel_node);
+ GB_ASSERT(e->kind == Entity_Procedure);
return lb_addr(lb_find_value_from_entity(p->module, e));
}
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index ad8a1816a..5dfc7aff9 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -103,37 +103,7 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) {
}
String init_fullpath = c->parser->init_fullpath;
-
- if (build_context.out_filepath.len == 0) {
- gen->output_name = remove_directory_from_path(init_fullpath);
- gen->output_name = remove_extension_from_path(gen->output_name);
- gen->output_name = string_trim_whitespace(gen->output_name);
- if (gen->output_name.len == 0) {
- gen->output_name = c->info.init_scope->pkg->name;
- }
- gen->output_base = gen->output_name;
- } else {
- gen->output_name = build_context.out_filepath;
- gen->output_name = string_trim_whitespace(gen->output_name);
- if (gen->output_name.len == 0) {
- gen->output_name = c->info.init_scope->pkg->name;
- }
- isize pos = string_extension_position(gen->output_name);
- if (pos < 0) {
- gen->output_base = gen->output_name;
- } else {
- gen->output_base = substring(gen->output_name, 0, pos);
- }
- }
- gbAllocator ha = heap_allocator();
- array_init(&gen->output_object_paths, ha);
- array_init(&gen->output_temp_paths, ha);
-
- gen->output_base = path_to_full_path(ha, gen->output_base);
-
- gbString output_file_path = gb_string_make_length(ha, gen->output_base.text, gen->output_base.len);
- output_file_path = gb_string_appendc(output_file_path, ".obj");
- defer (gb_string_free(output_file_path));
+ linker_data_init(gen, &c->info, init_fullpath);
gen->info = &c->info;
@@ -141,10 +111,6 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) {
map_init(&gen->modules_through_ctx, gen->info->packages.count*2);
map_init(&gen->anonymous_proc_lits, 1024);
-
- array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024);
- ptr_set_init(&gen->foreign_libraries_set, 1024);
-
if (USE_SEPARATE_MODULES) {
for (auto const &entry : gen->info->packages) {
AstPackage *pkg = entry.value;
@@ -383,9 +349,10 @@ gb_internal lbAddr lb_addr(lbValue addr) {
if (addr.type != nullptr && is_type_relative_pointer(type_deref(addr.type))) {
GB_ASSERT(is_type_pointer(addr.type));
v.kind = lbAddr_RelativePointer;
- } else if (addr.type != nullptr && is_type_relative_slice(type_deref(addr.type))) {
- GB_ASSERT(is_type_pointer(addr.type));
- v.kind = lbAddr_RelativeSlice;
+ } else if (addr.type != nullptr && is_type_relative_multi_pointer(type_deref(addr.type))) {
+ GB_ASSERT(is_type_pointer(addr.type) ||
+ is_type_multi_pointer(addr.type));
+ v.kind = lbAddr_RelativePointer;
}
return v;
}
@@ -458,6 +425,43 @@ gb_internal Type *lb_addr_type(lbAddr const &addr) {
return type_deref(addr.addr.type);
}
+
+gb_internal lbValue lb_relative_pointer_to_pointer(lbProcedure *p, lbAddr const &addr) {
+ GB_ASSERT(addr.kind == lbAddr_RelativePointer);
+
+ Type *t = base_type(lb_addr_type(addr));
+ GB_ASSERT(is_type_relative_pointer(t) || is_type_relative_multi_pointer(t));
+
+ Type *pointer_type = nullptr;
+ Type *base_integer = nullptr;
+ if (t->kind == Type_RelativePointer) {
+ pointer_type = t->RelativePointer.pointer_type;
+ base_integer = t->RelativePointer.base_integer;
+ } else if (t->kind == Type_RelativeMultiPointer) {
+ pointer_type = t->RelativeMultiPointer.pointer_type;
+ base_integer = t->RelativeMultiPointer.base_integer;
+ }
+
+ lbValue ptr = lb_emit_conv(p, addr.addr, t_uintptr);
+ lbValue offset = lb_emit_conv(p, ptr, alloc_type_pointer(base_integer));
+ offset = lb_emit_load(p, offset);
+
+ if (!is_type_unsigned(base_integer)) {
+ offset = lb_emit_conv(p, offset, t_i64);
+ }
+ offset = lb_emit_conv(p, offset, t_uintptr);
+ lbValue absolute_ptr = lb_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
+ absolute_ptr = lb_emit_conv(p, absolute_ptr, pointer_type);
+
+ lbValue cond = lb_emit_comp(p, Token_CmpEq, offset, lb_const_nil(p->module, base_integer));
+
+ // NOTE(bill): nil check
+ lbValue nil_ptr = lb_const_nil(p->module, pointer_type);
+ lbValue final_ptr = lb_emit_select(p, cond, nil_ptr, absolute_ptr);
+ return final_ptr;
+}
+
+
gb_internal lbValue lb_addr_get_ptr(lbProcedure *p, lbAddr const &addr) {
if (addr.addr.value == nullptr) {
GB_PANIC("Illegal addr -> nullptr");
@@ -468,28 +472,8 @@ gb_internal lbValue lb_addr_get_ptr(lbProcedure *p, lbAddr const &addr) {
case lbAddr_Map:
return lb_internal_dynamic_map_get_ptr(p, addr.addr, addr.map.key);
- case lbAddr_RelativePointer: {
- Type *rel_ptr = base_type(lb_addr_type(addr));
- GB_ASSERT(rel_ptr->kind == Type_RelativePointer);
-
- lbValue ptr = lb_emit_conv(p, addr.addr, t_uintptr);
- lbValue offset = lb_emit_conv(p, ptr, alloc_type_pointer(rel_ptr->RelativePointer.base_integer));
- offset = lb_emit_load(p, offset);
-
- if (!is_type_unsigned(rel_ptr->RelativePointer.base_integer)) {
- offset = lb_emit_conv(p, offset, t_i64);
- }
- offset = lb_emit_conv(p, offset, t_uintptr);
- lbValue absolute_ptr = lb_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
- absolute_ptr = lb_emit_conv(p, absolute_ptr, rel_ptr->RelativePointer.pointer_type);
-
- lbValue cond = lb_emit_comp(p, Token_CmpEq, offset, lb_const_nil(p->module, rel_ptr->RelativePointer.base_integer));
-
- // NOTE(bill): nil check
- lbValue nil_ptr = lb_const_nil(p->module, rel_ptr->RelativePointer.pointer_type);
- lbValue final_ptr = lb_emit_select(p, cond, nil_ptr, absolute_ptr);
- return final_ptr;
- }
+ case lbAddr_RelativePointer:
+ return lb_relative_pointer_to_pointer(p, addr);
case lbAddr_SoaVariable:
// TODO(bill): FIX THIS HACK
@@ -511,6 +495,9 @@ gb_internal lbValue lb_addr_get_ptr(lbProcedure *p, lbAddr const &addr) {
gb_internal lbValue lb_build_addr_ptr(lbProcedure *p, Ast *expr) {
lbAddr addr = lb_build_addr(p, expr);
+ if (addr.kind == lbAddr_RelativePointer) {
+ return addr.addr;
+ }
return lb_addr_get_ptr(p, addr);
}
@@ -719,9 +706,20 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) {
if (addr.kind == lbAddr_RelativePointer) {
Type *rel_ptr = base_type(lb_addr_type(addr));
- GB_ASSERT(rel_ptr->kind == Type_RelativePointer);
+ GB_ASSERT(rel_ptr->kind == Type_RelativePointer ||
+ rel_ptr->kind == Type_RelativeMultiPointer);
+ Type *pointer_type = nullptr;
+ Type *base_integer = nullptr;
+
+ if (rel_ptr->kind == Type_RelativePointer) {
+ pointer_type = rel_ptr->RelativePointer.pointer_type;
+ base_integer = rel_ptr->RelativePointer.base_integer;
+ } else if (rel_ptr->kind == Type_RelativeMultiPointer) {
+ pointer_type = rel_ptr->RelativeMultiPointer.pointer_type;
+ base_integer = rel_ptr->RelativeMultiPointer.base_integer;
+ }
- value = lb_emit_conv(p, value, rel_ptr->RelativePointer.pointer_type);
+ value = lb_emit_conv(p, value, pointer_type);
GB_ASSERT(is_type_pointer(addr.addr.type));
lbValue ptr = lb_emit_conv(p, addr.addr, t_uintptr);
@@ -730,54 +728,20 @@ gb_internal void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) {
offset.value = LLVMBuildSub(p->builder, val_ptr.value, ptr.value, "");
offset.type = t_uintptr;
- if (!is_type_unsigned(rel_ptr->RelativePointer.base_integer)) {
+ if (!is_type_unsigned(base_integer)) {
offset = lb_emit_conv(p, offset, t_i64);
}
- offset = lb_emit_conv(p, offset, rel_ptr->RelativePointer.base_integer);
+ offset = lb_emit_conv(p, offset, base_integer);
- lbValue offset_ptr = lb_emit_conv(p, addr.addr, alloc_type_pointer(rel_ptr->RelativePointer.base_integer));
+ lbValue offset_ptr = lb_emit_conv(p, addr.addr, alloc_type_pointer(base_integer));
offset = lb_emit_select(p,
lb_emit_comp(p, Token_CmpEq, val_ptr, lb_const_nil(p->module, t_uintptr)),
- lb_const_nil(p->module, rel_ptr->RelativePointer.base_integer),
+ lb_const_nil(p->module, base_integer),
offset
);
LLVMBuildStore(p->builder, offset.value, offset_ptr.value);
return;
- } else if (addr.kind == lbAddr_RelativeSlice) {
- Type *rel_ptr = base_type(lb_addr_type(addr));
- GB_ASSERT(rel_ptr->kind == Type_RelativeSlice);
-
- value = lb_emit_conv(p, value, rel_ptr->RelativeSlice.slice_type);
-
- GB_ASSERT(is_type_pointer(addr.addr.type));
- lbValue ptr = lb_emit_conv(p, lb_emit_struct_ep(p, addr.addr, 0), t_uintptr);
- lbValue val_ptr = lb_emit_conv(p, lb_slice_elem(p, value), t_uintptr);
- lbValue offset = {};
- offset.value = LLVMBuildSub(p->builder, val_ptr.value, ptr.value, "");
- offset.type = t_uintptr;
-
- if (!is_type_unsigned(rel_ptr->RelativePointer.base_integer)) {
- offset = lb_emit_conv(p, offset, t_i64);
- }
- offset = lb_emit_conv(p, offset, rel_ptr->RelativePointer.base_integer);
-
-
- lbValue offset_ptr = lb_emit_conv(p, addr.addr, alloc_type_pointer(rel_ptr->RelativePointer.base_integer));
- offset = lb_emit_select(p,
- lb_emit_comp(p, Token_CmpEq, val_ptr, lb_const_nil(p->module, t_uintptr)),
- lb_const_nil(p->module, rel_ptr->RelativePointer.base_integer),
- offset
- );
- LLVMBuildStore(p->builder, offset.value, offset_ptr.value);
-
- lbValue len = lb_slice_len(p, value);
- len = lb_emit_conv(p, len, rel_ptr->RelativePointer.base_integer);
-
- lbValue len_ptr = lb_emit_struct_ep(p, addr.addr, 1);
- LLVMBuildStore(p->builder, len.value, len_ptr.value);
-
- return;
} else if (addr.kind == lbAddr_Map) {
lb_internal_dynamic_map_set(p, addr.addr, addr.map.type, addr.map.key, value, p->curr_stmt);
return;
@@ -1054,67 +1018,43 @@ gb_internal lbValue lb_addr_load(lbProcedure *p, lbAddr const &addr) {
if (addr.kind == lbAddr_RelativePointer) {
Type *rel_ptr = base_type(lb_addr_type(addr));
- GB_ASSERT(rel_ptr->kind == Type_RelativePointer);
+ Type *base_integer = nullptr;
+ Type *pointer_type = nullptr;
+ GB_ASSERT(rel_ptr->kind == Type_RelativePointer ||
+ rel_ptr->kind == Type_RelativeMultiPointer);
+
+ if (rel_ptr->kind == Type_RelativePointer) {
+ base_integer = rel_ptr->RelativePointer.base_integer;
+ pointer_type = rel_ptr->RelativePointer.pointer_type;
+ } else if (rel_ptr->kind == Type_RelativeMultiPointer) {
+ base_integer = rel_ptr->RelativeMultiPointer.base_integer;
+ pointer_type = rel_ptr->RelativeMultiPointer.pointer_type;
+ }
lbValue ptr = lb_emit_conv(p, addr.addr, t_uintptr);
- lbValue offset = lb_emit_conv(p, ptr, alloc_type_pointer(rel_ptr->RelativePointer.base_integer));
+ lbValue offset = lb_emit_conv(p, ptr, alloc_type_pointer(base_integer));
offset = lb_emit_load(p, offset);
- if (!is_type_unsigned(rel_ptr->RelativePointer.base_integer)) {
+ if (!is_type_unsigned(base_integer)) {
offset = lb_emit_conv(p, offset, t_i64);
}
offset = lb_emit_conv(p, offset, t_uintptr);
lbValue absolute_ptr = lb_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
- absolute_ptr = lb_emit_conv(p, absolute_ptr, rel_ptr->RelativePointer.pointer_type);
+ absolute_ptr = lb_emit_conv(p, absolute_ptr, pointer_type);
- lbValue cond = lb_emit_comp(p, Token_CmpEq, offset, lb_const_nil(p->module, rel_ptr->RelativePointer.base_integer));
+ lbValue cond = lb_emit_comp(p, Token_CmpEq, offset, lb_const_nil(p->module, base_integer));
// NOTE(bill): nil check
- lbValue nil_ptr = lb_const_nil(p->module, rel_ptr->RelativePointer.pointer_type);
+ lbValue nil_ptr = lb_const_nil(p->module, pointer_type);
lbValue final_ptr = {};
final_ptr.type = absolute_ptr.type;
final_ptr.value = LLVMBuildSelect(p->builder, cond.value, nil_ptr.value, absolute_ptr.value, "");
- return lb_emit_load(p, final_ptr);
-
- } else if (addr.kind == lbAddr_RelativeSlice) {
- Type *rel_ptr = base_type(lb_addr_type(addr));
- GB_ASSERT(rel_ptr->kind == Type_RelativeSlice);
-
- lbValue offset_ptr = lb_emit_struct_ep(p, addr.addr, 0);
- lbValue ptr = lb_emit_conv(p, offset_ptr, t_uintptr);
- lbValue offset = lb_emit_load(p, offset_ptr);
-
-
- if (!is_type_unsigned(rel_ptr->RelativeSlice.base_integer)) {
- offset = lb_emit_conv(p, offset, t_i64);
+ if (rel_ptr->kind == Type_RelativeMultiPointer) {
+ return final_ptr;
}
- offset = lb_emit_conv(p, offset, t_uintptr);
- lbValue absolute_ptr = lb_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
-
- Type *slice_type = base_type(rel_ptr->RelativeSlice.slice_type);
- GB_ASSERT(rel_ptr->RelativeSlice.slice_type->kind == Type_Slice);
- Type *slice_elem = slice_type->Slice.elem;
- Type *slice_elem_ptr = alloc_type_pointer(slice_elem);
-
- absolute_ptr = lb_emit_conv(p, absolute_ptr, slice_elem_ptr);
-
- lbValue cond = lb_emit_comp(p, Token_CmpEq, offset, lb_const_nil(p->module, rel_ptr->RelativeSlice.base_integer));
-
- // NOTE(bill): nil check
- lbValue nil_ptr = lb_const_nil(p->module, slice_elem_ptr);
- lbValue data = {};
- data.type = absolute_ptr.type;
- data.value = LLVMBuildSelect(p->builder, cond.value, nil_ptr.value, absolute_ptr.value, "");
-
- lbValue len = lb_emit_load(p, lb_emit_struct_ep(p, addr.addr, 1));
- len = lb_emit_conv(p, len, t_int);
-
- lbAddr slice = lb_add_local_generated(p, slice_type, false);
- lb_fill_slice(p, slice, data, len);
- return lb_addr_load(p, slice);
-
+ return lb_emit_load(p, final_ptr);
} else if (addr.kind == lbAddr_Map) {
Type *map_type = base_type(type_deref(addr.addr.type));
@@ -1895,8 +1835,8 @@ gb_internal LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
case Type_SimdVector:
return lb_type_internal(m, base);
- // TODO(bill): Deal with this correctly. Can this be named?
case Type_Proc:
+ // TODO(bill): Deal with this correctly. Can this be named?
return lb_type_internal(m, base);
case Type_Tuple:
@@ -2173,17 +2113,10 @@ gb_internal LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
case Type_RelativePointer:
return lb_type_internal(m, type->RelativePointer.base_integer);
+ case Type_RelativeMultiPointer:
+ return lb_type_internal(m, type->RelativeMultiPointer.base_integer);
- case Type_RelativeSlice:
- {
- LLVMTypeRef base_integer = lb_type_internal(m, type->RelativeSlice.base_integer);
- unsigned field_count = 2;
- LLVMTypeRef *fields = gb_alloc_array(permanent_allocator(), LLVMTypeRef, field_count);
- fields[0] = base_integer;
- fields[1] = base_integer;
- return LLVMStructTypeInContext(ctx, fields, field_count, false);
- }
case Type_Matrix:
{
@@ -2869,7 +2802,6 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
if (USE_SEPARATE_MODULES) {
lbModule *other_module = lb_module_of_entity(m->gen, e);
- // TODO(bill): correct this logic
bool is_external = other_module != m;
if (!is_external) {
if (e->code_gen_module != nullptr) {
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index c27c55337..66edda825 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -362,7 +362,6 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name
Type *pt = p->type;
lbCallingConventionKind cc_kind = lbCallingConvention_C;
- // TODO(bill): Clean up this logic
if (!is_arch_wasm()) {
cc_kind = lb_calling_convention_map[pt->Proc.calling_convention];
}
@@ -1702,7 +1701,6 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
lbValue v = lb_build_expr(p, ce->args[0]);
Type *t = base_type(v.type);
if (is_type_pointer(t)) {
- // IMPORTANT TODO(bill): Should there be a nil pointer check?
v = lb_emit_load(p, v);
t = type_deref(t);
}
@@ -1712,7 +1710,7 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
return lb_string_len(p, v);
} else if (is_type_array(t)) {
GB_PANIC("Array lengths are constant");
- } else if (is_type_slice(t) || is_type_relative_slice(t)) {
+ } else if (is_type_slice(t)) {
return lb_slice_len(p, v);
} else if (is_type_dynamic_array(t)) {
return lb_dynamic_array_len(p, v);
@@ -1730,7 +1728,6 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
lbValue v = lb_build_expr(p, ce->args[0]);
Type *t = base_type(v.type);
if (is_type_pointer(t)) {
- // IMPORTANT TODO(bill): Should there be a nil pointer check?
v = lb_emit_load(p, v);
t = type_deref(t);
}
@@ -1738,7 +1735,7 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
GB_PANIC("Unreachable");
} else if (is_type_array(t)) {
GB_PANIC("Array lengths are constant");
- } else if (is_type_slice(t) || is_type_relative_slice(t)) {
+ } else if (is_type_slice(t)) {
return lb_slice_len(p, v);
} else if (is_type_dynamic_array(t)) {
return lb_dynamic_array_cap(p, v);
@@ -3144,7 +3141,7 @@ gb_internal lbValue lb_build_call_expr(lbProcedure *p, Ast *expr) {
lbValue res = lb_build_call_expr_internal(p, expr);
- if (ce->optional_ok_one) { // TODO(bill): Minor hack for #optional_ok procedures
+ if (ce->optional_ok_one) {
GB_ASSERT(is_type_tuple(res.type));
GB_ASSERT(res.type->Tuple.variables.count == 2);
return lb_emit_struct_ev(p, res, 0);
@@ -3332,9 +3329,15 @@ gb_internal lbValue lb_build_call_expr_internal(lbProcedure *p, Ast *expr) {
isize param_index = lookup_procedure_parameter(pt, name);
GB_ASSERT(param_index >= 0);
- lbValue value = lb_build_expr(p, fv->value);
- GB_ASSERT(!is_type_tuple(value.type));
- args[param_index] = value;
+ Entity *e = pt->params->Tuple.variables[param_index];
+ if (e->kind == Entity_TypeName) {
+ lbValue value = lb_const_nil(p->module, e->type);
+ args[param_index] = value;
+ } else {
+ lbValue value = lb_build_expr(p, fv->value);
+ GB_ASSERT(!is_type_tuple(value.type));
+ args[param_index] = value;
+ }
}
TokenPos pos = ast_token(ce->proc).pos;
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 60420402a..9d688be6a 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -1688,7 +1688,6 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
lb_add_entity(p->module, case_entity, ptr);
lb_add_debug_local_variable(p, ptr.value, case_entity->type, case_entity->token);
} else {
- // TODO(bill): is the correct expected behaviour?
lb_store_type_case_implicit(p, clause, parent_value);
}
@@ -2014,12 +2013,10 @@ gb_internal void lb_build_if_stmt(lbProcedure *p, Ast *node) {
defer (lb_close_scope(p, lbDeferExit_Default, nullptr));
if (is->init != nullptr) {
- // TODO(bill): Should this have a separate block to begin with?
- #if 1
lbBlock *init = lb_create_block(p, "if.init");
lb_emit_jump(p, init);
lb_start_block(p, init);
- #endif
+
lb_build_stmt(p, is->init);
}
lbBlock *then = lb_create_block(p, "if.then");
diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp
index 4716733cc..62a67f96a 100644
--- a/src/llvm_backend_type.cpp
+++ b/src/llvm_backend_type.cpp
@@ -57,7 +57,7 @@ gb_internal lbValue lb_typeid(lbModule *m, Type *type) {
case Type_BitSet: kind = Typeid_Bit_Set; break;
case Type_SimdVector: kind = Typeid_Simd_Vector; break;
case Type_RelativePointer: kind = Typeid_Relative_Pointer; break;
- case Type_RelativeSlice: kind = Typeid_Relative_Slice; break;
+ case Type_RelativeMultiPointer: kind = Typeid_Relative_Multi_Pointer; break;
case Type_SoaPointer: kind = Typeid_SoaPointer; break;
}
@@ -731,7 +731,6 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
type_set_offsets(t); // NOTE(bill): Just incase the offsets have not been set yet
for (isize source_index = 0; source_index < count; source_index++) {
- // TODO(bill): Order fields in source order not layout order
Entity *f = t->Struct.fields[source_index];
lbValue tip = lb_type_info(m, f->type);
i64 foffset = 0;
@@ -858,12 +857,13 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
lb_emit_store(p, tag, res);
}
break;
- case Type_RelativeSlice:
+
+ case Type_RelativeMultiPointer:
{
- tag = lb_const_ptr_cast(m, variant_ptr, t_type_info_relative_slice_ptr);
+ tag = lb_const_ptr_cast(m, variant_ptr, t_type_info_relative_multi_pointer_ptr);
LLVMValueRef vals[2] = {
- lb_type_info(m, t->RelativeSlice.slice_type).value,
- lb_type_info(m, t->RelativeSlice.base_integer).value,
+ lb_type_info(m, t->RelativeMultiPointer.pointer_type).value,
+ lb_type_info(m, t->RelativeMultiPointer.base_integer).value,
};
lbValue res = {};
@@ -872,6 +872,7 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
lb_emit_store(p, tag, res);
}
break;
+
case Type_Matrix:
{
tag = lb_const_ptr_cast(m, variant_ptr, t_type_info_matrix_ptr);
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 2ecad1703..8dd6b14b6 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -1124,11 +1124,6 @@ gb_internal lbValue lb_emit_struct_ep(lbProcedure *p, lbValue s, i32 index) {
}
} else if (is_type_array(t)) {
return lb_emit_array_epi(p, s, index);
- } else if (is_type_relative_slice(t)) {
- switch (index) {
- case 0: result_type = t->RelativeSlice.base_integer; break;
- case 1: result_type = t->RelativeSlice.base_integer; break;
- }
} else if (is_type_soa_pointer(t)) {
switch (index) {
case 0: result_type = alloc_type_pointer(t->SoaPointer.elem); break;
@@ -1547,7 +1542,7 @@ gb_internal lbValue lb_slice_elem(lbProcedure *p, lbValue slice) {
return lb_emit_struct_ev(p, slice, 0);
}
gb_internal lbValue lb_slice_len(lbProcedure *p, lbValue slice) {
- GB_ASSERT(is_type_slice(slice.type) || is_type_relative_slice(slice.type));
+ GB_ASSERT(is_type_slice(slice.type));
return lb_emit_struct_ev(p, slice, 1);
}
gb_internal lbValue lb_dynamic_array_elem(lbProcedure *p, lbValue da) {
diff --git a/src/main.cpp b/src/main.cpp
index db2702b19..2a7c90744 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,5 +1,4 @@
// #define NO_ARRAY_BOUNDS_CHECK
-
#include "common.cpp"
#include "timings.cpp"
#include "tokenizer.cpp"
@@ -71,6 +70,18 @@ gb_global Timings global_timings = {0};
#include "checker.cpp"
#include "docs.cpp"
+#include "linker.cpp"
+
+#if defined(GB_SYSTEM_WINDOWS) && defined(ODIN_TILDE_BACKEND)
+#define ALLOW_TILDE 1
+#else
+#define ALLOW_TILDE 0
+#endif
+
+#if ALLOW_TILDE
+#include "tilde.cpp"
+#endif
+
#include "llvm_backend.cpp"
#if defined(GB_SYSTEM_OSX)
@@ -147,422 +158,6 @@ gb_internal i32 system_exec_command_line_app(char const *name, char const *fmt,
return exit_code;
}
-
-gb_internal i32 linker_stage(lbGenerator *gen) {
- i32 result = 0;
- Timings *timings = &global_timings;
-
- String output_filename = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_Output]);
- debugf("Linking %.*s\n", LIT(output_filename));
-
- // TOOD(Jeroen): Make a `build_paths[BuildPath_Object] to avoid `%.*s.o`.
-
- if (is_arch_wasm()) {
- timings_start_section(timings, str_lit("wasm-ld"));
-
- #if defined(GB_SYSTEM_WINDOWS)
- result = system_exec_command_line_app("wasm-ld",
- "\"%.*s\\bin\\wasm-ld\" \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
- LIT(build_context.ODIN_ROOT),
- LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
- #else
- result = system_exec_command_line_app("wasm-ld",
- "wasm-ld \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
- LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
- #endif
- return result;
- }
-
- if (build_context.cross_compiling && selected_target_metrics->metrics == &target_essence_amd64) {
-#if defined(GB_SYSTEM_UNIX)
- result = system_exec_command_line_app("linker", "x86_64-essence-gcc \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
- LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
-#else
- gb_printf_err("Linking for cross compilation for this platform is not yet supported (%.*s %.*s)\n",
- LIT(target_os_names[build_context.metrics.os]),
- LIT(target_arch_names[build_context.metrics.arch])
- );
-#endif
- } else if (build_context.cross_compiling && build_context.different_os) {
- gb_printf_err("Linking for cross compilation for this platform is not yet supported (%.*s %.*s)\n",
- LIT(target_os_names[build_context.metrics.os]),
- LIT(target_arch_names[build_context.metrics.arch])
- );
- build_context.keep_object_files = true;
- } else {
- #if defined(GB_SYSTEM_WINDOWS)
- bool is_windows = true;
- #else
- bool is_windows = false;
- #endif
- #if defined(GB_SYSTEM_OSX)
- bool is_osx = true;
- #else
- bool is_osx = false;
- #endif
-
-
- if (is_windows) {
- String section_name = str_lit("msvc-link");
- if (build_context.use_lld) {
- section_name = str_lit("lld-link");
- }
- timings_start_section(timings, section_name);
-
- gbString lib_str = gb_string_make(heap_allocator(), "");
- defer (gb_string_free(lib_str));
-
- gbString link_settings = gb_string_make_reserve(heap_allocator(), 256);
- defer (gb_string_free(link_settings));
-
- // Add library search paths.
- if (build_context.build_paths[BuildPath_VS_LIB].basename.len > 0) {
- String path = {};
- auto add_path = [&](String path) {
- if (path[path.len-1] == '\\') {
- path.len -= 1;
- }
- link_settings = gb_string_append_fmt(link_settings, " /LIBPATH:\"%.*s\"", LIT(path));
- };
- add_path(build_context.build_paths[BuildPath_Win_SDK_UM_Lib].basename);
- add_path(build_context.build_paths[BuildPath_Win_SDK_UCRT_Lib].basename);
- add_path(build_context.build_paths[BuildPath_VS_LIB].basename);
- }
-
-
- StringSet libs = {};
- string_set_init(&libs, 64);
- defer (string_set_destroy(&libs));
-
- StringSet asm_files = {};
- string_set_init(&asm_files, 64);
- defer (string_set_destroy(&asm_files));
-
- for (Entity *e : gen->foreign_libraries) {
- GB_ASSERT(e->kind == Entity_LibraryName);
- for_array(i, e->LibraryName.paths) {
- String lib = string_trim_whitespace(e->LibraryName.paths[i]);
- // IMPORTANT NOTE(bill): calling `string_to_lower` here is not an issue because
- // we will never uses these strings afterwards
- string_to_lower(&lib);
- if (lib.len == 0) {
- continue;
- }
-
- if (has_asm_extension(lib)) {
- if (!string_set_update(&asm_files, lib)) {
- String asm_file = asm_files.entries[i].value;
- String obj_file = concatenate_strings(permanent_allocator(), asm_file, str_lit(".obj"));
-
- result = system_exec_command_line_app("nasm",
- "\"%.*s\\bin\\nasm\\windows\\nasm.exe\" \"%.*s\" "
- "-f win64 "
- "-o \"%.*s\" "
- "%.*s "
- "",
- LIT(build_context.ODIN_ROOT), LIT(asm_file),
- LIT(obj_file),
- LIT(build_context.extra_assembler_flags)
- );
-
- if (result) {
- return result;
- }
- array_add(&gen->output_object_paths, obj_file);
- }
- } else {
- if (!string_set_update(&libs, lib)) {
- lib_str = gb_string_append_fmt(lib_str, " \"%.*s\"", LIT(lib));
- }
- }
- }
- }
-
- for (Entity *e : gen->foreign_libraries) {
- GB_ASSERT(e->kind == Entity_LibraryName);
- if (e->LibraryName.extra_linker_flags.len != 0) {
- lib_str = gb_string_append_fmt(lib_str, " %.*s", LIT(e->LibraryName.extra_linker_flags));
- }
- }
-
- if (build_context.build_mode == BuildMode_DynamicLibrary) {
- link_settings = gb_string_append_fmt(link_settings, " /DLL");
- } else {
- link_settings = gb_string_append_fmt(link_settings, " /ENTRY:mainCRTStartup");
- }
-
- if (build_context.pdb_filepath != "") {
- String pdb_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_PDB]);
- link_settings = gb_string_append_fmt(link_settings, " /PDB:%.*s", LIT(pdb_path));
- }
-
- if (build_context.no_crt) {
- link_settings = gb_string_append_fmt(link_settings, " /nodefaultlib");
- } else {
- link_settings = gb_string_append_fmt(link_settings, " /defaultlib:libcmt");
- }
-
- if (build_context.ODIN_DEBUG) {
- link_settings = gb_string_append_fmt(link_settings, " /DEBUG");
- }
-
- gbString object_files = gb_string_make(heap_allocator(), "");
- defer (gb_string_free(object_files));
- for (String const &object_path : gen->output_object_paths) {
- object_files = gb_string_append_fmt(object_files, "\"%.*s\" ", LIT(object_path));
- }
-
- String vs_exe_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_VS_EXE]);
- defer (gb_free(heap_allocator(), vs_exe_path.text));
-
- String windows_sdk_bin_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_Win_SDK_Bin_Path]);
- defer (gb_free(heap_allocator(), windows_sdk_bin_path.text));
-
- char const *subsystem_str = build_context.use_subsystem_windows ? "WINDOWS" : "CONSOLE";
- if (!build_context.use_lld) { // msvc
- String res_path = {};
- defer (gb_free(heap_allocator(), res_path.text));
- if (build_context.has_resource) {
- String temp_res_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_RES]);
- res_path = concatenate3_strings(heap_allocator(), str_lit("\""), temp_res_path, str_lit("\""));
- gb_free(heap_allocator(), temp_res_path.text);
-
- String rc_path = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_RC]);
- defer (gb_free(heap_allocator(), rc_path.text));
-
- result = system_exec_command_line_app("msvc-link",
- "\"%.*src.exe\" /nologo /fo \"%.*s\" \"%.*s\"",
- LIT(windows_sdk_bin_path),
- LIT(res_path),
- LIT(rc_path)
- );
-
- if (result) {
- return result;
- }
- }
-
- switch (build_context.build_mode) {
- case BuildMode_Executable:
- link_settings = gb_string_append_fmt(link_settings, " /NOIMPLIB /NOEXP");
- break;
- }
-
- result = system_exec_command_line_app("msvc-link",
- "\"%.*slink.exe\" %s %.*s -OUT:\"%.*s\" %s "
- "/nologo /incremental:no /opt:ref /subsystem:%s "
- "%.*s "
- "%.*s "
- "%s "
- "",
- LIT(vs_exe_path), object_files, LIT(res_path), LIT(output_filename),
- link_settings,
- subsystem_str,
- LIT(build_context.link_flags),
- LIT(build_context.extra_linker_flags),
- lib_str
- );
- if (result) {
- return result;
- }
- } else { // lld
- result = system_exec_command_line_app("msvc-lld-link",
- "\"%.*s\\bin\\lld-link\" %s -OUT:\"%.*s\" %s "
- "/nologo /incremental:no /opt:ref /subsystem:%s "
- "%.*s "
- "%.*s "
- "%s "
- "",
- LIT(build_context.ODIN_ROOT), object_files, LIT(output_filename),
- link_settings,
- subsystem_str,
- LIT(build_context.link_flags),
- LIT(build_context.extra_linker_flags),
- lib_str
- );
-
- if (result) {
- return result;
- }
- }
- } else {
- timings_start_section(timings, str_lit("ld-link"));
-
- // NOTE(vassvik): get cwd, for used for local shared libs linking, since those have to be relative to the exe
- char cwd[256];
- #if !defined(GB_SYSTEM_WINDOWS)
- getcwd(&cwd[0], 256);
- #endif
- //printf("%s\n", cwd);
-
- // NOTE(vassvik): needs to add the root to the library search paths, so that the full filenames of the library
- // files can be passed with -l:
- gbString lib_str = gb_string_make(heap_allocator(), "-L/");
- defer (gb_string_free(lib_str));
-
- StringSet libs = {};
- string_set_init(&libs, 64);
- defer (string_set_destroy(&libs));
-
- for (Entity *e : gen->foreign_libraries) {
- GB_ASSERT(e->kind == Entity_LibraryName);
- for (String lib : e->LibraryName.paths) {
- lib = string_trim_whitespace(lib);
- if (lib.len == 0) {
- continue;
- }
- if (string_set_update(&libs, lib)) {
- continue;
- }
-
- // NOTE(zangent): Sometimes, you have to use -framework on MacOS.
- // This allows you to specify '-f' in a #foreign_system_library,
- // without having to implement any new syntax specifically for MacOS.
- if (build_context.metrics.os == TargetOs_darwin) {
- if (string_ends_with(lib, str_lit(".framework"))) {
- // framework thingie
- String lib_name = lib;
- lib_name = remove_extension_from_path(lib_name);
- lib_str = gb_string_append_fmt(lib_str, " -framework %.*s ", LIT(lib_name));
- } else if (string_ends_with(lib, str_lit(".a")) || string_ends_with(lib, str_lit(".o")) || string_ends_with(lib, str_lit(".dylib"))) {
- // For:
- // object
- // dynamic lib
- // static libs, absolute full path relative to the file in which the lib was imported from
- lib_str = gb_string_append_fmt(lib_str, " %.*s ", LIT(lib));
- } else {
- // dynamic or static system lib, just link regularly searching system library paths
- lib_str = gb_string_append_fmt(lib_str, " -l%.*s ", LIT(lib));
- }
- } else {
- // NOTE(vassvik): static libraries (.a files) in linux can be linked to directly using the full path,
- // since those are statically linked to at link time. shared libraries (.so) has to be
- // available at runtime wherever the executable is run, so we make require those to be
- // local to the executable (unless the system collection is used, in which case we search
- // the system library paths for the library file).
- if (string_ends_with(lib, str_lit(".a")) || string_ends_with(lib, str_lit(".o"))) {
- // static libs and object files, absolute full path relative to the file in which the lib was imported from
- lib_str = gb_string_append_fmt(lib_str, " -l:\"%.*s\" ", LIT(lib));
- } else if (string_ends_with(lib, str_lit(".so"))) {
- // dynamic lib, relative path to executable
- // NOTE(vassvik): it is the user's responsibility to make sure the shared library files are visible
- // at runtime to the executable
- lib_str = gb_string_append_fmt(lib_str, " -l:\"%s/%.*s\" ", cwd, LIT(lib));
- } else {
- // dynamic or static system lib, just link regularly searching system library paths
- lib_str = gb_string_append_fmt(lib_str, " -l%.*s ", LIT(lib));
- }
- }
- }
- }
-
- for (Entity *e : gen->foreign_libraries) {
- GB_ASSERT(e->kind == Entity_LibraryName);
- if (e->LibraryName.extra_linker_flags.len != 0) {
- lib_str = gb_string_append_fmt(lib_str, " %.*s", LIT(e->LibraryName.extra_linker_flags));
- }
- }
-
- gbString object_files = gb_string_make(heap_allocator(), "");
- defer (gb_string_free(object_files));
- for (String object_path : gen->output_object_paths) {
- object_files = gb_string_append_fmt(object_files, "\"%.*s\" ", LIT(object_path));
- }
-
- gbString link_settings = gb_string_make_reserve(heap_allocator(), 32);
-
- if (build_context.no_crt) {
- link_settings = gb_string_append_fmt(link_settings, "-nostdlib ");
- }
-
- // NOTE(dweiler): We use clang as a frontend for the linker as there are
- // other runtime and compiler support libraries that need to be linked in
- // very specific orders such as libgcc_s, ld-linux-so, unwind, etc.
- // These are not always typically inside /lib, /lib64, or /usr versions
- // of that, e.g libgcc.a is in /usr/lib/gcc/{version}, and can vary on
- // the distribution of Linux even. The gcc or clang specs is the only
- // reliable way to query this information to call ld directly.
- if (build_context.build_mode == BuildMode_DynamicLibrary) {
- // NOTE(dweiler): Let the frontend know we're building a shared library
- // so it doesn't generate symbols which cannot be relocated.
- link_settings = gb_string_appendc(link_settings, "-shared ");
-
- // NOTE(dweiler): _odin_entry_point must be called at initialization
- // time of the shared object, similarly, _odin_exit_point must be called
- // at deinitialization. We can pass both -init and -fini to the linker by
- // using a comma separated list of arguments to -Wl.
- //
- // This previously used ld but ld cannot actually build a shared library
- // correctly this way since all the other dependencies provided implicitly
- // by the compiler frontend are still needed and most of the command
- // line arguments prepared previously are incompatible with ld.
- if (build_context.metrics.os == TargetOs_darwin) {
- link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__odin_entry_point' ");
- // NOTE(weshardee): __odin_exit_point should also be added, but -fini
- // does not exist on MacOS
- } else {
- link_settings = gb_string_appendc(link_settings, "-Wl,-init,'_odin_entry_point' ");
- link_settings = gb_string_appendc(link_settings, "-Wl,-fini,'_odin_exit_point' ");
- }
-
- } else if (build_context.metrics.os != TargetOs_openbsd) {
- // OpenBSD defaults to PIE executable. do not pass -no-pie for it.
- link_settings = gb_string_appendc(link_settings, "-no-pie ");
- }
-
- gbString platform_lib_str = gb_string_make(heap_allocator(), "");
- defer (gb_string_free(platform_lib_str));
- if (build_context.metrics.os == TargetOs_darwin) {
- platform_lib_str = gb_string_appendc(platform_lib_str, "-lSystem -lm -Wl,-syslibroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk -L/usr/local/lib");
- } else {
- platform_lib_str = gb_string_appendc(platform_lib_str, "-lc -lm");
- }
-
- if (build_context.metrics.os == TargetOs_darwin) {
- // This sets a requirement of Mountain Lion and up, but the compiler doesn't work without this limit.
- if (build_context.minimum_os_version_string.len) {
- link_settings = gb_string_append_fmt(link_settings, " -mmacosx-version-min=%.*s ", LIT(build_context.minimum_os_version_string));
- } else if (build_context.metrics.arch == TargetArch_arm64) {
- link_settings = gb_string_appendc(link_settings, " -mmacosx-version-min=12.0.0 ");
- } else {
- link_settings = gb_string_appendc(link_settings, " -mmacosx-version-min=10.12.0 ");
- }
- // This points the linker to where the entry point is
- link_settings = gb_string_appendc(link_settings, " -e _main ");
- }
-
- gbString link_command_line = gb_string_make(heap_allocator(), "clang -Wno-unused-command-line-argument ");
- defer (gb_string_free(link_command_line));
-
- link_command_line = gb_string_appendc(link_command_line, object_files);
- link_command_line = gb_string_append_fmt(link_command_line, " -o \"%.*s\" ", LIT(output_filename));
- link_command_line = gb_string_append_fmt(link_command_line, " %s ", platform_lib_str);
- link_command_line = gb_string_append_fmt(link_command_line, " %s ", lib_str);
- link_command_line = gb_string_append_fmt(link_command_line, " %.*s ", LIT(build_context.link_flags));
- link_command_line = gb_string_append_fmt(link_command_line, " %.*s ", LIT(build_context.extra_linker_flags));
- link_command_line = gb_string_append_fmt(link_command_line, " %s ", link_settings);
-
- result = system_exec_command_line_app("ld-link", link_command_line);
-
- if (result) {
- return result;
- }
-
- if (is_osx && build_context.ODIN_DEBUG) {
- // NOTE: macOS links DWARF symbols dynamically. Dsymutil will map the stubs in the exe
- // to the symbols in the object file
- result = system_exec_command_line_app("dsymutil", "dsymutil %.*s", LIT(output_filename));
-
- if (result) {
- return result;
- }
- }
- }
- }
-
- return result;
-}
-
gb_internal Array<String> setup_args(int argc, char const **argv) {
gbAllocator a = heap_allocator();
@@ -653,8 +248,16 @@ enum BuildFlagKind {
BuildFlag_UseSeparateModules,
BuildFlag_NoThreadedChecker,
BuildFlag_ShowDebugMessages,
+
BuildFlag_Vet,
+ BuildFlag_VetShadowing,
+ BuildFlag_VetUnused,
+ BuildFlag_VetUsingStmt,
+ BuildFlag_VetUsingParam,
+ BuildFlag_VetStyle,
+ BuildFlag_VetSemicolon,
BuildFlag_VetExtra,
+
BuildFlag_IgnoreUnknownAttributes,
BuildFlag_ExtraLinkerFlags,
BuildFlag_ExtraAssemblerFlags,
@@ -671,7 +274,6 @@ enum BuildFlagKind {
BuildFlag_DisallowDo,
BuildFlag_DefaultToNilAllocator,
BuildFlag_StrictStyle,
- BuildFlag_StrictStyleInitOnly,
BuildFlag_ForeignErrorProcedures,
BuildFlag_NoRTTI,
BuildFlag_DynamicMapCalls,
@@ -695,6 +297,8 @@ enum BuildFlagKind {
BuildFlag_InternalIgnoreLazy,
BuildFlag_InternalIgnoreLLVMBuild,
+ BuildFlag_Tilde,
+
#if defined(GB_SYSTEM_WINDOWS)
BuildFlag_IgnoreVsSearch,
BuildFlag_ResourceFile,
@@ -830,8 +434,16 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_UseSeparateModules, str_lit("use-separate-modules"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_NoThreadedChecker, str_lit("no-threaded-checker"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ShowDebugMessages, str_lit("show-debug-messages"), BuildFlagParam_None, Command_all);
+
add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_VetUnused, str_lit("vet-unused"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_VetShadowing, str_lit("vet-shadowing"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_VetUsingStmt, str_lit("vet-using-stmt"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_VetUsingParam, str_lit("vet-using-param"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_VetStyle, str_lit("vet-style"), BuildFlagParam_None, Command__does_check);
+ add_flag(&build_flags, BuildFlag_VetSemicolon, str_lit("vet-semicolon"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_VetExtra, str_lit("vet-extra"), BuildFlagParam_None, Command__does_check);
+
add_flag(&build_flags, BuildFlag_IgnoreUnknownAttributes, str_lit("ignore-unknown-attributes"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ExtraLinkerFlags, str_lit("extra-linker-flags"), BuildFlagParam_String, Command__does_build);
add_flag(&build_flags, BuildFlag_ExtraAssemblerFlags, str_lit("extra-assembler-flags"), BuildFlagParam_String, Command__does_build);
@@ -847,7 +459,6 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_DisallowDo, str_lit("disallow-do"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_DefaultToNilAllocator, str_lit("default-to-nil-allocator"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_StrictStyle, str_lit("strict-style"), BuildFlagParam_None, Command__does_check);
- add_flag(&build_flags, BuildFlag_StrictStyleInitOnly, str_lit("strict-style-init-only"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ForeignErrorProcedures, str_lit("foreign-error-procedures"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_NoRTTI, str_lit("no-rtti"), BuildFlagParam_None, Command__does_check);
@@ -869,6 +480,10 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_InternalIgnoreLazy, str_lit("internal-ignore-lazy"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_InternalIgnoreLLVMBuild, str_lit("internal-ignore-llvm-build"),BuildFlagParam_None, Command_all);
+#if ALLOW_TILDE
+ add_flag(&build_flags, BuildFlag_Tilde, str_lit("tilde"), BuildFlagParam_None, Command__does_build);
+#endif
+
#if defined(GB_SYSTEM_WINDOWS)
add_flag(&build_flags, BuildFlag_IgnoreVsSearch, str_lit("ignore-vs-search"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_ResourceFile, str_lit("resource"), BuildFlagParam_String, Command__does_build);
@@ -1362,13 +977,25 @@ gb_internal bool parse_build_flags(Array<String> args) {
build_context.show_debug_messages = true;
break;
case BuildFlag_Vet:
- build_context.vet = true;
+ if (build_context.vet_flags & VetFlag_Extra) {
+ build_context.vet_flags |= VetFlag_All;
+ } else {
+ build_context.vet_flags &= ~VetFlag_Extra;
+ build_context.vet_flags |= VetFlag_All;
+ }
break;
- case BuildFlag_VetExtra: {
- build_context.vet = true;
- build_context.vet_extra = true;
+
+ case BuildFlag_VetUnused: build_context.vet_flags |= VetFlag_Unused; break;
+ case BuildFlag_VetShadowing: build_context.vet_flags |= VetFlag_Shadowing; break;
+ case BuildFlag_VetUsingStmt: build_context.vet_flags |= VetFlag_UsingStmt; break;
+ case BuildFlag_VetUsingParam: build_context.vet_flags |= VetFlag_UsingParam; break;
+ case BuildFlag_VetStyle: build_context.vet_flags |= VetFlag_Style; break;
+ case BuildFlag_VetSemicolon: build_context.vet_flags |= VetFlag_Semicolon; break;
+
+ case BuildFlag_VetExtra:
+ build_context.vet_flags = VetFlag_All | VetFlag_Extra;
break;
- }
+
case BuildFlag_IgnoreUnknownAttributes:
build_context.ignore_unknown_attributes = true;
break;
@@ -1456,20 +1083,9 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_ForeignErrorProcedures:
build_context.ODIN_FOREIGN_ERROR_PROCEDURES = true;
break;
- case BuildFlag_StrictStyle: {
- if (build_context.strict_style_init_only) {
- gb_printf_err("-strict-style and -strict-style-init-only cannot be used together\n");
- }
+ case BuildFlag_StrictStyle:
build_context.strict_style = true;
break;
- }
- case BuildFlag_StrictStyleInitOnly: {
- if (build_context.strict_style) {
- gb_printf_err("-strict-style and -strict-style-init-only cannot be used together\n");
- }
- build_context.strict_style_init_only = true;
- break;
- }
case BuildFlag_Short:
build_context.cmd_doc_flags |= CmdDocFlag_Short;
break;
@@ -1536,6 +1152,10 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_InternalIgnoreLLVMBuild:
build_context.ignore_llvm_build = true;
break;
+ case BuildFlag_Tilde:
+ build_context.tilde_backend = true;
+ break;
+
#if defined(GB_SYSTEM_WINDOWS)
case BuildFlag_IgnoreVsSearch: {
GB_ASSERT(value.kind == ExactValue_Invalid);
@@ -1572,7 +1192,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
if (path_is_directory(path)) {
gb_printf_err("Invalid -pdb-name path. %.*s, is a directory.\n", LIT(path));
bad_flags = true;
- break;
+ break;
}
// #if defined(GB_SYSTEM_WINDOWS)
// String ext = path_extension(path);
@@ -2005,6 +1625,10 @@ gb_internal void print_show_help(String const arg0, String const &command) {
print_usage_line(2, "Shows an advanced overview of the timings of different stages within the compiler in milliseconds");
print_usage_line(0, "");
+ print_usage_line(1, "-show-system-calls");
+ print_usage_line(2, "Prints the whole command and arguments for calls to external tools like linker and assembler");
+ print_usage_line(0, "");
+
print_usage_line(1, "-export-timings:<format>");
print_usage_line(2, "Export timings to one of a few formats. Requires `-show-timings` or `-show-more-timings`");
print_usage_line(2, "Available options:");
@@ -2114,29 +1738,55 @@ gb_internal void print_show_help(String const arg0, String const &command) {
}
if (check) {
- #if defined(GB_SYSTEM_WINDOWS)
print_usage_line(1, "-no-threaded-checker");
print_usage_line(2, "Disabled multithreading in the semantic checker stage");
print_usage_line(0, "");
- #else
- print_usage_line(1, "-threaded-checker");
- print_usage_line(1, "[EXPERIMENTAL]");
- print_usage_line(2, "Multithread the semantic checker stage");
- print_usage_line(0, "");
- #endif
+ }
+ if (check) {
print_usage_line(1, "-vet");
print_usage_line(2, "Do extra checks on the code");
print_usage_line(2, "Extra checks include:");
- print_usage_line(3, "Variable shadowing within procedures");
- print_usage_line(3, "Unused declarations");
+ print_usage_line(2, "-vet-unused");
+ print_usage_line(2, "-vet-shadowing");
+ print_usage_line(2, "-vet-using-stmt");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-vet-unused");
+ print_usage_line(2, "Checks for unused declarations");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-vet-shadowing");
+ print_usage_line(2, "Checks for variable shadowing within procedures");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-vet-using-stmt");
+ print_usage_line(2, "Checks for the use of 'using' as a statement");
+ print_usage_line(2, "'using' is considered bad practice outside of immediate refactoring");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-vet-using-param");
+ print_usage_line(2, "Checks for the use of 'using' on procedure parameters");
+ print_usage_line(2, "'using' is considered bad practice outside of immediate refactoring");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-vet-style");
+ print_usage_line(2, "Errs on missing trailing commas followed by a newline");
+ print_usage_line(2, "Errs on deprecated syntax");
+ print_usage_line(2, "Does not err on unneeded tokens (unlike -strict-style)");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-vet-semicolon");
+ print_usage_line(2, "Errs on unneeded semicolons");
print_usage_line(0, "");
print_usage_line(1, "-vet-extra");
print_usage_line(2, "Do even more checks than standard vet on the code");
print_usage_line(2, "To treat the extra warnings as errors, use -warnings-as-errors");
print_usage_line(0, "");
+ }
+ if (check) {
print_usage_line(1, "-ignore-unknown-attributes");
print_usage_line(2, "Ignores unknown attributes");
print_usage_line(2, "This can be used with metaprogramming tools");
@@ -2206,10 +1856,8 @@ gb_internal void print_show_help(String const arg0, String const &command) {
print_usage_line(1, "-strict-style");
print_usage_line(2, "Errs on unneeded tokens, such as unneeded semicolons");
- print_usage_line(0, "");
-
- print_usage_line(1, "-strict-style-init-only");
- print_usage_line(2, "Errs on unneeded tokens, such as unneeded semicolons, only on the initial project");
+ print_usage_line(2, "Errs on missing trailing commas followed by a newline");
+ print_usage_line(2, "Errs on deprecated syntax");
print_usage_line(0, "");
print_usage_line(1, "-ignore-warnings");
@@ -2347,6 +1995,8 @@ gb_internal void print_show_unused(Checker *c) {
}
gb_internal bool check_env(void) {
+ TIME_SECTION("init check env");
+
gbAllocator a = heap_allocator();
char const *odin_root = gb_get_env("ODIN_ROOT", a);
defer (gb_free(a, cast(void *)odin_root));
@@ -2548,6 +2198,7 @@ gb_internal int strip_semicolons(Parser *parser) {
}
gb_internal void init_terminal(void) {
+ TIME_SECTION("init terminal");
build_context.has_ansi_terminal_colours = false;
gbAllocator a = heap_allocator();
@@ -2614,11 +2265,13 @@ int main(int arg_count, char const **arg_ptr) {
return 1;
}
+ TIME_SECTION("init default library collections");
array_init(&library_collections, heap_allocator());
// NOTE(bill): 'core' cannot be (re)defined by the user
add_library_collection(str_lit("core"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("core")));
add_library_collection(str_lit("vendor"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("vendor")));
+ TIME_SECTION("init args");
map_init(&build_context.defined_values);
build_context.extra_packages.allocator = heap_allocator();
string_set_init(&build_context.test_names);
@@ -2814,12 +2467,14 @@ int main(int arg_count, char const **arg_ptr) {
for_array(i, build_context.build_paths) {
String build_path = path_to_string(heap_allocator(), build_context.build_paths[i]);
debugf("build_paths[%ld]: %.*s\n", i, LIT(build_path));
- }
+ }
}
+ TIME_SECTION("init thread pool");
init_global_thread_pool();
defer (thread_pool_destroy(&global_thread_pool));
+ TIME_SECTION("init universal");
init_universal();
// TODO(bill): prevent compiling without a linker
@@ -2882,16 +2537,18 @@ int main(int arg_count, char const **arg_ptr) {
return 0;
}
- MAIN_TIME_SECTION("LLVM API Code Gen");
- lbGenerator *gen = gb_alloc_item(permanent_allocator(), lbGenerator);
- if (!lb_init_generator(gen, checker)) {
- return 1;
- }
- if (lb_generate_code(gen)) {
+#if ALLOW_TILDE
+ if (build_context.tilde_backend) {
+ LinkerData linker_data = {};
+ MAIN_TIME_SECTION("Tilde Code Gen");
+ if (!cg_generate_code(checker, &linker_data)) {
+ return 1;
+ }
+
switch (build_context.build_mode) {
case BuildMode_Executable:
case BuildMode_DynamicLibrary:
- i32 result = linker_stage(gen);
+ i32 result = linker_stage(&linker_data);
if (result) {
if (build_context.show_timings) {
show_timings(checker, &global_timings);
@@ -2900,9 +2557,31 @@ int main(int arg_count, char const **arg_ptr) {
}
break;
}
- }
+ } else
+#endif
+ {
+ MAIN_TIME_SECTION("LLVM API Code Gen");
+ lbGenerator *gen = gb_alloc_item(permanent_allocator(), lbGenerator);
+ if (!lb_init_generator(gen, checker)) {
+ return 1;
+ }
+ if (lb_generate_code(gen)) {
+ switch (build_context.build_mode) {
+ case BuildMode_Executable:
+ case BuildMode_DynamicLibrary:
+ i32 result = linker_stage(gen);
+ if (result) {
+ if (build_context.show_timings) {
+ show_timings(checker, &global_timings);
+ }
+ return result;
+ }
+ break;
+ }
+ }
- remove_temp_files(gen);
+ remove_temp_files(gen);
+ }
if (build_context.show_timings) {
show_timings(checker, &global_timings);
diff --git a/src/parser.cpp b/src/parser.cpp
index b756412ff..56d1e2d6c 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1,7 +1,21 @@
#include "parser_pos.cpp"
-// #undef at the bottom of this file
-#define ALLOW_NEWLINE (!build_context.strict_style)
+gb_internal u64 ast_file_vet_flags(AstFile *f) {
+ if (f->vet_flags_set) {
+ return f->vet_flags;
+ }
+ return build_context.vet_flags;
+}
+
+gb_internal bool ast_file_vet_style(AstFile *f) {
+ return (ast_file_vet_flags(f) & VetFlag_Style) != 0;
+}
+
+
+gb_internal bool file_allow_newline(AstFile *f) {
+ bool is_strict = build_context.strict_style || ast_file_vet_style(f);
+ return !is_strict;
+}
gb_internal Token token_end_of_line(AstFile *f, Token tok) {
u8 const *start = f->tokenizer.start + tok.pos.offset;
@@ -1567,29 +1581,29 @@ gb_internal void assign_removal_flag_to_semicolon(AstFile *f) {
Token *prev_token = &f->tokens[f->prev_token_index];
Token *curr_token = &f->tokens[f->curr_token_index];
GB_ASSERT(prev_token->kind == Token_Semicolon);
- if (prev_token->string == ";") {
- bool ok = false;
- if (curr_token->pos.line > prev_token->pos.line) {
+ if (prev_token->string != ";") {
+ return;
+ }
+ bool ok = false;
+ if (curr_token->pos.line > prev_token->pos.line) {
+ ok = true;
+ } else if (curr_token->pos.line == prev_token->pos.line) {
+ switch (curr_token->kind) {
+ case Token_CloseBrace:
+ case Token_CloseParen:
+ case Token_EOF:
ok = true;
- } else if (curr_token->pos.line == prev_token->pos.line) {
- switch (curr_token->kind) {
- case Token_CloseBrace:
- case Token_CloseParen:
- case Token_EOF:
- ok = true;
- break;
- }
- }
-
- if (ok) {
- if (build_context.strict_style) {
- syntax_error(*prev_token, "Found unneeded semicolon");
- } else if (build_context.strict_style_init_only && f->pkg->kind == Package_Init) {
- syntax_error(*prev_token, "Found unneeded semicolon");
- }
- prev_token->flags |= TokenFlag_Remove;
+ break;
}
}
+ if (!ok) {
+ return;
+ }
+
+ if (build_context.strict_style || (ast_file_vet_flags(f) & VetFlag_Semicolon)) {
+ syntax_error(*prev_token, "Found unneeded semicolon");
+ }
+ prev_token->flags |= TokenFlag_Remove;
}
gb_internal void expect_semicolon(AstFile *f) {
@@ -2221,7 +2235,11 @@ gb_internal Ast *parse_operand(AstFile *f, bool lhs) {
return parse_check_directive_for_statement(operand, name, StateFlag_no_type_assert);
} else if (name.string == "relative") {
Ast *tag = ast_basic_directive(f, token, name);
- tag = parse_call_expr(f, tag);
+ if (f->curr_token.kind != Token_OpenParen) {
+ syntax_error(tag, "expected #relative(<integer type>) <type>");
+ } else {
+ tag = parse_call_expr(f, tag);
+ }
Ast *type = parse_type(f);
return ast_relative_type(f, tag, type);
} else if (name.string == "force_inline" ||
@@ -2748,7 +2766,7 @@ gb_internal Ast *parse_call_expr(AstFile *f, Ast *operand) {
isize prev_expr_level = f->expr_level;
bool prev_allow_newline = f->allow_newline;
f->expr_level = 0;
- f->allow_newline = ALLOW_NEWLINE;
+ f->allow_newline = file_allow_newline(f);
open_paren = expect_token(f, Token_OpenParen);
@@ -3147,7 +3165,7 @@ gb_internal Ast *parse_expr(AstFile *f, bool lhs) {
gb_internal Array<Ast *> parse_expr_list(AstFile *f, bool lhs) {
bool allow_newline = f->allow_newline;
- f->allow_newline = ALLOW_NEWLINE;
+ f->allow_newline = file_allow_newline(f);
auto list = array_make<Ast *>(heap_allocator());
for (;;) {
@@ -3472,7 +3490,7 @@ gb_internal Ast *parse_results(AstFile *f, bool *diverging) {
Ast *list = nullptr;
expect_token(f, Token_OpenParen);
list = parse_field_list(f, nullptr, FieldFlag_Results, Token_CloseParen, true, false);
- if (ALLOW_NEWLINE) {
+ if (file_allow_newline(f)) {
skip_possible_newline(f);
}
expect_token_after(f, Token_CloseParen, "parameter list");
@@ -3532,7 +3550,7 @@ gb_internal Ast *parse_proc_type(AstFile *f, Token proc_token) {
expect_token(f, Token_OpenParen);
params = parse_field_list(f, nullptr, FieldFlag_Signature, Token_CloseParen, true, true);
- if (ALLOW_NEWLINE) {
+ if (file_allow_newline(f)) {
skip_possible_newline(f);
}
expect_token_after(f, Token_CloseParen, "parameter list");
@@ -3754,7 +3772,7 @@ gb_internal bool allow_field_separator(AstFile *f) {
}
if (token.kind == Token_Semicolon) {
bool ok = false;
- if (ALLOW_NEWLINE && token_is_newline(token)) {
+ if (file_allow_newline(f) && token_is_newline(token)) {
TokenKind next = peek_token(f).kind;
switch (next) {
case Token_CloseBrace:
@@ -3818,7 +3836,7 @@ gb_internal bool check_procedure_name_list(Array<Ast *> const &names) {
gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_flags, TokenKind follow, bool allow_default_parameters, bool allow_typeid_token) {
bool prev_allow_newline = f->allow_newline;
defer (f->allow_newline = prev_allow_newline);
- f->allow_newline = ALLOW_NEWLINE;
+ f->allow_newline = file_allow_newline(f);
Token start_token = f->curr_token;
@@ -4169,6 +4187,8 @@ gb_internal Ast *parse_when_stmt(AstFile *f) {
syntax_error(f->curr_token, "Expected condition for when statement");
}
+ bool was_in_when_statement = f->in_when_statement;
+ f->in_when_statement = true;
if (allow_token(f, Token_do)) {
body = parse_do_body(f, cond ? ast_token(cond) : token, "then when statement");
} else {
@@ -4195,6 +4215,7 @@ gb_internal Ast *parse_when_stmt(AstFile *f) {
break;
}
}
+ f->in_when_statement = was_in_when_statement;
return ast_when_stmt(f, token, cond, body, else_stmt);
}
@@ -4456,6 +4477,10 @@ gb_internal Ast *parse_import_decl(AstFile *f, ImportDeclKind kind) {
array_add(&f->imports, s);
}
+ if (f->in_when_statement) {
+ syntax_error(import_name, "Cannot use 'import' within a 'when' statement. Prefer using the file suffixes (e.g. foo_windows.odin) or '//+build' tags");
+ }
+
if (kind != ImportDecl_Standard) {
syntax_error(import_name, "'using import' is not allowed, please use the import name explicitly");
}
@@ -4954,7 +4979,6 @@ gb_internal bool init_parser(Parser *p) {
gb_internal void destroy_parser(Parser *p) {
GB_ASSERT(p != nullptr);
- // TODO(bill): Fix memory leak
for (AstPackage *pkg : p->packages) {
for (AstFile *file : pkg->files) {
destroy_ast_file(file);
@@ -4998,7 +5022,6 @@ gb_internal WORKER_TASK_PROC(parser_worker_proc) {
gb_internal void parser_add_file_to_process(Parser *p, AstPackage *pkg, FileInfo fi, TokenPos pos) {
- // TODO(bill): Use a better allocator
ImportedFile f = {pkg, fi, pos, p->file_to_process_count++};
auto wd = gb_alloc_item(permanent_allocator(), ParserWorkerData);
wd->parser = p;
@@ -5528,6 +5551,88 @@ gb_internal bool parse_build_tag(Token token_for_pos, String s) {
return any_correct;
}
+gb_internal String vet_tag_get_token(String s, String *out) {
+ s = string_trim_whitespace(s);
+ isize n = 0;
+ while (n < s.len) {
+ Rune rune = 0;
+ isize width = utf8_decode(&s[n], s.len-n, &rune);
+ if (n == 0 && rune == '!') {
+
+ } else if (!rune_is_letter(rune) && !rune_is_digit(rune) && rune != '-') {
+ isize k = gb_max(gb_max(n, width), 1);
+ *out = substring(s, k, s.len);
+ return substring(s, 0, k);
+ }
+ n += width;
+ }
+ out->len = 0;
+ return s;
+}
+
+
+gb_internal u64 parse_vet_tag(Token token_for_pos, String s) {
+ String const prefix = str_lit("+vet");
+ GB_ASSERT(string_starts_with(s, prefix));
+ s = string_trim_whitespace(substring(s, prefix.len, s.len));
+
+ if (s.len == 0) {
+ return VetFlag_All;
+ }
+
+
+ u64 vet_flags = 0;
+ u64 vet_not_flags = 0;
+
+ while (s.len > 0) {
+ String p = string_trim_whitespace(vet_tag_get_token(s, &s));
+ if (p.len == 0) {
+ break;
+ }
+
+ bool is_notted = false;
+ if (p[0] == '!') {
+ is_notted = true;
+ p = substring(p, 1, p.len);
+ if (p.len == 0) {
+ syntax_error(token_for_pos, "Expected a vet flag name after '!'");
+ return build_context.vet_flags;
+ }
+ }
+
+ u64 flag = get_vet_flag_from_name(p);
+ if (flag != VetFlag_NONE) {
+ if (is_notted) {
+ vet_not_flags |= flag;
+ } else {
+ vet_flags |= flag;
+ }
+ } else {
+ ERROR_BLOCK();
+ syntax_error(token_for_pos, "Invalid vet flag name: %.*s", LIT(p));
+ error_line("\tExpected one of the following\n");
+ error_line("\tunused\n");
+ error_line("\tshadowing\n");
+ error_line("\tusing-stmt\n");
+ error_line("\tusing-param\n");
+ error_line("\textra\n");
+ return build_context.vet_flags;
+ }
+ }
+
+ if (vet_flags == 0 && vet_not_flags == 0) {
+ return build_context.vet_flags;
+ }
+ if (vet_flags == 0 && vet_not_flags != 0) {
+ return build_context.vet_flags &~ vet_not_flags;
+ }
+ if (vet_flags != 0 && vet_not_flags == 0) {
+ return vet_flags;
+ }
+ GB_ASSERT(vet_flags != 0 && vet_not_flags != 0);
+ return vet_flags &~ vet_not_flags;
+}
+
gb_internal String dir_from_path(String path) {
String base_dir = path;
for (isize i = path.len-1; i >= 0; i--) {
@@ -5679,6 +5784,9 @@ gb_internal bool parse_file(Parser *p, AstFile *f) {
if (!parse_build_tag(tok, lc)) {
return false;
}
+ } else if (string_starts_with(lc, str_lit("+vet"))) {
+ f->vet_flags = parse_vet_tag(tok, lc);
+ f->vet_flags_set = true;
} else if (string_starts_with(lc, str_lit("+ignore"))) {
return false;
} else if (string_starts_with(lc, str_lit("+private"))) {
@@ -5920,6 +6028,3 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) {
return ParseFile_None;
}
-
-
-#undef ALLOW_NEWLINE
diff --git a/src/parser.hpp b/src/parser.hpp
index 900fddbab..109068da7 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -104,6 +104,8 @@ struct AstFile {
Token package_token;
String package_name;
+ u64 vet_flags;
+ bool vet_flags_set;
// >= 0: In Expression
// < 0: In Control Clause
@@ -114,6 +116,7 @@ struct AstFile {
bool allow_in_expr; // NOTE(bill): in expression are only allowed in certain cases
bool in_foreign_block;
bool allow_type;
+ bool in_when_statement;
isize total_file_decl_count;
isize delayed_decl_count;
diff --git a/src/tilde.cpp b/src/tilde.cpp
new file mode 100644
index 000000000..0cbc975c4
--- /dev/null
+++ b/src/tilde.cpp
@@ -0,0 +1,813 @@
+#include "tilde.hpp"
+
+
+gb_global Slice<TB_Arena> global_tb_arenas;
+
+gb_internal TB_Arena *cg_arena(void) {
+ return &global_tb_arenas[current_thread_index()];
+}
+
+gb_internal void cg_global_arena_init(void) {
+ global_tb_arenas = slice_make<TB_Arena>(permanent_allocator(), global_thread_pool.threads.count);
+ for_array(i, global_tb_arenas) {
+ tb_arena_create(&global_tb_arenas[i], 2ull<<20);
+ }
+}
+
+// returns TB_TYPE_VOID if not trivially possible
+gb_internal TB_DataType cg_data_type(Type *t) {
+ GB_ASSERT(t != nullptr);
+ t = core_type(t);
+ i64 sz = type_size_of(t);
+ switch (t->kind) {
+ case Type_Basic:
+ switch (t->Basic.kind) {
+ case Basic_bool:
+ case Basic_b8:
+ case Basic_b16:
+ case Basic_b32:
+ case Basic_b64:
+
+ case Basic_i8:
+ case Basic_u8:
+ case Basic_i16:
+ case Basic_u16:
+ case Basic_i32:
+ case Basic_u32:
+ case Basic_i64:
+ case Basic_u64:
+ case Basic_i128:
+ case Basic_u128:
+
+ case Basic_rune:
+
+ case Basic_int:
+ case Basic_uint:
+ case Basic_uintptr:
+ case Basic_typeid:
+ return TB_TYPE_INTN(cast(u16)gb_min(8*sz, 64));
+
+ case Basic_f16: return TB_TYPE_F16;
+ case Basic_f32: return TB_TYPE_F32;
+ case Basic_f64: return TB_TYPE_F64;
+
+ case Basic_rawptr: return TB_TYPE_PTR;
+ case Basic_cstring: return TB_TYPE_PTR;
+
+
+ // Endian Specific Types
+ case Basic_i16le:
+ case Basic_u16le:
+ case Basic_i32le:
+ case Basic_u32le:
+ case Basic_i64le:
+ case Basic_u64le:
+ case Basic_i128le:
+ case Basic_u128le:
+ case Basic_i16be:
+ case Basic_u16be:
+ case Basic_i32be:
+ case Basic_u32be:
+ case Basic_i64be:
+ case Basic_u64be:
+ case Basic_i128be:
+ case Basic_u128be:
+ return TB_TYPE_INTN(cast(u16)gb_min(8*sz, 64));
+
+ case Basic_f16le: return TB_TYPE_F16;
+ case Basic_f32le: return TB_TYPE_F32;
+ case Basic_f64le: return TB_TYPE_F64;
+
+ case Basic_f16be: return TB_TYPE_F16;
+ case Basic_f32be: return TB_TYPE_F32;
+ case Basic_f64be: return TB_TYPE_F64;
+ }
+ break;
+
+ case Type_Pointer:
+ case Type_MultiPointer:
+ case Type_Proc:
+ return TB_TYPE_PTR;
+
+ case Type_BitSet:
+ return cg_data_type(bit_set_to_int(t));
+
+ case Type_RelativePointer:
+ return cg_data_type(t->RelativePointer.base_integer);
+ }
+
+ // unknown
+ return {};
+}
+
+
+gb_internal cgValue cg_value(TB_Global *g, Type *type) {
+ return cg_value((TB_Symbol *)g, type);
+}
+gb_internal cgValue cg_value(TB_External *e, Type *type) {
+ return cg_value((TB_Symbol *)e, type);
+}
+gb_internal cgValue cg_value(TB_Function *f, Type *type) {
+ return cg_value((TB_Symbol *)f, type);
+}
+gb_internal cgValue cg_value(TB_Symbol *s, Type *type) {
+ cgValue v = {};
+ v.kind = cgValue_Symbol;
+ v.type = type;
+ v.symbol = s;
+ return v;
+}
+gb_internal cgValue cg_value(TB_Node *node, Type *type) {
+ cgValue v = {};
+ v.kind = cgValue_Value;
+ v.type = type;
+ v.node = node;
+ return v;
+}
+gb_internal cgValue cg_lvalue_addr(TB_Node *node, Type *type) {
+ GB_ASSERT(node->dt.type == TB_PTR);
+ cgValue v = {};
+ v.kind = cgValue_Addr;
+ v.type = type;
+ v.node = node;
+ return v;
+}
+
+gb_internal cgValue cg_lvalue_addr_to_value(cgValue v) {
+ if (v.kind == cgValue_Value) {
+ GB_ASSERT(is_type_pointer(v.type));
+ GB_ASSERT(v.node->dt.type == TB_PTR);
+ } else {
+ GB_ASSERT(v.kind == cgValue_Addr);
+ GB_ASSERT(v.node->dt.type == TB_PTR);
+ v.kind = cgValue_Value;
+ v.type = alloc_type_pointer(v.type);
+ }
+ return v;
+}
+
+gb_internal cgValue cg_value_multi(cgValueMulti *multi, Type *type) {
+ GB_ASSERT(type->kind == Type_Tuple);
+ GB_ASSERT(multi != nullptr);
+ GB_ASSERT(type->Tuple.variables.count > 1);
+ GB_ASSERT(multi->values.count == type->Tuple.variables.count);
+ cgValue v = {};
+ v.kind = cgValue_Multi;
+ v.type = type;
+ v.multi = multi;
+ return v;
+}
+
+gb_internal cgValue cg_value_multi(Slice<cgValue> const &values, Type *type) {
+ cgValueMulti *multi = gb_alloc_item(permanent_allocator(), cgValueMulti);
+ multi->values = values;
+ return cg_value_multi(multi, type);
+}
+
+
+gb_internal cgValue cg_value_multi2(cgValue const &x, cgValue const &y, Type *type) {
+ GB_ASSERT(type->kind == Type_Tuple);
+ GB_ASSERT(type->Tuple.variables.count == 2);
+ cgValueMulti *multi = gb_alloc_item(permanent_allocator(), cgValueMulti);
+ multi->values = slice_make<cgValue>(permanent_allocator(), 2);
+ multi->values[0] = x;
+ multi->values[1] = y;
+ return cg_value_multi(multi, type);
+}
+
+
+gb_internal cgAddr cg_addr(cgValue const &value) {
+ GB_ASSERT(value.kind != cgValue_Multi);
+ cgAddr addr = {};
+ addr.kind = cgAddr_Default;
+ addr.addr = value;
+ if (addr.addr.kind == cgValue_Addr) {
+ GB_ASSERT(addr.addr.node != nullptr);
+ addr.addr.kind = cgValue_Value;
+ addr.addr.type = alloc_type_pointer(addr.addr.type);
+ }
+ return addr;
+}
+
+gb_internal void cg_set_debug_pos_from_node(cgProcedure *p, Ast *node) {
+ if (node) {
+ TokenPos pos = ast_token(node).pos;
+ TB_FileID *file_id = map_get(&p->module->file_id_map, cast(uintptr)pos.file_id);
+ if (file_id) {
+ tb_inst_set_location(p->func, *file_id, pos.line);
+ }
+ }
+}
+
+gb_internal void cg_add_symbol(cgModule *m, Entity *e, TB_Symbol *symbol) {
+ if (e) {
+ rw_mutex_lock(&m->values_mutex);
+ map_set(&m->symbols, e, symbol);
+ rw_mutex_unlock(&m->values_mutex);
+ }
+}
+
+gb_internal void cg_add_entity(cgModule *m, Entity *e, cgValue const &val) {
+ if (e) {
+ rw_mutex_lock(&m->values_mutex);
+ GB_ASSERT(val.node != nullptr);
+ map_set(&m->values, e, val);
+ rw_mutex_unlock(&m->values_mutex);
+ }
+}
+
+gb_internal void cg_add_member(cgModule *m, String const &name, cgValue const &val) {
+ if (name.len > 0) {
+ rw_mutex_lock(&m->values_mutex);
+ string_map_set(&m->members, name, val);
+ rw_mutex_unlock(&m->values_mutex);
+ }
+}
+
+gb_internal void cg_add_procedure_value(cgModule *m, cgProcedure *p) {
+ rw_mutex_lock(&m->values_mutex);
+ if (p->entity != nullptr) {
+ map_set(&m->procedure_values, p->func, p->entity);
+ if (p->symbol != nullptr) {
+ map_set(&m->symbols, p->entity, p->symbol);
+ }
+ }
+ string_map_set(&m->procedures, p->name, p);
+ rw_mutex_unlock(&m->values_mutex);
+
+}
+
+gb_internal TB_Symbol *cg_find_symbol_from_entity(cgModule *m, Entity *e) {
+ GB_ASSERT(e != nullptr);
+
+ rw_mutex_lock(&m->values_mutex);
+ TB_Symbol **found = map_get(&m->symbols, e);
+ if (found) {
+ rw_mutex_unlock(&m->values_mutex);
+ return *found;
+ }
+
+ String link_name = cg_get_entity_name(m, e);
+ cgProcedure **proc_found = string_map_get(&m->procedures, link_name);
+ if (proc_found) {
+ TB_Symbol *symbol = (*proc_found)->symbol;
+ map_set(&m->symbols, e, symbol);
+ rw_mutex_unlock(&m->values_mutex);
+ return symbol;
+ }
+ rw_mutex_unlock(&m->values_mutex);
+
+ if (e->kind == Entity_Procedure) {
+ debugf("[Tilde] try to generate procedure %.*s as it was not in the minimum_dependency_set", LIT(e->token.string));
+ // IMPORTANT TODO(bill): This is an utter bodge, try and fix this shit
+ cgProcedure *p = cg_procedure_create(m, e);
+ if (p != nullptr) {
+ GB_ASSERT(p->symbol != nullptr);
+ cg_add_procedure_to_queue(p);
+ return p->symbol;
+ }
+ }
+
+
+ GB_PANIC("could not find entity's symbol %.*s", LIT(e->token.string));
+ return nullptr;
+}
+
+
+struct cgGlobalVariable {
+ cgValue var;
+ cgValue init;
+ DeclInfo *decl;
+ bool is_initialized;
+};
+
+// Returns already_has_entry_point
+//
+// Creates a TB_Global for every file-scope variable in the minimum dependency
+// set, registers it with the module, and records whether it still needs
+// runtime initialization (handled later in the startup runtime proc).
+// For `odin test` builds it also detects whether the runtime package already
+// exports an entry-point-like symbol.
+gb_internal bool cg_global_variables_create(cgModule *m, Array<cgGlobalVariable> *global_variables) {
+	isize global_variable_max_count = 0;
+	bool already_has_entry_point = false;
+
+	// First pass: size the output array and look for a pre-existing entry point.
+	for (Entity *e : m->info->entities) {
+		String name = e->token.string;
+
+		if (e->kind == Entity_Variable) {
+			global_variable_max_count++;
+		} else if (e->kind == Entity_Procedure) {
+			if ((e->scope->flags&ScopeFlag_Init) && name == "main") {
+				GB_ASSERT(e == m->info->entry_point);
+			}
+			if (build_context.command_kind == Command_test &&
+			    (e->Procedure.is_export || e->Procedure.link_name.len > 0)) {
+				String link_name = e->Procedure.link_name;
+				if (e->pkg->kind == Package_Runtime) {
+					if (link_name == "main" ||
+					    link_name == "DllMain" ||
+					    link_name == "WinMain" ||
+					    link_name == "wWinMain" ||
+					    link_name == "mainCRTStartup" ||
+					    link_name == "_start") {
+						already_has_entry_point = true;
+					}
+				}
+			}
+		}
+	}
+	*global_variables = array_make<cgGlobalVariable>(permanent_allocator(), 0, global_variable_max_count);
+
+	auto *min_dep_set = &m->info->minimum_dependency_set;
+
+	// Second pass: emit globals in declaration-dependency order.
+	for (DeclInfo *d : m->info->variable_init_order) {
+		Entity *e = d->entity;
+
+		// Only file-scope variables become module globals.
+		if ((e->scope->flags & ScopeFlag_File) == 0) {
+			continue;
+		}
+
+		// Nothing depends upon it, so it does not need to be built.
+		if (!ptr_set_exists(min_dep_set, e)) {
+			continue;
+		}
+
+		DeclInfo *decl = decl_info_of_entity(e);
+		if (decl == nullptr) {
+			continue;
+		}
+		GB_ASSERT(e->kind == Entity_Variable);
+
+		bool is_foreign = e->Variable.is_foreign;
+		bool is_export = e->Variable.is_export;
+
+		String name = cg_get_entity_name(m, e);
+
+		// Foreign and exported variables must be visible outside the object file.
+		TB_Linkage linkage = TB_LINKAGE_PRIVATE;
+
+		if (is_foreign) {
+			linkage = TB_LINKAGE_PUBLIC;
+			// lb_add_foreign_library_path(m, e->Variable.foreign_library);
+			// lb_set_wasm_import_attributes(g.value, e, name);
+		} else if (is_export) {
+			linkage = TB_LINKAGE_PUBLIC;
+		}
+		// lb_set_linkage_from_entity_flags(m, g.value, e->flags);
+
+		TB_DebugType *debug_type = cg_debug_type(m, e->type);
+		TB_Global *global = tb_global_create(m->mod, name.len, cast(char const *)name.text, debug_type, linkage);
+		// NOTE: the value recorded for the entity is a *pointer* to the variable.
+		cgValue g = cg_value(global, alloc_type_pointer(e->type));
+
+		TB_ModuleSection *section = tb_module_get_data(m->mod);
+
+		// Thread-local variables live in the TLS section instead of .data.
+		if (e->Variable.thread_local_model != "") {
+			section = tb_module_get_tls(m->mod);
+		}
+		if (e->Variable.link_section.len > 0) {
+			// TODO(bill): custom module sections
+			// LLVMSetSection(g.value, alloc_cstring(permanent_allocator(), e->Variable.link_section));
+		}
+
+
+		cgGlobalVariable var = {};
+		var.var = g;
+		var.decl = decl;
+
+		if (decl->init_expr != nullptr) {
+			TypeAndValue tav = type_and_value_of_expr(decl->init_expr);
+
+			// Reserve enough initializer regions before writing any constant data.
+			isize max_regions = cg_global_const_calculate_region_count(tav.value, e->type);
+			tb_global_set_storage(m->mod, section, global, type_size_of(e->type), type_align_of(e->type), max_regions);
+
+			if (tav.mode == Addressing_Constant &&
+			    tav.value.kind != ExactValue_Invalid) {
+				cg_global_const_add_region(m, tav.value, e->type, global, 0);
+				var.is_initialized = true;
+			}
+			// An untyped nil initializer is equivalent to zero initialization.
+			if (!var.is_initialized && is_type_untyped_nil(tav.type)) {
+				var.is_initialized = true;
+			}
+		} else {
+			// No initializer expression: zero initialization is sufficient.
+			var.is_initialized = true;
+			// TODO(bill): is this even needed;
+			i64 max_regions = cg_global_const_calculate_region_count_from_basic_type(e->type);
+			tb_global_set_storage(m->mod, section, global, type_size_of(e->type), type_align_of(e->type), max_regions);
+		}
+
+		array_add(global_variables, var);
+
+		cg_add_symbol(m, e, cast(TB_Symbol *)global);
+		cg_add_entity(m, e, g);
+		cg_add_member(m, name, g);
+	}
+
+	cg_setup_type_info_data(m);
+
+	return already_has_entry_point;
+}
+
+// Emits stores for every global whose initializer could not be encoded as
+// constant data; called while building the startup runtime procedure `p`.
+gb_internal void cg_global_variables_initialize(cgProcedure *p, Array<cgGlobalVariable> *global_variables) {
+	for (cgGlobalVariable &var : *global_variables) {
+		// Already baked into the object file as constant/zero data.
+		if (var.is_initialized) {
+			continue;
+		}
+		cgValue src = cg_build_expr(p, var.decl->init_expr);
+		cgValue dst = cg_flatten_value(p, var.var);
+		cg_emit_store(p, dst, src);
+	}
+}
+
+
+// Allocates and initializes the backend module: creates the underlying
+// TB_Module, the entity/symbol/debug-type lookup maps, the serial procedure
+// queue, and a TB_FileID per parsed file (used for debug locations).
+gb_internal cgModule *cg_module_create(Checker *c) {
+	cgModule *m = gb_alloc_item(permanent_allocator(), cgModule);
+
+	m->checker = c;
+	m->info = &c->info;
+
+
+	TB_FeatureSet feature_set = {};
+	bool is_jit = false;
+	// NOTE(review): the target is hard-coded to x86-64 Windows rather than
+	// derived from build_context.metrics — confirm this is a stopgap.
+	m->mod = tb_module_create(TB_ARCH_X86_64, TB_SYSTEM_WINDOWS, &feature_set, is_jit);
+	tb_module_set_tls_index(m->mod, 10, "_tls_index");
+
+	// NOTE(review): m->members, m->procedures, and m->procedure_values are
+	// not map_init'd here (nor destroyed in cg_module_destroy) — verify
+	// zero-initialization is valid for those map types.
+	map_init(&m->values);
+	map_init(&m->symbols);
+	map_init(&m->file_id_map);
+	map_init(&m->debug_type_map);
+	map_init(&m->proc_debug_type_map);
+	map_init(&m->proc_proto_map);
+	map_init(&m->anonymous_proc_lits_map);
+	map_init(&m->equal_procs);
+	map_init(&m->hasher_procs);
+	map_init(&m->map_get_procs);
+	map_init(&m->map_set_procs);
+
+	array_init(&m->single_threaded_procedure_queue, heap_allocator());
+
+
+	// Register a TB file id for every parsed AST file so debug info can
+	// reference source locations.
+	for_array(id, global_files) {
+		if (AstFile *f = global_files[id]) {
+			char const *path = alloc_cstring(permanent_allocator(), f->fullpath);
+			map_set(&m->file_id_map, cast(uintptr)id, tb_file_create(m->mod, path));
+		}
+	}
+
+	return m;
+}
+
+// Tears down what cg_module_create allocated: the lookup maps, the serial
+// procedure queue, and finally the TB_Module itself.
+gb_internal void cg_module_destroy(cgModule *m) {
+	map_destroy(&m->values);
+	map_destroy(&m->symbols);
+	map_destroy(&m->file_id_map);
+	map_destroy(&m->debug_type_map);
+	map_destroy(&m->proc_debug_type_map);
+	map_destroy(&m->proc_proto_map);
+	map_destroy(&m->anonymous_proc_lits_map);
+	map_destroy(&m->equal_procs);
+	map_destroy(&m->hasher_procs);
+	map_destroy(&m->map_get_procs);
+	map_destroy(&m->map_set_procs);
+
+	array_free(&m->single_threaded_procedure_queue);
+
+	tb_module_destroy(m->mod);
+}
+
+// Computes (and caches in e->TypeName.ir_mangled_name) a unique mangled name
+// for a type declared inside a procedure body, shaped
+// "<parent proc name><sep><type name>-<guid>".
+// `p` may be nullptr: the enclosing procedure is then found via the entity's
+// scope chain; if that procedure has no cg_procedure yet, a temporary
+// "_internal<sep>..." name is produced instead.
+gb_internal String cg_set_nested_type_name_ir_mangled_name(Entity *e, cgProcedure *p) {
+	// NOTE(bill, 2020-03-08): A polymorphic procedure may take a nested type declaration
+	// and as a result, the declaration does not have time to determine what it should be
+
+	GB_ASSERT(e != nullptr && e->kind == Entity_TypeName);
+	if (e->TypeName.ir_mangled_name.len != 0) {
+		// Already computed and cached.
+		return e->TypeName.ir_mangled_name;
+	}
+	GB_ASSERT((e->scope->flags & ScopeFlag_File) == 0);
+
+	if (p == nullptr) {
+		// Walk outwards to the nearest enclosing procedure scope.
+		Entity *proc = nullptr;
+		if (e->parent_proc_decl != nullptr) {
+			proc = e->parent_proc_decl->entity;
+		} else {
+			Scope *scope = e->scope;
+			while (scope != nullptr && (scope->flags & ScopeFlag_Proc) == 0) {
+				scope = scope->parent;
+			}
+			GB_ASSERT(scope != nullptr);
+			GB_ASSERT(scope->flags & ScopeFlag_Proc);
+			proc = scope->procedure_entity;
+		}
+		GB_ASSERT(proc->kind == Entity_Procedure);
+		if (proc->cg_procedure != nullptr) {
+			p = proc->cg_procedure;
+		}
+	}
+
+	// NOTE(bill): Generate a new name
+	// parent_proc.name-guid
+	String ts_name = e->token.string;
+
+	if (p != nullptr) {
+		isize name_len = p->name.len + 1 + ts_name.len + 1 + 10 + 1;
+		char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
+		u32 guid = 1+p->module->nested_type_name_guid.fetch_add(1);
+		name_len = gb_snprintf(name_text, name_len, "%.*s" ABI_PKG_NAME_SEPARATOR "%.*s-%u", LIT(p->name), LIT(ts_name), guid);
+
+		// name_len counts the NUL terminator; drop it from the String length.
+		String name = make_string(cast(u8 *)name_text, name_len-1);
+		e->TypeName.ir_mangled_name = name;
+		return name;
+	} else {
+		// NOTE(bill): a nested type be required before its parameter procedure exists. Just give it a temp name for now
+		isize name_len = 9 + 1 + ts_name.len + 1 + 10 + 1;
+		char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
+		static std::atomic<u32> guid;
+		name_len = gb_snprintf(name_text, name_len, "_internal" ABI_PKG_NAME_SEPARATOR "%.*s-%u", LIT(ts_name), 1+guid.fetch_add(1));
+
+		String name = make_string(cast(u8 *)name_text, name_len-1);
+		e->TypeName.ir_mangled_name = name;
+		return name;
+	}
+}
+
+// Produces the package-qualified mangled name "<pkg><sep><name>", appending
+// "-<entity id>" whenever the plain name may not be globally unique
+// (polymorphic entities, non file/package scope, blank idents, or entities
+// marked not-exported).
+gb_internal String cg_mangle_name(cgModule *m, Entity *e) {
+	String name = e->token.string;
+
+	AstPackage *pkg = e->pkg;
+	GB_ASSERT_MSG(pkg != nullptr, "Missing package for '%.*s'", LIT(name));
+	String pkgn = pkg->name;
+	GB_ASSERT(!rune_is_digit(pkgn[0]));
+	if (pkgn == "llvm") {
+		GB_PANIC("llvm. entities are not allowed with the tilde backend");
+	}
+
+	isize max_len = pkgn.len + 1 + name.len + 1;
+	bool require_suffix_id = is_type_polymorphic(e->type, true);
+
+	if ((e->scope->flags & (ScopeFlag_File | ScopeFlag_Pkg)) == 0) {
+		require_suffix_id = true;
+	} else if (is_blank_ident(e->token)) {
+		require_suffix_id = true;
+	// NOTE(review): missing newline before this `if` — it is an independent
+	// statement, not an `else if`, so the logic is correct; reformat advisable.
+	}if (e->flags & EntityFlag_NotExported) {
+		require_suffix_id = true;
+	}
+
+	if (require_suffix_id) {
+		// Room for "-<u64>": at most 20 decimal digits plus the separator.
+		max_len += 21;
+	}
+
+	char *new_name = gb_alloc_array(permanent_allocator(), char, max_len);
+	isize new_name_len = gb_snprintf(
+		new_name, max_len,
+		"%.*s" ABI_PKG_NAME_SEPARATOR "%.*s", LIT(pkgn), LIT(name)
+	);
+	if (require_suffix_id) {
+		// Overwrite the previous NUL terminator with the "-<id>" suffix.
+		char *str = new_name + new_name_len-1;
+		isize len = max_len-new_name_len;
+		isize extra = gb_snprintf(str, len, "-%llu", cast(unsigned long long)e->id);
+		new_name_len += extra-1;
+	}
+
+	// Exclude the trailing NUL from the String length.
+	String mangled_name = make_string((u8 const *)new_name, new_name_len-1);
+	return mangled_name;
+}
+
+// Returns the symbol name to use for an entity, honouring explicit
+// link_name/export settings before falling back to cg_mangle_name.
+// The result is cached on the entity (TypeName.ir_mangled_name or
+// Procedure.link_name) so later lookups are stable.
+gb_internal String cg_get_entity_name(cgModule *m, Entity *e) {
+	if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.ir_mangled_name.len != 0) {
+		// Already computed and cached.
+		return e->TypeName.ir_mangled_name;
+	}
+	GB_ASSERT(e != nullptr);
+
+	if (e->pkg == nullptr) {
+		// No package to qualify with; use the raw token.
+		return e->token.string;
+	}
+
+	if (e->kind == Entity_TypeName && (e->scope->flags & ScopeFlag_File) == 0) {
+		// Type declared inside a procedure body — needs a nested-type name.
+		return cg_set_nested_type_name_ir_mangled_name(e, nullptr);
+	}
+
+	String name = {};
+
+	bool no_name_mangle = false;
+
+	if (e->kind == Entity_Variable) {
+		bool is_foreign = e->Variable.is_foreign;
+		bool is_export = e->Variable.is_export;
+		no_name_mangle = e->Variable.link_name.len > 0 || is_foreign || is_export;
+		if (e->Variable.link_name.len > 0) {
+			// Explicit link name wins outright.
+			return e->Variable.link_name;
+		}
+	} else if (e->kind == Entity_Procedure && e->Procedure.link_name.len > 0) {
+		return e->Procedure.link_name;
+	} else if (e->kind == Entity_Procedure && e->Procedure.is_export) {
+		no_name_mangle = true;
+	}
+
+	if (!no_name_mangle) {
+		name = cg_mangle_name(m, e);
+	}
+	if (name.len == 0) {
+		name = e->token.string;
+	}
+
+	// Cache the result on the entity for subsequent calls.
+	if (e->kind == Entity_TypeName) {
+		e->TypeName.ir_mangled_name = name;
+	} else if (e->kind == Entity_Procedure) {
+		e->Procedure.link_name = name;
+	}
+
+	return name;
+}
+
+#include "tilde_const.cpp"
+#include "tilde_debug.cpp"
+#include "tilde_expr.cpp"
+#include "tilde_builtin.cpp"
+#include "tilde_type_info.cpp"
+#include "tilde_proc.cpp"
+#include "tilde_stmt.cpp"
+
+
+// Computes the output object-file path for the module:
+// "<output dir>/<output name><ext>", where the extension depends on the
+// build mode (assembly), wasm targets, and the target OS/ABI.
+gb_internal String cg_filepath_obj_for_module(cgModule *m) {
+	String path = concatenate3_strings(permanent_allocator(),
+		build_context.build_paths[BuildPath_Output].basename,
+		STR_LIT("/"),
+		build_context.build_paths[BuildPath_Output].name
+	);
+
+	// Per-module suffixes are disabled while the backend emits a single module.
+	// if (m->file) {
+	// 	char buf[32] = {};
+	// 	isize n = gb_snprintf(buf, gb_size_of(buf), "-%u", m->file->id);
+	// 	String suffix = make_string((u8 *)buf, n-1);
+	// 	path = concatenate_strings(permanent_allocator(), path, suffix);
+	// } else if (m->pkg) {
+	// 	path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
+	// }
+
+	String ext = {};
+
+	if (build_context.build_mode == BuildMode_Assembly) {
+		ext = STR_LIT(".S");
+	} else {
+		if (is_arch_wasm()) {
+			ext = STR_LIT(".wasm.o");
+		} else {
+			switch (build_context.metrics.os) {
+			case TargetOs_windows:
+				ext = STR_LIT(".obj");
+				break;
+			// NOTE: `default` is deliberately grouped with the Unix-like
+			// targets — any unlisted OS gets a plain ".o".
+			default:
+			case TargetOs_darwin:
+			case TargetOs_linux:
+			case TargetOs_essence:
+				ext = STR_LIT(".o");
+				break;
+
+			case TargetOs_freestanding:
+				switch (build_context.metrics.abi) {
+				default:
+				case TargetABI_Default:
+				case TargetABI_SysV:
+					ext = STR_LIT(".o");
+					break;
+				case TargetABI_Win64:
+					ext = STR_LIT(".obj");
+					break;
+				}
+				break;
+			}
+		}
+	}
+
+	return concatenate_strings(permanent_allocator(), path, ext);
+}
+
+
+// Thread-pool task wrapper: generates the code for one queued procedure.
+gb_internal WORKER_TASK_PROC(cg_procedure_generate_worker_proc) {
+	cgProcedure *p = cast(cgProcedure *)data;
+	cg_procedure_generate(p);
+	return 0;
+}
+
+// Schedules a procedure for code generation: on the thread pool when
+// threading is enabled, otherwise appended to the module's serial queue
+// (drained in cg_generate_code). A nullptr procedure is ignored.
+gb_internal void cg_add_procedure_to_queue(cgProcedure *p) {
+	if (p == nullptr) {
+		return;
+	}
+	cgModule *m = p->module;
+	if (m->do_threading) {
+		thread_pool_add_task(cg_procedure_generate_worker_proc, p);
+	} else {
+		array_add(&m->single_threaded_procedure_queue, p);
+	}
+}
+
+// Top-level driver for the Tilde backend: builds the module, emits globals
+// and procedures, finalizes the startup/cleanup runtime procs, then exports
+// a single object file and records its path for the linker.
+gb_internal bool cg_generate_code(Checker *c, LinkerData *linker_data) {
+	TIME_SECTION("Tilde Module Initializtion");
+
+	CheckerInfo *info = &c->info;
+
+	linker_data_init(linker_data, info, c->parser->init_fullpath);
+
+	cg_global_arena_init();
+
+	cgModule *m = cg_module_create(c);
+	defer (cg_module_destroy(m));
+
+	// NOTE: procedure generation currently runs single-threaded.
+	m->do_threading = false;
+
+	TIME_SECTION("Tilde Global Variables");
+
+	Array<cgGlobalVariable> global_variables = {};
+	bool already_has_entry_point = cg_global_variables_create(m, &global_variables);
+	gb_unused(already_has_entry_point);
+
+	// Create empty shells for the runtime startup/cleanup procedures; their
+	// bodies are filled in after all user procedures have been generated.
+	if (true) {
+		Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_Odin);
+		cgProcedure *p = cg_procedure_create_dummy(m, str_lit(CG_STARTUP_RUNTIME_PROC_NAME), proc_type);
+		p->is_startup = true;
+		cg_startup_runtime_proc = p;
+	}
+
+	if (true) {
+		Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_Odin);
+		cgProcedure *p = cg_procedure_create_dummy(m, str_lit(CG_CLEANUP_RUNTIME_PROC_NAME), proc_type);
+		p->is_startup = true;
+		cg_cleanup_runtime_proc = p;
+	}
+
+	auto *min_dep_set = &info->minimum_dependency_set;
+
+	Array<cgProcedure *> procedures_to_generate = {};
+	array_init(&procedures_to_generate, heap_allocator());
+	defer (array_free(&procedures_to_generate));
+
+	// Collect every file-scope procedure that something actually depends on.
+	for (Entity *e : info->entities) {
+		String name = e->token.string;
+		Scope *scope = e->scope;
+
+		if ((scope->flags & ScopeFlag_File) == 0) {
+			continue;
+		}
+
+		Scope *package_scope = scope->parent;
+		GB_ASSERT(package_scope->flags & ScopeFlag_Pkg);
+
+		if (e->kind != Entity_Procedure) {
+			continue;
+		}
+
+		if (!ptr_set_exists(min_dep_set, e)) {
+			// NOTE(bill): Nothing depends upon it so doesn't need to be built
+			continue;
+		}
+		if (cgProcedure *p = cg_procedure_create(m, e)) {
+			array_add(&procedures_to_generate, p);
+		}
+	}
+	for (cgProcedure *p : procedures_to_generate) {
+		cg_add_procedure_to_queue(p);
+	}
+
+	// Drain the serial queue by index (not iterator) because it may grow
+	// while procedures are being generated.
+	if (!m->do_threading) {
+		for (isize i = 0; i < m->single_threaded_procedure_queue.count; i++) {
+			cgProcedure *p = m->single_threaded_procedure_queue[i];
+			cg_procedure_generate(p);
+		}
+	}
+
+	thread_pool_wait();
+
+	// Emit the startup proc body: run the deferred global initializers.
+	{
+		cgProcedure *p = cg_startup_runtime_proc;
+		cg_procedure_begin(p);
+		cg_global_variables_initialize(p, &global_variables);
+		tb_inst_ret(p->func, 0, nullptr);
+		cg_procedure_end(p);
+	}
+	// The cleanup proc is currently an empty stub.
+	{
+		cgProcedure *p = cg_cleanup_runtime_proc;
+		cg_procedure_begin(p);
+		tb_inst_ret(p->func, 0, nullptr);
+		cg_procedure_end(p);
+	}
+
+
+
+	// Pick the debug info format from the target OS (CodeView on Windows,
+	// DWARF on the listed Unix-likes); none unless building with -debug.
+	TB_DebugFormat debug_format = TB_DEBUGFMT_NONE;
+	if (build_context.ODIN_DEBUG) {
+		switch (build_context.metrics.os) {
+		case TargetOs_windows:
+			debug_format = TB_DEBUGFMT_CODEVIEW;
+			break;
+		case TargetOs_darwin:
+		case TargetOs_linux:
+		case TargetOs_essence:
+		case TargetOs_freebsd:
+		case TargetOs_openbsd:
+			debug_format = TB_DEBUGFMT_DWARF;
+			break;
+		}
+	}
+	TB_ExportBuffer export_buffer = tb_module_object_export(m->mod, debug_format);
+	defer (tb_export_buffer_free(export_buffer));
+
+	// Write the single object file and hand its path to the linker stage.
+	String filepath_obj = cg_filepath_obj_for_module(m);
+	array_add(&linker_data->output_object_paths, filepath_obj);
+	GB_ASSERT(tb_export_buffer_to_file(export_buffer, cast(char const *)filepath_obj.text));
+
+	return true;
+}
+
+#undef ABI_PKG_NAME_SEPARATOR
diff --git a/src/tilde.hpp b/src/tilde.hpp
new file mode 100644
index 000000000..5944c9ef7
--- /dev/null
+++ b/src/tilde.hpp
@@ -0,0 +1,373 @@
+#if defined(GB_SYSTEM_WINDOWS)
+	// MSVC: silence warnings about zero-sized arrays (C4200) and nameless
+	// structs/unions (C4201) coming from the vendored tb.h header.
+	#pragma warning(push)
+	#pragma warning(disable: 4200)
+	#pragma warning(disable: 4201)
+	#define restrict gb_restrict
+#endif
+
+#include "tilde/tb.h"
+#include "tilde/tb_arena.h"
+
+// Extra TB data-type constructors not provided by tb.h: f16/i128 are
+// modelled as raw N-bit integers; int/intptr follow the target's sizes.
+#define TB_TYPE_F16 TB_DataType{ { TB_INT, 0, 16 } }
+#define TB_TYPE_I128 TB_DataType{ { TB_INT, 0, 128 } }
+#define TB_TYPE_INT TB_TYPE_INTN(cast(u16)(8*build_context.int_size))
+#define TB_TYPE_INTPTR TB_TYPE_INTN(cast(u16)(8*build_context.ptr_size))
+
+#if defined(GB_SYSTEM_WINDOWS)
+	#pragma warning(pop)
+#endif
+
+// Well-known symbol names emitted by this backend.
+#define CG_STARTUP_RUNTIME_PROC_NAME "__$startup_runtime"
+#define CG_CLEANUP_RUNTIME_PROC_NAME "__$cleanup_runtime"
+#define CG_STARTUP_TYPE_INFO_PROC_NAME "__$startup_type_info"
+#define CG_TYPE_INFO_DATA_NAME "__$type_info_data"
+#define CG_TYPE_INFO_TYPES_NAME "__$type_info_types_data"
+#define CG_TYPE_INFO_NAMES_NAME "__$type_info_names_data"
+#define CG_TYPE_INFO_OFFSETS_NAME "__$type_info_offsets_data"
+#define CG_TYPE_INFO_USINGS_NAME "__$type_info_usings_data"
+#define CG_TYPE_INFO_TAGS_NAME "__$type_info_tags_data"
+#define CG_TYPE_INFO_ENUM_VALUES_NAME "__$type_info_enum_values_data"
+
+struct cgModule;
+
+
+// How a cgValue is represented.
+enum cgValueKind : u32 {
+	cgValue_Value, // rvalue
+	cgValue_Addr, // lvalue
+	cgValue_Symbol, // global
+	cgValue_Multi, // multiple values
+};
+
+struct cgValueMulti;
+
+// A typed backend value; which union member is active follows `kind`
+// (Symbol -> symbol, Multi -> multi, otherwise node).
+struct cgValue {
+	cgValueKind kind;
+	Type * type;
+	union {
+		// NOTE: any value in this union must be a pointer
+		TB_Symbol * symbol;
+		TB_Node * node;
+		cgValueMulti *multi;
+	};
+};
+
+// Out-of-line storage for cgValue_Multi (multiple-value results).
+struct cgValueMulti {
+	Slice<cgValue> values;
+};
+
+
+// Addressing strategies for lvalues that need special load/store lowering.
+enum cgAddrKind {
+	cgAddr_Default,
+	cgAddr_Map,
+	cgAddr_Context,
+	cgAddr_SoaVariable,
+
+	cgAddr_RelativePointer,
+	cgAddr_RelativeSlice,
+
+	cgAddr_Swizzle,
+	cgAddr_SwizzleLarge,
+};
+
+// An addressable location plus the per-kind data required to read/write it;
+// the union member in use is selected by `kind`.
+struct cgAddr {
+	cgAddrKind kind;
+	cgValue addr;
+	union {
+		struct {
+			cgValue key;
+			Type *type;
+			Type *result;
+		} map;
+		struct {
+			Selection sel;
+		} ctx;
+		struct {
+			cgValue index;
+			Ast *index_expr;
+		} soa;
+		struct {
+			cgValue index;
+			Ast *node;
+		} index_set;
+		struct {
+			bool deref;
+		} relative;
+		struct {
+			Type *type;
+			u8 count; // 2, 3, or 4 components
+			u8 indices[4];
+		} swizzle;
+		struct {
+			Type *type;
+			Slice<i32> indices;
+		} swizzle_large;
+	};
+};
+
+
+// Linked stack of branch targets used to resolve break/continue/fallthrough.
+struct cgTargetList {
+	cgTargetList *prev;
+	bool is_block;
+	// control regions
+	TB_Node * break_;
+	TB_Node * continue_;
+	TB_Node * fallthrough_;
+};
+
+// Break/continue control regions for a labelled statement.
+struct cgBranchRegions {
+	Ast * label;
+	TB_Node *break_;
+	TB_Node *continue_;
+};
+
+// How a scope is being exited (affects which deferred statements run).
+enum cgDeferExitKind {
+	cgDeferExit_Default,
+	cgDeferExit_Return,
+	cgDeferExit_Branch,
+};
+
+enum cgDeferKind {
+	cgDefer_Node,
+	cgDefer_Proc,
+};
+
+// One pending `defer`: either an AST statement to emit on scope exit, or a
+// procedure value together with its already-captured arguments.
+struct cgDefer {
+	cgDeferKind kind;
+	isize scope_index;
+	isize context_stack_count;
+	TB_Node * control_region;
+	union {
+		Ast *stmt;
+		struct {
+			cgValue deferred;
+			Slice<cgValue> result_as_args;
+		} proc;
+	};
+};
+
+
+// A `context` value together with the scope depth at which it was pushed.
+struct cgContextData {
+	cgAddr ctx;
+	isize scope_index;
+	isize uses;
+};
+
+// Associates a control region with the scope depth it was created at.
+struct cgControlRegion {
+	TB_Node *control_region;
+	isize scope_index;
+};
+
+// Per-procedure code generation state for the Tilde backend.
+struct cgProcedure {
+	u32 flags;
+	u16 state_flags;
+
+	cgProcedure *parent;
+	Array<cgProcedure *> children;
+
+	// Underlying TB function, its prototype, and its symbol handle.
+	TB_Function *func;
+	TB_FunctionPrototype *proto;
+	TB_Symbol *symbol;
+
+	Entity * entity;
+	cgModule *module;
+	String name;
+	Type * type;
+	Ast * type_expr;
+	Ast * body;
+	u64 tags;
+	ProcInlining inlining;
+	bool is_foreign;
+	bool is_export;
+	bool is_entry_point;
+	bool is_startup;
+
+	TB_DebugType *debug_type;
+
+	cgValue value;
+
+	Ast *curr_stmt;
+
+	// Branch targets and scope-tracking stacks used while walking the body.
+	cgTargetList * target_list;
+	Array<cgDefer> defer_stack;
+	Array<Scope *> scope_stack;
+	Array<cgContextData> context_stack;
+
+	Array<cgControlRegion> control_regions;
+	Array<cgBranchRegions> branch_regions;
+
+	Scope *curr_scope;
+	i32 scope_index;
+	bool in_multi_assignment;
+	isize split_returns_index;
+	bool return_by_ptr;
+
+	// Entity -> address mapping for this procedure's local variables.
+	PtrMap<Entity *, cgAddr> variable_map;
+};
+
+
+// Whole-program state for the Tilde backend. Each mutex guards the map(s)
+// declared directly after it when do_threading is enabled.
+struct cgModule {
+	TB_Module * mod;
+	Checker * checker;
+	CheckerInfo *info;
+	LinkerData * linker_data;
+
+	bool do_threading;
+	Array<cgProcedure *> single_threaded_procedure_queue;
+
+	RwMutex values_mutex;
+	PtrMap<Entity *, cgValue> values;
+	PtrMap<Entity *, TB_Symbol *> symbols;
+	StringMap<cgValue> members;
+	StringMap<cgProcedure *> procedures;
+	PtrMap<TB_Function *, Entity *> procedure_values;
+
+	RecursiveMutex debug_type_mutex;
+	PtrMap<Type *, TB_DebugType *> debug_type_map;
+	PtrMap<Type *, TB_DebugType *> proc_debug_type_map; // not pointer to
+
+	RecursiveMutex proc_proto_mutex;
+	PtrMap<Type *, TB_FunctionPrototype *> proc_proto_map;
+
+	BlockingMutex anonymous_proc_lits_mutex;
+	PtrMap<Ast *, cgProcedure *> anonymous_proc_lits_map;
+
+	// Lazily generated equality/hash/map-access helper procedures, keyed by type.
+	RecursiveMutex generated_procs_mutex;
+	PtrMap<Type *, cgProcedure *> equal_procs;
+	PtrMap<Type *, cgProcedure *> hasher_procs;
+	PtrMap<Type *, cgProcedure *> map_get_procs;
+	PtrMap<Type *, cgProcedure *> map_set_procs;
+
+
+	// NOTE(bill): no need to protect this with a mutex
+	PtrMap<uintptr, TB_FileID> file_id_map; // Key: AstFile.id (i32 cast to uintptr)
+
+	// Monotonic counters used to generate unique names.
+	std::atomic<u32> nested_type_name_guid;
+	std::atomic<u32> const_nil_guid;
+};
+
+#ifndef ABI_PKG_NAME_SEPARATOR
+#define ABI_PKG_NAME_SEPARATOR "@"
+#endif
+
+// Handle to one of the global type-info side tables: the TB global backing
+// it, the array type it was declared with, its element type, and a running
+// index into it.
+struct GlobalTypeInfoData {
+	TB_Global *global;
+	Type * array_type;
+	Type * elem_type;
+	isize index;
+};
+
+// Backing globals for the runtime type-info tables (see CG_TYPE_INFO_* names).
+gb_global Entity *cg_global_type_info_data_entity = {};
+gb_global GlobalTypeInfoData cg_global_type_info_member_types = {};
+gb_global GlobalTypeInfoData cg_global_type_info_member_names = {};
+gb_global GlobalTypeInfoData cg_global_type_info_member_offsets = {};
+gb_global GlobalTypeInfoData cg_global_type_info_member_usings = {};
+gb_global GlobalTypeInfoData cg_global_type_info_member_tags = {};
+gb_global GlobalTypeInfoData cg_global_type_info_member_enum_values = {};
+
+// Shells created in cg_generate_code; their bodies are filled in at the end.
+gb_global cgProcedure *cg_startup_runtime_proc = nullptr;
+gb_global cgProcedure *cg_cleanup_runtime_proc = nullptr;
+
+
+
+gb_internal TB_Arena *cg_arena(void);
+
+gb_internal cgProcedure *cg_procedure_create(cgModule *m, Entity *entity, bool ignore_body=false);
+gb_internal void cg_add_procedure_to_queue(cgProcedure *p);
+gb_internal void cg_setup_type_info_data(cgModule *m);
+gb_internal cgProcedure *cg_procedure_generate_anonymous(cgModule *m, Ast *expr, cgProcedure *parent);
+
+gb_internal isize cg_global_const_calculate_region_count(ExactValue const &value, Type *type);
+gb_internal i64 cg_global_const_calculate_region_count_from_basic_type(Type *type);
+gb_internal bool cg_global_const_add_region(cgModule *m, ExactValue const &value, Type *type, TB_Global *global, i64 offset);
+
+gb_internal String cg_get_entity_name(cgModule *m, Entity *e);
+
+gb_internal cgValue cg_value(TB_Global * g, Type *type);
+gb_internal cgValue cg_value(TB_External *e, Type *type);
+gb_internal cgValue cg_value(TB_Function *f, Type *type);
+gb_internal cgValue cg_value(TB_Symbol * s, Type *type);
+gb_internal cgValue cg_value(TB_Node * node, Type *type);
+
+gb_internal cgAddr cg_addr(cgValue const &value);
+
+gb_internal u64 cg_typeid_as_u64(cgModule *m, Type *type);
+gb_internal cgValue cg_type_info(cgProcedure *p, Type *type);
+gb_internal isize cg_type_info_index(CheckerInfo *info, Type *type, bool err_on_not_found=true);
+
+gb_internal cgValue cg_const_value(cgProcedure *p, Type *type, ExactValue const &value);
+gb_internal cgValue cg_const_nil(cgProcedure *p, Type *type);
+
+gb_internal cgValue cg_flatten_value(cgProcedure *p, cgValue value);
+
+gb_internal void cg_build_stmt(cgProcedure *p, Ast *stmt);
+gb_internal void cg_build_stmt_list(cgProcedure *p, Slice<Ast *> const &stmts);
+gb_internal void cg_build_when_stmt(cgProcedure *p, AstWhenStmt *ws);
+
+
+gb_internal cgValue cg_build_expr(cgProcedure *p, Ast *expr);
+gb_internal cgAddr cg_build_addr(cgProcedure *p, Ast *expr);
+gb_internal cgValue cg_build_addr_ptr(cgProcedure *p, Ast *expr);
+gb_internal cgValue cg_build_cond(cgProcedure *p, Ast *cond, TB_Node *true_block, TB_Node *false_block);
+
+gb_internal Type * cg_addr_type(cgAddr const &addr);
+gb_internal cgValue cg_addr_load(cgProcedure *p, cgAddr addr);
+gb_internal void cg_addr_store(cgProcedure *p, cgAddr addr, cgValue value);
+gb_internal cgValue cg_addr_get_ptr(cgProcedure *p, cgAddr const &addr);
+
+gb_internal cgValue cg_emit_load(cgProcedure *p, cgValue const &ptr, bool is_volatile=false);
+gb_internal void cg_emit_store(cgProcedure *p, cgValue dst, cgValue src, bool is_volatile=false);
+
+gb_internal cgAddr cg_add_local (cgProcedure *p, Type *type, Entity *e, bool zero_init);
+gb_internal cgAddr cg_add_global(cgProcedure *p, Type *type, Entity *e);
+gb_internal cgValue cg_address_from_load_or_generate_local(cgProcedure *p, cgValue value);
+gb_internal cgValue cg_copy_value_to_ptr(cgProcedure *p, cgValue value, Type *original_type, isize min_alignment);
+
+gb_internal cgValue cg_build_call_expr(cgProcedure *p, Ast *expr);
+
+gb_internal void cg_build_return_stmt(cgProcedure *p, Slice<Ast *> const &return_results);
+gb_internal void cg_build_return_stmt_internal(cgProcedure *p, Slice<cgValue> const &results);
+gb_internal void cg_build_return_stmt_internal_single(cgProcedure *p, cgValue result);
+gb_internal void cg_build_range_stmt(cgProcedure *p, Ast *node);
+
+gb_internal cgValue cg_find_value_from_entity(cgModule *m, Entity *e);
+gb_internal cgValue cg_find_procedure_value_from_entity(cgModule *m, Entity *e);
+
+gb_internal TB_DebugType *cg_debug_type(cgModule *m, Type *type);
+
+gb_internal String cg_get_entity_name(cgModule *m, Entity *e);
+
+gb_internal cgValue cg_typeid(cgProcedure *m, Type *t);
+
+gb_internal cgValue cg_emit_ptr_offset(cgProcedure *p, cgValue ptr, cgValue index);
+gb_internal cgValue cg_emit_array_ep(cgProcedure *p, cgValue s, cgValue index);
+gb_internal cgValue cg_emit_array_epi(cgProcedure *p, cgValue s, i64 index);
+gb_internal cgValue cg_emit_struct_ep(cgProcedure *p, cgValue s, i64 index);
+gb_internal cgValue cg_emit_deep_field_gep(cgProcedure *p, cgValue e, Selection const &sel);
+gb_internal cgValue cg_emit_struct_ev(cgProcedure *p, cgValue s, i64 index);
+
+gb_internal cgValue cg_emit_conv(cgProcedure *p, cgValue value, Type *t);
+gb_internal cgValue cg_emit_comp_against_nil(cgProcedure *p, TokenKind op_kind, cgValue x);
+gb_internal cgValue cg_emit_comp(cgProcedure *p, TokenKind op_kind, cgValue left, cgValue right);
+gb_internal cgValue cg_emit_arith(cgProcedure *p, TokenKind op, cgValue lhs, cgValue rhs, Type *type);
+gb_internal cgValue cg_emit_unary_arith(cgProcedure *p, TokenKind op, cgValue x, Type *type);
+gb_internal void cg_emit_increment(cgProcedure *p, cgValue addr);
+
+gb_internal cgProcedure *cg_equal_proc_for_type (cgModule *m, Type *type);
+gb_internal cgProcedure *cg_hasher_proc_for_type(cgModule *m, Type *type);
+gb_internal cgValue cg_hasher_proc_value_for_type(cgProcedure *p, Type *type);
+gb_internal cgValue cg_equal_proc_value_for_type(cgProcedure *p, Type *type);
+
+gb_internal cgValue cg_emit_call(cgProcedure * p, cgValue value, Slice<cgValue> const &args);
+gb_internal cgValue cg_emit_runtime_call(cgProcedure *p, char const *name, Slice<cgValue> const &args);
+
+gb_internal bool cg_emit_goto(cgProcedure *p, TB_Node *control_region);
+
+gb_internal TB_Node *cg_control_region(cgProcedure *p, char const *name);
+
+gb_internal isize cg_append_tuple_values(cgProcedure *p, Array<cgValue> *dst_values, cgValue src_value);
+
+gb_internal cgValue cg_handle_param_value(cgProcedure *p, Type *parameter_type, ParameterValue const &param_value, TokenPos const &pos);
+
+gb_internal cgValue cg_builtin_len(cgProcedure *p, cgValue value);
+gb_internal cgValue cg_builtin_raw_data(cgProcedure *p, cgValue const &x);
+
diff --git a/src/tilde/tb.h b/src/tilde/tb.h
new file mode 100644
index 000000000..b20b98b35
--- /dev/null
+++ b/src/tilde/tb.h
@@ -0,0 +1,1101 @@
+#ifndef TB_CORE_H
+#define TB_CORE_H
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+// https://semver.org/
+#define TB_VERSION_MAJOR 0
+#define TB_VERSION_MINOR 2
+#define TB_VERSION_PATCH 0
+
+#ifndef TB_API
+# ifdef __cplusplus
+# define TB_EXTERN extern "C"
+# else
+# define TB_EXTERN
+# endif
+# ifdef TB_DLL
+# ifdef TB_IMPORT_DLL
+# define TB_API TB_EXTERN __declspec(dllimport)
+# else
+# define TB_API TB_EXTERN __declspec(dllexport)
+# endif
+# else
+# define TB_API TB_EXTERN
+# endif
+#endif
+
+// These are flags
+typedef enum TB_ArithmeticBehavior {
+ TB_ARITHMATIC_NONE = 0,
+ TB_ARITHMATIC_NSW = 1,
+ TB_ARITHMATIC_NUW = 2,
+} TB_ArithmeticBehavior;
+
+typedef enum TB_DebugFormat {
+ TB_DEBUGFMT_NONE,
+
+ TB_DEBUGFMT_DWARF,
+ TB_DEBUGFMT_CODEVIEW,
+
+ TB_DEBUGFMT_COLINPILLED
+} TB_DebugFormat;
+
+typedef enum TB_Arch {
+ TB_ARCH_UNKNOWN,
+
+ TB_ARCH_X86_64,
+ TB_ARCH_AARCH64, // unsupported but planned
+ TB_ARCH_WASM32,
+} TB_Arch;
+
+typedef enum TB_System {
+ TB_SYSTEM_WINDOWS,
+ TB_SYSTEM_LINUX,
+ TB_SYSTEM_MACOS,
+ TB_SYSTEM_ANDROID, // Not supported yet
+ TB_SYSTEM_WEB,
+
+ TB_SYSTEM_MAX,
+} TB_System;
+
+typedef enum TB_WindowsSubsystem {
+ TB_WIN_SUBSYSTEM_UNKNOWN,
+
+ TB_WIN_SUBSYSTEM_WINDOWS,
+ TB_WIN_SUBSYSTEM_CONSOLE,
+ TB_WIN_SUBSYSTEM_EFI_APP,
+} TB_WindowsSubsystem;
+
+typedef enum TB_ABI {
+ // Used on 64bit Windows platforms
+ TB_ABI_WIN64,
+
+ // Used on Mac, BSD and Linux platforms
+ TB_ABI_SYSTEMV,
+} TB_ABI;
+
+typedef enum TB_OutputFlavor {
+ TB_FLAVOR_OBJECT, // .o .obj
+ TB_FLAVOR_SHARED, // .so .dll
+ TB_FLAVOR_STATIC, // .a .lib
+ TB_FLAVOR_EXECUTABLE, // .exe
+} TB_OutputFlavor;
+
+typedef enum TB_CallingConv {
+ TB_CDECL,
+ TB_STDCALL
+} TB_CallingConv;
+
+typedef enum TB_FeatureSet_X64 {
+ TB_FEATURE_X64_SSE3 = (1u << 0u),
+ TB_FEATURE_X64_SSE41 = (1u << 1u),
+ TB_FEATURE_X64_SSE42 = (1u << 2u),
+
+ TB_FEATURE_X64_POPCNT = (1u << 3u),
+ TB_FEATURE_X64_LZCNT = (1u << 4u),
+
+ TB_FEATURE_X64_CLMUL = (1u << 5u),
+ TB_FEATURE_X64_F16C = (1u << 6u),
+
+ TB_FEATURE_X64_BMI1 = (1u << 7u),
+ TB_FEATURE_X64_BMI2 = (1u << 8u),
+
+ TB_FEATURE_X64_AVX = (1u << 9u),
+ TB_FEATURE_X64_AVX2 = (1u << 10u),
+} TB_FeatureSet_X64;
+
+typedef struct TB_FeatureSet {
+ TB_FeatureSet_X64 x64;
+} TB_FeatureSet;
+
+typedef enum TB_BranchHint {
+ TB_BRANCH_HINT_NONE,
+ TB_BRANCH_HINT_LIKELY,
+ TB_BRANCH_HINT_UNLIKELY
+} TB_BranchHint;
+
+typedef enum TB_Linkage {
+ TB_LINKAGE_PUBLIC,
+ TB_LINKAGE_PRIVATE
+} TB_Linkage;
+
+typedef enum {
+ TB_COMDAT_NONE,
+
+ TB_COMDAT_MATCH_ANY,
+} TB_ComdatType;
+
+typedef enum TB_MemoryOrder {
+ TB_MEM_ORDER_RELAXED,
+ TB_MEM_ORDER_CONSUME,
+ TB_MEM_ORDER_ACQUIRE,
+ TB_MEM_ORDER_RELEASE,
+ TB_MEM_ORDER_ACQ_REL,
+ TB_MEM_ORDER_SEQ_CST,
+} TB_MemoryOrder;
+
+typedef enum TB_ISelMode {
+ // FastISel
+ TB_ISEL_FAST,
+ TB_ISEL_COMPLEX
+} TB_ISelMode;
+
+typedef enum TB_DataTypeEnum {
+ // Integers, note void is an i0 and bool is an i1
+ // i(0-2047)
+ TB_INT,
+ // Floating point numbers
+ // f{32,64}
+ TB_FLOAT,
+ // Pointers
+ // ptr(0-2047)
+ TB_PTR,
+ // Tuples, these cannot be used in memory ops, just accessed via projections
+ TB_TUPLE,
+ // represents control flow as a kind of data
+ TB_CONTROL,
+} TB_DataTypeEnum;
+
+typedef enum TB_FloatFormat {
+	// IEEE 754 floats
+	TB_FLT_32, TB_FLT_64
+} TB_FloatFormat;
+
+// 32-bit packed type descriptor; `raw` aliases the struct for whole-value
+// comparison/copying.
+typedef union TB_DataType {
+	struct {
+		uint8_t type;
+		// Only integers and floats can be wide.
+		uint8_t width;
+		// for integers it's the bitwidth
+		uint16_t data;
+	};
+	uint32_t raw;
+} TB_DataType;
+
+// classify data types
+#define TB_IS_VOID_TYPE(x) ((x).type == TB_INT && (x).data == 0)
+#define TB_IS_BOOL_TYPE(x) ((x).type == TB_INT && (x).data == 1)
+#define TB_IS_INTEGER_TYPE(x) ((x).type == TB_INT)
+#define TB_IS_FLOAT_TYPE(x) ((x).type == TB_FLOAT)
+#define TB_IS_POINTER_TYPE(x) ((x).type == TB_PTR)
+
+// accessors
+#define TB_GET_INT_BITWIDTH(x) ((x).data)
+#define TB_GET_FLOAT_FORMAT(x) ((x).data)
+#define TB_GET_PTR_ADDRSPACE(x) ((x).data)
+
+typedef enum TB_NodeTypeEnum {
+ TB_NULL = 0,
+
+ // Immediates
+ TB_INTEGER_CONST,
+ TB_FLOAT32_CONST,
+ TB_FLOAT64_CONST,
+
+ // only one per function
+ TB_START, // fn()
+
+ // regions represent the begining of BBs
+ TB_REGION, // fn(preds: []region)
+
+ // projection
+ TB_PROJ,
+
+ TB_CALL, // normal call
+ TB_SCALL, // system call
+
+ // Managed ops
+ TB_SAFEPOINT,
+
+ // Memory operations
+ TB_STORE, // fn(r: control, addr: data, src: data)
+ TB_MEMCPY,
+ TB_MEMSET,
+
+ // Atomics
+ TB_ATOMIC_TEST_AND_SET,
+ TB_ATOMIC_CLEAR,
+
+ TB_ATOMIC_LOAD,
+ TB_ATOMIC_XCHG,
+ TB_ATOMIC_ADD,
+ TB_ATOMIC_SUB,
+ TB_ATOMIC_AND,
+ TB_ATOMIC_XOR,
+ TB_ATOMIC_OR,
+
+ TB_ATOMIC_CMPXCHG,
+ TB_DEBUGBREAK,
+
+ // Terminators
+ TB_BRANCH,
+ TB_RET,
+ TB_UNREACHABLE,
+ TB_TRAP,
+
+ TB_POISON,
+
+ // Load
+ TB_LOAD,
+
+ // Pointers
+ TB_LOCAL,
+
+ TB_GET_SYMBOL_ADDRESS,
+
+ TB_MEMBER_ACCESS,
+ TB_ARRAY_ACCESS,
+
+ // Conversions
+ TB_TRUNCATE,
+ TB_FLOAT_EXT,
+ TB_SIGN_EXT,
+ TB_ZERO_EXT,
+ TB_INT2PTR,
+ TB_PTR2INT,
+ TB_UINT2FLOAT,
+ TB_FLOAT2UINT,
+ TB_INT2FLOAT,
+ TB_FLOAT2INT,
+ TB_BITCAST,
+
+ // Select
+ TB_SELECT,
+
+ // Bitmagic
+ TB_BSWAP,
+ TB_CLZ,
+ TB_CTZ,
+ TB_POPCNT,
+
+ // Unary operations
+ TB_NOT,
+ TB_NEG,
+
+ // Integer arithmatic
+ TB_AND,
+ TB_OR,
+ TB_XOR,
+ TB_ADD,
+ TB_SUB,
+ TB_MUL,
+
+ TB_SHL,
+ TB_SHR,
+ TB_SAR,
+ TB_ROL,
+ TB_ROR,
+ TB_UDIV,
+ TB_SDIV,
+ TB_UMOD,
+ TB_SMOD,
+
+ // Float arithmatic
+ TB_FADD,
+ TB_FSUB,
+ TB_FMUL,
+ TB_FDIV,
+
+ // Comparisons
+ TB_CMP_EQ,
+ TB_CMP_NE,
+ TB_CMP_ULT,
+ TB_CMP_ULE,
+ TB_CMP_SLT,
+ TB_CMP_SLE,
+ TB_CMP_FLT,
+ TB_CMP_FLE,
+
+ // Special ops
+ // does full multiplication (64x64=128 and so on) returning
+ // the low and high values in separate projections
+ TB_MULPAIR,
+
+ // PHI
+ TB_PHI, // fn(r: region, x: []data)
+
+ // variadic
+ TB_VA_START,
+
+ // x86 intrinsics
+ TB_X86INTRIN_RDTSC,
+ TB_X86INTRIN_LDMXCSR,
+ TB_X86INTRIN_STMXCSR,
+ TB_X86INTRIN_SQRT,
+ TB_X86INTRIN_RSQRT,
+} TB_NodeTypeEnum;
+typedef uint8_t TB_NodeType;
+
+typedef int TB_Label;
+
+// just represents some region of bytes, usually in file parsing crap
+typedef struct {
+ size_t length;
+ const uint8_t* data;
+} TB_Slice;
+
+// represents byte counts
+typedef uint32_t TB_CharUnits;
+
+typedef unsigned int TB_FileID;
+
+// SO refers to shared objects which mean either shared libraries (.so or .dll)
+// or executables (.exe or ELF executables)
+typedef enum {
+ // exports to the rest of the shared object
+ TB_EXTERNAL_SO_LOCAL,
+
+ // exports outside of the shared object
+ TB_EXTERNAL_SO_EXPORT,
+} TB_ExternalType;
+
+typedef struct TB_Global TB_Global;
+typedef struct TB_External TB_External;
+typedef struct TB_Function TB_Function;
+
+typedef struct TB_Module TB_Module;
+typedef struct TB_Attrib TB_Attrib;
+typedef struct TB_DebugType TB_DebugType;
+typedef struct TB_ModuleSection TB_ModuleSection;
+typedef struct TB_FunctionPrototype TB_FunctionPrototype;
+
+// Refers generically to objects within a module
+//
+// TB_Function, TB_Global, and TB_External are all subtypes of TB_Symbol
+// and thus are safely allowed to cast into a symbol for operations.
+typedef struct TB_Symbol {
+ enum TB_SymbolTag {
+ TB_SYMBOL_NONE,
+
+ // symbol is dead now
+ TB_SYMBOL_TOMBSTONE,
+
+ TB_SYMBOL_EXTERNAL,
+ TB_SYMBOL_GLOBAL,
+ TB_SYMBOL_FUNCTION,
+
+ TB_SYMBOL_MAX,
+ } tag;
+
+ // refers to the prev or next symbol with the same tag
+ struct TB_Symbol* next;
+ char* name;
+
+ // It's kind of a weird circular reference, but yeah
+ TB_Module* module;
+
+ // helpful for sorting and getting consistent builds
+ uint64_t ordinal;
+
+ union {
+ // if we're JITing then this maps to the address of the symbol
+ void* address;
+ size_t symbol_id;
+ };
+
+ // after this point it's tag-specific storage
+} TB_Symbol;
+
+typedef int TB_Reg;
+
+typedef struct TB_Node TB_Node;
+struct TB_Node {
+ TB_NodeType type;
+ TB_DataType dt;
+ uint16_t input_count; // number of node inputs
+ uint16_t extra_count; // number of bytes for extra operand data
+
+ TB_Attrib* first_attrib;
+ TB_Node** inputs;
+
+ char extra[];
+};
+
+#define TB_KILL_NODE(n) ((n)->type = TB_NULL)
+
+// These are the extra data in specific nodes
+#define TB_NODE_GET_EXTRA(n) ((void*) n->extra)
+#define TB_NODE_GET_EXTRA_T(n, T) ((T*) (n)->extra)
+#define TB_NODE_SET_EXTRA(n, T, ...) (*((T*) (n)->extra) = (T){ __VA_ARGS__ })
+
+// this represents switch (many targets), if (one target) and goto (only default) logic.
+typedef struct { // TB_BRANCH
+ // avoid empty structs with flexible members
+ int64_t _;
+ int64_t keys[];
+} TB_NodeBranch;
+
+typedef struct { // TB_PROJ
+ int index;
+} TB_NodeProj;
+
+typedef struct { // TB_INT
+ uint64_t num_words;
+ uint64_t words[];
+} TB_NodeInt;
+
+typedef struct { // any compare operator
+ TB_DataType cmp_dt;
+} TB_NodeCompare;
+
+typedef struct { // any integer binary operator
+ TB_ArithmeticBehavior ab;
+} TB_NodeBinopInt;
+
+typedef struct { // TB_MULPAIR
+ TB_Node *lo, *hi;
+} TB_NodeMulPair;
+
+typedef struct {
+ TB_CharUnits align;
+ bool is_volatile;
+} TB_NodeMemAccess;
+
+typedef struct {
+ TB_CharUnits size, align;
+} TB_NodeLocal;
+
+typedef struct {
+ TB_FileID file;
+ int line;
+} TB_NodeLine;
+
+typedef struct {
+ float value;
+} TB_NodeFloat32;
+
+typedef struct {
+ double value;
+} TB_NodeFloat64;
+
+typedef struct {
+ int64_t stride;
+} TB_NodeArray;
+
+typedef struct {
+ int64_t offset;
+} TB_NodeMember;
+
+typedef struct {
+ TB_Symbol* sym;
+} TB_NodeSymbol;
+
+typedef struct {
+ TB_MemoryOrder order;
+ TB_MemoryOrder order2;
+} TB_NodeAtomic;
+
+typedef struct {
+ TB_FunctionPrototype* proto;
+ TB_Node* projs[];
+} TB_NodeCall;
+
+typedef struct {
+ uint32_t id;
+} TB_NodeSafepoint;
+
+typedef struct {
+ TB_Node* end;
+ const char* tag;
+
+ size_t succ_count;
+ TB_Node** succ;
+
+ size_t proj_count;
+ TB_Node** projs;
+} TB_NodeRegion;
+
+typedef struct TB_MultiOutput {
+ size_t count;
+ union {
+ // count = 1
+ TB_Node* single;
+ // count > 1
+ TB_Node** multiple;
+ };
+} TB_MultiOutput;
+#define TB_MULTI_OUTPUT(o) ((o).count > 1 ? (o).multiple : &(o).single)
+
+typedef struct {
+ int64_t key;
+ TB_Node* value;
+} TB_SwitchEntry;
+
+typedef enum {
+ TB_EXECUTABLE_UNKNOWN,
+ TB_EXECUTABLE_PE,
+ TB_EXECUTABLE_ELF,
+} TB_ExecutableType;
+
+typedef struct {
+ TB_Node* node; // type == TB_SAFEPOINT
+ void* userdata;
+
+ uint32_t ip; // relative to the function body.
+ uint32_t count; // same as node->input_count
+ int32_t values[];
+} TB_Safepoint;
+
+// *******************************
+// Public macros
+// *******************************
+#ifdef __cplusplus
+
+#define TB_TYPE_TUPLE TB_DataType{ { TB_TUPLE } }
+#define TB_TYPE_CONTROL TB_DataType{ { TB_CONTROL } }
+#define TB_TYPE_VOID TB_DataType{ { TB_INT, 0, 0 } }
+#define TB_TYPE_I8 TB_DataType{ { TB_INT, 0, 8 } }
+#define TB_TYPE_I16 TB_DataType{ { TB_INT, 0, 16 } }
+#define TB_TYPE_I32 TB_DataType{ { TB_INT, 0, 32 } }
+#define TB_TYPE_I64 TB_DataType{ { TB_INT, 0, 64 } }
+#define TB_TYPE_F32 TB_DataType{ { TB_FLOAT, 0, TB_FLT_32 } }
+#define TB_TYPE_F64 TB_DataType{ { TB_FLOAT, 0, TB_FLT_64 } }
+#define TB_TYPE_BOOL TB_DataType{ { TB_INT, 0, 1 } }
+#define TB_TYPE_PTR TB_DataType{ { TB_PTR, 0, 0 } }
+
+#define TB_TYPE_INTN(N) TB_DataType{ { TB_INT, 0, (N) } }
+#define TB_TYPE_PTRN(N) TB_DataType{ { TB_PTR, 0, (N) } }
+
+#else
+
+#define TB_TYPE_TUPLE (TB_DataType){ { TB_TUPLE } }
+#define TB_TYPE_CONTROL (TB_DataType){ { TB_CONTROL } }
+#define TB_TYPE_VOID (TB_DataType){ { TB_INT, 0, 0 } }
+#define TB_TYPE_I8 (TB_DataType){ { TB_INT, 0, 8 } }
+#define TB_TYPE_I16 (TB_DataType){ { TB_INT, 0, 16 } }
+#define TB_TYPE_I32 (TB_DataType){ { TB_INT, 0, 32 } }
+#define TB_TYPE_I64 (TB_DataType){ { TB_INT, 0, 64 } }
+#define TB_TYPE_F32 (TB_DataType){ { TB_FLOAT, 0, TB_FLT_32 } }
+#define TB_TYPE_F64 (TB_DataType){ { TB_FLOAT, 0, TB_FLT_64 } }
+#define TB_TYPE_BOOL (TB_DataType){ { TB_INT, 0, 1 } }
+#define TB_TYPE_PTR (TB_DataType){ { TB_PTR, 0, 0 } }
+#define TB_TYPE_INTN(N) (TB_DataType){ { TB_INT, 0, (N) } }
+#define TB_TYPE_PTRN(N) (TB_DataType){ { TB_PTR, 0, (N) } }
+
+#endif
+
+typedef void (*TB_PrintCallback)(void* user_data, const char* fmt, ...);
+
+// defined in common/arena.h
+typedef struct TB_Arena TB_Arena;
+
+// 0 for default
+TB_API void tb_arena_create(TB_Arena* restrict arena, size_t chunk_size);
+TB_API void tb_arena_destroy(TB_Arena* restrict arena);
+TB_API bool tb_arena_is_empty(TB_Arena* arena);
+
+////////////////////////////////
+// Module management
+////////////////////////////////
+// Creates a module with the correct target and settings
+TB_API TB_Module* tb_module_create(TB_Arch arch, TB_System sys, const TB_FeatureSet* features, bool is_jit);
+
+// Creates a module but defaults on the architecture and system based on the host machine
+TB_API TB_Module* tb_module_create_for_host(const TB_FeatureSet* features, bool is_jit);
+
+TB_API size_t tb_module_get_function_count(TB_Module* m);
+
+// Frees all resources for the TB_Module and its functions, globals and
+// compiled code.
+TB_API void tb_module_destroy(TB_Module* m);
+
+// When targeting windows & thread local storage, you'll need to bind a tls index
+// which is usually just a global that the runtime support has initialized; if you
+// don't and the tls_index is used, it'll crash
+TB_API void tb_module_set_tls_index(TB_Module* m, ptrdiff_t len, const char* name);
+
+// You don't need to manually call this unless you want to resolve locations before
+// exporting.
+TB_API void tb_module_layout_sections(TB_Module* m);
+
+////////////////////////////////
+// Compiled code introspection
+////////////////////////////////
+enum { TB_ASSEMBLY_CHUNK_CAP = 4*1024 - sizeof(size_t[2]) };
+
+typedef struct TB_Assembly TB_Assembly;
+struct TB_Assembly {
+ TB_Assembly* next;
+
+ // nice chunk of text here
+ size_t length;
+ char data[];
+};
+
+// this is where the machine code and other relevant pieces go.
+typedef struct TB_FunctionOutput TB_FunctionOutput;
+
+TB_API void tb_output_print_asm(TB_FunctionOutput* out, FILE* fp);
+
+TB_API uint8_t* tb_output_get_code(TB_FunctionOutput* out, size_t* out_length);
+
+// returns NULL if no assembly was generated
+TB_API TB_Assembly* tb_output_get_asm(TB_FunctionOutput* out);
+
+// this is relative to the start of the function (the start of the prologue)
+TB_API TB_Safepoint* tb_safepoint_get(TB_Function* f, uint32_t relative_ip);
+
+////////////////////////////////
+// Exporter
+////////////////////////////////
+// Export buffers are generated in chunks because it's easier, usually the
+// chunks are "massive" (representing some connected piece of the buffer)
+// but they don't have to be.
+typedef struct TB_ExportChunk TB_ExportChunk;
+struct TB_ExportChunk {
+ TB_ExportChunk* next;
+ size_t pos, size;
+ uint8_t data[];
+};
+
+typedef struct {
+ size_t total;
+ TB_ExportChunk *head, *tail;
+} TB_ExportBuffer;
+
+TB_API TB_ExportBuffer tb_module_object_export(TB_Module* m, TB_DebugFormat debug_fmt);
+TB_API bool tb_export_buffer_to_file(TB_ExportBuffer buffer, const char* path);
+TB_API void tb_export_buffer_free(TB_ExportBuffer buffer);
+
+////////////////////////////////
+// Linker exporter
+////////////////////////////////
+// This is used to export shared objects or executables
+typedef struct TB_Linker TB_Linker;
+typedef struct TB_LinkerSection TB_LinkerSection;
+typedef struct TB_LinkerSectionPiece TB_LinkerSectionPiece;
+
+typedef struct {
+ enum {
+ TB_LINKER_MSG_NULL,
+
+ // pragma comment(lib, "blah")
+ TB_LINKER_MSG_IMPORT,
+ } tag;
+ union {
+ // pragma lib request
+ TB_Slice import_path;
+ };
+} TB_LinkerMsg;
+
+TB_API TB_ExecutableType tb_system_executable_format(TB_System s);
+
+TB_API TB_Linker* tb_linker_create(TB_ExecutableType type, TB_Arch arch);
+TB_API TB_ExportBuffer tb_linker_export(TB_Linker* l);
+TB_API void tb_linker_destroy(TB_Linker* l);
+
+TB_API bool tb_linker_get_msg(TB_Linker* l, TB_LinkerMsg* msg);
+
+// windows only
+TB_API void tb_linker_set_subsystem(TB_Linker* l, TB_WindowsSubsystem subsystem);
+
+TB_API void tb_linker_set_entrypoint(TB_Linker* l, const char* name);
+
+// Links compiled module into output
+TB_API void tb_linker_append_module(TB_Linker* l, TB_Module* m);
+
+// Adds object file to output
+TB_API void tb_linker_append_object(TB_Linker* l, TB_Slice obj_name, TB_Slice content);
+
+// Adds static library to output
+// this can include imports (wrappers for DLL symbols) along with
+// normal sections.
+TB_API void tb_linker_append_library(TB_Linker* l, TB_Slice ar_name, TB_Slice content);
+
+////////////////////////////////
+// JIT compilation
+////////////////////////////////
+typedef struct TB_JITContext TB_JITContext;
+
+// passing 0 to jit_heap_capacity will default to 4MiB
+TB_API TB_JITContext* tb_module_begin_jit(TB_Module* m, size_t jit_heap_capacity);
+TB_API void* tb_module_apply_function(TB_JITContext* jit, TB_Function* f);
+TB_API void* tb_module_apply_global(TB_JITContext* jit, TB_Global* g);
+// fixes page permissions, applies missing relocations
+TB_API void tb_module_ready_jit(TB_JITContext* jit);
+TB_API void tb_module_end_jit(TB_JITContext* jit);
+
+#define TB_FOR_FUNCTIONS(it, module) for (TB_Function* it = tb_first_function(module); it != NULL; it = tb_next_function(it))
+TB_API TB_Function* tb_first_function(TB_Module* m);
+TB_API TB_Function* tb_next_function(TB_Function* f);
+
+#define TB_FOR_EXTERNALS(it, module) for (TB_External* it = tb_first_external(module); it != NULL; it = tb_next_external(it))
+TB_API TB_External* tb_first_external(TB_Module* m);
+TB_API TB_External* tb_next_external(TB_External* e);
+
+// this is used in JIT scenarios to tell the compiler what externals map to
+TB_API TB_ExternalType tb_extern_get_type(TB_External* e);
+TB_Global* tb_extern_transmute(TB_External* e, TB_DebugType* dbg_type, TB_Linkage linkage);
+
+TB_API TB_External* tb_extern_create(TB_Module* m, ptrdiff_t len, const char* name, TB_ExternalType type);
+TB_API TB_FileID tb_file_create(TB_Module* m, const char* path);
+
+// Called once you're done with TB operations on a thread (or i guess when it's
+// about to be killed :p), not calling it can only result in leaks on that thread
+// and calling it too early will result in TB potentially reallocating it, but there
+// should be no crashes from this, just potential slowdown or higher than expected memory
+// usage.
+TB_API void tb_free_thread_resources(void);
+
+////////////////////////////////
+// Function Prototypes
+////////////////////////////////
+typedef struct TB_PrototypeParam {
+ TB_DataType dt;
+ TB_DebugType* debug_type;
+
+ // does not apply for returns
+ const char* name;
+} TB_PrototypeParam;
+
+struct TB_FunctionPrototype {
+ // header
+ TB_CallingConv call_conv;
+ uint16_t return_count, param_count;
+ bool has_varargs;
+
+ // params are directly followed by returns
+ TB_PrototypeParam params[];
+};
+#define TB_PROTOTYPE_RETURNS(p) ((p)->params + (p)->param_count)
+
+// creates a function prototype used to define a function's parameters and returns.
+//
+// function prototypes do not get freed individually and last for the entire run
+// of the backend, they can also be reused for multiple functions which have
+// matching signatures.
+TB_API TB_FunctionPrototype* tb_prototype_create(TB_Module* m, TB_CallingConv cc, size_t param_count, const TB_PrototypeParam* params, size_t return_count, const TB_PrototypeParam* returns, bool has_varargs);
+
+// same as tb_function_set_prototype except it will handle lowering from types like the TB_DebugType
+// into the correct ABI and exposing sane looking nodes to the parameters.
+//
+// returns the parameters
+TB_API TB_Node** tb_function_set_prototype_from_dbg(TB_Function* f, TB_DebugType* dbg, TB_Arena* arena, size_t* out_param_count);
+TB_API TB_FunctionPrototype* tb_prototype_from_dbg(TB_Module* m, TB_DebugType* dbg);
+
+// used for ABI parameter passing
+typedef enum {
+ // needs a direct value
+ TB_PASSING_DIRECT,
+
+ // needs an address to the value
+ TB_PASSING_INDIRECT,
+
+ // doesn't use this parameter
+ TB_PASSING_IGNORE,
+} TB_PassingRule;
+
+TB_API TB_PassingRule tb_get_passing_rule_from_dbg(TB_Module* mod, TB_DebugType* param_type, bool is_return);
+
+////////////////////////////////
+// Globals
+////////////////////////////////
+TB_API TB_Global* tb_global_create(TB_Module* m, ptrdiff_t len, const char* name, TB_DebugType* dbg_type, TB_Linkage linkage);
+
+// allocate space for the global
+TB_API void tb_global_set_storage(TB_Module* m, TB_ModuleSection* section, TB_Global* global, size_t size, size_t align, size_t max_objects);
+
+// returns a buffer which the user can fill to then have represented in the initializer
+TB_API void* tb_global_add_region(TB_Module* m, TB_Global* global, size_t offset, size_t size);
+
+// places a relocation for a global at offset, the size of the relocation
+// depends on the pointer size
+TB_API void tb_global_add_symbol_reloc(TB_Module* m, TB_Global* global, size_t offset, const TB_Symbol* symbol);
+
+TB_API TB_ModuleSection* tb_module_get_text(TB_Module* m);
+TB_API TB_ModuleSection* tb_module_get_rdata(TB_Module* m);
+TB_API TB_ModuleSection* tb_module_get_data(TB_Module* m);
+TB_API TB_ModuleSection* tb_module_get_tls(TB_Module* m);
+
+////////////////////////////////
+// Function Attributes
+////////////////////////////////
+TB_API void tb_node_append_attrib(TB_Node* n, TB_Attrib* a);
+
+// These are parts of a function that describe metadata for instructions
+TB_API TB_Attrib* tb_function_attrib_variable(TB_Function* f, ptrdiff_t len, const char* name, TB_DebugType* type);
+TB_API TB_Attrib* tb_function_attrib_scope(TB_Function* f, TB_Attrib* parent_scope);
+
+////////////////////////////////
+// Debug info Generation
+////////////////////////////////
+TB_API TB_DebugType* tb_debug_get_void(TB_Module* m);
+TB_API TB_DebugType* tb_debug_get_bool(TB_Module* m);
+TB_API TB_DebugType* tb_debug_get_integer(TB_Module* m, bool is_signed, int bits);
+TB_API TB_DebugType* tb_debug_get_float(TB_Module* m, TB_FloatFormat fmt);
+TB_API TB_DebugType* tb_debug_create_ptr(TB_Module* m, TB_DebugType* base);
+TB_API TB_DebugType* tb_debug_create_array(TB_Module* m, TB_DebugType* base, size_t count);
+TB_API TB_DebugType* tb_debug_create_alias(TB_Module* m, TB_DebugType* base, ptrdiff_t len, const char* tag);
+TB_API TB_DebugType* tb_debug_create_struct(TB_Module* m, ptrdiff_t len, const char* tag);
+TB_API TB_DebugType* tb_debug_create_union(TB_Module* m, ptrdiff_t len, const char* tag);
+TB_API TB_DebugType* tb_debug_create_field(TB_Module* m, TB_DebugType* type, ptrdiff_t len, const char* name, TB_CharUnits offset);
+
+// returns the array you need to fill with fields
+TB_API TB_DebugType** tb_debug_record_begin(TB_DebugType* type, size_t count);
+TB_API void tb_debug_record_end(TB_DebugType* type, TB_CharUnits size, TB_CharUnits align);
+
+TB_API TB_DebugType* tb_debug_create_func(TB_Module* m, TB_CallingConv cc, size_t param_count, size_t return_count, bool has_varargs);
+
+TB_API TB_DebugType* tb_debug_field_type(TB_DebugType* type);
+
+TB_API size_t tb_debug_func_return_count(TB_DebugType* type);
+TB_API size_t tb_debug_func_param_count(TB_DebugType* type);
+
+// you'll need to fill these if you make a function
+TB_API TB_DebugType** tb_debug_func_params(TB_DebugType* type);
+TB_API TB_DebugType** tb_debug_func_returns(TB_DebugType* type);
+
+////////////////////////////////
+// IR access
+////////////////////////////////
+// 'it' is a pointer into the node's inputs array
+#define TB_FOR_INPUT_IN_NODE(it, parent) for (TB_Node **it = parent->inputs, **__end = it + (parent)->input_count; it != __end; it++)
+
+////////////////////////////////
+// Symbols
+////////////////////////////////
+TB_API bool tb_symbol_is_comdat(const TB_Symbol* s);
+
+// returns NULL if the tag doesn't match
+TB_API TB_Function* tb_symbol_as_function(TB_Symbol* s);
+TB_API TB_External* tb_symbol_as_external(TB_Symbol* s);
+TB_API TB_Global* tb_symbol_as_global(TB_Symbol* s);
+
+////////////////////////////////
+// Function IR Generation
+////////////////////////////////
+TB_API void tb_get_data_type_size(TB_Module* mod, TB_DataType dt, size_t* size, size_t* align);
+
+// the user_data is expected to be a valid FILE*
+TB_API void tb_default_print_callback(void* user_data, const char* fmt, ...);
+
+TB_API void tb_inst_set_location(TB_Function* f, TB_FileID file, int line);
+
+// if section is NULL, default to .text
+TB_API TB_Function* tb_function_create(TB_Module* m, ptrdiff_t len, const char* name, TB_Linkage linkage, TB_ComdatType comdat);
+
+TB_API void* tb_function_get_jit_pos(TB_Function* f);
+
+// if len is -1, it's null terminated
+TB_API void tb_symbol_set_name(TB_Symbol* s, ptrdiff_t len, const char* name);
+
+TB_API void tb_symbol_bind_ptr(TB_Symbol* s, void* ptr);
+TB_API const char* tb_symbol_get_name(TB_Symbol* s);
+
+// if arena is NULL, defaults to module arena which is freed on tb_free_thread_resources
+TB_API void tb_function_set_prototype(TB_Function* f, TB_FunctionPrototype* p, TB_Arena* arena);
+TB_API TB_FunctionPrototype* tb_function_get_prototype(TB_Function* f);
+
+TB_API void tb_function_print(TB_Function* f, TB_PrintCallback callback, void* user_data);
+
+TB_API void tb_inst_set_control(TB_Function* f, TB_Node* control);
+TB_API TB_Node* tb_inst_get_control(TB_Function* f);
+
+TB_API TB_Node* tb_inst_region(TB_Function* f);
+
+// if len is -1, it's null terminated
+TB_API void tb_inst_set_region_name(TB_Node* n, ptrdiff_t len, const char* name);
+
+TB_API void tb_inst_unreachable(TB_Function* f);
+TB_API void tb_inst_debugbreak(TB_Function* f);
+TB_API void tb_inst_trap(TB_Function* f);
+TB_API TB_Node* tb_inst_poison(TB_Function* f);
+
+TB_API TB_Node* tb_inst_param(TB_Function* f, int param_id);
+
+TB_API TB_Node* tb_inst_fpxt(TB_Function* f, TB_Node* src, TB_DataType dt);
+TB_API TB_Node* tb_inst_sxt(TB_Function* f, TB_Node* src, TB_DataType dt);
+TB_API TB_Node* tb_inst_zxt(TB_Function* f, TB_Node* src, TB_DataType dt);
+TB_API TB_Node* tb_inst_trunc(TB_Function* f, TB_Node* src, TB_DataType dt);
+TB_API TB_Node* tb_inst_int2ptr(TB_Function* f, TB_Node* src);
+TB_API TB_Node* tb_inst_ptr2int(TB_Function* f, TB_Node* src, TB_DataType dt);
+TB_API TB_Node* tb_inst_int2float(TB_Function* f, TB_Node* src, TB_DataType dt, bool is_signed);
+TB_API TB_Node* tb_inst_float2int(TB_Function* f, TB_Node* src, TB_DataType dt, bool is_signed);
+TB_API TB_Node* tb_inst_bitcast(TB_Function* f, TB_Node* src, TB_DataType dt);
+
+TB_API TB_Node* tb_inst_local(TB_Function* f, TB_CharUnits size, TB_CharUnits align);
+TB_API TB_Node* tb_inst_load(TB_Function* f, TB_DataType dt, TB_Node* addr, TB_CharUnits align, bool is_volatile);
+TB_API void tb_inst_store(TB_Function* f, TB_DataType dt, TB_Node* addr, TB_Node* val, TB_CharUnits align, bool is_volatile);
+
+TB_API TB_Node* tb_inst_bool(TB_Function* f, bool imm);
+TB_API TB_Node* tb_inst_sint(TB_Function* f, TB_DataType dt, int64_t imm);
+TB_API TB_Node* tb_inst_uint(TB_Function* f, TB_DataType dt, uint64_t imm);
+TB_API TB_Node* tb_inst_float32(TB_Function* f, float imm);
+TB_API TB_Node* tb_inst_float64(TB_Function* f, double imm);
+TB_API TB_Node* tb_inst_cstring(TB_Function* f, const char* str);
+TB_API TB_Node* tb_inst_string(TB_Function* f, size_t len, const char* str);
+
+// write 'val' over 'count' bytes on 'dst'
+TB_API void tb_inst_memset(TB_Function* f, TB_Node* dst, TB_Node* val, TB_Node* count, TB_CharUnits align, bool is_volatile);
+
+// zero 'count' bytes on 'dst'
+TB_API void tb_inst_memzero(TB_Function* f, TB_Node* dst, TB_Node* count, TB_CharUnits align, bool is_volatile);
+
+// performs a copy of 'count' elements from one memory location to another
+// both locations cannot overlap.
+TB_API void tb_inst_memcpy(TB_Function* f, TB_Node* dst, TB_Node* src, TB_Node* count, TB_CharUnits align, bool is_volatile);
+
+// result = base + (index * stride)
+TB_API TB_Node* tb_inst_array_access(TB_Function* f, TB_Node* base, TB_Node* index, int64_t stride);
+
+// result = base + offset
+// where base is a pointer
+TB_API TB_Node* tb_inst_member_access(TB_Function* f, TB_Node* base, int64_t offset);
+
+TB_API TB_Node* tb_inst_get_symbol_address(TB_Function* f, TB_Symbol* target);
+
+// Performs a conditional select between two values, if the operation is
+// performed wide then the cond is expected to be the same type as a and b where
+// the condition is resolved as true if the MSB (per component) is 1.
+//
+// result = cond ? a : b
+// a, b must match in type
+TB_API TB_Node* tb_inst_select(TB_Function* f, TB_Node* cond, TB_Node* a, TB_Node* b);
+
+// Integer arithmetic
+TB_API TB_Node* tb_inst_add(TB_Function* f, TB_Node* a, TB_Node* b, TB_ArithmeticBehavior arith_behavior);
+TB_API TB_Node* tb_inst_sub(TB_Function* f, TB_Node* a, TB_Node* b, TB_ArithmeticBehavior arith_behavior);
+TB_API TB_Node* tb_inst_mul(TB_Function* f, TB_Node* a, TB_Node* b, TB_ArithmeticBehavior arith_behavior);
+TB_API TB_Node* tb_inst_div(TB_Function* f, TB_Node* a, TB_Node* b, bool signedness);
+TB_API TB_Node* tb_inst_mod(TB_Function* f, TB_Node* a, TB_Node* b, bool signedness);
+
+// Bitmagic operations
+TB_API TB_Node* tb_inst_bswap(TB_Function* f, TB_Node* n);
+TB_API TB_Node* tb_inst_clz(TB_Function* f, TB_Node* n);
+TB_API TB_Node* tb_inst_ctz(TB_Function* f, TB_Node* n);
+TB_API TB_Node* tb_inst_popcount(TB_Function* f, TB_Node* n);
+
+// Bitwise operations
+TB_API TB_Node* tb_inst_not(TB_Function* f, TB_Node* n);
+TB_API TB_Node* tb_inst_neg(TB_Function* f, TB_Node* n);
+TB_API TB_Node* tb_inst_and(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_or(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_xor(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_sar(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_shl(TB_Function* f, TB_Node* a, TB_Node* b, TB_ArithmeticBehavior arith_behavior);
+TB_API TB_Node* tb_inst_shr(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_rol(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_ror(TB_Function* f, TB_Node* a, TB_Node* b);
+
+// Atomics
+// By default you can use TB_MEM_ORDER_SEQ_CST for the memory order to get
+// correct but possibly slower results on certain platforms (those with relaxed
+// memory models).
+
+// Must be aligned to the natural alignment of dt
+TB_API TB_Node* tb_inst_atomic_load(TB_Function* f, TB_Node* addr, TB_DataType dt, TB_MemoryOrder order);
+
+// All atomic operations here return the old value and the operations are
+// performed in the same data type as 'src' with alignment of 'addr' being
+// the natural alignment of 'src'
+TB_API TB_Node* tb_inst_atomic_xchg(TB_Function* f, TB_Node* addr, TB_Node* src, TB_MemoryOrder order);
+TB_API TB_Node* tb_inst_atomic_add(TB_Function* f, TB_Node* addr, TB_Node* src, TB_MemoryOrder order);
+TB_API TB_Node* tb_inst_atomic_sub(TB_Function* f, TB_Node* addr, TB_Node* src, TB_MemoryOrder order);
+TB_API TB_Node* tb_inst_atomic_and(TB_Function* f, TB_Node* addr, TB_Node* src, TB_MemoryOrder order);
+TB_API TB_Node* tb_inst_atomic_xor(TB_Function* f, TB_Node* addr, TB_Node* src, TB_MemoryOrder order);
+TB_API TB_Node* tb_inst_atomic_or(TB_Function* f, TB_Node* addr, TB_Node* src, TB_MemoryOrder order);
+
+// returns old_value from *addr
+TB_API TB_Node* tb_inst_atomic_cmpxchg(TB_Function* f, TB_Node* addr, TB_Node* expected, TB_Node* desired, TB_MemoryOrder succ, TB_MemoryOrder fail);
+
+// Float math
+TB_API TB_Node* tb_inst_fadd(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_fsub(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_fmul(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_fdiv(TB_Function* f, TB_Node* a, TB_Node* b);
+
+// Comparisons
+TB_API TB_Node* tb_inst_cmp_eq(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_cmp_ne(TB_Function* f, TB_Node* a, TB_Node* b);
+
+TB_API TB_Node* tb_inst_cmp_ilt(TB_Function* f, TB_Node* a, TB_Node* b, bool signedness);
+TB_API TB_Node* tb_inst_cmp_ile(TB_Function* f, TB_Node* a, TB_Node* b, bool signedness);
+TB_API TB_Node* tb_inst_cmp_igt(TB_Function* f, TB_Node* a, TB_Node* b, bool signedness);
+TB_API TB_Node* tb_inst_cmp_ige(TB_Function* f, TB_Node* a, TB_Node* b, bool signedness);
+
+TB_API TB_Node* tb_inst_cmp_flt(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_cmp_fle(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_cmp_fgt(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_cmp_fge(TB_Function* f, TB_Node* a, TB_Node* b);
+
+// General intrinsics
+TB_API TB_Node* tb_inst_va_start(TB_Function* f, TB_Node* a);
+
+// x86 Intrinsics
+TB_API TB_Node* tb_inst_x86_rdtsc(TB_Function* f);
+TB_API TB_Node* tb_inst_x86_ldmxcsr(TB_Function* f, TB_Node* a);
+TB_API TB_Node* tb_inst_x86_stmxcsr(TB_Function* f);
+TB_API TB_Node* tb_inst_x86_sqrt(TB_Function* f, TB_Node* a);
+TB_API TB_Node* tb_inst_x86_rsqrt(TB_Function* f, TB_Node* a);
+
+// Control flow
+TB_API TB_Node* tb_inst_syscall(TB_Function* f, TB_DataType dt, TB_Node* syscall_num, size_t param_count, TB_Node** params);
+TB_API TB_MultiOutput tb_inst_call(TB_Function* f, TB_FunctionPrototype* proto, TB_Node* target, size_t param_count, TB_Node** params);
+
+// Managed
+TB_API TB_Node* tb_inst_safepoint(TB_Function* f, size_t param_count, TB_Node** params);
+
+TB_API TB_Node* tb_inst_incomplete_phi(TB_Function* f, TB_DataType dt, TB_Node* region, size_t preds);
+TB_API bool tb_inst_add_phi_operand(TB_Function* f, TB_Node* phi, TB_Node* region, TB_Node* val);
+
+TB_API TB_Node* tb_inst_phi2(TB_Function* f, TB_Node* region, TB_Node* a, TB_Node* b);
+TB_API void tb_inst_goto(TB_Function* f, TB_Node* target);
+TB_API void tb_inst_if(TB_Function* f, TB_Node* cond, TB_Node* true_case, TB_Node* false_case);
+TB_API void tb_inst_branch(TB_Function* f, TB_DataType dt, TB_Node* key, TB_Node* default_case, size_t entry_count, const TB_SwitchEntry* keys);
+
+TB_API void tb_inst_ret(TB_Function* f, size_t count, TB_Node** values);
+
+////////////////////////////////
+// Passes
+////////////////////////////////
+// Function analysis, optimizations, and codegen are all part of this
+typedef struct TB_Passes TB_Passes;
+
+// the arena is used to allocate the nodes while passes are being done.
+TB_API TB_Passes* tb_pass_enter(TB_Function* f, TB_Arena* arena);
+TB_API void tb_pass_exit(TB_Passes* opt);
+
+// transformation passes:
+// peephole: runs most simple reductions on the code,
+// should be run after any bigger passes (it's incremental
+// so it's not that bad)
+//
+// mem2reg: lowers TB_LOCALs into SSA values; this makes more
+// data flow analysis possible on the code and allows the codegen
+// to place variables into registers.
+//
+// loop: NOT READY
+//
+TB_API bool tb_pass_peephole(TB_Passes* opt);
+TB_API bool tb_pass_mem2reg(TB_Passes* opt);
+TB_API bool tb_pass_loop(TB_Passes* opt);
+
+// analysis
+// print: prints IR in a flattened text form.
+TB_API bool tb_pass_print(TB_Passes* opt);
+
+// codegen
+TB_API TB_FunctionOutput* tb_pass_codegen(TB_Passes* opt, bool emit_asm);
+
+TB_API void tb_pass_kill_node(TB_Passes* opt, TB_Node* n);
+TB_API bool tb_pass_mark(TB_Passes* opt, TB_Node* n);
+TB_API void tb_pass_mark_users(TB_Passes* opt, TB_Node* n);
+
+////////////////////////////////
+// IR access
+////////////////////////////////
+TB_API const char* tb_node_get_name(TB_Node* n);
+
+TB_API TB_Node* tb_get_parent_region(TB_Node* n);
+TB_API bool tb_node_is_constant_non_zero(TB_Node* n);
+TB_API bool tb_node_is_constant_zero(TB_Node* n);
+
+#endif /* TB_CORE_H */
diff --git a/src/tilde/tb.lib b/src/tilde/tb.lib
new file mode 100644
index 000000000..1dc4f3557
--- /dev/null
+++ b/src/tilde/tb.lib
Binary files differ
diff --git a/src/tilde/tb_arena.h b/src/tilde/tb_arena.h
new file mode 100644
index 000000000..d50e777da
--- /dev/null
+++ b/src/tilde/tb_arena.h
@@ -0,0 +1,76 @@
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+
+#ifndef TB_API
+# ifdef __cplusplus
+# define TB_EXTERN extern "C"
+# else
+# define TB_EXTERN
+# endif
+# ifdef TB_DLL
+# ifdef TB_IMPORT_DLL
+# define TB_API TB_EXTERN __declspec(dllimport)
+# else
+# define TB_API TB_EXTERN __declspec(dllexport)
+# endif
+# else
+# define TB_API TB_EXTERN
+# endif
+#endif
+
+enum {
+ TB_ARENA_SMALL_CHUNK_SIZE = 4 * 1024,
+ TB_ARENA_MEDIUM_CHUNK_SIZE = 512 * 1024,
+ TB_ARENA_LARGE_CHUNK_SIZE = 2 * 1024 * 1024,
+
+ TB_ARENA_ALIGNMENT = 16,
+};
+
+typedef struct TB_ArenaChunk TB_ArenaChunk;
+struct TB_ArenaChunk {
+ TB_ArenaChunk* next;
+ size_t pad;
+ char data[];
+};
+
+typedef struct TB_Arena {
+ size_t chunk_size;
+ TB_ArenaChunk* base;
+ TB_ArenaChunk* top;
+
+ // top of the allocation space
+ char* watermark;
+ char* high_point; // &top->data[chunk_size]
+} TB_Arena;
+
+typedef struct TB_ArenaSavepoint {
+ TB_ArenaChunk* top;
+ char* watermark;
+} TB_ArenaSavepoint;
+
+#define TB_ARENA_FOR(it, arena) for (TB_ArenaChunk* it = (arena)->base; it != NULL; it = it->next)
+
+#define TB_ARENA_ALLOC(arena, T) tb_arena_alloc(arena, sizeof(T))
+#define TB_ARENA_ARR_ALLOC(arena, count, T) tb_arena_alloc(arena, (count) * sizeof(T))
+
+TB_API void tb_arena_create(TB_Arena* restrict arena, size_t chunk_size);
+TB_API void tb_arena_destroy(TB_Arena* restrict arena);
+
+TB_API void* tb_arena_unaligned_alloc(TB_Arena* restrict arena, size_t size);
+TB_API void* tb_arena_alloc(TB_Arena* restrict arena, size_t size);
+
+// asserts if ptr+size != watermark
+TB_API void tb_arena_pop(TB_Arena* restrict arena, void* ptr, size_t size);
+
+// in case you wanna mix unaligned and aligned arenas
+TB_API void tb_arena_realign(TB_Arena* restrict arena);
+
+TB_API bool tb_arena_is_empty(TB_Arena* arena);
+
+// savepoints
+TB_API TB_ArenaSavepoint tb_arena_save(TB_Arena* arena);
+TB_API void tb_arena_restore(TB_Arena* arena, TB_ArenaSavepoint sp);
+
+// resets to only having one chunk
+TB_API void tb_arena_clear(TB_Arena* arena);
diff --git a/src/tilde/tb_coff.h b/src/tilde/tb_coff.h
new file mode 100644
index 000000000..ae8f63863
--- /dev/null
+++ b/src/tilde/tb_coff.h
@@ -0,0 +1,330 @@
+// PE/COFF is the executable/object format used by Microsoft.
+#ifndef TB_COFF_H
+#define TB_COFF_H
+
+#include "tb_formats.h"
+
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000
+
+#define IMAGE_SYM_CLASS_EXTERNAL 0x0002
+#define IMAGE_SYM_CLASS_STATIC 0x0003
+#define IMAGE_SYM_CLASS_LABEL 0x0006
+#define IMAGE_SYM_CLASS_FILE 0x0067
+#define IMAGE_SYM_CLASS_SECTION 0x0068
+#define IMAGE_SYM_CLASS_WEAK_EXTERNAL 0x0069
+
+#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004
+
+#define IMAGE_REL_AMD64_ADDR64 0x0001
+#define IMAGE_REL_AMD64_ADDR32 0x0002
+#define IMAGE_REL_AMD64_ADDR32NB 0x0003
+#define IMAGE_REL_AMD64_REL32 0x0004
+#define IMAGE_REL_AMD64_REL32_1 0x0005
+#define IMAGE_REL_AMD64_REL32_2 0x0006
+#define IMAGE_REL_AMD64_REL32_3 0x0007
+#define IMAGE_REL_AMD64_REL32_4 0x0008
+#define IMAGE_REL_AMD64_REL32_5 0x0009
+#define IMAGE_REL_AMD64_SECTION 0x000A
+#define IMAGE_REL_AMD64_SECREL 0x000B
+
+#define IMAGE_SCN_LNK_REMOVE 0x00000800
+#define IMAGE_SCN_LNK_COMDAT 0x00001000
+#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000
+#define IMAGE_SCN_MEM_EXECUTE 0x20000000
+#define IMAGE_SCN_MEM_READ 0x40000000
+#define IMAGE_SCN_MEM_WRITE 0x80000000
+
+#define IMAGE_SCN_CNT_CODE 0x00000020 /* Section contains code. */
+#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* Section contains initialized data. */
+#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* Section contains uninitialized data. */
+
+#define IMAGE_DIRECTORY_ENTRY_EXPORT 0 // Export Directory
+#define IMAGE_DIRECTORY_ENTRY_IMPORT 1 // Import Directory
+#define IMAGE_DIRECTORY_ENTRY_RESOURCE 2 // Resource Directory
+#define IMAGE_DIRECTORY_ENTRY_EXCEPTION 3 // Exception Directory
+#define IMAGE_DIRECTORY_ENTRY_SECURITY 4 // Security Directory
+#define IMAGE_DIRECTORY_ENTRY_BASERELOC 5 // Base Relocation Table
+#define IMAGE_DIRECTORY_ENTRY_DEBUG 6 // Debug Directory
+#define IMAGE_DIRECTORY_ENTRY_ARCHITECTURE 7 // Architecture Specific Data
+#define IMAGE_DIRECTORY_ENTRY_GLOBALPTR 8 // RVA of GP
+#define IMAGE_DIRECTORY_ENTRY_TLS 9 // TLS Directory
+#define IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG 10 // Load Configuration Directory
+#define IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT 11 // Bound Import Directory in headers
+#define IMAGE_DIRECTORY_ENTRY_IAT 12 // Import Address Table
+#define IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT 13 // Delay Load Import Descriptors
+#define IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR 14 // COM Runtime descriptor
+
+#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2
+#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3
+#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10
+
+typedef enum {
+ TB_COFF_SECTION_NO_PAD = 0x00000008,
+ TB_COFF_SECTION_CODE = 0x00000020,
+ TB_COFF_SECTION_INIT = 0x00000040,
+ TB_COFF_SECTION_UNINIT = 0x00000080,
+ TB_COFF_SECTION_OTHER = 0x00000100,
+ TB_COFF_SECTION_INFO = 0x00000200,
+ TB_COFF_SECTION_REMOVE = 0x00000800,
+ TB_COFF_SECTION_COMDAT = 0x00001000,
+
+ // this is actually a 4bit field
+ TB_COFF_SECTION_ALIGN = 0x00F00000,
+
+ // if we have more than 65535 relocations we do this
+ TB_COFF_SECTION_RELOC_OVR = 0x00F00000,
+
+ // memory flags
+ TB_COFF_SECTION_DISCARDABLE = 0x02000000,
+ TB_COFF_SECTION_NOT_CACHED = 0x04000000,
+ TB_COFF_SECTION_NOT_PAGED = 0x08000000,
+ TB_COFF_SECTION_SHARED = 0x10000000,
+ TB_COFF_SECTION_EXECUTE = 0x20000000,
+ TB_COFF_SECTION_READ = 0x40000000,
+ TB_COFF_SECTION_WRITE = 0x80000000,
+} TB_COFF_SectionFlags;
+
+typedef struct TB_COFF_Parser {
+ // inputs
+ TB_Slice name, file;
+
+ // results
+ size_t section_count;
+ size_t symbol_table, symbol_count;
+
+ // private
+ TB_Slice string_table;
+} TB_COFF_Parser;
+
+// fills the parser with results from the COFF header
+bool tb_coff_parse_init(TB_COFF_Parser* restrict parser);
+bool tb_coff_parse_section(TB_COFF_Parser* restrict parser, size_t i, TB_ObjectSection* out_sec);
+
+// how many symbols does this one symbol take up (basically 1 + aux symbols).
+// returns 0 if error.
+size_t tb_coff_parse_symbol(TB_COFF_Parser* restrict parser, size_t i, TB_ObjectSymbol* restrict out_sym);
+
+#endif // TB_COFF_H
+
+#ifdef TB_COFF_IMPL
+#include <common.h>
+
+#pragma pack(push, 2)
+typedef struct COFF_SectionHeader {
+ char name[8];
+ union {
+ uint32_t physical_address;
+ uint32_t virtual_size;
+ } misc;
+ uint32_t virtual_address;
+ uint32_t raw_data_size;
+ uint32_t raw_data_pos;
+ uint32_t pointer_to_reloc;
+ uint32_t pointer_to_lineno;
+ uint16_t num_reloc;
+ uint16_t num_lineno;
+ uint32_t characteristics;
+} COFF_SectionHeader;
+
+typedef struct COFF_FileHeader {
+ uint16_t machine;
+ uint16_t section_count;
+ uint32_t timestamp;
+ uint32_t symbol_table;
+ uint32_t symbol_count;
+ uint16_t optional_header_size;
+ uint16_t flags;
+} COFF_FileHeader;
+
+typedef struct COFF_Symbol {
+ union {
+ uint8_t short_name[8];
+ uint32_t long_name[2];
+ };
+ uint32_t value;
+ int16_t section_number;
+ uint16_t type;
+ uint8_t storage_class;
+ uint8_t aux_symbols_count;
+} COFF_Symbol;
+
+typedef struct COFF_ImageReloc {
+ union {
+ uint32_t VirtualAddress;
+ uint32_t RelocCount;
+ };
+ uint32_t SymbolTableIndex;
+ uint16_t Type;
+} COFF_ImageReloc;
+#pragma pack(pop)
+
+// sanity checks
+static_assert(sizeof(COFF_SectionHeader) == 40, "COFF Section header size != 40 bytes");
+static_assert(sizeof(COFF_ImageReloc) == 10, "COFF Image Relocation size != 10 bytes");
+static_assert(sizeof(COFF_FileHeader) == 20, "COFF File header size != 20 bytes");
+static_assert(sizeof(COFF_Symbol) == 18, "COFF Symbol size != 18 bytes");
+
+// Validates the COFF file header and records where the symbol table and the
+// trailing string table live. Fills parser->symbol_count, ->symbol_table,
+// ->section_count and ->string_table. Returns false when the file is too
+// small for the header or for the symbol table it advertises.
+// NOTE(review): string_table_pos is computed from untrusted header fields;
+// the multiplication could overflow size_t on 32-bit targets — confirm.
+bool tb_coff_parse_init(TB_COFF_Parser* restrict parser) {
+ TB_Slice file = parser->file;
+
+ if (file.length < sizeof(COFF_FileHeader)) return false;
+ COFF_FileHeader* header = (COFF_FileHeader*) &parser->file.data[0];
+
+ // locate string table (it spans until the end of the file)
+ size_t string_table_pos = header->symbol_table + (header->symbol_count * sizeof(COFF_Symbol));
+ if (file.length < string_table_pos) return false;
+
+ parser->symbol_count = header->symbol_count;
+ parser->symbol_table = header->symbol_table;
+ parser->section_count = header->section_count;
+ parser->string_table = (TB_Slice){
+ .length = file.length - string_table_pos,
+ .data = &file.data[string_table_pos]
+ };
+
+ return true;
+}
+
+// Parses a non-negative decimal integer from at most `n` characters of
+// `str`, stopping at the first non-digit. Returns 0 when the first
+// character is not a digit.
+static long long tb__parse_decimal_int(size_t n, const char* str) {
+ const char* end = &str[n];
+
+ // FIX: the accumulator was `int` while the function returns `long long`,
+ // silently truncating (and risking signed-overflow UB on) long fields.
+ long long result = 0;
+ while (str != end) {
+ if (*str < '0' || *str > '9') break;
+
+ result *= 10;
+ result += *str - '0';
+ str++;
+ }
+
+ return result;
+}
+
+// Parses section header `i` into `out_sec`: flags, name (inline or via the
+// string table), relocations (translated into TB_ObjectReloc and heap
+// allocated — caller owns the array), virtual range, and raw data slice.
+// Returns false when the header or a string-table name offset is out of
+// bounds.
+bool tb_coff_parse_section(TB_COFF_Parser* restrict parser, size_t i, TB_ObjectSection* restrict out_sec) {
+ TB_Slice file = parser->file;
+ size_t section_offset = sizeof(COFF_FileHeader) + (i * sizeof(COFF_SectionHeader));
+
+ if (file.length < section_offset + sizeof(COFF_SectionHeader)) {
+ return false;
+ }
+
+ COFF_SectionHeader* sec = (COFF_SectionHeader*) &file.data[section_offset];
+ *out_sec = (TB_ObjectSection) { .flags = sec->characteristics };
+
+ // Parse string table name stuff
+ if (sec->name[0] == '/') {
+ // name is "/nnn": a decimal offset into the string table
+ long long offset = tb__parse_decimal_int(7, &sec->name[1]);
+ // FIX: the previous check `if (file.length > offset) return false;`
+ // was inverted — it rejected every in-bounds offset. The offset must
+ // lie inside the string table.
+ if (offset < 0 || (size_t) offset >= parser->string_table.length) {
+ return false;
+ }
+
+ const uint8_t* data = &parser->string_table.data[offset];
+ out_sec->name = (TB_Slice){ strlen((const char*) data), data };
+ } else {
+ // FIX: inline section names occupy at most 8 bytes and are NOT
+ // NUL-terminated when all 8 are used, so bound the scan instead of
+ // calling strlen on the raw field.
+ size_t len = 0;
+ while (len < 8 && sec->name[len] != 0) {
+ len += 1;
+ }
+ out_sec->name = (TB_Slice){ len, (uint8_t*) sec->name };
+ }
+
+ // Parse relocations
+ if (sec->num_reloc > 0) {
+ out_sec->relocation_count = sec->num_reloc;
+ COFF_ImageReloc* src_relocs = (COFF_ImageReloc*) &file.data[sec->pointer_to_reloc];
+
+ TB_ObjectReloc* dst_relocs = tb_platform_heap_alloc(sec->num_reloc * sizeof(TB_ObjectReloc));
+ FOREACH_N(j, 0, sec->num_reloc) {
+ dst_relocs[j] = (TB_ObjectReloc){ 0 };
+ switch (src_relocs[j].Type) {
+ case IMAGE_REL_AMD64_ADDR32NB: dst_relocs[j].type = TB_OBJECT_RELOC_ADDR32NB; break;
+ case IMAGE_REL_AMD64_ADDR32: dst_relocs[j].type = TB_OBJECT_RELOC_ADDR32; break;
+ case IMAGE_REL_AMD64_ADDR64: dst_relocs[j].type = TB_OBJECT_RELOC_ADDR64; break;
+ case IMAGE_REL_AMD64_SECREL: dst_relocs[j].type = TB_OBJECT_RELOC_SECREL; break;
+ case IMAGE_REL_AMD64_SECTION: dst_relocs[j].type = TB_OBJECT_RELOC_SECTION; break;
+
+ case IMAGE_REL_AMD64_REL32:
+ case IMAGE_REL_AMD64_REL32_1:
+ case IMAGE_REL_AMD64_REL32_2:
+ case IMAGE_REL_AMD64_REL32_3:
+ case IMAGE_REL_AMD64_REL32_4:
+ case IMAGE_REL_AMD64_REL32_5:
+ dst_relocs[j].type = TB_OBJECT_RELOC_REL32;
+ break;
+
+ default: tb_todo();
+ }
+
+ // REL32_k variants encode an extra displacement of k bytes past
+ // the 4-byte field itself.
+ if (src_relocs[j].Type >= IMAGE_REL_AMD64_REL32 && src_relocs[j].Type <= IMAGE_REL_AMD64_REL32_5) {
+ dst_relocs[j].addend = 4 + (src_relocs[j].Type - IMAGE_REL_AMD64_REL32);
+ }
+
+ dst_relocs[j].symbol_index = src_relocs[j].SymbolTableIndex;
+ dst_relocs[j].virtual_address = src_relocs[j].VirtualAddress;
+ }
+
+ out_sec->relocations = dst_relocs;
+ }
+
+ // Parse virtual region
+ out_sec->virtual_address = sec->virtual_address;
+ out_sec->virtual_size = sec->misc.virtual_size;
+
+ // Read raw data (if applies)
+ if (sec->raw_data_size) {
+ assert(sec->raw_data_pos + sec->raw_data_size < file.length);
+ out_sec->raw_data = (TB_Slice){ sec->raw_data_size, &file.data[sec->raw_data_pos] };
+ }
+
+ return true;
+}
+
+// Maps a COFF storage class onto the generic TB_ObjectSymbolType.
+// Uses the IMAGE_SYM_CLASS_* constants defined above instead of the magic
+// numbers they duplicate. Unrecognized classes become UNKNOWN.
+TB_ObjectSymbolType classify_symbol_type(uint16_t st_class) {
+ switch (st_class) {
+ case IMAGE_SYM_CLASS_EXTERNAL: return TB_OBJECT_SYMBOL_EXTERN;
+ case IMAGE_SYM_CLASS_STATIC: return TB_OBJECT_SYMBOL_STATIC;
+ case IMAGE_SYM_CLASS_LABEL: return TB_OBJECT_SYMBOL_STATIC;
+ case IMAGE_SYM_CLASS_SECTION: return TB_OBJECT_SYMBOL_SECTION;
+ case IMAGE_SYM_CLASS_WEAK_EXTERNAL: return TB_OBJECT_SYMBOL_WEAK_EXTERN;
+ default: return TB_OBJECT_SYMBOL_UNKNOWN;
+ }
+}
+
+// Parses symbol record `i` into `out_sym`. Returns 1 + the number of
+// auxiliary records this symbol occupies (the caller advances by that
+// amount), or 0 on error.
+size_t tb_coff_parse_symbol(TB_COFF_Parser* restrict parser, size_t i, TB_ObjectSymbol* restrict out_sym) {
+ TB_Slice file = parser->file;
+ size_t symbol_offset = parser->symbol_table + (i * sizeof(COFF_Symbol));
+
+ if (file.length < symbol_offset + sizeof(COFF_Symbol)) {
+ return 0;
+ }
+
+ COFF_Symbol* sym = (COFF_Symbol*) &file.data[symbol_offset];
+ *out_sym = (TB_ObjectSymbol) {
+ .ordinal = i,
+ .type = classify_symbol_type(sym->storage_class),
+ .section_num = sym->section_number,
+ .value = sym->value
+ };
+
+ // Parse string table name stuff
+ if (sym->long_name[0] == 0) {
+ // long_name[0] == 0 means long_name[1] is an offset into the string
+ // table. FIX: bounds-check the offset (was an open TODO); the cstring
+ // itself is still assumed NUL-terminated within the table.
+ if (sym->long_name[1] >= parser->string_table.length) {
+ return 0;
+ }
+ const uint8_t* data = &parser->string_table.data[sym->long_name[1]];
+ out_sym->name = (TB_Slice){ strlen((const char*) data), data };
+ } else {
+ // FIX: inline names occupy at most 8 bytes and are NOT NUL-terminated
+ // when all 8 are used, so bound the scan instead of calling strlen.
+ size_t len = 0;
+ while (len < 8 && sym->short_name[len] != 0) {
+ len += 1;
+ }
+ out_sym->name = (TB_Slice){ len, sym->short_name };
+ }
+
+ // TODO(NeGate): Process aux symbols
+ if (sym->aux_symbols_count) {
+ out_sym->extra = &sym[1];
+
+ // FOREACH_N(j, 0, sym->aux_symbols_count) {}
+ }
+
+ return sym->aux_symbols_count + 1;
+}
+
+#endif // TB_COFF_IMPL
diff --git a/src/tilde/tb_elf.h b/src/tilde/tb_elf.h
new file mode 100644
index 000000000..5260d7372
--- /dev/null
+++ b/src/tilde/tb_elf.h
@@ -0,0 +1,170 @@
+#ifndef TB_ELF_H
+#define TB_ELF_H
+
+#include <stdint.h>
+
+#define TB_EI_MAG0 0
+#define TB_EI_MAG1 1
+#define TB_EI_MAG2 2
+#define TB_EI_MAG3 3
+#define TB_EI_CLASS 4 /* Class of machine. */
+#define TB_EI_DATA 5 /* Data format. */
+#define TB_EI_VERSION 6 /* ELF format version. */
+#define TB_EI_OSABI 7 /* Operating system / ABI identification */
+#define TB_EI_ABIVERSION 8 /* ABI version */
+#define TB_OLD_EI_BRAND 8 /* Start of architecture identification. */
+#define TB_EI_PAD 9 /* Start of padding (per SVR4 ABI). */
+#define TB_EI_NIDENT 16 /* Size of e_ident array. */
+
+/* Values for e_type. */
+#define TB_ET_NONE 0 /* Unknown type. */
+#define TB_ET_REL 1 /* Relocatable. */
+#define TB_ET_EXEC 2 /* Executable. */
+#define TB_ET_DYN 3 /* Shared object. */
+#define TB_ET_CORE 4 /* Core file. */
+#define TB_ET_LOOS 0xfe00 /* First operating system specific. */
+#define TB_ET_HIOS 0xfeff /* Last operating system-specific. */
+#define TB_ET_LOPROC 0xff00 /* First processor-specific. */
+#define TB_ET_HIPROC 0xffff /* Last processor-specific. */
+
+/* Values for e_machine. */
+#define TB_EM_NONE 0 /* Unknown machine. */
+#define TB_EM_X86_64 62 /* Advanced Micro Devices x86-64 */
+#define TB_EM_AARCH64 183 /* AArch64 (64-bit ARM) */
+
+/* sh_type */
+#define TB_SHT_NULL 0 /* inactive */
+#define TB_SHT_PROGBITS 1 /* program defined information */
+#define TB_SHT_SYMTAB 2 /* symbol table section */
+#define TB_SHT_STRTAB 3 /* string table section */
+#define TB_SHT_RELA 4 /* relocation section with addends */
+#define TB_SHT_NOBITS 8 /* no space section */
+
+/* Flags for sh_flags. */
+#define TB_SHF_WRITE 0x1 /* Section contains writable data. */
+#define TB_SHF_ALLOC 0x2 /* Section occupies memory. */
+#define TB_SHF_EXECINSTR 0x4 /* Section contains instructions. */
+#define TB_SHF_MERGE 0x10 /* Section may be merged. */
+#define TB_SHF_STRINGS 0x20 /* Section contains strings. */
+#define TB_SHF_INFO_LINK 0x40 /* sh_info holds section index. */
+#define TB_SHF_LINK_ORDER 0x80 /* Special ordering requirements. */
+#define TB_SHF_OS_NONCONFORMING 0x100 /* OS-specific processing required. */
+#define TB_SHF_GROUP 0x200 /* Member of section group. */
+#define TB_SHF_TLS 0x400 /* Section contains TLS data. */
+#define TB_SHF_MASKOS 0x0ff00000 /* OS-specific semantics. */
+#define TB_SHF_MASKPROC 0xf0000000 /* Processor-specific semantics. */
+
+/* Values for p_flags. */
+#define TB_PF_X 0x1 /* Executable. */
+#define TB_PF_W 0x2 /* Writable. */
+#define TB_PF_R 0x4 /* Readable. */
+#define TB_PF_MASKOS 0x0ff00000 /* Operating system-specific. */
+#define TB_PF_MASKPROC 0xf0000000 /* Processor-specific. */
+
+/* Values for p_type. */
+#define TB_PT_NULL 0 /* Unused entry. */
+#define TB_PT_LOAD 1 /* Loadable segment. */
+#define TB_PT_DYNAMIC 2 /* Dynamic linking information segment. */
+#define TB_PT_INTERP 3 /* Pathname of interpreter. */
+#define TB_PT_NOTE 4 /* Auxiliary information. */
+#define TB_PT_SHLIB 5 /* Reserved (not used). */
+#define TB_PT_PHDR 6 /* Location of program header itself. */
+#define TB_PT_TLS 7 /* Thread local storage segment */
+
+/* Values for relocation */
+typedef enum {
+ TB_ELF_X86_64_NONE = 0,
+ TB_ELF_X86_64_64 = 1,
+ TB_ELF_X86_64_PC32 = 2,
+ TB_ELF_X86_64_GOT32 = 3,
+ TB_ELF_X86_64_PLT32 = 4,
+ TB_ELF_X86_64_GOTPCREL = 9,
+} TB_ELF_RelocType;
+
+// ST_TYPE
+#define TB_ELF64_STT_NOTYPE 0
+#define TB_ELF64_STT_OBJECT 1
+#define TB_ELF64_STT_FUNC 2
+#define TB_ELF64_STT_SECTION 3
+
+// ST_INFO
+#define TB_ELF64_STB_LOCAL 0
+#define TB_ELF64_STB_GLOBAL 1
+#define TB_ELF64_STB_WEAK 2
+
+/* Macros for accessing the fields of st_info. */
+#define TB_ELF64_ST_BIND(info) ((info) >> 4)
+#define TB_ELF64_ST_TYPE(info) ((info) & 0xf)
+
+#define TB_ELF64_ST_INFO(b, t) (((b) << 4) | ((t) & 0xF))
+
+#define TB_ELF64_R_SYM(i) ((i) >> 32u)
+#define TB_ELF64_R_TYPE(i) ((i)&0xffffffffULL)
+#define TB_ELF64_R_INFO(s, t) (((uint64_t)(s) << 32ULL) + ((uint64_t)(t) & 0xffffffffULL))
+
+// http://web.mit.edu/freebsd/head/sys/sys/elf64.h
+// https://cirosantilli.com/elf-hello-world#minimal-elf-file
+// https://en.wikipedia.org/wiki/Executable_and_Linkable_Format
+typedef struct {
+ uint8_t ident[16];
+ uint16_t type;
+ uint16_t machine;
+ uint32_t version;
+ uint64_t entry;
+ uint64_t phoff;
+ uint64_t shoff;
+ uint32_t flags;
+ uint16_t ehsize;
+ uint16_t phentsize;
+ uint16_t phnum;
+ uint16_t shentsize;
+ uint16_t shnum;
+ uint16_t shstrndx;
+} TB_Elf64_Ehdr;
+
+typedef struct {
+ uint32_t name;
+ uint32_t type;
+ uint64_t flags;
+ uint64_t addr;
+ uint64_t offset;
+ uint64_t size;
+ uint32_t link;
+ uint32_t info;
+ uint64_t addralign;
+ uint64_t entsize;
+} TB_Elf64_Shdr;
+
+// Segment header for ELF64.
+typedef struct {
+ uint32_t type; // Type of segment
+ uint32_t flags; // Segment flags
+ uint64_t offset; // File offset where segment is located, in bytes
+ uint64_t vaddr; // Virtual address of beginning of segment
+ uint64_t paddr; // Physical addr of beginning of segment (OS-specific)
+ uint64_t filesz; // Num. of bytes in file image of segment (may be zero)
+ uint64_t memsz; // Num. of bytes in mem image of segment (may be zero)
+ uint64_t align; // Segment alignment constraint
+} TB_Elf64_Phdr;
+
+typedef struct {
+ uint32_t name;
+ uint8_t info;
+ uint8_t other;
+ uint16_t shndx;
+ uint64_t value;
+ uint64_t size;
+} TB_Elf64_Sym;
+
+typedef struct {
+ uint64_t offset;
+ uint64_t info;
+ int64_t addend;
+} TB_Elf64_Rela;
+
+typedef struct {
+ uint64_t offset;
+ uint64_t info;
+} TB_Elf64_Rel;
+
+#endif /* TB_ELF_H */
diff --git a/src/tilde/tb_formats.h b/src/tilde/tb_formats.h
new file mode 100644
index 000000000..1975d5848
--- /dev/null
+++ b/src/tilde/tb_formats.h
@@ -0,0 +1,132 @@
+// This handles the generalized executable/object format parsing stuff
+#ifndef TB_OBJECT_H
+#define TB_OBJECT_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+typedef enum {
+ TB_OBJECT_RELOC_NONE, // how?
+
+ // Target independent
+ TB_OBJECT_RELOC_ADDR32,
+ TB_OBJECT_RELOC_ADDR64, // unsupported on 32bit platforms
+ TB_OBJECT_RELOC_SECREL,
+ TB_OBJECT_RELOC_SECTION,
+
+ // COFF only
+ TB_OBJECT_RELOC_ADDR32NB, // Relative virtual address
+
+ // x64 only
+ TB_OBJECT_RELOC_REL32, // relative 32bit displacement
+
+ // Aarch64 only
+ TB_OBJECT_RELOC_BRANCH26, // 26bit displacement for B and BL instructions
+ TB_OBJECT_RELOC_REL21, // for ADR instructions
+
+ // TODO(NeGate): fill in the rest of this later
+} TB_ObjectRelocType;
+
+typedef struct {
+ TB_ObjectRelocType type;
+ uint32_t symbol_index;
+ size_t virtual_address;
+ size_t addend;
+} TB_ObjectReloc;
+
+typedef enum {
+ TB_OBJECT_SYMBOL_UNKNOWN,
+ TB_OBJECT_SYMBOL_EXTERN, // exported
+ TB_OBJECT_SYMBOL_WEAK_EXTERN, // weak
+ TB_OBJECT_SYMBOL_IMPORT, // forward decl
+ TB_OBJECT_SYMBOL_STATIC, // local
+ TB_OBJECT_SYMBOL_SECTION, // local
+} TB_ObjectSymbolType;
+
+typedef struct {
+ TB_ObjectSymbolType type;
+ int section_num;
+
+ uint32_t ordinal;
+ uint32_t value;
+
+ TB_Slice name;
+
+ // for COFF, this is the auxillary
+ void* extra;
+
+ // this is zeroed out by the loader and left for the user to do crap with
+ void* user_data;
+} TB_ObjectSymbol;
+
+typedef struct {
+ TB_Slice name;
+ uint32_t flags;
+
+ size_t virtual_address;
+ size_t virtual_size;
+
+ // You can have a virtual size without having a raw
+ // data size, that's how the BSS section works
+ TB_Slice raw_data;
+
+ size_t relocation_count;
+ TB_ObjectReloc* relocations;
+
+ // this is zeroed out by the loader and left for the user to do crap with
+ void* user_data;
+} TB_ObjectSection;
+
+typedef enum {
+ TB_OBJECT_FILE_UNKNOWN,
+
+ TB_OBJECT_FILE_COFF,
+ TB_OBJECT_FILE_ELF64
+} TB_ObjectFileType;
+
+typedef struct {
+ TB_ObjectFileType type;
+ TB_Arch arch;
+
+ TB_Slice name;
+ TB_Slice ar_name;
+
+ size_t symbol_count;
+ TB_ObjectSymbol* symbols;
+
+ size_t section_count;
+ TB_ObjectSection sections[];
+} TB_ObjectFile;
+
+////////////////////////////////
+// Archive parser
+////////////////////////////////
+typedef struct {
+ TB_Slice name;
+
+ // if import_name is empty, we're dealing with an object file
+ TB_Slice import_name;
+ uint16_t ordinal;
+
+ TB_Slice content;
+} TB_ArchiveEntry;
+
+typedef struct {
+ TB_Slice file;
+ size_t pos;
+
+ size_t member_count;
+ uint32_t* members;
+
+ size_t symbol_count;
+ uint16_t* symbols;
+
+ TB_Slice strtbl;
+} TB_ArchiveFileParser;
+
+// We do this to parse the header
+bool tb_archive_parse(TB_Slice file, TB_ArchiveFileParser* restrict out_parser);
+// After that we can enumerate any symbol entries to resolve imports
+size_t tb_archive_parse_entries(TB_ArchiveFileParser* restrict parser, size_t i, size_t count, TB_ArchiveEntry* out_entry);
+
+#endif // TB_OBJECT_H
diff --git a/src/tilde/tb_x64.h b/src/tilde/tb_x64.h
new file mode 100644
index 000000000..5f93f6bdb
--- /dev/null
+++ b/src/tilde/tb_x64.h
@@ -0,0 +1,90 @@
+#ifndef TB_X64_H
+#define TB_X64_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+typedef enum {
+ // uses xmm registers for the reg array
+ TB_X86_INSTR_XMMREG = (1u << 0u),
+
+ // r/m is a memory operand
+ TB_X86_INSTR_USE_MEMOP = (1u << 1u),
+
+ // r/m is a rip-relative address (TB_X86_INSTR_USE_MEMOP is always set when this is set)
+ TB_X86_INSTR_USE_RIPMEM = (1u << 2u),
+
+ // LOCK prefix is present
+ TB_X86_INSTR_LOCK = (1u << 3u),
+
+ // uses a signed immediate
+ TB_X86_INSTR_IMMEDIATE = (1u << 4u),
+
+ // absolute means it's using the 64bit immediate (cannot be applied while a memory operand is active)
+ TB_X86_INSTR_ABSOLUTE = (1u << 5u),
+
+ // set if the r/m can be found on the right hand side
+ TB_X86_INSTR_DIRECTION = (1u << 6u),
+
+ // uses the second data type because the instruction is weird like MOVSX or MOVZX
+ TB_X86_INSTR_TWO_DATA_TYPES = (1u << 7u)
+} TB_X86_InstFlags;
+
+typedef enum {
+ TB_X86_SEGMENT_DEFAULT = 0,
+
+ TB_X86_SEGMENT_ES, TB_X86_SEGMENT_CS,
+ TB_X86_SEGMENT_SS, TB_X86_SEGMENT_DS,
+ TB_X86_SEGMENT_GS, TB_X86_SEGMENT_FS,
+} TB_X86_Segment;
+
+typedef enum {
+ TB_X86_TYPE_NONE = 0,
+
+ TB_X86_TYPE_BYTE, // 1
+ TB_X86_TYPE_WORD, // 2
+ TB_X86_TYPE_DWORD, // 4
+ TB_X86_TYPE_QWORD, // 8
+
+ TB_X86_TYPE_PBYTE, // int8 x 16 = 16
+ TB_X86_TYPE_PWORD, // int16 x 8 = 16
+ TB_X86_TYPE_PDWORD, // int32 x 4 = 16
+ TB_X86_TYPE_PQWORD, // int64 x 2 = 16
+
+ TB_X86_TYPE_SSE_SS, // float32 x 1 = 4
+ TB_X86_TYPE_SSE_SD, // float64 x 1 = 8
+ TB_X86_TYPE_SSE_PS, // float32 x 4 = 16
+ TB_X86_TYPE_SSE_PD, // float64 x 2 = 16
+
+ TB_X86_TYPE_XMMWORD, // the generic idea of them
+} TB_X86_DataType;
+
+typedef struct {
+ int16_t type;
+
+ // registers (there's 4 max taking up 4bit slots each)
+ uint16_t regs;
+ uint8_t flags;
+
+ // bitpacking amirite
+ TB_X86_DataType data_type : 4;
+ TB_X86_DataType data_type2 : 4;
+ TB_X86_Segment segment : 4;
+ uint8_t length : 4;
+
+ // memory operand
+ // X86_INSTR_USE_MEMOP
+ int32_t disp;
+
+ // immediate operand
+ // imm for INSTR_IMMEDIATE
+ // abs for INSTR_ABSOLUTE
+ union {
+ int32_t imm;
+ uint64_t abs;
+ };
+} TB_X86_Inst;
+
+TB_X86_Inst tb_x86_disasm(size_t length, const uint8_t data[length]);
+
+#endif /* TB_X64_H */
diff --git a/src/tilde_builtin.cpp b/src/tilde_builtin.cpp
new file mode 100644
index 000000000..f036ce583
--- /dev/null
+++ b/src/tilde_builtin.cpp
@@ -0,0 +1,443 @@
+// Emits code for the builtin `len` on an already-evaluated value.
+// string/slice/dynamic array: loads struct field 1 (the length) through the
+// value's address; map: loads field 1 and converts to int; fixed and
+// enumerated arrays: compile-time constant count. cstring and SOA structs
+// are not implemented yet and panic.
+gb_internal cgValue cg_builtin_len(cgProcedure *p, cgValue value) {
+ Type *t = base_type(value.type);
+
+ switch (t->kind) {
+ case Type_Basic:
+ switch (t->Basic.kind) {
+ case Basic_string:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 1);
+ return cg_emit_load(p, len_ptr);
+ }
+ case Basic_cstring:
+ GB_PANIC("TODO(bill): len(cstring)");
+ break;
+ }
+ break;
+ case Type_Array:
+ return cg_const_int(p, t_int, t->Array.count);
+ case Type_EnumeratedArray:
+ return cg_const_int(p, t_int, t->EnumeratedArray.count);
+ case Type_Slice:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 1);
+ return cg_emit_load(p, len_ptr);
+ }
+ case Type_DynamicArray:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 1);
+ return cg_emit_load(p, len_ptr);
+ }
+ case Type_Map:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 1);
+ return cg_emit_conv(p, cg_emit_load(p, len_ptr), t_int);
+ }
+ case Type_Struct:
+ // SOA structs fall through to the panic below: not implemented yet.
+ GB_ASSERT(is_type_soa_struct(t));
+ break;
+ }
+
+ GB_PANIC("TODO(bill): cg_builtin_len %s", type_to_string(t));
+ return {};
+}
+
+// Emits code for the builtin `cap`.
+// - string/slice: loads the len field (cap == len for these)
+// - dynamic array: loads the cap field (struct index 2)
+// - map: the capacity is stored as log2(cap) packed into the low bits of
+//   the data pointer (masked with MAP_CACHE_LINE_SIZE-1); reconstructs
+//   1 << log2_cap, yielding 0 for a nil map
+// - fixed/enumerated arrays: compile-time constant count
+gb_internal cgValue cg_builtin_cap(cgProcedure *p, cgValue value) {
+ Type *t = base_type(value.type);
+
+ switch (t->kind) {
+ case Type_Basic:
+ switch (t->Basic.kind) {
+ case Basic_string:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 1);
+ return cg_emit_load(p, len_ptr);
+ }
+ case Basic_cstring:
+ GB_PANIC("TODO(bill): cap(cstring)");
+ break;
+ }
+ break;
+ case Type_Array:
+ return cg_const_int(p, t_int, t->Array.count);
+ case Type_EnumeratedArray:
+ return cg_const_int(p, t_int, t->EnumeratedArray.count);
+ case Type_Slice:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 1);
+ return cg_emit_load(p, len_ptr);
+ }
+ case Type_DynamicArray:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue len_ptr = cg_emit_struct_ep(p, ptr, 2);
+ return cg_emit_load(p, len_ptr);
+ }
+ case Type_Map:
+ {
+ TB_DataType dt_uintptr = cg_data_type(t_uintptr);
+ TB_Node *zero = tb_inst_uint(p->func, dt_uintptr, 0);
+ // FIX: `one` was previously built from the constant 0, which made
+ // `1 << log2_cap` always evaluate to 0; it must be the constant 1.
+ TB_Node *one = tb_inst_uint(p->func, dt_uintptr, 1);
+ TB_Node *mask = tb_inst_uint(p->func, dt_uintptr, MAP_CACHE_LINE_SIZE-1);
+
+ TB_Node *data = cg_emit_struct_ev(p, value, 0).node;
+ TB_Node *log2_cap = tb_inst_and(p->func, data, mask);
+ TB_Node *cap = tb_inst_shl(p->func, one, log2_cap, cast(TB_ArithmeticBehavior)0);
+ TB_Node *cmp = tb_inst_cmp_eq(p->func, data, zero);
+
+ cgValue res = cg_value(tb_inst_select(p->func, cmp, zero, cap), t_uintptr);
+ return cg_emit_conv(p, res, t_int);
+ }
+ case Type_Struct:
+ // SOA structs fall through to the panic below: not implemented yet.
+ GB_ASSERT(is_type_soa_struct(t));
+ break;
+ }
+
+ GB_PANIC("TODO(bill): cg_builtin_cap %s", type_to_string(t));
+ return {};
+}
+
+
+// Emits code for the builtin `raw_data`: extracts the underlying data
+// pointer of a slice, dynamic array, string, cstring, or pointer-to-array.
+// Panics for multi-pointers (not implemented yet); asserts a non-null
+// result so unhandled kinds fail loudly.
+gb_internal cgValue cg_builtin_raw_data(cgProcedure *p, cgValue const &value) {
+ Type *t = base_type(value.type);
+ cgValue res = {};
+ switch (t->kind) {
+ case Type_Slice:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue data_ptr = cg_emit_struct_ep(p, ptr, 0);
+ res = cg_emit_load(p, data_ptr);
+ GB_ASSERT(is_type_multi_pointer(res.type));
+ }
+ break;
+ case Type_DynamicArray:
+ {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue data_ptr = cg_emit_struct_ep(p, ptr, 0);
+ res = cg_emit_load(p, data_ptr);
+ }
+ break;
+ case Type_Basic:
+ if (t->Basic.kind == Basic_string) {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ cgValue ptr = cg_value(value.node, alloc_type_pointer(value.type));
+ cgValue data_ptr = cg_emit_struct_ep(p, ptr, 0);
+ res = cg_emit_load(p, data_ptr);
+ } else if (t->Basic.kind == Basic_cstring) {
+ // a cstring is already a pointer; just reinterpret it as [^]u8
+ res = cg_emit_conv(p, value, t_u8_multi_ptr);
+ }
+ break;
+ case Type_Pointer:
+ // ^[N]T reinterprets as a multi-pointer to the element type
+ GB_ASSERT(is_type_array_like(t->Pointer.elem));
+ GB_ASSERT(value.kind == cgValue_Value);
+ res = cg_value(value.node, alloc_type_multi_pointer(base_array_type(t->Pointer.elem)));
+ break;
+ case Type_MultiPointer:
+
+ GB_PANIC("TODO(bill) %s", type_to_string(value.type));
+ // res = cg_emit_conv(p, value, tv.type);
+ break;
+ }
+ GB_ASSERT(res.node != nullptr);
+ return res;
+}
+
+// min(x, y): converts both operands to the common type `t`, then lowers to
+// a `<` compare feeding a select.
+gb_internal cgValue cg_builtin_min(cgProcedure *p, Type *t, cgValue x, cgValue y) {
+ x = cg_emit_conv(p, x, t);
+ y = cg_emit_conv(p, y, t);
+ return cg_emit_select(p, cg_emit_comp(p, Token_Lt, x, y), x, y);
+}
+// max(x, y): converts both operands to the common type `t`, then lowers to
+// a `>` compare feeding a select.
+gb_internal cgValue cg_builtin_max(cgProcedure *p, Type *t, cgValue x, cgValue y) {
+ x = cg_emit_conv(p, x, t);
+ y = cg_emit_conv(p, y, t);
+ return cg_emit_select(p, cg_emit_comp(p, Token_Gt, x, y), x, y);
+}
+
+// abs(x): identity for unsigned types; otherwise lowered as
+// select(x < 0, -x, x) with a type-appropriate zero constant.
+// Quaternion and complex abs are not implemented yet and panic.
+// NOTE(review): for the minimum signed integer the negation wraps — confirm
+// this matches the intended two's-complement semantics.
+gb_internal cgValue cg_builtin_abs(cgProcedure *p, cgValue const &x) {
+ if (is_type_unsigned(x.type)) {
+ return x;
+ }
+
+ if (is_type_quaternion(x.type)) {
+ GB_PANIC("TODO(bill): abs quaternion");
+ } else if (is_type_complex(x.type)) {
+ GB_PANIC("TODO(bill): abs complex");
+ }
+
+ TB_DataType dt = cg_data_type(x.type);
+ GB_ASSERT(!TB_IS_VOID_TYPE(dt));
+ TB_Node *zero = nullptr;
+ if (dt.type == TB_FLOAT) {
+ // f16 (size 2) has no dedicated constant constructor; handled as uint
+ if (dt.data == 32) {
+ zero = tb_inst_float32(p->func, 0);
+ } else if (dt.data == 64) {
+ zero = tb_inst_float64(p->func, 0);
+ }
+ } else {
+ zero = tb_inst_uint(p->func, dt, 0);
+ }
+ GB_ASSERT(zero != nullptr);
+
+ cgValue cond = cg_emit_comp(p, Token_Lt, x, cg_value(zero, x.type));
+ cgValue neg = cg_emit_unary_arith(p, Token_Sub, x, x.type);
+ return cg_emit_select(p, cond, neg, x);
+}
+
+// clamp(x, min, max) == min(max(x, min), max): apply the lower bound first,
+// then the upper bound — the same order as the original two-step sequence.
+gb_internal cgValue cg_builtin_clamp(cgProcedure *p, Type *t, cgValue const &x, cgValue const &min, cgValue const &max) {
+ return cg_builtin_min(p, t, cg_builtin_max(p, t, x, min), max);
+}
+
+
+
+// mem_zero(ptr, len): emits a memzero of `len` bytes at `ptr` (alignment 1,
+// non-volatile) and returns `ptr` for chaining.
+gb_internal cgValue cg_builtin_mem_zero(cgProcedure *p, cgValue const &ptr, cgValue const &len) {
+ GB_ASSERT(ptr.kind == cgValue_Value);
+ GB_ASSERT(len.kind == cgValue_Value);
+ tb_inst_memzero(p->func, ptr.node, len.node, 1, false);
+ return ptr;
+}
+
+// mem_copy(dst, src, len): emits a byte copy and returns `dst`. The TODO
+// below stands: overlapping ranges need memmove semantics, but this
+// currently lowers to memcpy.
+gb_internal cgValue cg_builtin_mem_copy(cgProcedure *p, cgValue const &dst, cgValue const &src, cgValue const &len) {
+ GB_ASSERT(dst.kind == cgValue_Value);
+ GB_ASSERT(src.kind == cgValue_Value);
+ GB_ASSERT(len.kind == cgValue_Value);
+ // TODO(bill): This needs to be memmove
+ tb_inst_memcpy(p->func, dst.node, src.node, len.node, 1, false);
+ return dst;
+}
+
+// mem_copy_non_overlapping(dst, src, len): memcpy is the correct lowering
+// here since the ranges are guaranteed disjoint by the caller. Returns `dst`.
+gb_internal cgValue cg_builtin_mem_copy_non_overlapping(cgProcedure *p, cgValue const &dst, cgValue const &src, cgValue const &len) {
+ GB_ASSERT(dst.kind == cgValue_Value);
+ GB_ASSERT(src.kind == cgValue_Value);
+ GB_ASSERT(len.kind == cgValue_Value);
+ tb_inst_memcpy(p->func, dst.node, src.node, len.node, 1, false);
+ return dst;
+}
+
+
+// Central dispatcher that lowers a call to a builtin procedure (`len`,
+// `min`, `#location`, ...) into Tilde IR. Unhandled builtins (including all
+// SIMD ones) panic with the builtin's name so missing cases surface early.
+gb_internal cgValue cg_build_builtin(cgProcedure *p, BuiltinProcId id, Ast *expr) {
+ ast_node(ce, CallExpr, expr);
+
+ if (BuiltinProc__simd_begin < id && id < BuiltinProc__simd_end) {
+ GB_PANIC("TODO(bill): cg_build_builtin_simd_proc");
+ // return cg_build_builtin_simd_proc(p, expr, tv, id);
+ }
+
+ String builtin_name = builtin_procs[id].name;
+
+ switch (id) {
+ // #location: materialize a Source_Code_Location global for either the
+ // call site or, when an entity argument is given, that entity's token.
+ case BuiltinProc_DIRECTIVE: {
+ ast_node(bd, BasicDirective, ce->proc);
+ String name = bd->name.string;
+ GB_ASSERT(name == "location");
+ String procedure = p->entity->token.string;
+ TokenPos pos = ast_token(ce->proc).pos;
+ if (ce->args.count > 0) {
+ Ast *ident = unselector_expr(ce->args[0]);
+ GB_ASSERT(ident->kind == Ast_Ident);
+ Entity *e = entity_of_node(ident);
+ GB_ASSERT(e != nullptr);
+
+ if (e->parent_proc_decl != nullptr && e->parent_proc_decl->entity != nullptr) {
+ procedure = e->parent_proc_decl->entity->token.string;
+ } else {
+ procedure = str_lit("");
+ }
+ pos = e->token.pos;
+
+ }
+ return cg_emit_source_code_location_as_global(p, procedure, pos);
+ } break;
+
+ // len/cap auto-dereference a pointer argument once before delegating.
+ case BuiltinProc_len: {
+ cgValue v = cg_build_expr(p, ce->args[0]);
+ Type *t = base_type(v.type);
+ if (is_type_pointer(t)) {
+ // IMPORTANT TODO(bill): Should there be a nil pointer check?
+ v = cg_emit_load(p, v);
+ t = type_deref(t);
+ }
+ return cg_builtin_len(p, v);
+ }
+
+ case BuiltinProc_cap: {
+ cgValue v = cg_build_expr(p, ce->args[0]);
+ Type *t = base_type(v.type);
+ if (is_type_pointer(t)) {
+ // IMPORTANT TODO(bill): Should there be a nil pointer check?
+ v = cg_emit_load(p, v);
+ t = type_deref(t);
+ }
+ return cg_builtin_cap(p, v);
+ }
+
+ case BuiltinProc_raw_data:
+ {
+ cgValue v = cg_build_expr(p, ce->args[0]);
+ return cg_builtin_raw_data(p, v);
+ }
+
+ // min/max accept 2+ arguments; the variadic form folds left-to-right.
+ case BuiltinProc_min:
+ if (ce->args.count == 2) {
+ Type *t = type_of_expr(expr);
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ cgValue y = cg_build_expr(p, ce->args[1]);
+ return cg_builtin_min(p, t, x, y);
+ } else {
+ Type *t = type_of_expr(expr);
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ for (isize i = 1; i < ce->args.count; i++) {
+ cgValue y = cg_build_expr(p, ce->args[i]);
+ x = cg_builtin_min(p, t, x, y);
+ }
+ return x;
+ }
+ break;
+ case BuiltinProc_max:
+ if (ce->args.count == 2) {
+ Type *t = type_of_expr(expr);
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ cgValue y = cg_build_expr(p, ce->args[1]);
+ return cg_builtin_max(p, t, x, y);
+ } else {
+ Type *t = type_of_expr(expr);
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ for (isize i = 1; i < ce->args.count; i++) {
+ cgValue y = cg_build_expr(p, ce->args[i]);
+ x = cg_builtin_max(p, t, x, y);
+ }
+ return x;
+ }
+ break;
+
+ case BuiltinProc_abs:
+ {
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ return cg_builtin_abs(p, x);
+ }
+
+ case BuiltinProc_clamp:
+ {
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ cgValue min = cg_build_expr(p, ce->args[1]);
+ cgValue max = cg_build_expr(p, ce->args[2]);
+ return cg_builtin_clamp(p, type_of_expr(expr), x, min, max);
+ }
+
+ case BuiltinProc_debug_trap:
+ tb_inst_debugbreak(p->func);
+ return {};
+ case BuiltinProc_trap:
+ tb_inst_trap(p->func);
+ return {};
+
+ case BuiltinProc_mem_zero:
+ {
+ cgValue ptr = cg_build_expr(p, ce->args[0]);
+ cgValue len = cg_build_expr(p, ce->args[1]);
+ return cg_builtin_mem_zero(p, ptr, len);
+ }
+
+ case BuiltinProc_mem_copy:
+ {
+ cgValue dst = cg_build_expr(p, ce->args[0]);
+ cgValue src = cg_build_expr(p, ce->args[1]);
+ cgValue len = cg_build_expr(p, ce->args[2]);
+ return cg_builtin_mem_copy(p, dst, src, len);
+ }
+
+ case BuiltinProc_mem_copy_non_overlapping:
+ {
+ cgValue dst = cg_build_expr(p, ce->args[0]);
+ cgValue src = cg_build_expr(p, ce->args[1]);
+ cgValue len = cg_build_expr(p, ce->args[2]);
+ return cg_builtin_mem_copy_non_overlapping(p, dst, src, len);
+ }
+
+
+ // NOTE: placeholder lowering — performs a plain add and always reports
+ // ok=false; see the TODO below.
+ case BuiltinProc_overflow_add:
+ {
+ Type *res_type = type_of_expr(expr);
+ GB_ASSERT(res_type->kind == Type_Tuple);
+ GB_ASSERT(res_type->Tuple.variables.count == 2);
+ // TODO(bill): do a proper overflow add
+ Type *type = res_type->Tuple.variables[0]->type;
+ Type *ok_type = res_type->Tuple.variables[1]->type;
+ cgValue x = cg_build_expr(p, ce->args[0]);
+ cgValue y = cg_build_expr(p, ce->args[1]);
+ x = cg_emit_conv(p, x, type);
+ y = cg_emit_conv(p, y, type);
+ cgValue res = cg_emit_arith(p, Token_Add, x, y, type);
+ cgValue ok = cg_const_int(p, ok_type, false);
+
+ return cg_value_multi2(res, ok, res_type);
+ }
+
+
+ case BuiltinProc_ptr_offset:
+ {
+ cgValue ptr = cg_build_expr(p, ce->args[0]);
+ cgValue len = cg_build_expr(p, ce->args[1]);
+ len = cg_emit_conv(p, len, t_int);
+ return cg_emit_ptr_offset(p, ptr, len);
+ }
+ // ptr_sub: byte difference via uintptr arithmetic, divided by the
+ // element size to yield an element count.
+ case BuiltinProc_ptr_sub:
+ {
+ Type *elem0 = type_deref(type_of_expr(ce->args[0]));
+ Type *elem1 = type_deref(type_of_expr(ce->args[1]));
+ GB_ASSERT(are_types_identical(elem0, elem1));
+ Type *elem = elem0;
+
+ cgValue ptr0 = cg_emit_conv(p, cg_build_expr(p, ce->args[0]), t_uintptr);
+ cgValue ptr1 = cg_emit_conv(p, cg_build_expr(p, ce->args[1]), t_uintptr);
+
+ cgValue diff = cg_emit_arith(p, Token_Sub, ptr0, ptr1, t_uintptr);
+ diff = cg_emit_conv(p, diff, t_int);
+ return cg_emit_arith(p, Token_Quo, diff, cg_const_int(p, t_int, type_size_of(elem)), t_int);
+ }
+
+ // type_info_of: resolved at compile time for type arguments, otherwise
+ // deferred to the runtime helper for typeid values.
+ case BuiltinProc_type_info_of:
+ {
+ Ast *arg = ce->args[0];
+ TypeAndValue tav = type_and_value_of_expr(arg);
+ if (tav.mode == Addressing_Type) {
+ Type *t = default_type(type_of_expr(arg));
+ return cg_type_info(p, t);
+ }
+ GB_ASSERT(is_type_typeid(tav.type));
+
+ auto args = slice_make<cgValue>(permanent_allocator(), 1);
+ args[0] = cg_build_expr(p, arg);
+ return cg_emit_runtime_call(p, "__type_info_of", args);
+ }
+
+
+ case BuiltinProc_type_equal_proc:
+ return cg_equal_proc_value_for_type(p, ce->args[0]->tav.type);
+
+ case BuiltinProc_type_hasher_proc:
+ return cg_hasher_proc_value_for_type(p, ce->args[0]->tav.type);
+ }
+
+
+ GB_PANIC("TODO(bill): builtin procs %d %.*s", id, LIT(builtin_name));
+ return {};
+}
+
diff --git a/src/tilde_const.cpp b/src/tilde_const.cpp
new file mode 100644
index 000000000..f9187e3e1
--- /dev/null
+++ b/src/tilde_const.cpp
@@ -0,0 +1,1040 @@
+// Reports whether `expr` is a compile-time constant whose integer value is
+// exactly zero. Non-integer (or non-constant) expressions are never "zero".
+gb_internal bool cg_is_expr_constant_zero(Ast *expr) {
+	GB_ASSERT(expr != nullptr);
+	ExactValue as_int = exact_value_to_integer(expr->tav.value);
+	if (as_int.kind != ExactValue_Integer) {
+		return false;
+	}
+	return big_int_cmp_zero(&as_int.value_integer) == 0;
+}
+
+// Emits the zero value ("nil") of `type`.
+//
+// Scalar-like types become immediate zero constants. Aggregate types — those
+// that lower to a void TB_DataType — are materialized as a private,
+// zero-filled read-only global and returned by address (or as a bare symbol
+// when no procedure is available).
+//
+// NOTE(review): on the non-void paths below `p->func` is dereferenced without
+// a null check, so callers appear required to pass a procedure whenever
+// `type` lowers to a non-void TB_DataType — confirm against call sites.
+gb_internal cgValue cg_const_nil(cgModule *m, cgProcedure *p, Type *type) {
+	GB_ASSERT(m != nullptr);
+	Type *original_type = type;
+	type = core_type(type);
+	i64 size = type_size_of(type);
+	i64 align = type_align_of(type);
+	TB_DataType dt = cg_data_type(type);
+	if (TB_IS_VOID_TYPE(dt)) {
+		// Aggregate: back it with a uniquely named zero-initialized global
+		// in rdata (0 regions ⇒ all bytes zero).
+		char name[32] = {};
+		gb_snprintf(name, 31, "cnil$%u", 1+m->const_nil_guid.fetch_add(1));
+		TB_Global *global = tb_global_create(m->mod, -1, name, cg_debug_type(m, type), TB_LINKAGE_PRIVATE);
+		tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), global, size, align, 0);
+
+		TB_Symbol *symbol = cast(TB_Symbol *)global;
+		if (p) {
+			TB_Node *node = tb_inst_get_symbol_address(p->func, symbol);
+			return cg_lvalue_addr(node, type);
+		} else {
+			return cg_value(symbol, type);
+		}
+	}
+
+	if (is_type_internally_pointer_like(type)) {
+		return cg_value(tb_inst_uint(p->func, dt, 0), type);
+	} else if (is_type_integer(type) || is_type_boolean(type) || is_type_bit_set(type) || is_type_typeid(type)) {
+		return cg_value(tb_inst_uint(p->func, dt, 0), type);
+	} else if (is_type_float(type)) {
+		switch (size) {
+		case 2:
+			// f16 has no dedicated TB constant; a zero bit-pattern suffices.
+			return cg_value(tb_inst_uint(p->func, dt, 0), type);
+		case 4:
+			return cg_value(tb_inst_float32(p->func, 0), type);
+		case 8:
+			return cg_value(tb_inst_float64(p->func, 0), type);
+		}
+	}
+	GB_PANIC("TODO(bill): cg_const_nil %s", type_to_string(original_type));
+	return {};
+}
+
+// Convenience wrapper: emit nil of `type` within procedure `p`.
+gb_internal cgValue cg_const_nil(cgProcedure *p, Type *type) {
+	return cg_const_nil(p->module, p, type);
+}
+
+// Forward declarations (defined later in this file).
+gb_internal TB_Global *cg_global_const_string(cgModule *m, String const &str, Type *type, TB_Global *global, i64 offset);
+gb_internal void cg_write_int_at_ptr(void *dst, i64 i, Type *original_type);
+
+// Writes a constant `Source_Code_Location` value into `global` starting at
+// `offset`, using the checker-computed field offsets of t_source_code_location.
+gb_internal void cg_global_source_code_location_const(cgModule *m, String const &proc_name, TokenPos pos, TB_Global *global, i64 offset) {
+	// Source_Code_Location :: struct {
+	// 	file_path: string,
+	// 	line, column: i32,
+	// 	procedure: string,
+	// }
+
+	i64 file_path_offset = type_offset_of(t_source_code_location, 0);
+	i64 line_offset = type_offset_of(t_source_code_location, 1);
+	i64 column_offset = type_offset_of(t_source_code_location, 2);
+	i64 procedure_offset = type_offset_of(t_source_code_location, 3);
+
+	// Empty strings are left as the global's default zero bytes (nil string).
+	String file_path = get_file_path_string(pos.file_id);
+	if (file_path.len != 0) {
+		cg_global_const_string(m, file_path, t_string, global, offset+file_path_offset);
+	}
+
+	// line/column are i32 (4 bytes each).
+	void *line_ptr = tb_global_add_region(m->mod, global, offset+line_offset, 4);
+	void *column_ptr = tb_global_add_region(m->mod, global, offset+column_offset, 4);
+	cg_write_int_at_ptr(line_ptr, pos.line, t_i32);
+	cg_write_int_at_ptr(column_ptr, pos.column, t_i32);
+
+	if (proc_name.len != 0) {
+		cg_global_const_string(m, proc_name, t_string, global, offset+procedure_offset);
+	}
+}
+
+
+// Materializes a `Source_Code_Location` for (proc_name, pos) as a private
+// read-only global and returns its address as an lvalue.
+gb_internal cgValue cg_emit_source_code_location_as_global(cgProcedure *p, String const &proc_name, TokenPos pos) {
+	cgModule *m = p->module;
+
+	// Unique private name for this location constant.
+	char scl_name[32] = {};
+	gb_snprintf(scl_name, 31, "scl$%u", 1+m->const_nil_guid.fetch_add(1));
+
+	// 6 regions: two strings (2 regions each: data reloc + length) plus the
+	// two i32 fields.
+	TB_Global *scl_global = tb_global_create(m->mod, -1, scl_name, cg_debug_type(m, t_source_code_location), TB_LINKAGE_PRIVATE);
+	tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), scl_global, type_size_of(t_source_code_location), type_align_of(t_source_code_location), 6);
+
+	cg_global_source_code_location_const(m, proc_name, pos, scl_global, 0);
+
+	TB_Node *addr = tb_inst_get_symbol_address(p->func, cast(TB_Symbol *)scl_global);
+	return cg_lvalue_addr(addr, t_source_code_location);
+}
+
+
+
+// Serializes big integer `a` into `dst` as exactly `type_size_of(original_type)`
+// bytes, least-significant byte first, then byte-swaps when `original_type`
+// is declared with the opposite endianness to the (little-endian) target.
+//
+// Only little-endian build targets are supported (asserted). The 32-byte
+// `rop64` scratch buffer bounds the largest supported integer size.
+gb_internal void cg_write_big_int_at_ptr(void *dst, BigInt const *a, Type *original_type) {
+	GB_ASSERT(build_context.endian_kind == TargetEndian_Little);
+	size_t sz = cast(size_t)type_size_of(original_type);
+	if (big_int_is_zero(a)) {
+		gb_memset(dst, 0, sz);
+		return;
+	}
+	u64 rop64[4] = {}; // 2 u64 is the maximum we will ever need, so doubling it will be fine :P
+	u8 *rop = cast(u8 *)rop64;
+
+	size_t max_count = 0;
+	size_t written = 0;
+	size_t size = 1;   // pack in units of single bytes
+	size_t nails = 0;  // no unused bits per byte
+	mp_endian endian = MP_LITTLE_ENDIAN;
+
+	// Sanity check: the magnitude must fit in the destination type.
+	max_count = mp_pack_count(a, nails, size);
+	if (sz < max_count) {
+		debug_print_big_int(a);
+		gb_printf_err("%s -> %tu\n", type_to_string(original_type), sz);
+	}
+	GB_ASSERT_MSG(sz >= max_count, "max_count: %tu, sz: %tu, written: %tu, type %s", max_count, sz, written, type_to_string(original_type));
+	GB_ASSERT(gb_size_of(rop64) >= sz);
+
+	mp_err err = mp_pack(rop, sz, &written,
+	                     MP_LSB_FIRST,
+	                     size, endian, nails,
+	                     a);
+	GB_ASSERT(err == MP_OKAY);
+
+	// e.g. `u32be` on a little-endian target: reverse the packed bytes.
+	if (!is_type_endian_little(original_type)) {
+		for (size_t i = 0; i < sz/2; i++) {
+			u8 tmp = rop[i];
+			rop[i] = rop[sz-1-i];
+			rop[sz-1-i] = tmp;
+		}
+	}
+
+	gb_memcopy(dst, rop, sz);
+}
+
+
+// Writes the signed value `i`, sized/endian-adjusted per `original_type`, at `dst`.
+gb_internal void cg_write_int_at_ptr(void *dst, i64 i, Type *original_type) {
+	ExactValue v = exact_value_i64(i);
+	cg_write_big_int_at_ptr(dst, &v.value_integer, original_type);
+}
+// Unsigned counterpart of cg_write_int_at_ptr.
+gb_internal void cg_write_uint_at_ptr(void *dst, u64 i, Type *original_type) {
+	ExactValue v = exact_value_u64(i);
+	cg_write_big_int_at_ptr(dst, &v.value_integer, original_type);
+}
+
+// Emits the constant string `str` for a string-like `type`.
+//
+// Always creates a private NUL-terminated byte-array global for the text.
+// For `cstring`, only a pointer relocation into `global` (if given) is added
+// and the byte-array global is returned. For `string`, a {data, len} pair is
+// written into `global` at `offset` (creating `global` if nullptr) and that
+// global is returned.
+gb_internal TB_Global *cg_global_const_string(cgModule *m, String const &str, Type *type, TB_Global *global, i64 offset) {
+	GB_ASSERT(is_type_string(type));
+
+	char name[32] = {};
+	gb_snprintf(name, 31, "csb$%u", 1+m->const_nil_guid.fetch_add(1));
+	TB_Global *str_global = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+	i64 size = str.len+1; // +1 for the NUL terminator
+	tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), str_global, size, 1, 1);
+	u8 *data = cast(u8 *)tb_global_add_region(m->mod, str_global, 0, size);
+	gb_memcopy(data, str.text, str.len);
+	data[str.len] = 0;
+
+	if (is_type_cstring(type)) {
+		if (global) {
+			tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)str_global);
+		}
+		return str_global;
+	}
+
+	if (global == nullptr) {
+		// 2 regions: the data-pointer relocation and the length field.
+		gb_snprintf(name, 31, "cstr$%u", 1+m->const_nil_guid.fetch_add(1));
+		global = tb_global_create(m->mod, -1, name, cg_debug_type(m, type), TB_LINKAGE_PRIVATE);
+		tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), global, type_size_of(type), type_align_of(type), 2);
+	}
+
+	// `string` layout: pointer at offset 0, int length at int_size.
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)str_global);
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, str.len, t_int);
+
+	return global;
+}
+
+// A compound-literal element type can be emitted as constant data only when
+// it is valid and carries no runtime-managed storage (dynamic arrays and
+// maps require allocation, so they can never be constants).
+gb_internal bool cg_elem_type_can_be_constant(Type *t) {
+	t = base_type(t);
+	if (t == t_invalid) {
+		return false;
+	}
+	return !is_type_dynamic_array(t) && !is_type_map(t);
+}
+
+
+// Reports whether compound-literal element `elem` (possibly a `field = value`
+// node) has a checker-known constant value of a constant-capable type.
+gb_internal bool cg_is_elem_const(Ast *elem, Type *elem_type) {
+	if (!cg_elem_type_can_be_constant(elem_type)) {
+		return false;
+	}
+	Ast *value_expr = (elem->kind == Ast_FieldValue) ? elem->FieldValue.value : elem;
+	TypeAndValue tav = type_and_value_of_expr(value_expr);
+	GB_ASSERT_MSG(tav.mode != Addressing_Invalid, "%s %s", expr_to_string(value_expr), type_to_string(tav.type));
+	return tav.value.kind != ExactValue_Invalid;
+}
+
+// Walks the selection path `sel` down through `ft` (structs and fixed arrays
+// only) and reports whether `elem` is a constant of the selected leaf type.
+// Any other type along the path (pointers, unions, ...) disqualifies it.
+gb_internal bool cg_is_nested_possibly_constant(Type *ft, Selection const &sel, Ast *elem) {
+	GB_ASSERT(!sel.indirect);
+	for (i32 index : sel.index) {
+		Type *bt = base_type(ft);
+		if (bt->kind == Type_Struct) {
+			// NOTE: raw unions were previously rejected here; currently
+			// treated the same as plain structs.
+			ft = bt->Struct.fields[index]->type;
+		} else if (bt->kind == Type_Array) {
+			ft = bt->Array.elem;
+		} else {
+			return false;
+		}
+	}
+	return cg_is_elem_const(elem, ft);
+}
+
+// Upper bound on the number of TB regions (initialized spans / relocations)
+// a constant of `type` may require, computed purely from the type's shape.
+// Must stay consistent with the writes done by cg_global_const_add_region.
+gb_internal i64 cg_global_const_calculate_region_count_from_basic_type(Type *type) {
+	type = core_type(type);
+
+	switch (type->kind) {
+	case Type_Basic:
+		switch (type->Basic.kind) {
+		case Basic_string: // ^u8 + int
+		case Basic_any: // rawptr + typeid
+			return 2;
+		}
+		return 1;
+	case Type_Pointer:
+	case Type_MultiPointer:
+		return 2; // allows for offsets
+	case Type_Proc:
+		return 1;
+	case Type_Slice:
+		return 3; // allows for offsets
+	case Type_DynamicArray:
+		return 5;
+	case Type_Map:
+		return 4;
+
+	case Type_Enum:
+	case Type_BitSet:
+		return 1;
+
+	case Type_RelativePointer:
+	case Type_RelativeMultiPointer:
+		return 2; // allows for offsets
+
+	case Type_Matrix:
+		return 1;
+
+	case Type_Array:
+		{
+			// Each element may need its own regions.
+			Type *elem = type->Array.elem;
+			i64 count = cg_global_const_calculate_region_count_from_basic_type(elem);
+			return count*type->Array.count;
+		}
+	case Type_EnumeratedArray:
+		{
+			Type *elem = type->EnumeratedArray.elem;
+			i64 count = cg_global_const_calculate_region_count_from_basic_type(elem);
+			return count*type->EnumeratedArray.count;
+		}
+
+	case Type_Struct:
+		if (type->Struct.is_raw_union) {
+			// Only one variant is ever written: take the maximum.
+			i64 max_count = 0;
+			for (Entity *f : type->Struct.fields) {
+				i64 count = cg_global_const_calculate_region_count_from_basic_type(f->type);
+				max_count = gb_max(count, max_count);
+			}
+			return max_count;
+		} else {
+			// All fields may be written: sum them.
+			i64 max_count = 0;
+			for (Entity *f : type->Struct.fields) {
+				max_count += cg_global_const_calculate_region_count_from_basic_type(f->type);
+			}
+			return max_count;
+		}
+		break;
+	case Type_Union:
+		{
+			// Largest variant plus one region for the tag.
+			i64 max_count = 0;
+			for (Type *t : type->Union.variants) {
+				i64 count = cg_global_const_calculate_region_count_from_basic_type(t);
+				max_count = gb_max(count, max_count);
+			}
+			return max_count+1;
+		}
+		break;
+
+	default:
+		GB_PANIC("TODO(bill): %s", type_to_string(type));
+		break;
+	}
+	return -1;
+}
+// Returns the number of TB regions (initialized spans / symbol relocations)
+// needed to encode constant `value` of `type`. This must agree with — or
+// over-estimate — the writes performed by cg_global_const_add_region and
+// cg_global_const_comp_literal, since TB's storage is sized by this count.
+gb_internal isize cg_global_const_calculate_region_count(ExactValue const &value, Type *type) {
+	Type *bt = base_type(type);
+	if (is_type_array(type) && value.kind == ExactValue_String && !is_type_u8(core_array_type(type))) {
+		if (is_type_rune_array(type)) {
+			return 1; // written as one contiguous rune buffer
+		}
+
+		Type *et = base_array_type(type);
+		i64 base_count = 2;
+		if (is_type_cstring(et)) {
+			base_count = 1;
+		}
+		return base_count * bt->Array.count;
+	} else if (is_type_u8_array(type) && value.kind == ExactValue_String) {
+		return 1;
+	} else if (is_type_array(type) &&
+	           value.kind != ExactValue_Invalid &&
+	           value.kind != ExactValue_String &&
+	           value.kind != ExactValue_Compound) {
+		// Scalar value broadcast across every element of the array.
+		Type *elem = type->Array.elem;
+
+		i64 base_count = cg_global_const_calculate_region_count(value, elem);
+		return base_count * type->Array.count;
+	} else if (is_type_matrix(type) &&
+	           value.kind != ExactValue_Invalid &&
+	           value.kind != ExactValue_Compound) {
+		return 1;
+	} else if (is_type_simd_vector(type) &&
+	           value.kind != ExactValue_Invalid &&
+	           value.kind != ExactValue_Compound) {
+		return 1;
+	}
+
+	isize count = 0;
+	switch (value.kind) {
+	case ExactValue_Invalid:
+		return 0;
+	case ExactValue_Bool:
+	case ExactValue_Integer:
+	case ExactValue_Float:
+	case ExactValue_Typeid:
+	case ExactValue_Complex:
+	case ExactValue_Quaternion:
+		return 1;
+	case ExactValue_Pointer:
+		return 2;
+
+	case ExactValue_Procedure:
+		return 1;
+
+	case ExactValue_String:
+		if (is_type_string(type)) {
+			return 3;
+		} else if (is_type_cstring(type) || is_type_array_like(type)) {
+			return 2;
+		}
+		return 3;
+
+	case ExactValue_Compound: {
+		ast_node(cl, CompoundLit, value.value_compound);
+		Type *bt = base_type(type);
+		switch (bt->kind) {
+		case Type_Struct:
+			if (cl->elems[0]->kind == Ast_FieldValue) {
+				for (isize i = 0; i < cl->elems.count; i++) {
+					ast_node(fv, FieldValue, cl->elems[i]);
+					String name = fv->field->Ident.token.string;
+
+					Selection sel = lookup_field(type, name, false);
+					GB_ASSERT(!sel.indirect);
+
+					Entity *f = bt->Struct.fields[sel.index[0]];
+
+					if (!cg_elem_type_can_be_constant(f->type)) {
+						continue;
+					}
+
+					if (sel.index.count == 1) {
+						count += cg_global_const_calculate_region_count(fv->value->tav.value, f->type);
+					} else {
+						count += 1; // just in case
+						if (cg_is_nested_possibly_constant(type, sel, fv->value)) {
+							Type *cv_type = sel.entity->type;
+							count += cg_global_const_calculate_region_count(fv->value->tav.value, cv_type);
+						}
+					}
+				}
+			} else {
+				for_array(i, cl->elems) {
+					i64 field_index = i;
+					Ast *elem = cl->elems[i];
+					TypeAndValue tav = elem->tav;
+					Entity *f = bt->Struct.fields[field_index];
+					if (!cg_elem_type_can_be_constant(f->type)) {
+						continue;
+					}
+
+					ExactValue value = {};
+					if (tav.mode != Addressing_Invalid) {
+						value = tav.value;
+					}
+					// Count against the field's own type, not the enclosing
+					// struct type: the previous use of `type` here disagreed
+					// with the per-field writes in cg_global_const_comp_literal.
+					count += cg_global_const_calculate_region_count(value, f->type);
+				}
+			}
+			break;
+		case Type_Array:
+		case Type_EnumeratedArray:
+		case Type_SimdVector: {
+			Type *et = base_array_type(bt);
+			if (!cg_elem_type_can_be_constant(et)) {
+				break;
+			}
+			for (Ast *elem : cl->elems) {
+				if (elem->kind == Ast_FieldValue) {
+					ast_node(fv, FieldValue, elem);
+					ExactValue const &value = elem->FieldValue.value->tav.value;
+					if (is_ast_range(fv->field)) {
+						// `lo..hi` / `lo..<hi` index range: one element per index.
+						ast_node(ie, BinaryExpr, fv->field);
+						TypeAndValue lo_tav = ie->left->tav;
+						TypeAndValue hi_tav = ie->right->tav;
+						GB_ASSERT(lo_tav.mode == Addressing_Constant);
+						GB_ASSERT(hi_tav.mode == Addressing_Constant);
+
+						TokenKind op = ie->op.kind;
+						i64 lo = exact_value_to_i64(lo_tav.value);
+						i64 hi = exact_value_to_i64(hi_tav.value);
+						if (op != Token_RangeHalf) {
+							hi += 1; // full range is inclusive of `hi`
+						}
+
+						for (i64 i = lo; i < hi; i++) {
+							count += cg_global_const_calculate_region_count(value, et);
+						}
+					} else {
+						count += cg_global_const_calculate_region_count(value, et);
+					}
+				} else {
+					ExactValue const &value = elem->tav.value;
+					count += cg_global_const_calculate_region_count(value, et);
+				}
+			}
+		} break;
+
+		case Type_BitSet:
+			return 1;
+		case Type_Matrix:
+			return 1;
+
+		case Type_Slice:
+			return 3;
+
+		default:
+			GB_PANIC("TODO(bill): %s", type_to_string(type));
+			break;
+		}
+	} break;
+	}
+	return count;
+}
+
+gb_internal TB_Global *cg_global_const_comp_literal(cgModule *m, Type *type, ExactValue const &value, TB_Global *global, i64 base_offset);
+
+// Writes constant `value` of `type` into `global` at `offset` as one or more
+// TB regions/relocations. Returns false only for ExactValue_Invalid (the
+// bytes are then left as the global's default zero fill).
+gb_internal bool cg_global_const_add_region(cgModule *m, ExactValue const &value, Type *type, TB_Global *global, i64 offset) {
+	GB_ASSERT(is_type_endian_little(type));
+	GB_ASSERT(!is_type_different_to_arch_endianness(type));
+
+	GB_ASSERT(global != nullptr);
+
+	Type *bt = base_type(type);
+	i64 size = type_size_of(type);
+	if (value.kind == ExactValue_Invalid) {
+		return false;
+	}
+	if (is_type_array(type) && value.kind == ExactValue_String && !is_type_u8(core_array_type(type))) {
+		if (is_type_rune_array(type)) {
+			// Decode the UTF-8 string into one contiguous buffer of runes.
+			i64 count = type->Array.count;
+			Rune rune;
+			isize rune_offset = 0;
+			isize width = 1;
+			String s = value.value_string;
+
+			Rune *runes = cast(Rune *)tb_global_add_region(m->mod, global, offset, count*4);
+
+			for (i64 i = 0; i < count && rune_offset < s.len; i++) {
+				width = utf8_decode(s.text+rune_offset, s.len-rune_offset, &rune);
+				runes[i] = rune;
+				rune_offset += width;
+			}
+			// All source bytes must have been consumed; the previous assert
+			// compared the unrelated destination `offset` against s.len.
+			GB_ASSERT(rune_offset == s.len);
+			return true;
+		}
+		// Non-u8, non-rune element: broadcast the string value per element.
+		Type *et = bt->Array.elem;
+		i64 elem_size = type_size_of(et);
+
+		for (i64 i = 0; i < bt->Array.count; i++) {
+			cg_global_const_add_region(m, value, et, global, offset+(i * elem_size));
+		}
+		return true;
+	} else if (is_type_u8_array(type) && value.kind == ExactValue_String) {
+		u8 *dst = cast(u8 *)tb_global_add_region(m->mod, global, offset, size);
+		gb_memcopy(dst, value.value_string.text, gb_min(value.value_string.len, size));
+		return true;
+	} else if (is_type_array(type) &&
+	           value.kind != ExactValue_Invalid &&
+	           value.kind != ExactValue_String &&
+	           value.kind != ExactValue_Compound) {
+		// Scalar broadcast across every element of the array.
+		Type *et = bt->Array.elem;
+		i64 elem_size = type_size_of(et);
+
+		for (i64 i = 0; i < bt->Array.count; i++) {
+			cg_global_const_add_region(m, value, et, global, offset+(i * elem_size));
+		}
+
+		return true;
+	} else if (is_type_matrix(type) &&
+	           value.kind != ExactValue_Invalid &&
+	           value.kind != ExactValue_Compound) {
+		GB_PANIC("TODO(bill): matrices");
+
+		i64 row = bt->Matrix.row_count;
+		i64 column = bt->Matrix.column_count;
+		GB_ASSERT(row == column);
+
+		Type *elem = bt->Matrix.elem;
+
+		i64 elem_size = type_size_of(elem);
+		gb_unused(elem_size);
+
+		// 1 region in memory, not many
+
+		return true;
+	} else if (is_type_simd_vector(type) &&
+	           value.kind != ExactValue_Invalid &&
+	           value.kind != ExactValue_Compound) {
+
+		GB_PANIC("TODO(bill): #simd vectors");
+
+		Type *et = type->SimdVector.elem;
+		i64 elem_size = type_size_of(et);
+		gb_unused(elem_size);
+
+		// 1 region in memory, not many
+
+		return true;
+	}
+
+
+	switch (value.kind) {
+	case ExactValue_Bool:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			bool *res = cast(bool *)tb_global_add_region(m->mod, global, offset, size);
+			*res = !!value.value_bool;
+		}
+		break;
+
+	case ExactValue_Integer:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			void *res = tb_global_add_region(m->mod, global, offset, size);
+			cg_write_big_int_at_ptr(res, &value.value_integer, type);
+		}
+		break;
+
+	case ExactValue_Float:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			f64 f = exact_value_to_f64(value);
+			void *res = tb_global_add_region(m->mod, global, offset, size);
+			switch (size) {
+			case 2: *(u16 *)res = f32_to_f16(cast(f32)f); break;
+			case 4: *(f32 *)res = cast(f32)f;             break;
+			case 8: *(f64 *)res = cast(f64)f;             break;
+			}
+		}
+		break;
+
+	case ExactValue_Pointer:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			void *res = tb_global_add_region(m->mod, global, offset, size);
+			*(u64 *)res = exact_value_to_u64(value);
+		}
+		break;
+
+	case ExactValue_String:
+		if (is_type_array_like(type)) {
+			// Raw bytes straight into the array storage.
+			GB_ASSERT(global != nullptr);
+			void *data = tb_global_add_region(m->mod, global, offset, size);
+			gb_memcopy(data, value.value_string.text, gb_min(value.value_string.len, size));
+		} else {
+			cg_global_const_string(m, value.value_string, type, global, offset);
+		}
+		break;
+
+	case ExactValue_Typeid:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			void *dst = tb_global_add_region(m->mod, global, offset, size);
+			u64 id = cg_typeid_as_u64(m, value.value_typeid);
+			cg_write_uint_at_ptr(dst, id, t_typeid);
+		}
+		break;
+
+	case ExactValue_Compound:
+		{
+			// Recurse into the nested compound literal at this offset.
+			TB_Global *out_global = cg_global_const_comp_literal(m, type, value, global, offset);
+			GB_ASSERT(out_global == global);
+		}
+		break;
+
+	case ExactValue_Procedure:
+		GB_PANIC("TODO(bill): nested procedure values/literals\n");
+		break;
+	case ExactValue_Complex:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			Complex128 c = {};
+			if (value.value_complex) {
+				c = *value.value_complex;
+			}
+			void *res = tb_global_add_region(m->mod, global, offset, size);
+			switch (size) {
+			case 4:
+				((u16 *)res)[0] = f32_to_f16(cast(f32)c.real);
+				((u16 *)res)[1] = f32_to_f16(cast(f32)c.imag);
+				break;
+			case 8:
+				((f32 *)res)[0] = cast(f32)c.real;
+				((f32 *)res)[1] = cast(f32)c.imag;
+				break;
+			case 16:
+				((f64 *)res)[0] = cast(f64)c.real;
+				((f64 *)res)[1] = cast(f64)c.imag;
+				break;
+			}
+		}
+		break;
+	case ExactValue_Quaternion:
+		{
+			GB_ASSERT_MSG(!is_type_array_like(bt), "%s", type_to_string(type));
+			// @QuaternionLayout
+			Quaternion256 q = {};
+			if (value.value_quaternion) {
+				q = *value.value_quaternion;
+			}
+			void *res = tb_global_add_region(m->mod, global, offset, size);
+			switch (size) {
+			case 8:
+				((u16 *)res)[0] = f32_to_f16(cast(f32)q.imag);
+				((u16 *)res)[1] = f32_to_f16(cast(f32)q.jmag);
+				((u16 *)res)[2] = f32_to_f16(cast(f32)q.kmag);
+				((u16 *)res)[3] = f32_to_f16(cast(f32)q.real);
+				break;
+			case 16:
+				((f32 *)res)[0] = cast(f32)q.imag;
+				((f32 *)res)[1] = cast(f32)q.jmag;
+				((f32 *)res)[2] = cast(f32)q.kmag;
+				((f32 *)res)[3] = cast(f32)q.real;
+				break;
+			case 32:
+				((f64 *)res)[0] = cast(f64)q.imag;
+				((f64 *)res)[1] = cast(f64)q.jmag;
+				((f64 *)res)[2] = cast(f64)q.kmag;
+				((f64 *)res)[3] = cast(f64)q.real;
+				break;
+			}
+		}
+		break;
+	default:
+		GB_PANIC("%s", type_to_string(type));
+		break;
+	}
+	return true;
+}
+
+
+// Emits the constant compound literal `value` of `original_type` into
+// `global` starting at `base_offset`. When `global` is nullptr a fresh
+// private global is created (sized/region-counted up front, since TB
+// requires the region count at storage-set time). Returns the global
+// written to.
+gb_internal TB_Global *cg_global_const_comp_literal(cgModule *m, Type *original_type, ExactValue const &value, TB_Global *global, i64 base_offset) {
+	GB_ASSERT(value.kind == ExactValue_Compound);
+	Ast *value_compound = value.value_compound;
+	ast_node(cl, CompoundLit, value_compound);
+
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	if (global == nullptr) {
+		char name[32] = {};
+		gb_snprintf(name, 31, "complit$%u", 1+m->const_nil_guid.fetch_add(1));
+		global = tb_global_create(m->mod, -1, name, cg_debug_type(m, original_type), TB_LINKAGE_PRIVATE);
+		i64 size = type_size_of(original_type);
+		i64 align = type_align_of(original_type);
+
+		// READ ONLY?
+		TB_ModuleSection *section = nullptr;
+		if (is_type_string(original_type) || is_type_cstring(original_type)) {
+			section = tb_module_get_rdata(m->mod);
+		} else {
+			section = tb_module_get_data(m->mod);
+		}
+
+		if (cl->elems.count == 0) {
+			// Empty literal: zero-filled storage, no regions needed.
+			tb_global_set_storage(m->mod, section, global, size, align, 0);
+			return global;
+		}
+
+
+		isize global_region_count = cg_global_const_calculate_region_count(value, original_type);
+		tb_global_set_storage(m->mod, section, global, size, align, global_region_count);
+	}
+
+	if (cl->elems.count == 0) {
+		return global;
+	}
+
+
+	Type *bt = base_type(original_type);
+	i64 bt_size = type_size_of(bt);
+
+	switch (bt->kind) {
+	case Type_Struct:
+		// Elements are either all `field = value` or all positional.
+		if (cl->elems[0]->kind == Ast_FieldValue) {
+			isize elem_count = cl->elems.count;
+			for (isize i = 0; i < elem_count; i++) {
+				ast_node(fv, FieldValue, cl->elems[i]);
+				String name = fv->field->Ident.token.string;
+
+				TypeAndValue tav = fv->value->tav;
+				GB_ASSERT(tav.mode != Addressing_Invalid);
+				ExactValue value = tav.value;
+
+				Selection sel = lookup_field(bt, name, false);
+				GB_ASSERT(!sel.indirect);
+
+				// Non-constant-capable selections are skipped; their bytes
+				// stay zero and are filled in at runtime elsewhere.
+				if (!cg_is_nested_possibly_constant(bt, sel, fv->value)) {
+					continue;
+				}
+
+				i64 offset = type_offset_of_from_selection(bt, sel);
+				cg_global_const_add_region(m, value, sel.entity->type, global, base_offset+offset);
+			}
+		} else {
+			for_array(i, cl->elems) {
+				i64 field_index = i;
+				Ast *elem = cl->elems[i];
+				TypeAndValue tav = elem->tav;
+				Entity *f = bt->Struct.fields[field_index];
+				if (!cg_elem_type_can_be_constant(f->type)) {
+					continue;
+				}
+
+				i64 offset = bt->Struct.offsets[field_index];
+
+				ExactValue value = {};
+				if (tav.mode != Addressing_Invalid) {
+					value = tav.value;
+				}
+				cg_global_const_add_region(m, value, f->type, global, base_offset+offset);
+			}
+		}
+		return global;
+
+	case Type_Array:
+	case Type_EnumeratedArray:
+	case Type_SimdVector:
+		if (cl->elems[0]->kind == Ast_FieldValue) {
+			// Indexed form: `index = value` or `lo..hi = value` ranges.
+			Type *et = base_array_type(bt);
+			i64 elem_size = type_size_of(et);
+			for (Ast *elem : cl->elems) {
+				ast_node(fv, FieldValue, elem);
+
+				ExactValue const &value = fv->value->tav.value;
+
+				if (is_ast_range(fv->field)) {
+					ast_node(ie, BinaryExpr, fv->field);
+					TypeAndValue lo_tav = ie->left->tav;
+					TypeAndValue hi_tav = ie->right->tav;
+					GB_ASSERT(lo_tav.mode == Addressing_Constant);
+					GB_ASSERT(hi_tav.mode == Addressing_Constant);
+
+					TokenKind op = ie->op.kind;
+					i64 lo = exact_value_to_i64(lo_tav.value);
+					i64 hi = exact_value_to_i64(hi_tav.value);
+					if (op != Token_RangeHalf) {
+						// full range `..` is inclusive of `hi`
+						hi += 1;
+					}
+
+					for (i64 i = lo; i < hi; i++) {
+						i64 offset = i * elem_size;
+						cg_global_const_add_region(m, value, et, global, base_offset+offset);
+					}
+				} else {
+					TypeAndValue index_tav = fv->field->tav;
+					GB_ASSERT(index_tav.mode == Addressing_Constant);
+					i64 i = exact_value_to_i64(index_tav.value);
+					i64 offset = i * elem_size;
+					cg_global_const_add_region(m, value, et, global, base_offset+offset);
+				}
+			}
+		} else {
+			Type *et = base_array_type(bt);
+			i64 elem_size = type_size_of(et);
+			i64 offset = 0;
+			for (Ast *elem : cl->elems) {
+				ExactValue const &value = elem->tav.value;
+				cg_global_const_add_region(m, value, et, global, base_offset+offset);
+				offset += elem_size;
+			}
+		}
+
+		return global;
+
+	case Type_BitSet:
+		if (bt_size > 0) {
+			// OR together one shifted bit per constant element, then write
+			// the accumulated mask as a single integer.
+			BigInt bits = {};
+			BigInt one = {};
+			big_int_from_u64(&one, 1);
+
+			for_array(i, cl->elems) {
+				Ast *e = cl->elems[i];
+				GB_ASSERT(e->kind != Ast_FieldValue);
+
+				TypeAndValue tav = e->tav;
+				if (tav.mode != Addressing_Constant) {
+					continue;
+				}
+				GB_ASSERT(tav.value.kind == ExactValue_Integer);
+				i64 v = big_int_to_i64(&tav.value.value_integer);
+				i64 lower = bt->BitSet.lower;
+				u64 index = cast(u64)(v-lower);
+				BigInt bit = {};
+				big_int_from_u64(&bit, index);
+				big_int_shl(&bit, &one, &bit);
+				big_int_or(&bits, &bits, &bit);
+			}
+
+			void *dst = tb_global_add_region(m->mod, global, base_offset, bt_size);
+			cg_write_big_int_at_ptr(dst, &bits, original_type);
+		}
+		return global;
+
+	case Type_Matrix:
+		GB_PANIC("TODO(bill): constant compound literal for %s", type_to_string(original_type));
+		break;
+
+	case Type_Slice:
+		{
+			// Emit a backing fixed array, then write {ptr, len} into `global`.
+			i64 count = gb_max(cl->elems.count, cl->max_count);
+			Type *elem = bt->Slice.elem;
+			Type *t = alloc_type_array(elem, count);
+			TB_Global *backing_array = cg_global_const_comp_literal(m, t, value, nullptr, 0);
+
+			tb_global_add_symbol_reloc(m->mod, global, base_offset+0, cast(TB_Symbol *)backing_array);
+
+			void *len_ptr = tb_global_add_region(m->mod, global, base_offset+build_context.int_size, build_context.int_size);
+			cg_write_int_at_ptr(len_ptr, count, t_int);
+		}
+		return global;
+	}
+
+	GB_PANIC("TODO(bill): constant compound literal for %s", type_to_string(original_type));
+	return nullptr;
+}
+
+
+// Emits the constant `value` as a cgValue of `type` inside procedure `p`.
+// Untyped types are first concretized; procedure and typeid constants are
+// dispatched in the first switch, everything else in the second.
+gb_internal cgValue cg_const_value(cgProcedure *p, Type *type, ExactValue const &value) {
+	GB_ASSERT(p != nullptr);
+	TB_Node *node = nullptr;
+
+	if (is_type_untyped(type)) {
+		// TODO(bill): THIS IS A COMPLETE HACK, WHY DOES THIS NOT A TYPE?
+		GB_ASSERT(type->kind == Type_Basic);
+		switch (type->Basic.kind) {
+		case Basic_UntypedBool:
+			type = t_bool;
+			break;
+		case Basic_UntypedInteger:
+			type = t_i64;
+			break;
+		case Basic_UntypedFloat:
+			type = t_f64;
+			break;
+		case Basic_UntypedComplex:
+			type = t_complex128;
+			break;
+		case Basic_UntypedQuaternion:
+			type = t_quaternion256;
+			break;
+		case Basic_UntypedString:
+			type = t_string;
+			break;
+		case Basic_UntypedRune:
+			type = t_rune;
+			break;
+		case Basic_UntypedNil:
+		case Basic_UntypedUninit:
+			// No representable value: a null node stands in for nil/uninit.
+			return cg_value(cast(TB_Node *)nullptr, type);
+		}
+	}
+	TB_DataType dt = cg_data_type(type);
+
+	switch (value.kind) {
+	case ExactValue_Invalid:
+		return cg_const_nil(p, type);
+
+	case ExactValue_Typeid:
+		return cg_typeid(p, value.value_typeid);
+
+	case ExactValue_Procedure:
+		{
+			// Either an anonymous proc literal (generate it now) or a named
+			// entity (look up its already-registered symbol).
+			Ast *expr = unparen_expr(value.value_procedure);
+			if (expr->kind == Ast_ProcLit) {
+				cgProcedure *anon = cg_procedure_generate_anonymous(p->module, expr, p);
+				TB_Node *ptr = tb_inst_get_symbol_address(p->func, anon->symbol);
+				GB_ASSERT(are_types_identical(type, anon->type));
+				return cg_value(ptr, type);
+			}
+
+			Entity *e = entity_of_node(expr);
+			if (e != nullptr) {
+				TB_Symbol *found = cg_find_symbol_from_entity(p->module, e);
+				GB_ASSERT_MSG(found != nullptr, "could not find '%.*s'", LIT(e->token.string));
+				TB_Node *ptr = tb_inst_get_symbol_address(p->func, found);
+				GB_ASSERT(type != nullptr);
+				GB_ASSERT(are_types_identical(type, e->type));
+				return cg_value(ptr, type);
+			}
+
+			GB_PANIC("TODO(bill): cg_const_value ExactValue_Procedure %s", expr_to_string(expr));
+		}
+		break;
+	}
+
+	switch (value.kind) {
+	case ExactValue_Bool:
+		GB_ASSERT(!TB_IS_VOID_TYPE(dt));
+		return cg_value(tb_inst_uint(p->func, dt, value.value_bool), type);
+
+	case ExactValue_Integer:
+		GB_ASSERT(!TB_IS_VOID_TYPE(dt));
+		// GB_ASSERT(dt.raw != TB_TYPE_I128.raw);
+		if (is_type_unsigned(type)) {
+			u64 i = exact_value_to_u64(value);
+			return cg_value(tb_inst_uint(p->func, dt, i), type);
+		} else {
+			i64 i = exact_value_to_i64(value);
+			return cg_value(tb_inst_sint(p->func, dt, i), type);
+		}
+		break;
+
+	case ExactValue_Float:
+		GB_ASSERT(!TB_IS_VOID_TYPE(dt));
+		GB_ASSERT(dt.raw != TB_TYPE_F16.raw);
+		GB_ASSERT(!is_type_different_to_arch_endianness(type));
+		{
+			f64 f = exact_value_to_f64(value);
+			if (type_size_of(type) == 8) {
+				return cg_value(tb_inst_float64(p->func, f), type);
+			} else {
+				return cg_value(tb_inst_float32(p->func, cast(f32)f), type);
+			}
+		}
+		break;
+
+	case ExactValue_String:
+		{
+			// Same layout as cg_global_const_string, but emitted inline here
+			// so the resulting address can be taken within `p` immediately.
+			GB_ASSERT(is_type_string(type));
+			cgModule *m = p->module;
+
+			String str = value.value_string;
+
+			char name[32] = {};
+			gb_snprintf(name, 31, "csb$%u", 1+m->const_nil_guid.fetch_add(1));
+			TB_Global *cstr_global = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+
+			i64 size = str.len+1;
+			tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), cstr_global, size, 1, 1);
+			u8 *data = cast(u8 *)tb_global_add_region(m->mod, cstr_global, 0, size);
+			gb_memcopy(data, str.text, str.len);
+			data[str.len] = 0;
+
+			if (is_type_cstring(type)) {
+				cgValue s = cg_value(cstr_global, type);
+				return cg_flatten_value(p, s);
+			}
+
+			// `string`: a {ptr, len} global referencing the byte data.
+			gb_snprintf(name, 31, "str$%u", 1+m->const_nil_guid.fetch_add(1));
+			TB_Global *str_global = tb_global_create(m->mod, -1, name, cg_debug_type(m, type), TB_LINKAGE_PRIVATE);
+			tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), str_global, type_size_of(type), type_align_of(type), 2);
+
+			tb_global_add_symbol_reloc(m->mod, str_global, 0, cast(TB_Symbol *)cstr_global);
+			void *len_ptr = tb_global_add_region(m->mod, str_global, build_context.int_size, build_context.int_size);
+			cg_write_int_at_ptr(len_ptr, str.len, t_int);
+
+			TB_Node *s = tb_inst_get_symbol_address(p->func, cast(TB_Symbol *)str_global);
+			return cg_lvalue_addr(s, type);
+
+		}
+
+	case ExactValue_Pointer:
+		return cg_value(tb_inst_uint(p->func, dt, exact_value_to_u64(value)), type);
+
+	case ExactValue_Compound:
+		{
+			TB_Symbol *symbol = cast(TB_Symbol *)cg_global_const_comp_literal(p->module, type, value, nullptr, 0);
+			TB_Node *node = tb_inst_get_symbol_address(p->func, symbol);
+			return cg_lvalue_addr(node, type);
+		}
+		break;
+	}
+
+
+	// Every handled kind returns above; reaching here means an unhandled
+	// ExactValue kind, and the assert acts as the panic.
+	GB_ASSERT(node != nullptr);
+	return cg_value(node, type);
+}
+
+// Convenience: constant integer `i` of `type`.
+gb_internal cgValue cg_const_int(cgProcedure *p, Type *type, i64 i) {
+	return cg_const_value(p, type, exact_value_i64(i));
+}
+// Convenience: constant boolean; emitted directly as a TB bool node.
+gb_internal cgValue cg_const_bool(cgProcedure *p, Type *type, bool v) {
+	return cg_value(tb_inst_bool(p->func, v), type);
+}
+
+// Convenience: constant string `str` of `type` (string or cstring).
+gb_internal cgValue cg_const_string(cgProcedure *p, Type *type, String const &str) {
+	return cg_const_value(p, type, exact_value_string(str));
+}
+
+// Constant union tag value identifying variant `v` within union `u`.
+gb_internal cgValue cg_const_union_tag(cgProcedure *p, Type *u, Type *v) {
+	return cg_const_value(p, union_tag_type(u), exact_value_i64(union_variant_index(u, v)));
+}
+
diff --git a/src/tilde_debug.cpp b/src/tilde_debug.cpp
new file mode 100644
index 000000000..926cf9cd0
--- /dev/null
+++ b/src/tilde_debug.cpp
@@ -0,0 +1,482 @@
+gb_internal TB_DebugType *cg_debug_type_internal(cgModule *m, Type *type);
+// Returns the TB debug type for `type`, memoizing the result in
+// m->debug_type_map. Lookup and construction happen under debug_type_mutex;
+// NOTE(review): cg_debug_type_internal re-enters cg_debug_type for member
+// types, which presumably requires the mutex to be recursive — confirm.
+gb_internal TB_DebugType *cg_debug_type(cgModule *m, Type *type) {
+	type = reduce_tuple_to_single_type(type);
+
+	mutex_lock(&m->debug_type_mutex);
+	defer (mutex_unlock(&m->debug_type_mutex));
+	TB_DebugType **found = map_get(&m->debug_type_map, type);
+	if (found) {
+		return *found;
+	}
+
+	TB_DebugType *res = cg_debug_type_internal(m, type);
+	map_set(&m->debug_type_map, type, res);
+	return res;
+}
+
+// Returns the *function* debug type (not the pointer-to-function type) for a
+// procedure type. Relies on cg_debug_type() having populated
+// m->proc_debug_type_map as a side effect of building the proc's debug info.
+gb_internal TB_DebugType *cg_debug_type_for_proc(cgModule *m, Type *type) {
+	GB_ASSERT(is_type_proc(type));
+	TB_DebugType **func_found = nullptr;
+	TB_DebugType *func_ptr = cg_debug_type(m, type);
+	GB_ASSERT(func_ptr != nullptr);
+
+	mutex_lock(&m->debug_type_mutex);
+	func_found = map_get(&m->proc_debug_type_map, type);
+	mutex_unlock(&m->debug_type_mutex);
+	GB_ASSERT(func_found != nullptr);
+	return *func_found;
+}
+
+
+// Builds the TB debug record (field layout) for struct, tuple, and union
+// types. `record_name` may be empty for anonymous records; named records are
+// registered in m->debug_type_map *before* recursing into members so that
+// self-referential types terminate. Returns nullptr for any other type kind.
+gb_internal TB_DebugType *cg_debug_type_internal_record(cgModule *m, Type *type, String const &record_name) {
+	Type *bt = base_type(type);
+	switch (bt->kind) {
+	case Type_Struct:
+		{
+			type_set_offsets(bt);
+
+			TB_DebugType *record = nullptr;
+			if (bt->Struct.is_raw_union) {
+				record = tb_debug_create_union(m->mod, record_name.len, cast(char const *)record_name.text);
+			} else {
+				record = tb_debug_create_struct(m->mod, record_name.len, cast(char const *)record_name.text);
+			}
+			if (record_name.len != 0) {
+				map_set(&m->debug_type_map, type, record);
+			}
+
+			TB_DebugType **fields = tb_debug_record_begin(record, bt->Struct.fields.count);
+			for_array(i, bt->Struct.fields) {
+				Entity *e = bt->Struct.fields[i];
+				Type *type = e->type;
+				if (is_type_proc(type)) {
+					// Procedure values are represented as raw pointers in debug info.
+					type = t_rawptr;
+				}
+				TB_DebugType *field_type = cg_debug_type(m, type);
+				String name = e->token.string;
+				TB_CharUnits offset = cast(TB_CharUnits)bt->Struct.offsets[i];
+				if (name.len == 0) {
+					name = str_lit("_");
+				}
+
+				fields[i] = tb_debug_create_field(m->mod, field_type, name.len, cast(char const *)name.text, offset);
+			}
+			tb_debug_record_end(
+				record,
+				cast(TB_CharUnits)type_size_of(type),
+				cast(TB_CharUnits)type_align_of(type)
+			);
+			return record;
+		}
+		break;
+
+	case Type_Tuple:
+		{
+			GB_ASSERT(record_name.len == 0);
+			type_set_offsets(bt);
+
+			TB_DebugType *record = tb_debug_create_struct(m->mod, 0, "");
+			isize record_count = 0;
+			for (Entity *e : bt->Tuple.variables) {
+				if (e->kind == Entity_Variable) {
+					record_count += 1;
+				}
+			}
+			TB_DebugType **fields = tb_debug_record_begin(record, record_count);
+			isize field_index = 0;
+			for_array(i, bt->Tuple.variables) {
+				Entity *e = bt->Tuple.variables[i];
+				if (e->kind != Entity_Variable) {
+					continue;
+				}
+				Type *type = e->type;
+				if (is_type_proc(type)) {
+					type = t_rawptr;
+				}
+				TB_DebugType *field_type = cg_debug_type(m, type);
+				String name = e->token.string;
+				TB_CharUnits offset = cast(TB_CharUnits)bt->Tuple.offsets[i];
+				if (name.len == 0) {
+					name = str_lit("_");
+				}
+
+				// NOTE(fix): index by the number of *emitted* fields, not the
+				// tuple index `i`; skipped non-variable entities previously
+				// left holes and could overrun the record_count-sized array.
+				fields[field_index++] = tb_debug_create_field(m->mod, field_type, name.len, cast(char const *)name.text, offset);
+			}
+			tb_debug_record_end(
+				record,
+				cast(TB_CharUnits)type_size_of(type),
+				cast(TB_CharUnits)type_align_of(type)
+			);
+			return record;
+		}
+		break;
+	case Type_Union:
+		{
+			TB_DebugType *record = tb_debug_create_struct(m->mod, record_name.len, cast(char const *)record_name.text);
+			if (record_name.len != 0) {
+				map_set(&m->debug_type_map, type, record);
+			}
+
+			i64 variant_count = bt->Union.variants.count;
+			if (is_type_union_maybe_pointer(bt)) {
+				// NO TAG
+				GB_ASSERT(variant_count == 1);
+				TB_DebugType **fields = tb_debug_record_begin(record, variant_count);
+				TB_DebugType *variant_type = cg_debug_type(m, bt->Union.variants[0]);
+				fields[0] = tb_debug_create_field(m->mod, variant_type, -1, "v0", 0);
+			} else {
+				TB_DebugType **fields = tb_debug_record_begin(record, variant_count+1);
+				for_array(i, bt->Union.variants) {
+					Type *v = bt->Union.variants[i];
+					TB_DebugType *variant_type = cg_debug_type(m, v);
+					char name[32] = {};
+					u32 v_index = cast(u32)i;
+					if (bt->Union.kind != UnionType_no_nil) {
+						// Variant index 0 is reserved for `nil` in nillable unions.
+						v_index += 1;
+					}
+					gb_snprintf(name, 31, "v%u", v_index);
+					fields[i] = tb_debug_create_field(m->mod, variant_type, -1, name, 0);
+				}
+
+				TB_DebugType *tag_type = cg_debug_type(m, union_tag_type(bt));
+				fields[variant_count] = tb_debug_create_field(m->mod, tag_type, -1, "tag", cast(TB_CharUnits)bt->Union.variant_block_size);
+
+			}
+			// NOTE(fix): end the record exactly once; the maybe-pointer branch
+			// previously called tb_debug_record_end a second time down here.
+			tb_debug_record_end(
+				record,
+				cast(TB_CharUnits)type_size_of(type),
+				cast(TB_CharUnits)type_align_of(type)
+			);
+			return record;
+		}
+		break;
+	}
+	return nullptr;
+}
+
+
+// Builds a TB debug type for `type` (uncached; callers go through
+// cg_debug_type which memoizes). Named types are resolved to their base; the
+// original named type is also registered for procedure types so later
+// lookups via cg_debug_type_for_proc succeed. Falls back to `void` for any
+// unhandled kind.
+gb_internal TB_DebugType *cg_debug_type_internal(cgModule *m, Type *type) {
+	if (type == nullptr) {
+		return tb_debug_get_void(m->mod);
+	}
+	Type *original_type = type;
+	if (type->kind == Type_Named) {
+		String name = type->Named.name;
+		TB_DebugType *res = cg_debug_type_internal_record(m, type, name);
+		if (res) {
+			return res;
+		}
+		type = base_type(type->Named.base);
+	}
+
+	TB_CharUnits int_size = cast(TB_CharUnits)build_context.int_size;
+	TB_CharUnits ptr_size = cast(TB_CharUnits)build_context.ptr_size;
+	TB_CharUnits size = cast(TB_CharUnits)type_size_of(type);
+	TB_CharUnits align = cast(TB_CharUnits)type_align_of(type);
+	int bits = cast(int)(8*size);
+	bool is_signed = is_type_integer(core_type(type)) && !is_type_unsigned(core_type(type));
+
+	switch (type->kind) {
+	case Type_Basic:
+		switch (type->Basic.kind) {
+		case Basic_bool: return tb_debug_get_bool(m->mod);
+		case Basic_b8: return tb_debug_get_bool(m->mod);
+		case Basic_b16: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_b32: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_b64: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i8: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u8: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i16: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u16: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i32: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u32: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i64: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u64: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i128: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u128: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_rune: return tb_debug_get_integer(m->mod, is_signed, bits);
+
+		// NOTE: f16 has no TB float representation; exposed as a raw 16-bit integer.
+		case Basic_f16: return tb_debug_get_integer(m->mod, false, bits);
+		case Basic_f32: return tb_debug_get_float(m->mod, TB_FLT_32);
+		case Basic_f64: return tb_debug_get_float(m->mod, TB_FLT_64);
+
+		case Basic_complex32:
+		case Basic_complex64:
+		case Basic_complex128:
+			{
+				String name = basic_types[type->Basic.kind].Basic.name;
+				TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+				Type *et = base_complex_elem_type(type);
+				TB_CharUnits elem_size = cast(TB_CharUnits)type_size_of(et);
+				TB_DebugType *elem = cg_debug_type(m, et);
+
+				TB_DebugType **fields = tb_debug_record_begin(record, 2);
+				fields[0] = tb_debug_create_field(m->mod, elem, -1, "real", 0*elem_size);
+				fields[1] = tb_debug_create_field(m->mod, elem, -1, "imag", 1*elem_size);
+
+				tb_debug_record_end(record, size, align);
+				return record;
+			}
+		case Basic_quaternion64:
+		case Basic_quaternion128:
+		case Basic_quaternion256:
+			{
+				String name = basic_types[type->Basic.kind].Basic.name;
+				TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+				Type *et = base_complex_elem_type(type);
+				TB_CharUnits elem_size = cast(TB_CharUnits)type_size_of(et);
+				TB_DebugType *elem = cg_debug_type(m, et);
+
+				// @QuaternionLayout
+				TB_DebugType **fields = tb_debug_record_begin(record, 4);
+				fields[0] = tb_debug_create_field(m->mod, elem, -1, "imag", 0*elem_size);
+				fields[1] = tb_debug_create_field(m->mod, elem, -1, "jmag", 1*elem_size);
+				fields[2] = tb_debug_create_field(m->mod, elem, -1, "kmag", 2*elem_size);
+				fields[3] = tb_debug_create_field(m->mod, elem, -1, "real", 3*elem_size);
+
+				tb_debug_record_end(record, size, align);
+				return record;
+			}
+
+		case Basic_int: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_uint: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_uintptr: return tb_debug_get_integer(m->mod, is_signed, bits);
+
+		case Basic_rawptr:
+			return tb_debug_create_ptr(m->mod, tb_debug_get_void(m->mod));
+		case Basic_string:
+			{
+				String name = basic_types[type->Basic.kind].Basic.name;
+				TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+				// {data: ^u8, len: int}
+				TB_DebugType **fields = tb_debug_record_begin(record, 2);
+				fields[0] = tb_debug_create_field(m->mod, cg_debug_type(m, t_u8_ptr), -1, "data", 0*int_size);
+				fields[1] = tb_debug_create_field(m->mod, cg_debug_type(m, t_int), -1, "len", 1*int_size);
+
+				tb_debug_record_end(record, size, align);
+				return record;
+			}
+		case Basic_cstring:
+			return tb_debug_create_ptr(m->mod, tb_debug_get_integer(m->mod, false, 8));
+
+		case Basic_any:
+			{
+				String name = basic_types[type->Basic.kind].Basic.name;
+				TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+				// {data: rawptr, id: typeid}
+				TB_DebugType **fields = tb_debug_record_begin(record, 2);
+				fields[0] = tb_debug_create_field(m->mod, cg_debug_type(m, t_rawptr), -1, "data", 0*ptr_size);
+				fields[1] = tb_debug_create_field(m->mod, cg_debug_type(m, t_typeid), -1, "id", 1*ptr_size);
+
+				tb_debug_record_end(record, size, align);
+				return record;
+			}
+		case Basic_typeid: return tb_debug_get_integer(m->mod, false, bits);
+
+		case Basic_i16le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u16le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i32le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u32le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i64le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u64le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i128le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u128le: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i16be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u16be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i32be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u32be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i64be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u64be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_i128be: return tb_debug_get_integer(m->mod, is_signed, bits);
+		case Basic_u128be: return tb_debug_get_integer(m->mod, is_signed, bits);
+
+		case Basic_f16le: return tb_debug_get_integer(m->mod, false, bits);
+		case Basic_f32le: return tb_debug_get_float(m->mod, TB_FLT_32);
+		case Basic_f64le: return tb_debug_get_float(m->mod, TB_FLT_64);
+		case Basic_f16be: return tb_debug_get_integer(m->mod, false, bits);
+		case Basic_f32be: return tb_debug_get_float(m->mod, TB_FLT_32);
+		case Basic_f64be: return tb_debug_get_float(m->mod, TB_FLT_64);
+		}
+		break;
+	case Type_Generic:
+		GB_PANIC("SHOULD NEVER HIT");
+		break;
+	case Type_Pointer:
+		return tb_debug_create_ptr(m->mod, cg_debug_type(m, type->Pointer.elem));
+	case Type_MultiPointer:
+		return tb_debug_create_ptr(m->mod, cg_debug_type(m, type->MultiPointer.elem));
+	case Type_Array:
+		return tb_debug_create_array(m->mod, cg_debug_type(m, type->Array.elem), type->Array.count);
+	case Type_EnumeratedArray:
+		return tb_debug_create_array(m->mod, cg_debug_type(m, type->EnumeratedArray.elem), type->EnumeratedArray.count);
+	case Type_Slice:
+		{
+			String name = {};
+			TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+			TB_DebugType **fields = tb_debug_record_begin(record, 2);
+			fields[0] = tb_debug_create_field(m->mod, cg_debug_type(m, alloc_type_pointer(type->Slice.elem)), -1, "data", 0*int_size);
+			fields[1] = tb_debug_create_field(m->mod, cg_debug_type(m, t_int), -1, "len", 1*int_size);
+
+			tb_debug_record_end(record, size, align);
+			return record;
+		}
+	case Type_DynamicArray:
+		{
+			String name = {};
+			TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+			TB_DebugType **fields = tb_debug_record_begin(record, 4);
+			// NOTE(fix): use the DynamicArray member of the type union here;
+			// the previous code read `type->Slice.elem` for a dynamic array.
+			fields[0] = tb_debug_create_field(m->mod, cg_debug_type(m, alloc_type_pointer(type->DynamicArray.elem)), -1, "data", 0*int_size);
+			fields[1] = tb_debug_create_field(m->mod, cg_debug_type(m, t_int), -1, "len", 1*int_size);
+			fields[2] = tb_debug_create_field(m->mod, cg_debug_type(m, t_int), -1, "cap", 2*int_size);
+			fields[3] = tb_debug_create_field(m->mod, cg_debug_type(m, t_allocator), -1, "allocator", 3*int_size);
+
+			tb_debug_record_end(record, size, align);
+			return record;
+		}
+	case Type_Map:
+		return cg_debug_type(m, t_raw_map);
+
+	case Type_Struct:
+	case Type_Tuple:
+	case Type_Union:
+		return cg_debug_type_internal_record(m, type, {});
+
+	case Type_Enum:
+		return tb_debug_get_integer(m->mod, is_signed, bits);
+
+	case Type_Proc:
+		{
+			TypeProc *pt = &type->Proc;
+			isize param_count = 0;
+			isize return_count = 0;
+
+			bool is_odin_cc = is_calling_convention_odin(pt->calling_convention);
+
+			if (pt->params) for (Entity *e : pt->params->Tuple.variables) {
+				if (e->kind == Entity_Variable) {
+					param_count += 1;
+				}
+			}
+
+			if (pt->result_count > 0) {
+				if (is_odin_cc) {
+					// Split returns: all but the last result become out-pointer parameters.
+					param_count += pt->result_count-1;
+					return_count = 1;
+				} else {
+					return_count = 1;
+				}
+			}
+
+			if (pt->calling_convention == ProcCC_Odin) {
+				// `context` ptr
+				param_count += 1;
+			}
+
+			TB_CallingConv tb_cc = TB_CDECL;
+			if (pt->calling_convention == ProcCC_StdCall) {
+				tb_cc = TB_STDCALL;
+			}
+			TB_DebugType *func = tb_debug_create_func(m->mod, tb_cc, param_count, return_count, pt->c_vararg);
+
+			// Register both the function type and its pointer form under the
+			// named and base types so cg_debug_type_for_proc can find them.
+			map_set(&m->proc_debug_type_map, original_type, func);
+			map_set(&m->proc_debug_type_map, type, func);
+
+			TB_DebugType *func_ptr = tb_debug_create_ptr(m->mod, func);
+			map_set(&m->debug_type_map, original_type, func_ptr);
+			map_set(&m->debug_type_map, type, func_ptr);
+
+			TB_DebugType **params = tb_debug_func_params(func);
+			TB_DebugType **returns = tb_debug_func_returns(func);
+
+			isize param_index = 0;
+			isize return_index = 0;
+			if (pt->params) for (Entity *e : pt->params->Tuple.variables) {
+				if (e->kind == Entity_Variable) {
+					Type *type = e->type;
+					if (is_type_proc(type)) {
+						type = t_rawptr;
+					}
+					String name = e->token.string;
+					if (name.len == 0) {
+						name = str_lit("_");
+					}
+					params[param_index++] = tb_debug_create_field(m->mod, cg_debug_type(m, type), name.len, cast(char const *)name.text, 0);
+				}
+			}
+
+			if (pt->result_count) {
+				GB_ASSERT(pt->results);
+				if (is_odin_cc) {
+					// Split Returns
+					for (isize i = 0; i < pt->results->Tuple.variables.count-1; i++) {
+						Entity *e = pt->results->Tuple.variables[i];
+						GB_ASSERT(e->kind == Entity_Variable);
+						Type *type = e->type;
+						if (is_type_proc(e->type)) {
+							type = t_rawptr;
+						}
+						type = alloc_type_pointer(type);
+
+						String name = e->token.string;
+						if (name.len == 0) {
+							name = str_lit("_");
+						}
+						params[param_index++] = tb_debug_create_field(m->mod, cg_debug_type(m, type), name.len, cast(char const *)name.text, 0);
+					}
+
+					Type *last_type = pt->results->Tuple.variables[pt->results->Tuple.variables.count-1]->type;
+					if (is_type_proc(last_type)) {
+						last_type = t_rawptr;
+					}
+					returns[return_index++] = cg_debug_type(m, last_type);
+				} else {
+					returns[return_index++] = cg_debug_type(m, pt->results);
+				}
+			}
+
+			if (pt->calling_convention == ProcCC_Odin) {
+				Type *type = t_context_ptr;
+				String name = str_lit("__.context_ptr");
+				params[param_index++] = tb_debug_create_field(m->mod, cg_debug_type(m, type), name.len, cast(char const *)name.text, 0);
+			}
+
+			GB_ASSERT_MSG(param_index == param_count, "%td vs %td for %s", param_index, param_count, type_to_string(type));
+			GB_ASSERT_MSG(return_index == return_count, "%td vs %td for %s", return_index, return_count, type_to_string(type));
+
+			return func_ptr;
+		}
+		break;
+	case Type_BitSet:
+		return cg_debug_type(m, bit_set_to_int(type));
+	case Type_SimdVector:
+		return tb_debug_create_array(m->mod, cg_debug_type(m, type->SimdVector.elem), type->SimdVector.count);
+	case Type_RelativePointer:
+		return cg_debug_type(m, type->RelativePointer.base_integer);
+	case Type_RelativeMultiPointer:
+		return cg_debug_type(m, type->RelativeMultiPointer.base_integer);
+	case Type_Matrix:
+		{
+			i64 count = matrix_type_total_internal_elems(type);
+			return tb_debug_create_array(m->mod, cg_debug_type(m, type->Matrix.elem), count);
+		}
+	case Type_SoaPointer:
+		{
+			String name = {};
+			TB_DebugType *record = tb_debug_create_struct(m->mod, name.len, cast(char const *)name.text);
+			TB_DebugType **fields = tb_debug_record_begin(record, 2);
+			fields[0] = tb_debug_create_field(m->mod, cg_debug_type(m, alloc_type_pointer(type->SoaPointer.elem)), -1, "ptr", 0*int_size);
+			fields[1] = tb_debug_create_field(m->mod, cg_debug_type(m, t_int), -1, "offset", 1*int_size);
+
+			tb_debug_record_end(record, size, align);
+			return record;
+		}
+	}
+
+	// TODO(bill): cg_debug_type
+	return tb_debug_get_void(m->mod);
+}
diff --git a/src/tilde_expr.cpp b/src/tilde_expr.cpp
new file mode 100644
index 000000000..6ff912dd9
--- /dev/null
+++ b/src/tilde_expr.cpp
@@ -0,0 +1,3871 @@
+// Normalizes a cgValue into a plain value where trivially possible:
+// symbols become their address node, and addresses of register-representable
+// types are loaded. Multi-values are not allowed; anything else passes through.
+gb_internal cgValue cg_flatten_value(cgProcedure *p, cgValue value) {
+	GB_ASSERT(value.kind != cgValue_Multi);
+	if (value.kind == cgValue_Symbol) {
+		GB_ASSERT(is_type_internally_pointer_like(value.type));
+		return cg_value(tb_inst_get_symbol_address(p->func, value.symbol), value.type);
+	} else if (value.kind == cgValue_Addr) {
+		// TODO(bill): Is this a good idea?
+		// this converts an lvalue to an rvalue if trivially possible
+		TB_DataType dt = cg_data_type(value.type);
+		if (!TB_IS_VOID_TYPE(dt)) {
+			TB_CharUnits align = cast(TB_CharUnits)type_align_of(value.type);
+			return cg_value(tb_inst_load(p->func, dt, value.node, align, false), value.type);
+		}
+	}
+	return value;
+}
+
+// Emits `cond ? x : y`. x and y must be the same kind of cgValue; the result
+// reuses x's kind/type with the selected node.
+gb_internal cgValue cg_emit_select(cgProcedure *p, cgValue const &cond, cgValue const &x, cgValue const &y) {
+	GB_ASSERT(x.kind == y.kind);
+	GB_ASSERT(cond.kind == cgValue_Value);
+	cgValue res = x;
+	res.node = tb_inst_select(p->func, cond.node, x.node, y.node);
+	return res;
+}
+
+
+// Returns true when `expr` is an untyped expression with a valid constant value.
+gb_internal bool cg_is_expr_untyped_const(Ast *expr) {
+	auto const &tv = type_and_value_of_expr(expr);
+	if (is_type_untyped(tv.type)) {
+		return tv.value.kind != ExactValue_Invalid;
+	}
+	return false;
+}
+// Materializes an untyped constant expression as a value of the typed type `t`.
+gb_internal cgValue cg_expr_untyped_const_to_typed(cgProcedure *p, Ast *expr, Type *t) {
+	GB_ASSERT(is_type_typed(t));
+	auto const &tv = type_and_value_of_expr(expr);
+	return cg_const_value(p, t, tv.value);
+}
+
+// Pushes `ctx` (forced to cgAddr_Context kind) onto the procedure's context
+// stack, recording the current scope index so it can be popped on scope exit.
+gb_internal cgContextData *cg_push_context_onto_stack(cgProcedure *p, cgAddr ctx) {
+	ctx.kind = cgAddr_Context;
+	cgContextData *cd = array_add_and_get(&p->context_stack);
+	cd->ctx = ctx;
+	cd->scope_index = p->scope_index;
+	return cd;
+}
+
+// Returns the innermost context on the stack, or — for non-Odin calling
+// conventions that have no implicit context parameter — allocates a fresh
+// local `context`, pushes it, and returns it.
+// NOTE(review): initialization of the fresh context is still commented out
+// (lb_emit_init_context); it is zeroed by cg_add_local only.
+gb_internal cgAddr cg_find_or_generate_context_ptr(cgProcedure *p) {
+	if (p->context_stack.count > 0) {
+		return p->context_stack[p->context_stack.count-1].ctx;
+	}
+
+	Type *pt = base_type(p->type);
+	GB_ASSERT(pt->kind == Type_Proc);
+	GB_ASSERT(pt->Proc.calling_convention != ProcCC_Odin);
+
+	cgAddr c = cg_add_local(p, t_context, nullptr, true);
+	tb_node_append_attrib(c.addr.node, tb_function_attrib_variable(p->func, -1, "context", cg_debug_type(p->module, t_context)));
+	c.kind = cgAddr_Context;
+	// lb_emit_init_context(p, c);
+	cg_push_context_onto_stack(p, c);
+	// lb_add_debug_context_variable(p, c);
+
+	return c;
+}
+
+// Looks up the module-level cgValue for entity `e` (after stripping entity
+// wrappers). Procedures are resolved via the procedure table; everything else
+// comes from m->values under a shared (read) lock. Panics if the entity has
+// no registered value.
+gb_internal cgValue cg_find_value_from_entity(cgModule *m, Entity *e) {
+	e = strip_entity_wrapping(e);
+	GB_ASSERT(e != nullptr);
+
+	GB_ASSERT(e->token.string != "_");
+
+	if (e->kind == Entity_Procedure) {
+		return cg_find_procedure_value_from_entity(m, e);
+	}
+
+	cgValue *found = nullptr;
+	rw_mutex_shared_lock(&m->values_mutex);
+	found = map_get(&m->values, e);
+	rw_mutex_shared_unlock(&m->values_mutex);
+	if (found) {
+		return *found;
+	}
+
+	GB_PANIC("\n\tError in: %s, missing value '%.*s'\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
+	return {};
+}
+
+// Resolves the address of a `using`-declared variable by locating its parent
+// entity's value and GEP-ing down the selection path to the field. Falls back
+// to building the address of e->using_expr when the parent has no stored value.
+gb_internal cgValue cg_get_using_variable(cgProcedure *p, Entity *e) {
+	GB_ASSERT(e->kind == Entity_Variable && e->flags & EntityFlag_Using);
+	String name = e->token.string;
+	Entity *parent = e->using_parent;
+	Selection sel = lookup_field(parent->type, name, false);
+	GB_ASSERT(sel.entity != nullptr);
+	cgValue *pv = map_get(&p->module->values, parent);
+
+	cgValue v = {};
+
+	if (pv == nullptr && parent->flags & EntityFlag_SoaPtrField) {
+		// NOTE(bill): using SOA value (probably from for-in statement)
+		GB_PANIC("TODO(bill): cg_get_soa_variable_addr");
+		// cgAddr parent_addr = cg_get_soa_variable_addr(p, parent);
+		// v = cg_addr_get_ptr(p, parent_addr);
+	} else if (pv != nullptr) {
+		v = *pv;
+	} else {
+		GB_ASSERT_MSG(e->using_expr != nullptr, "%.*s %.*s", LIT(e->token.string), LIT(name));
+		v = cg_build_addr_ptr(p, e->using_expr);
+	}
+	GB_ASSERT(v.node != nullptr);
+	GB_ASSERT_MSG(parent->type == type_deref(v.type), "%s %s", type_to_string(parent->type), type_to_string(v.type));
+	cgValue ptr = cg_emit_deep_field_gep(p, v, sel);
+	// if (parent->scope) {
+	// 	if ((parent->scope->flags & (ScopeFlag_File|ScopeFlag_Pkg)) == 0) {
+	// 		cg_add_debug_local_variable(p, ptr.value, e->type, e->token);
+	// 	}
+	// } else {
+	// 	cg_add_debug_local_variable(p, ptr.value, e->type, e->token);
+	// }
+	return ptr;
+}
+// Resolves entity `e` to an addressable location: procedure-local variables
+// first, then module-level values, `using` fields, and finally a lookup via
+// cg_find_value_from_entity. Constants are unimplemented (panics).
+gb_internal cgAddr cg_build_addr_from_entity(cgProcedure *p, Entity *e, Ast *expr) {
+	GB_ASSERT(e != nullptr);
+	if (e->kind == Entity_Constant) {
+		Type *t = default_type(type_of_expr(expr));
+		cgValue v = cg_const_value(p, t, e->Constant.value);
+		GB_PANIC("TODO(bill): cg_add_global_generated");
+		// return cg_add_global_generated(p->module, t, v);
+		return {};
+	}
+
+	cgAddr *local_found = map_get(&p->variable_map, e);
+	if (local_found) {
+		return *local_found;
+	}
+
+	cgValue v = {};
+
+	cgModule *m = p->module;
+
+	rw_mutex_lock(&m->values_mutex);
+	cgValue *found = map_get(&m->values, e);
+	rw_mutex_unlock(&m->values_mutex);
+	if (found) {
+		v = *found;
+	} else if (e->kind == Entity_Variable && e->flags & EntityFlag_Using) {
+		// NOTE(bill): Calculate the using variable every time
+		v = cg_get_using_variable(p, e);
+	} else if (e->flags & EntityFlag_SoaPtrField) {
+		GB_PANIC("TODO(bill): cg_get_soa_variable_addr");
+		// return cg_get_soa_variable_addr(p, e);
+	}
+
+
+	if (v.node == nullptr) {
+		// NOTE(review): this inner `v` intentionally shadows the (null) outer `v`.
+		cgValue v = cg_find_value_from_entity(m, e);
+		v = cg_flatten_value(p, v);
+		return cg_addr(v);
+	}
+
+	return cg_addr(v);
+}
+
+// Given a pointer to a (tagged, non-maybe-pointer) union, returns a pointer
+// to its tag field, which lives at offset variant_block_size past the payload.
+gb_internal cgValue cg_emit_union_tag_ptr(cgProcedure *p, cgValue const &parent_ptr) {
+	Type *t = parent_ptr.type;
+	Type *ut = base_type(type_deref(t));
+	GB_ASSERT_MSG(is_type_pointer(t), "%s", type_to_string(t));
+	GB_ASSERT_MSG(ut->kind == Type_Union, "%s", type_to_string(t));
+
+	GB_ASSERT(!is_type_union_maybe_pointer_original_alignment(ut));
+	GB_ASSERT(!is_type_union_maybe_pointer(ut));
+	GB_ASSERT(type_size_of(ut) > 0);
+
+	Type *tag_type = union_tag_type(ut);
+	i64 tag_offset = ut->Union.variant_block_size;
+
+	GB_ASSERT(parent_ptr.kind == cgValue_Value);
+	TB_Node *ptr = parent_ptr.node;
+	TB_Node *tag_ptr = tb_inst_member_access(p->func, ptr, tag_offset);
+	return cg_value(tag_ptr, alloc_type_pointer(tag_type));
+}
+
+
+
+// Converts an explicitly-endian integer/float value to platform endianness.
+// Currently a stub: non-native endianness panics (byte swap unimplemented).
+gb_internal cgValue cg_correct_endianness(cgProcedure *p, cgValue value) {
+	Type *src = core_type(value.type);
+	GB_ASSERT(is_type_integer(src) || is_type_float(src));
+	if (is_type_different_to_arch_endianness(src)) {
+		GB_PANIC("TODO(bill): cg_correct_endianness");
+		// Type *platform_src_type = integer_endian_type_to_platform_type(src);
+		// value = cg_emit_byte_swap(p, value, platform_src_type);
+	}
+	return value;
+}
+
+// Bit-for-bit reinterpretation of `value` as `type` (sizes must match).
+// Identical or core-identical types just retag; a stricter destination
+// alignment forces a store/reload through a local; otherwise register values
+// get the appropriate int/float/ptr conversion instruction.
+gb_internal cgValue cg_emit_transmute(cgProcedure *p, cgValue value, Type *type) {
+	GB_ASSERT(type_size_of(value.type) == type_size_of(type));
+
+	value = cg_flatten_value(p, value);
+
+	if (are_types_identical(value.type, type)) {
+		return value;
+	}
+	if (are_types_identical(core_type(value.type), core_type(type))) {
+		value.type = type;
+		return value;
+	}
+
+	i64 src_align = type_align_of(value.type);
+	i64 dst_align = type_align_of(type);
+
+	if (dst_align > src_align) {
+		// Destination is more strictly aligned: spill to a local of the
+		// destination type and reload, rather than retagging in place.
+		cgAddr local = cg_add_local(p, type, nullptr, false);
+		cgValue dst = local.addr;
+		dst.type = alloc_type_pointer(value.type);
+		cg_emit_store(p, dst, value);
+		return cg_addr_load(p, local);
+	}
+
+	TB_DataType dt = cg_data_type(type);
+	switch (value.kind) {
+	case cgValue_Value:
+		GB_ASSERT_MSG(!TB_IS_VOID_TYPE(dt), "%d %s -> %s", dt.type, type_to_string(value.type), type_to_string(type));
+		value.type = type;
+		if (value.node->dt.raw != dt.raw) {
+			// NOTE(fix): the inner switches must dispatch on the *destination*
+			// data type `dt.type`; they previously switched on the source
+			// (value.node->dt.type) again, so no conversion was ever emitted.
+			switch (value.node->dt.type) {
+			case TB_INT:
+				switch (dt.type) {
+				case TB_INT:
+					break;
+				case TB_FLOAT:
+					value.node = tb_inst_bitcast(p->func, value.node, dt);
+					break;
+				case TB_PTR:
+					value.node = tb_inst_int2ptr(p->func, value.node);
+					break;
+				}
+				break;
+			case TB_FLOAT:
+				switch (dt.type) {
+				case TB_INT:
+					value.node = tb_inst_bitcast(p->func, value.node, dt);
+					break;
+				case TB_FLOAT:
+					break;
+				case TB_PTR:
+					// float -> ptr goes through an integer of pointer width.
+					value.node = tb_inst_bitcast(p->func, value.node, TB_TYPE_INTPTR);
+					value.node = tb_inst_int2ptr(p->func, value.node);
+					break;
+				}
+				break;
+			case TB_PTR:
+				switch (dt.type) {
+				case TB_INT:
+					value.node = tb_inst_ptr2int(p->func, value.node, dt);
+					break;
+				case TB_FLOAT:
+					// ptr -> float goes through an integer of pointer width.
+					value.node = tb_inst_ptr2int(p->func, value.node, TB_TYPE_INTPTR);
+					value.node = tb_inst_bitcast(p->func, value.node, dt);
+					break;
+				case TB_PTR:
+					break;
+				}
+				break;
+			}
+		}
+		return value;
+	case cgValue_Addr:
+		// Memory-resident value: just retag; the load site interprets the bits.
+		value.type = type;
+		return value;
+	case cgValue_Symbol:
+		GB_PANIC("should be handled above");
+		break;
+	case cgValue_Multi:
+		GB_PANIC("cannot transmute multiple values at once");
+		break;
+	}
+	return value;
+
+}
+// Byte-swaps `value` and retags it as `end_type` (sizes must match). Floats
+// are transmuted through the same-size unsigned integer first.
+// NOTE(review): the actual swap instruction is still commented out, so this
+// currently only performs the transmutes — the bytes are NOT yet swapped.
+gb_internal cgValue cg_emit_byte_swap(cgProcedure *p, cgValue value, Type *end_type) {
+	GB_ASSERT(type_size_of(value.type) == type_size_of(end_type));
+
+	if (type_size_of(value.type) < 2) {
+		return value;
+	}
+
+	if (is_type_float(value.type)) {
+		i64 sz = type_size_of(value.type);
+		Type *integer_type = nullptr;
+		switch (sz) {
+		case 2: integer_type = t_u16; break;
+		case 4: integer_type = t_u32; break;
+		case 8: integer_type = t_u64; break;
+		}
+		GB_ASSERT(integer_type != nullptr);
+		value = cg_emit_transmute(p, value, integer_type);
+	}
+
+	GB_ASSERT(value.kind == cgValue_Value);
+
+	// TODO(bill): bswap
+	// value.node = tb_inst_bswap(p->func, value.node);
+	return cg_emit_transmute(p, value, end_type);
+}
+
+// Equality comparison (== / !=) for struct/union values: zero-sized types
+// compare equal by definition; simple-compare types use runtime memory_equal;
+// everything else calls the type's generated equality procedure. != is
+// implemented by negating the == result.
+gb_internal cgValue cg_emit_comp_records(cgProcedure *p, TokenKind op_kind, cgValue left, cgValue right, Type *type) {
+	GB_ASSERT((is_type_struct(type) || is_type_union(type)) && is_type_comparable(type));
+	cgValue left_ptr  = cg_address_from_load_or_generate_local(p, left);
+	cgValue right_ptr = cg_address_from_load_or_generate_local(p, right);
+	cgValue res = {};
+	if (type_size_of(type) == 0) {
+		switch (op_kind) {
+		case Token_CmpEq:
+			return cg_const_bool(p, t_bool, true);
+		case Token_NotEq:
+			return cg_const_bool(p, t_bool, false);
+		}
+		GB_PANIC("invalid operator");
+	}
+	TEMPORARY_ALLOCATOR_GUARD();
+	if (is_type_simple_compare(type)) {
+		// TODO(bill): Test to see if this is actually faster!!!!
+		auto args = slice_make<cgValue>(temporary_allocator(), 3);
+		args[0] = cg_emit_conv(p, left_ptr, t_rawptr);
+		args[1] = cg_emit_conv(p, right_ptr, t_rawptr);
+		args[2] = cg_const_int(p, t_int, type_size_of(type));
+		res = cg_emit_runtime_call(p, "memory_equal", args);
+	} else {
+		cgProcedure *equal_proc = cg_equal_proc_for_type(p->module, type);
+		cgValue value = cg_value(tb_inst_get_symbol_address(p->func, equal_proc->symbol), equal_proc->type);
+		auto args = slice_make<cgValue>(temporary_allocator(), 2);
+		args[0] = cg_emit_conv(p, left_ptr, t_rawptr);
+		args[1] = cg_emit_conv(p, right_ptr, t_rawptr);
+		res = cg_emit_call(p, value, args);
+	}
+	if (op_kind == Token_NotEq) {
+		res = cg_emit_unary_arith(p, Token_Not, res, res.type);
+	}
+	return res;
+}
+
+gb_internal cgValue cg_emit_comp(cgProcedure *p, TokenKind op_kind, cgValue left, cgValue right) {
+ GB_ASSERT(gb_is_between(op_kind, Token__ComparisonBegin+1, Token__ComparisonEnd-1));
+
+ Type *a = core_type(left.type);
+ Type *b = core_type(right.type);
+
+ cgValue nil_check = {};
+ if (is_type_array_like(left.type) || is_type_array_like(right.type)) {
+ // don't do `nil` check if it is array-like
+ } else if (is_type_untyped_nil(left.type)) {
+ nil_check = cg_emit_comp_against_nil(p, op_kind, right);
+ } else if (is_type_untyped_nil(right.type)) {
+ nil_check = cg_emit_comp_against_nil(p, op_kind, left);
+ }
+ if (nil_check.node != nullptr) {
+ return nil_check;
+ }
+
+ if (are_types_identical(a, b)) {
+ // NOTE(bill): No need for a conversion
+ } /*else if (cg_is_const(left) || cg_is_const_nil(left)) {
+ left = cg_emit_conv(p, left, right.type);
+ } else if (cg_is_const(right) || cg_is_const_nil(right)) {
+ right = cg_emit_conv(p, right, left.type);
+ }*/ else {
+ Type *lt = left.type;
+ Type *rt = right.type;
+
+ lt = left.type;
+ rt = right.type;
+ i64 ls = type_size_of(lt);
+ i64 rs = type_size_of(rt);
+
+ // NOTE(bill): Quick heuristic, larger types are usually the target type
+ if (ls < rs) {
+ left = cg_emit_conv(p, left, rt);
+ } else if (ls > rs) {
+ right = cg_emit_conv(p, right, lt);
+ } else {
+ if (is_type_union(rt)) {
+ left = cg_emit_conv(p, left, rt);
+ } else {
+ right = cg_emit_conv(p, right, lt);
+ }
+ }
+ }
+
+ a = core_type(left.type);
+ b = core_type(right.type);
+ left = cg_flatten_value(p, left);
+ right = cg_flatten_value(p, right);
+
+
+ if (is_type_matrix(a) && (op_kind == Token_CmpEq || op_kind == Token_NotEq)) {
+ GB_PANIC("TODO(bill): cg_emit_comp matrix");
+ // Type *tl = base_type(a);
+ // lbValue lhs = lb_address_from_load_or_generate_local(p, left);
+ // lbValue rhs = lb_address_from_load_or_generate_local(p, right);
+
+
+ // // TODO(bill): Test to see if this is actually faster!!!!
+ // auto args = array_make<lbValue>(permanent_allocator(), 3);
+ // args[0] = lb_emit_conv(p, lhs, t_rawptr);
+ // args[1] = lb_emit_conv(p, rhs, t_rawptr);
+ // args[2] = lb_const_int(p->module, t_int, type_size_of(tl));
+ // lbValue val = lb_emit_runtime_call(p, "memory_compare", args);
+ // lbValue res = lb_emit_comp(p, op_kind, val, lb_const_nil(p->module, val.type));
+ // return lb_emit_conv(p, res, t_bool);
+ }
+ if (is_type_array_like(a)) {
+ GB_PANIC("TODO(bill): cg_emit_comp is_type_array_like");
+ // Type *tl = base_type(a);
+ // lbValue lhs = lb_address_from_load_or_generate_local(p, left);
+ // lbValue rhs = lb_address_from_load_or_generate_local(p, right);
+
+
+ // TokenKind cmp_op = Token_And;
+ // lbValue res = lb_const_bool(p->module, t_bool, true);
+ // if (op_kind == Token_NotEq) {
+ // res = lb_const_bool(p->module, t_bool, false);
+ // cmp_op = Token_Or;
+ // } else if (op_kind == Token_CmpEq) {
+ // res = lb_const_bool(p->module, t_bool, true);
+ // cmp_op = Token_And;
+ // }
+
+ // bool inline_array_arith = lb_can_try_to_inline_array_arith(tl);
+ // i32 count = 0;
+ // switch (tl->kind) {
+ // case Type_Array: count = cast(i32)tl->Array.count; break;
+ // case Type_EnumeratedArray: count = cast(i32)tl->EnumeratedArray.count; break;
+ // }
+
+ // if (inline_array_arith) {
+ // // inline
+ // lbAddr val = lb_add_local_generated(p, t_bool, false);
+ // lb_addr_store(p, val, res);
+ // for (i32 i = 0; i < count; i++) {
+ // lbValue x = lb_emit_load(p, lb_emit_array_epi(p, lhs, i));
+ // lbValue y = lb_emit_load(p, lb_emit_array_epi(p, rhs, i));
+ // lbValue cmp = lb_emit_comp(p, op_kind, x, y);
+ // lbValue new_res = lb_emit_arith(p, cmp_op, lb_addr_load(p, val), cmp, t_bool);
+ // lb_addr_store(p, val, lb_emit_conv(p, new_res, t_bool));
+ // }
+
+ // return lb_addr_load(p, val);
+ // } else {
+ // if (is_type_simple_compare(tl) && (op_kind == Token_CmpEq || op_kind == Token_NotEq)) {
+ // // TODO(bill): Test to see if this is actually faster!!!!
+ // auto args = array_make<lbValue>(permanent_allocator(), 3);
+ // args[0] = lb_emit_conv(p, lhs, t_rawptr);
+ // args[1] = lb_emit_conv(p, rhs, t_rawptr);
+ // args[2] = lb_const_int(p->module, t_int, type_size_of(tl));
+ // lbValue val = lb_emit_runtime_call(p, "memory_compare", args);
+ // lbValue res = lb_emit_comp(p, op_kind, val, lb_const_nil(p->module, val.type));
+ // return lb_emit_conv(p, res, t_bool);
+ // } else {
+ // lbAddr val = lb_add_local_generated(p, t_bool, false);
+ // lb_addr_store(p, val, res);
+ // auto loop_data = lb_loop_start(p, count, t_i32);
+ // {
+ // lbValue i = loop_data.idx;
+ // lbValue x = lb_emit_load(p, lb_emit_array_ep(p, lhs, i));
+ // lbValue y = lb_emit_load(p, lb_emit_array_ep(p, rhs, i));
+ // lbValue cmp = lb_emit_comp(p, op_kind, x, y);
+ // lbValue new_res = lb_emit_arith(p, cmp_op, lb_addr_load(p, val), cmp, t_bool);
+ // lb_addr_store(p, val, lb_emit_conv(p, new_res, t_bool));
+ // }
+ // lb_loop_end(p, loop_data);
+
+ // return lb_addr_load(p, val);
+ // }
+ // }
+ }
+
+ if ((is_type_struct(a) || is_type_union(a)) && is_type_comparable(a)) {
+ return cg_emit_comp_records(p, op_kind, left, right, a);
+ }
+
+ if ((is_type_struct(b) || is_type_union(b)) && is_type_comparable(b)) {
+ return cg_emit_comp_records(p, op_kind, left, right, b);
+ }
+
+ if (is_type_string(a)) {
+ if (is_type_cstring(a)) {
+ left = cg_emit_conv(p, left, t_string);
+ right = cg_emit_conv(p, right, t_string);
+ }
+
+ char const *runtime_procedure = nullptr;
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "string_eq"; break;
+ case Token_NotEq: runtime_procedure = "string_ne"; break;
+ case Token_Lt: runtime_procedure = "string_lt"; break;
+ case Token_Gt: runtime_procedure = "string_gt"; break;
+ case Token_LtEq: runtime_procedure = "string_le"; break;
+ case Token_GtEq: runtime_procedure = "string_gt"; break;
+ }
+ GB_ASSERT(runtime_procedure != nullptr);
+
+ auto args = slice_make<cgValue>(permanent_allocator(), 2);
+ args[0] = left;
+ args[1] = right;
+ return cg_emit_runtime_call(p, runtime_procedure, args);
+ }
+
+ if (is_type_complex(a)) {
+ char const *runtime_procedure = "";
+ i64 sz = 8*type_size_of(a);
+ switch (sz) {
+ case 32:
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "complex32_eq"; break;
+ case Token_NotEq: runtime_procedure = "complex32_ne"; break;
+ }
+ break;
+ case 64:
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "complex64_eq"; break;
+ case Token_NotEq: runtime_procedure = "complex64_ne"; break;
+ }
+ break;
+ case 128:
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "complex128_eq"; break;
+ case Token_NotEq: runtime_procedure = "complex128_ne"; break;
+ }
+ break;
+ }
+ GB_ASSERT(runtime_procedure != nullptr);
+
+ GB_PANIC("TODO(bill): cg_emit_runtime_call");
+ // auto args = array_make<lbValue>(permanent_allocator(), 2);
+ // args[0] = left;
+ // args[1] = right;
+ // return lb_emit_runtime_call(p, runtime_procedure, args);
+ }
+
+ if (is_type_quaternion(a)) {
+ char const *runtime_procedure = "";
+ i64 sz = 8*type_size_of(a);
+ switch (sz) {
+ case 64:
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "quaternion64_eq"; break;
+ case Token_NotEq: runtime_procedure = "quaternion64_ne"; break;
+ }
+ break;
+ case 128:
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "quaternion128_eq"; break;
+ case Token_NotEq: runtime_procedure = "quaternion128_ne"; break;
+ }
+ break;
+ case 256:
+ switch (op_kind) {
+ case Token_CmpEq: runtime_procedure = "quaternion256_eq"; break;
+ case Token_NotEq: runtime_procedure = "quaternion256_ne"; break;
+ }
+ break;
+ }
+ GB_ASSERT(runtime_procedure != nullptr);
+
+ GB_PANIC("TODO(bill): cg_emit_runtime_call");
+ // auto args = array_make<lbValue>(permanent_allocator(), 2);
+ // args[0] = left;
+ // args[1] = right;
+ // return lb_emit_runtime_call(p, runtime_procedure, args);
+ }
+
+ if (is_type_bit_set(a)) {
+ switch (op_kind) {
+ case Token_Lt:
+ case Token_LtEq:
+ case Token_Gt:
+ case Token_GtEq:
+ {
+ Type *it = bit_set_to_int(a);
+ cgValue lhs = cg_emit_transmute(p, left, it);
+ cgValue rhs = cg_emit_transmute(p, right, it);
+ cgValue res = cg_emit_arith(p, Token_And, lhs, rhs, it);
+ GB_ASSERT(lhs.kind == cgValue_Value);
+ GB_ASSERT(rhs.kind == cgValue_Value);
+ GB_ASSERT(res.kind == cgValue_Value);
+
+ if (op_kind == Token_Lt || op_kind == Token_LtEq) {
+ // (lhs & rhs) == lhs
+ res = cg_value(tb_inst_cmp_eq(p->func, res.node, lhs.node), t_bool);
+ } else if (op_kind == Token_Gt || op_kind == Token_GtEq) {
+ // (lhs & rhs) == rhs
+ res = cg_value(tb_inst_cmp_eq(p->func, res.node, rhs.node), t_bool);
+ }
+
+ // NOTE(bill): Strict subsets
+ if (op_kind == Token_Lt || op_kind == Token_Gt) {
+ // res &~ (lhs == rhs)
+ cgValue eq = cg_value(tb_inst_cmp_eq(p->func, lhs.node, rhs.node), t_bool);
+ res = cg_emit_arith(p, Token_AndNot, res, eq, t_bool);
+ }
+ return res;
+ }
+
+ case Token_CmpEq:
+ GB_ASSERT(left.kind == cgValue_Value);
+ GB_ASSERT(right.kind == cgValue_Value);
+ return cg_value(tb_inst_cmp_eq(p->func, left.node, right.node), t_bool);
+ case Token_NotEq:
+ GB_ASSERT(left.kind == cgValue_Value);
+ GB_ASSERT(right.kind == cgValue_Value);
+ return cg_value(tb_inst_cmp_ne(p->func, left.node, right.node), t_bool);
+ }
+ }
+
+ if (op_kind != Token_CmpEq && op_kind != Token_NotEq) {
+ Type *t = left.type;
+ if (is_type_integer(t) && is_type_different_to_arch_endianness(t)) {
+ Type *platform_type = integer_endian_type_to_platform_type(t);
+ cgValue x = cg_emit_byte_swap(p, left, platform_type);
+ cgValue y = cg_emit_byte_swap(p, right, platform_type);
+ left = x;
+ right = y;
+ } else if (is_type_float(t) && is_type_different_to_arch_endianness(t)) {
+ Type *platform_type = integer_endian_type_to_platform_type(t);
+ cgValue x = cg_emit_conv(p, left, platform_type);
+ cgValue y = cg_emit_conv(p, right, platform_type);
+ left = x;
+ right = y;
+ }
+ }
+
+ a = core_type(left.type);
+ b = core_type(right.type);
+
+
+ if (is_type_integer(a) ||
+ is_type_boolean(a) ||
+ is_type_pointer(a) ||
+ is_type_multi_pointer(a) ||
+ is_type_proc(a) ||
+ is_type_enum(a) ||
+ is_type_typeid(a)) {
+ TB_Node *lhs = left.node;
+ TB_Node *rhs = right.node;
+ TB_Node *res = nullptr;
+
+ bool is_signed = is_type_integer(left.type) && !is_type_unsigned(left.type);
+ switch (op_kind) {
+ case Token_CmpEq: res = tb_inst_cmp_eq(p->func, lhs, rhs); break;
+ case Token_NotEq: res = tb_inst_cmp_ne(p->func, lhs, rhs); break;
+ case Token_Gt: res = tb_inst_cmp_igt(p->func, lhs, rhs, is_signed); break;
+ case Token_GtEq: res = tb_inst_cmp_ige(p->func, lhs, rhs, is_signed); break;
+ case Token_Lt: res = tb_inst_cmp_ilt(p->func, lhs, rhs, is_signed); break;
+ case Token_LtEq: res = tb_inst_cmp_ile(p->func, lhs, rhs, is_signed); break;
+ }
+
+ GB_ASSERT(res != nullptr);
+ return cg_value(res, t_bool);
+ } else if (is_type_float(a)) {
+ TB_Node *lhs = left.node;
+ TB_Node *rhs = right.node;
+ TB_Node *res = nullptr;
+ switch (op_kind) {
+ case Token_CmpEq: res = tb_inst_cmp_eq(p->func, lhs, rhs); break;
+ case Token_NotEq: res = tb_inst_cmp_ne(p->func, lhs, rhs); break;
+ case Token_Gt: res = tb_inst_cmp_fgt(p->func, lhs, rhs); break;
+ case Token_GtEq: res = tb_inst_cmp_fge(p->func, lhs, rhs); break;
+ case Token_Lt: res = tb_inst_cmp_flt(p->func, lhs, rhs); break;
+ case Token_LtEq: res = tb_inst_cmp_fle(p->func, lhs, rhs); break;
+ }
+ GB_ASSERT(res != nullptr);
+ return cg_value(res, t_bool);
+ } else if (is_type_simd_vector(a)) {
+ GB_PANIC("TODO(bill): #simd vector");
+ // LLVMValueRef mask = nullptr;
+ // Type *elem = base_array_type(a);
+ // if (is_type_float(elem)) {
+ // LLVMRealPredicate pred = {};
+ // switch (op_kind) {
+ // case Token_CmpEq: pred = LLVMRealOEQ; break;
+ // case Token_NotEq: pred = LLVMRealONE; break;
+ // }
+ // mask = LLVMBuildFCmp(p->builder, pred, left.value, right.value, "");
+ // } else {
+ // LLVMIntPredicate pred = {};
+ // switch (op_kind) {
+ // case Token_CmpEq: pred = LLVMIntEQ; break;
+ // case Token_NotEq: pred = LLVMIntNE; break;
+ // }
+ // mask = LLVMBuildICmp(p->builder, pred, left.value, right.value, "");
+ // }
+ // GB_ASSERT_MSG(mask != nullptr, "Unhandled comparison kind %s (%s) %.*s %s (%s)", type_to_string(left.type), type_to_string(base_type(left.type)), LIT(token_strings[op_kind]), type_to_string(right.type), type_to_string(base_type(right.type)));
+
+ // /* NOTE(bill, 2022-05-28):
+ // Thanks to Per Vognsen, sign extending <N x i1> to
+ // a vector of the same width as the input vector, bit casting to an integer,
+ // and then comparing against zero is the better option
+ // See: https://lists.llvm.org/pipermail/llvm-dev/2012-September/053046.html
+
+ // // Example assuming 128-bit vector
+
+ // %1 = <4 x float> ...
+ // %2 = <4 x float> ...
+ // %3 = fcmp oeq <4 x float> %1, %2
+ // %4 = sext <4 x i1> %3 to <4 x i32>
+ // %5 = bitcast <4 x i32> %4 to i128
+ // %6 = icmp ne i128 %5, 0
+ // br i1 %6, label %true1, label %false2
+
+ // This will result in 1 cmpps + 1 ptest + 1 br
+ // (even without SSE4.1, contrary to what the mail list states, because of pmovmskb)
+
+ // */
+
+ // unsigned count = cast(unsigned)get_array_type_count(a);
+ // unsigned elem_sz = cast(unsigned)(type_size_of(elem)*8);
+ // LLVMTypeRef mask_type = LLVMVectorType(LLVMIntTypeInContext(p->module->ctx, elem_sz), count);
+ // mask = LLVMBuildSExtOrBitCast(p->builder, mask, mask_type, "");
+
+ // LLVMTypeRef mask_int_type = LLVMIntTypeInContext(p->module->ctx, cast(unsigned)(8*type_size_of(a)));
+ // LLVMValueRef mask_int = LLVMBuildBitCast(p->builder, mask, mask_int_type, "");
+ // res.value = LLVMBuildICmp(p->builder, LLVMIntNE, mask_int, LLVMConstNull(LLVMTypeOf(mask_int)), "");
+ // return res;
+ }
+
+ GB_PANIC("Unhandled comparison kind %s (%s) %.*s %s (%s)", type_to_string(left.type), type_to_string(base_type(left.type)), LIT(token_strings[op_kind]), type_to_string(right.type), type_to_string(base_type(right.type)));
+ return {};
+}
+
+gb_internal cgValue cg_emit_comp_against_nil(cgProcedure *p, TokenKind op_kind, cgValue x) {
+ GB_ASSERT(op_kind == Token_CmpEq || op_kind == Token_NotEq);
+ x = cg_flatten_value(p, x);
+ cgValue res = {};
+ Type *t = x.type;
+
+ TB_DataType dt = cg_data_type(t);
+
+ Type *bt = base_type(t);
+ TypeKind type_kind = bt->kind;
+
+ switch (type_kind) {
+ case Type_Basic:
+ switch (bt->Basic.kind) {
+ case Basic_rawptr:
+ case Basic_cstring:
+ GB_ASSERT(x.kind == cgValue_Value);
+ if (op_kind == Token_CmpEq) {
+ return cg_value(tb_inst_cmp_eq(p->func, x.node, tb_inst_uint(p->func, dt, 0)), t_bool);
+ } else if (op_kind == Token_NotEq) {
+ return cg_value(tb_inst_cmp_ne(p->func, x.node, tb_inst_uint(p->func, dt, 0)), t_bool);
+ }
+ break;
+ case Basic_any:
+ {
+ GB_ASSERT(x.kind == cgValue_Addr);
+ // // TODO(bill): is this correct behaviour for nil comparison for any?
+ cgValue data = cg_emit_struct_ev(p, x, 0);
+ cgValue id = cg_emit_struct_ev(p, x, 1);
+
+ if (op_kind == Token_CmpEq) {
+ TB_Node *a = tb_inst_cmp_eq(p->func, data.node, tb_inst_uint(p->func, data.node->dt, 0));
+ TB_Node *b = tb_inst_cmp_eq(p->func, id.node, tb_inst_uint(p->func, id.node->dt, 0));
+ TB_Node *c = tb_inst_or(p->func, a, b);
+ return cg_value(c, t_bool);
+ } else if (op_kind == Token_NotEq) {
+ TB_Node *a = tb_inst_cmp_ne(p->func, data.node, tb_inst_uint(p->func, data.node->dt, 0));
+ TB_Node *b = tb_inst_cmp_ne(p->func, id.node, tb_inst_uint(p->func, id.node->dt, 0));
+ TB_Node *c = tb_inst_and(p->func, a, b);
+ return cg_value(c, t_bool);
+ }
+ }
+ break;
+ case Basic_typeid:
+ cgValue invalid_typeid = cg_const_value(p, t_typeid, exact_value_i64(0));
+ return cg_emit_comp(p, op_kind, x, invalid_typeid);
+ }
+ break;
+
+ case Type_Enum:
+ case Type_Pointer:
+ case Type_MultiPointer:
+ case Type_Proc:
+ case Type_BitSet:
+ GB_ASSERT(x.kind == cgValue_Value);
+ if (op_kind == Token_CmpEq) {
+ return cg_value(tb_inst_cmp_eq(p->func, x.node, tb_inst_uint(p->func, dt, 0)), t_bool);
+ } else if (op_kind == Token_NotEq) {
+ return cg_value(tb_inst_cmp_ne(p->func, x.node, tb_inst_uint(p->func, dt, 0)), t_bool);
+ }
+ break;
+
+ case Type_Slice:
+ case Type_DynamicArray:
+ case Type_Map:
+ {
+ // NOTE(bill): all of their data "pointer-like" fields are at the 0-index
+ cgValue data = cg_emit_struct_ev(p, x, 0);
+ if (op_kind == Token_CmpEq) {
+ TB_Node *a = tb_inst_cmp_eq(p->func, data.node, tb_inst_uint(p->func, data.node->dt, 0));
+ return cg_value(a, t_bool);
+ } else if (op_kind == Token_NotEq) {
+ TB_Node *a = tb_inst_cmp_ne(p->func, data.node, tb_inst_uint(p->func, data.node->dt, 0));
+ return cg_value(a, t_bool);
+ }
+ }
+ break;
+
+ case Type_Union:
+ {
+ if (type_size_of(t) == 0) {
+ return cg_const_bool(p, t_bool, op_kind == Token_CmpEq);
+ } else if (is_type_union_maybe_pointer(t)) {
+ cgValue tag = cg_emit_transmute(p, x, t_rawptr);
+ return cg_emit_comp_against_nil(p, op_kind, tag);
+ } else {
+ GB_ASSERT("TODO(bill): cg_emit_union_tag_value");
+ // cgValue tag = cg_emit_union_tag_value(p, x);
+ // return cg_emit_comp(p, op_kind, tag, cg_zero(p->module, tag.type));
+ }
+ }
+ break;
+ case Type_Struct:
+ GB_PANIC("TODO(bill): cg_emit_struct_ev");
+ // if (is_type_soa_struct(t)) {
+ // Type *bt = base_type(t);
+ // if (bt->Struct.soa_kind == StructSoa_Slice) {
+ // LLVMValueRef the_value = {};
+ // if (bt->Struct.fields.count == 0) {
+ // cgValue len = cg_soa_struct_len(p, x);
+ // the_value = len.value;
+ // } else {
+ // cgValue first_field = cg_emit_struct_ev(p, x, 0);
+ // the_value = first_field.value;
+ // }
+ // if (op_kind == Token_CmpEq) {
+ // res.value = LLVMBuildIsNull(p->builder, the_value, "");
+ // return res;
+ // } else if (op_kind == Token_NotEq) {
+ // res.value = LLVMBuildIsNotNull(p->builder, the_value, "");
+ // return res;
+ // }
+ // } else if (bt->Struct.soa_kind == StructSoa_Dynamic) {
+ // LLVMValueRef the_value = {};
+ // if (bt->Struct.fields.count == 0) {
+ // cgValue cap = cg_soa_struct_cap(p, x);
+ // the_value = cap.value;
+ // } else {
+ // cgValue first_field = cg_emit_struct_ev(p, x, 0);
+ // the_value = first_field.value;
+ // }
+ // if (op_kind == Token_CmpEq) {
+ // res.value = LLVMBuildIsNull(p->builder, the_value, "");
+ // return res;
+ // } else if (op_kind == Token_NotEq) {
+ // res.value = LLVMBuildIsNotNull(p->builder, the_value, "");
+ // return res;
+ // }
+ // }
+ // } else if (is_type_struct(t) && type_has_nil(t)) {
+ // auto args = array_make<cgValue>(permanent_allocator(), 2);
+ // cgValue lhs = cg_address_from_load_or_generate_local(p, x);
+ // args[0] = cg_emit_conv(p, lhs, t_rawptr);
+ // args[1] = cg_const_int(p->module, t_int, type_size_of(t));
+ // cgValue val = cg_emit_runtime_call(p, "memory_compare_zero", args);
+ // cgValue res = cg_emit_comp(p, op_kind, val, cg_const_int(p->module, t_int, 0));
+ // return res;
+ // }
+ break;
+ }
+ GB_PANIC("Unknown handled type: %s -> %s", type_to_string(t), type_to_string(bt));
+ return {};
+}
+
gb_internal cgValue cg_emit_conv(cgProcedure *p, cgValue value, Type *t) {
	// Emits a value conversion of `value` to type `t`, returning the converted
	// cgValue. The chain of rules below is ORDER-SENSITIVE: identity and
	// untyped cases first, then per-category numeric/pointer/string rules,
	// then size-equal transmutes; panics on any conversion that is not (yet)
	// handled. Many branches are still TODO stubs ported from the LLVM backend.
	t = reduce_tuple_to_single_type(t);

	value = cg_flatten_value(p, value);

	Type *src_type = value.type;
	if (are_types_identical(t, src_type)) {
		// identity conversion: nothing to emit
		return value;
	}

	if (is_type_untyped_uninit(src_type)) {
		// return cg_const_undef(m, t);
		return cg_const_nil(p, t);
	}
	if (is_type_untyped_nil(src_type)) {
		return cg_const_nil(p, t);
	}

	Type *src = core_type(src_type);
	Type *dst = core_type(t);
	GB_ASSERT(src != nullptr);
	GB_ASSERT(dst != nullptr);

	if (are_types_identical(src, dst)) {
		// same core representation, different named type: a bit-identical transmute
		return cg_emit_transmute(p, value, t);
	}

	TB_DataType st = cg_data_type(src);
	if (value.kind == cgValue_Value && !TB_IS_VOID_TYPE(value.node->dt)) {
		// prefer the data type the node actually carries over the computed one
		st = value.node->dt;
	}
	TB_DataType dt = cg_data_type(t);

	// integer -> integer (truncate / bitcast / extend, with endianness fix-ups)
	if (is_type_integer(src) && is_type_integer(dst)) {
		GB_ASSERT(src->kind == Type_Basic &&
		          dst->kind == Type_Basic);
		GB_ASSERT(value.kind == cgValue_Value);

		i64 sz = type_size_of(default_type(src));
		i64 dz = type_size_of(default_type(dst));

		if (sz == dz) {
			if (dz > 1 && !types_have_same_internal_endian(src, dst)) {
				return cg_emit_byte_swap(p, value, t);
			}
			// same size, same endianness: just relabel the type
			value.type = t;
			return value;
		}

		if (sz > 1 && is_type_different_to_arch_endianness(src)) {
			// normalize the source to platform endianness before resizing
			Type *platform_src_type = integer_endian_type_to_platform_type(src);
			value = cg_emit_byte_swap(p, value, platform_src_type);
		}

		TB_Node* (*op)(TB_Function* f, TB_Node* src, TB_DataType dt) = tb_inst_trunc;

		if (dz < sz) {
			op = tb_inst_trunc;
		} else if (dz == sz) {
			op = tb_inst_bitcast;
		} else if (dz > sz) {
			op = is_type_unsigned(src) ? tb_inst_zxt : tb_inst_sxt; // zero/sign extend based on source signedness
		}

		if (dz > 1 && is_type_different_to_arch_endianness(dst)) {
			// resize in platform endianness, then swap into the destination endianness
			Type *platform_dst_type = integer_endian_type_to_platform_type(dst);

			cgValue res = cg_value(op(p->func, value.node, cg_data_type(platform_dst_type)), platform_dst_type);
			return cg_emit_byte_swap(p, res, t);
		} else {
			return cg_value(op(p->func, value.node, dt), t);
		}
	}

	// boolean -> boolean/integer
	if (is_type_boolean(src) && (is_type_boolean(dst) || is_type_integer(dst))) {
		// normalize to 0/1 via (value != 0), then widen
		TB_Node *v = tb_inst_cmp_ne(p->func, value.node, tb_inst_uint(p->func, st, 0));
		return cg_value(tb_inst_zxt(p->func, v, dt), t);
	}

	// integer -> boolean
	if (is_type_integer(src) && is_type_boolean(dst)) {
		TB_Node *v = tb_inst_cmp_ne(p->func, value.node, tb_inst_uint(p->func, st, 0));
		return cg_value(tb_inst_zxt(p->func, v, dt), t);
	}

	// cstring <-> pointer-like forms are all representation-preserving transmutes
	if (is_type_cstring(src) && is_type_u8_ptr(dst)) {
		return cg_emit_transmute(p, value, dst);
	}
	if (is_type_u8_ptr(src) && is_type_cstring(dst)) {
		return cg_emit_transmute(p, value, dst);
	}
	if (is_type_cstring(src) && is_type_u8_multi_ptr(dst)) {
		return cg_emit_transmute(p, value, dst);
	}
	if (is_type_u8_multi_ptr(src) && is_type_cstring(dst)) {
		return cg_emit_transmute(p, value, dst);
	}
	if (is_type_cstring(src) && is_type_rawptr(dst)) {
		return cg_emit_transmute(p, value, dst);
	}
	if (is_type_rawptr(src) && is_type_cstring(dst)) {
		return cg_emit_transmute(p, value, dst);
	}


	// cstring -> string requires a runtime strlen; done via runtime call
	if (are_types_identical(src, t_cstring) && are_types_identical(dst, t_string)) {
		TEMPORARY_ALLOCATOR_GUARD();
		cgValue c = cg_emit_conv(p, value, t_cstring);
		auto args = slice_make<cgValue>(temporary_allocator(), 1);
		args[0] = c;
		cgValue s = cg_emit_runtime_call(p, "cstring_to_string", args);
		return cg_emit_conv(p, s, dst);
	}

	// float -> float
	if (is_type_float(src) && is_type_float(dst)) {
		i64 sz = type_size_of(src);
		i64 dz = type_size_of(dst);

		if (sz == 2 || dz == 2) {
			GB_PANIC("TODO(bill): f16 conversions");
		}


		if (dz == sz) {
			if (types_have_same_internal_endian(src, dst)) {
				return cg_value(value.node, t);
			} else {
				return cg_emit_byte_swap(p, value, t);
			}
		}

		if (is_type_different_to_arch_endianness(src) || is_type_different_to_arch_endianness(dst)) {
			// route through platform-endian types, then swap back if needed
			Type *platform_src_type = integer_endian_type_to_platform_type(src);
			Type *platform_dst_type = integer_endian_type_to_platform_type(dst);
			cgValue res = {};
			res = cg_emit_conv(p, value, platform_src_type);
			res = cg_emit_conv(p, res, platform_dst_type);
			if (is_type_different_to_arch_endianness(dst)) {
				res = cg_emit_byte_swap(p, res, t);
			}
			return cg_emit_conv(p, res, t);
		}


		if (dz >= sz) {
			return cg_value(tb_inst_fpxt(p->func, value.node, dt), t);
		}
		return cg_value(tb_inst_trunc(p->func, value.node, dt), t);
	}

	if (is_type_complex(src) && is_type_complex(dst)) {
		GB_PANIC("TODO(bill): complex -> complex");
	}

	if (is_type_quaternion(src) && is_type_quaternion(dst)) {
		// @QuaternionLayout
		GB_PANIC("TODO(bill): quaternion -> quaternion");
	}
	if (is_type_integer(src) && is_type_complex(dst)) {
		GB_PANIC("TODO(bill): int -> complex");
	}
	if (is_type_float(src) && is_type_complex(dst)) {
		GB_PANIC("TODO(bill): float -> complex");
	}
	if (is_type_integer(src) && is_type_quaternion(dst)) {
		GB_PANIC("TODO(bill): int -> quaternion");
	}
	if (is_type_float(src) && is_type_quaternion(dst)) {
		GB_PANIC("TODO(bill): float -> quaternion");
	}
	if (is_type_complex(src) && is_type_quaternion(dst)) {
		GB_PANIC("TODO(bill): complex -> quaternion");
	}


	// float <-> integer
	if (is_type_float(src) && is_type_integer(dst)) {
		if (is_type_different_to_arch_endianness(src) || is_type_different_to_arch_endianness(dst)) {
			Type *platform_src_type = integer_endian_type_to_platform_type(src);
			Type *platform_dst_type = integer_endian_type_to_platform_type(dst);
			cgValue res = {};
			res = cg_emit_conv(p, value, platform_src_type);
			res = cg_emit_conv(p, res, platform_dst_type);
			return cg_emit_conv(p, res, t);
		}

		// NOTE(review): the LLVM backend routes 128-bit destinations through a
		// runtime call (see commented reference below); not yet ported here.
		// if (is_type_integer_128bit(dst)) {
		// 	TEMPORARY_ALLOCATOR_GUARD();

		// 	auto args = array_make<lbValue>(temporary_allocator(), 1);
		// 	args[0] = value;
		// 	char const *call = "fixunsdfdi";
		// 	if (is_type_unsigned(dst)) {
		// 		call = "fixunsdfti";
		// 	}
		// 	lbValue res_i128 = lb_emit_runtime_call(p, call, args);
		// 	return lb_emit_conv(p, res_i128, t);
		// }

		bool is_signed = !is_type_unsigned(dst);
		return cg_value(tb_inst_float2int(p->func, value.node, dt, is_signed), t);
	}
	if (is_type_integer(src) && is_type_float(dst)) {
		if (is_type_different_to_arch_endianness(src) || is_type_different_to_arch_endianness(dst)) {
			Type *platform_src_type = integer_endian_type_to_platform_type(src);
			Type *platform_dst_type = integer_endian_type_to_platform_type(dst);
			cgValue res = {};
			res = cg_emit_conv(p, value, platform_src_type);
			res = cg_emit_conv(p, res, platform_dst_type);
			if (is_type_different_to_arch_endianness(dst)) {
				res = cg_emit_byte_swap(p, res, t);
			}
			return cg_emit_conv(p, res, t);
		}

		// if (is_type_integer_128bit(src)) {
		// 	TEMPORARY_ALLOCATOR_GUARD();

		// 	auto args = array_make<lbValue>(temporary_allocator(), 1);
		// 	args[0] = value;
		// 	char const *call = "floattidf";
		// 	if (is_type_unsigned(src)) {
		// 		call = "floattidf_unsigned";
		// 	}
		// 	lbValue res_f64 = lb_emit_runtime_call(p, call, args);
		// 	return lb_emit_conv(p, res_f64, t);
		// }

		bool is_signed = !is_type_unsigned(dst);
		return cg_value(tb_inst_int2float(p->func, value.node, dt, is_signed), t);
	}

	if (is_type_simd_vector(dst)) {
		GB_PANIC("TODO(bill): ? -> #simd vector");
	}


	// Pointer <-> uintptr
	if (is_type_pointer(src) && is_type_uintptr(dst)) {
		return cg_value(tb_inst_ptr2int(p->func, value.node, dt), t);
	}
	if (is_type_uintptr(src) && is_type_pointer(dst)) {
		return cg_value(tb_inst_int2ptr(p->func, value.node), t);
	}
	if (is_type_multi_pointer(src) && is_type_uintptr(dst)) {
		return cg_value(tb_inst_ptr2int(p->func, value.node, dt), t);
	}
	if (is_type_uintptr(src) && is_type_multi_pointer(dst)) {
		return cg_value(tb_inst_int2ptr(p->func, value.node), t);
	}

	if (is_type_union(dst)) {
		GB_PANIC("TODO(bill): ? -> union");
	}

	// NOTE(bill): This has to be done before 'Pointer <-> Pointer' as it's
	// subtype polymorphism casting
	if (check_is_assignable_to_using_subtype(src_type, t)) {
		GB_PANIC("TODO(bill): ? -> subtyping");
	}

	// Pointer <-> Pointer: same machine representation, only the type changes
	if (is_type_pointer(src) && is_type_pointer(dst)) {
		return cg_value(value.node, t);
	}
	if (is_type_multi_pointer(src) && is_type_pointer(dst)) {
		return cg_value(value.node, t);
	}
	if (is_type_pointer(src) && is_type_multi_pointer(dst)) {
		return cg_value(value.node, t);
	}
	if (is_type_multi_pointer(src) && is_type_multi_pointer(dst)) {
		return cg_value(value.node, t);
	}

	// proc <-> proc
	if (is_type_proc(src) && is_type_proc(dst)) {
		return cg_value(value.node, t);
	}

	// pointer -> proc
	if (is_type_pointer(src) && is_type_proc(dst)) {
		return cg_value(value.node, t);
	}
	// proc -> pointer
	if (is_type_proc(src) && is_type_pointer(dst)) {
		return cg_value(value.node, t);
	}

	// []byte/[]u8 <-> string (identical {ptr, len} layout)
	if (is_type_u8_slice(src) && is_type_string(dst)) {
		return cg_emit_transmute(p, value, t);
	}
	if (is_type_string(src) && is_type_u8_slice(dst)) {
		return cg_emit_transmute(p, value, t);
	}

	if (is_type_matrix(dst) && !is_type_matrix(src)) {
		GB_PANIC("TODO(bill): !matrix -> matrix");
	}

	if (is_type_matrix(dst) && is_type_matrix(src)) {
		GB_PANIC("TODO(bill): matrix -> matrix");
	}

	// ? -> any: box the value as {rawptr data, typeid id} in a fresh local
	if (is_type_any(dst)) {
		if (is_type_untyped_nil(src) ||
		    is_type_untyped_uninit(src)) {
			return cg_const_nil(p, t);
		}

		cgAddr result = cg_add_local(p, t, nullptr, false);

		// NOTE: this `st` (a Type*) shadows the TB_DataType `st` above
		Type *st = default_type(src_type);

		cgValue data = cg_address_from_load_or_generate_local(p, value);
		GB_ASSERT(is_type_pointer(data.type));
		GB_ASSERT(is_type_typed(st));

		data = cg_emit_conv(p, data, t_rawptr);

		cgValue id = cg_typeid(p, st);
		cgValue data_ptr = cg_emit_struct_ep(p, result.addr, 0);
		cgValue id_ptr   = cg_emit_struct_ep(p, result.addr, 1);

		cg_emit_store(p, data_ptr, data);
		cg_emit_store(p, id_ptr, id);

		return cg_addr_load(p, result);
	}

	i64 src_sz = type_size_of(src);
	i64 dst_sz = type_size_of(dst);

	// same-size conversions that are really transmutes
	if (src_sz == dst_sz) {
		// bit_set <-> integer
		if (is_type_integer(src) && is_type_bit_set(dst)) {
			cgValue v = cg_emit_conv(p, value, bit_set_to_int(dst));
			return cg_emit_transmute(p, v, t);
		}
		if (is_type_bit_set(src) && is_type_integer(dst)) {
			cgValue bs = cg_emit_transmute(p, value, bit_set_to_int(src));
			return cg_emit_conv(p, bs, dst);
		}

		// typeid <-> integer
		if (is_type_integer(src) && is_type_typeid(dst)) {
			return cg_emit_transmute(p, value, dst);
		}
		if (is_type_typeid(src) && is_type_integer(dst)) {
			return cg_emit_transmute(p, value, dst);
		}
	}


	if (is_type_untyped(src)) {
		if (is_type_string(src) && is_type_string(dst)) {
			// spill the untyped string constant through a local to retype it
			cgAddr result = cg_add_local(p, t, nullptr, false);
			cg_addr_store(p, result, value);
			return cg_addr_load(p, result);
		}
	}


	// Unhandled conversion: dump diagnostics before panicking
	gb_printf_err("%.*s\n", LIT(p->name));
	gb_printf_err("cg_emit_conv: src -> dst\n");
	gb_printf_err("Not Identical %s != %s\n", type_to_string(src_type), type_to_string(t));
	gb_printf_err("Not Identical %s != %s\n", type_to_string(src), type_to_string(dst));
	gb_printf_err("Not Identical %p != %p\n", src_type, t);
	gb_printf_err("Not Identical %p != %p\n", src, dst);


	GB_PANIC("Invalid type conversion: '%s' to '%s' for procedure '%.*s'",
	         type_to_string(src_type), type_to_string(t),
	         LIT(p->name));

	return {};
}
+
gb_internal cgValue cg_emit_arith(cgProcedure *p, TokenKind op, cgValue lhs, cgValue rhs, Type *type) {
	// Emits a binary arithmetic/bitwise operation `lhs op rhs` with result
	// type `type`. Both operands are first converted to `type` and flattened
	// to register values. Non-platform-endian operands are byte-swapped (or
	// converted, for floats) to platform endianness, computed, then swapped
	// back — except for pure bitwise ops, which are endian-agnostic and jump
	// straight to `handle_op`.
	if (is_type_array_like(lhs.type) || is_type_array_like(rhs.type)) {
		GB_PANIC("TODO(bill): cg_emit_arith_array");
	} else if (is_type_matrix(lhs.type) || is_type_matrix(rhs.type)) {
		GB_PANIC("TODO(bill): cg_emit_arith_matrix");
	} else if (is_type_complex(type)) {
		GB_PANIC("TODO(bill): cg_emit_arith complex");
	} else if (is_type_quaternion(type)) {
		GB_PANIC("TODO(bill): cg_emit_arith quaternion");
	}

	lhs = cg_flatten_value(p, cg_emit_conv(p, lhs, type));
	rhs = cg_flatten_value(p, cg_emit_conv(p, rhs, type));
	GB_ASSERT(lhs.kind == cgValue_Value);
	GB_ASSERT(rhs.kind == cgValue_Value);

	if (is_type_integer(type) && is_type_different_to_arch_endianness(type)) {
		switch (op) {
		case Token_AndNot:
		case Token_And:
		case Token_Or:
		case Token_Xor:
			// bitwise ops are independent of byte order; no swap needed
			goto handle_op;
		}

		// swap to platform endianness, recurse, swap the result back
		Type *platform_type = integer_endian_type_to_platform_type(type);
		cgValue x = cg_emit_byte_swap(p, lhs, integer_endian_type_to_platform_type(lhs.type));
		cgValue y = cg_emit_byte_swap(p, rhs, integer_endian_type_to_platform_type(rhs.type));

		cgValue res = cg_emit_arith(p, op, x, y, platform_type);

		return cg_emit_byte_swap(p, res, type);
	}

	if (is_type_float(type) && is_type_different_to_arch_endianness(type)) {
		Type *platform_type = integer_endian_type_to_platform_type(type);
		cgValue x = cg_emit_conv(p, lhs, integer_endian_type_to_platform_type(lhs.type));
		cgValue y = cg_emit_conv(p, rhs, integer_endian_type_to_platform_type(rhs.type));

		cgValue res = cg_emit_arith(p, op, x, y, platform_type);

		return cg_emit_byte_swap(p, res, type);
	}

handle_op:;

	// NOTE(bill): Bit Set Aliases for + and -
	if (is_type_bit_set(type)) {
		switch (op) {
		case Token_Add: op = Token_Or;     break; // set union
		case Token_Sub: op = Token_AndNot; break; // set difference
		}
	}

	TB_ArithmeticBehavior arith_behavior = cast(TB_ArithmeticBehavior)0;

	Type *integral_type = type;
	if (is_type_simd_vector(integral_type)) {
		GB_PANIC("TODO(bill): cg_emit_arith #simd vector");
		// integral_type = core_array_type(integral_type);
	}

	switch (op) {
	case Token_Add:
		if (is_type_float(integral_type)) {
			return cg_value(tb_inst_fadd(p->func, lhs.node, rhs.node), type);
		}
		return cg_value(tb_inst_add(p->func, lhs.node, rhs.node, arith_behavior), type);
	case Token_Sub:
		if (is_type_float(integral_type)) {
			return cg_value(tb_inst_fsub(p->func, lhs.node, rhs.node), type);
		}
		return cg_value(tb_inst_sub(p->func, lhs.node, rhs.node, arith_behavior), type);
	case Token_Mul:
		if (is_type_float(integral_type)) {
			return cg_value(tb_inst_fmul(p->func, lhs.node, rhs.node), type);
		}
		return cg_value(tb_inst_mul(p->func, lhs.node, rhs.node, arith_behavior), type);
	case Token_Quo:
		if (is_type_float(integral_type)) {
			return cg_value(tb_inst_fdiv(p->func, lhs.node, rhs.node), type);
		}
		return cg_value(tb_inst_div(p->func, lhs.node, rhs.node, !is_type_unsigned(integral_type)), type);
	case Token_Mod:
		if (is_type_float(integral_type)) {
			GB_PANIC("TODO(bill): float %% float");
		}
		return cg_value(tb_inst_mod(p->func, lhs.node, rhs.node, !is_type_unsigned(integral_type)), type);
	case Token_ModMod:
		// %%: floored modulo. For unsigned it equals %, for signed it is
		// computed as ((a % b) + b) % b so the result takes the divisor's sign.
		if (is_type_unsigned(integral_type)) {
			return cg_value(tb_inst_mod(p->func, lhs.node, rhs.node, false), type);
		} else {
			TB_Node *a = tb_inst_mod(p->func, lhs.node, rhs.node, true);
			TB_Node *b = tb_inst_add(p->func, a, rhs.node, arith_behavior);
			TB_Node *c = tb_inst_mod(p->func, b, rhs.node, true);
			return cg_value(c, type);
		}

	case Token_And:
		return cg_value(tb_inst_and(p->func, lhs.node, rhs.node), type);
	case Token_Or:
		return cg_value(tb_inst_or(p->func, lhs.node, rhs.node), type);
	case Token_Xor:
		return cg_value(tb_inst_xor(p->func, lhs.node, rhs.node), type);
	case Token_Shl:
		{
			// Shift amounts >= bit width yield 0 (selected at runtime) rather
			// than the UB/machine-dependent result of the raw shift instruction.
			rhs = cg_emit_conv(p, rhs, lhs.type);
			TB_DataType dt = cg_data_type(lhs.type);
			TB_Node *lhsval = lhs.node;
			TB_Node *bits = rhs.node;

			TB_Node *bit_size = tb_inst_uint(p->func, dt, 8*type_size_of(lhs.type));
			TB_Node *zero = tb_inst_uint(p->func, dt, 0);

			TB_Node *width_test = tb_inst_cmp_ilt(p->func, bits, bit_size, false);

			TB_Node *res = tb_inst_shl(p->func, lhsval, bits, arith_behavior);
			res = tb_inst_select(p->func, width_test, res, zero);
			return cg_value(res, type);
		}
	case Token_Shr:
		{
			// Logical shift for unsigned, arithmetic for signed; oversized
			// shift amounts select 0 (NOTE(review): for negative signed values
			// an oversized >> selecting 0 rather than -1 — confirm against the
			// LLVM backend's semantics).
			rhs = cg_emit_conv(p, rhs, lhs.type);
			TB_DataType dt = cg_data_type(lhs.type);
			TB_Node *lhsval = lhs.node;
			TB_Node *bits = rhs.node;

			TB_Node *bit_size = tb_inst_uint(p->func, dt, 8*type_size_of(lhs.type));
			TB_Node *zero = tb_inst_uint(p->func, dt, 0);

			TB_Node *width_test = tb_inst_cmp_ilt(p->func, bits, bit_size, false);

			TB_Node *res = nullptr;

			if (is_type_unsigned(integral_type)) {
				res = tb_inst_shr(p->func, lhsval, bits);
			} else {
				res = tb_inst_sar(p->func, lhsval, bits);
			}


			res = tb_inst_select(p->func, width_test, res, zero);
			return cg_value(res, type);
		}
	case Token_AndNot:
		// a &~ b  ==  a & ~b
		return cg_value(tb_inst_and(p->func, lhs.node, tb_inst_not(p->func, rhs.node)), type);
	}

	GB_PANIC("unhandled operator of cg_emit_arith");

	return {};
}
+
+
+// Writes the `data` and `len` fields of the slice value stored at `slice`.
+// The incoming values are converted to the exact field types before storing.
+gb_internal void cg_fill_slice(cgProcedure *p, cgAddr const &slice, cgValue data, cgValue len) {
+	cgValue base_ptr   = cg_addr_get_ptr(p, slice);
+	cgValue field_data = cg_emit_struct_ep(p, base_ptr, 0);
+	cgValue field_len  = cg_emit_struct_ep(p, base_ptr, 1);
+
+	cgValue the_data = cg_emit_conv(p, data, type_deref(field_data.type));
+	cgValue the_len  = cg_emit_conv(p, len, t_int);
+	cg_emit_store(p, field_data, the_data);
+	cg_emit_store(p, field_len,  the_len);
+}
+
+// Builds the address of a slice expression `x[lo:hi]`.
+// Dispatches on the base type (string/slice, multi-pointer, fixed array, …);
+// several cases (dynamic arrays, #soa structs) are not ported to this backend
+// yet and panic — the commented-out lb_* code is the LLVM reference to port.
+// NOTE(review): bounds checks are currently commented out in every case.
+gb_internal cgAddr cg_build_addr_slice_expr(cgProcedure *p, Ast *expr) {
+	ast_node(se, SliceExpr, expr);
+
+	// Missing low index defaults to 0; missing high index is filled per-case below.
+	cgValue low = cg_const_int(p, t_int, 0);
+	cgValue high = {};
+
+	if (se->low != nullptr) {
+		low = cg_correct_endianness(p, cg_build_expr(p, se->low));
+	}
+	if (se->high != nullptr) {
+		high = cg_correct_endianness(p, cg_build_expr(p, se->high));
+	}
+
+	bool no_indices = se->low == nullptr && se->high == nullptr;
+	gb_unused(no_indices);
+
+	cgAddr addr = cg_build_addr(p, se->expr);
+	cgValue base = cg_addr_load(p, addr);
+	Type *type = base_type(base.type);
+
+	// Auto-dereference a pointer to a sliceable value.
+	if (is_type_pointer(type)) {
+		type = base_type(type_deref(type));
+		addr = cg_addr(base);
+		base = cg_addr_load(p, addr);
+	}
+
+	switch (type->kind) {
+	case Type_Basic:
+	case Type_Slice: {
+		// Strings and slices share the same {data, len} layout.
+		if (type->kind == Type_Basic) {
+			GB_ASSERT(type->Basic.kind == Basic_string);
+		}
+
+		Type *slice_type = type;
+		if (high.node == nullptr) {
+			cgValue len = cg_builtin_len(p, base);
+			high = len;
+		}
+
+		if (!no_indices) {
+			// cg_emit_slice_bounds_check(p, se->open, low, high, len, se->low != nullptr);
+		}
+
+		cgValue elem = cg_emit_ptr_offset(p, cg_builtin_raw_data(p, base), low);
+		cgValue new_len = cg_emit_arith(p, Token_Sub, high, low, t_int);
+
+		cgAddr slice = cg_add_local(p, slice_type, nullptr, true);
+		cg_fill_slice(p, slice, elem, new_len);
+		return slice;
+	}
+
+	case Type_RelativeMultiPointer:
+		GB_PANIC("TODO(bill): Type_RelativeMultiPointer should be handled above already on the cg_addr_load");
+		break;
+
+	case Type_DynamicArray: {
+		// Type *elem_type = type->DynamicArray.elem;
+		// Type *slice_type = alloc_type_slice(elem_type);
+
+		// lbValue len = lb_dynamic_array_len(p, base);
+		// if (high.value == nullptr) high = len;
+
+		// if (!no_indices) {
+		// 	lb_emit_slice_bounds_check(p, se->open, low, high, len, se->low != nullptr);
+		// }
+
+		// lbValue elem = lb_emit_ptr_offset(p, lb_dynamic_array_elem(p, base), low);
+		// lbValue new_len = lb_emit_arith(p, Token_Sub, high, low, t_int);
+
+		// lbAddr slice = lb_add_local_generated(p, slice_type, false);
+		// lb_fill_slice(p, slice, elem, new_len);
+		// return slice;
+		GB_PANIC("cg_build_addr_slice_expr Type_DynamicArray");
+		break;
+	}
+
+	case Type_MultiPointer: {
+		Type *res_type = type_of_expr(expr);
+		if (se->high == nullptr) {
+			// `mp[lo:]` yields another multi-pointer: just offset the base.
+			cgAddr res = cg_add_local(p, res_type, nullptr, false);
+			GB_ASSERT(base.kind == cgValue_Value);
+			GB_ASSERT(low.kind == cgValue_Value);
+
+			i64 stride = type_size_of(type->MultiPointer.elem);
+			cgValue offset = cg_value(tb_inst_array_access(p->func, base.node, low.node, stride), base.type);
+			cg_addr_store(p, res, offset);
+			return res;
+		} else {
+			// `mp[lo:hi]` yields a slice: store {offset pointer, hi-lo} manually.
+			cgAddr res = cg_add_local(p, res_type, nullptr, true);
+			low = cg_emit_conv(p, low, t_int);
+			high = cg_emit_conv(p, high, t_int);
+
+			// cg_emit_multi_pointer_slice_bounds_check(p, se->open, low, high);
+
+			i64 stride = type_size_of(type->MultiPointer.elem);
+			TB_Node *offset = tb_inst_array_access(p->func, base.node, low.node, stride);
+			TB_Node *len = tb_inst_sub(p->func, high.node, low.node, cast(TB_ArithmeticBehavior)0);
+
+			TB_Node *data_ptr = tb_inst_member_access(p->func, res.addr.node, type_offset_of(res_type, 0));
+			TB_Node *len_ptr  = tb_inst_member_access(p->func, res.addr.node, type_offset_of(res_type, 1));
+
+			tb_inst_store(p->func, TB_TYPE_PTR, data_ptr, offset, cast(TB_CharUnits)build_context.ptr_size, false);
+			tb_inst_store(p->func, TB_TYPE_INT, len_ptr, len, cast(TB_CharUnits)build_context.int_size, false);
+			return res;
+		}
+	}
+
+	case Type_Array: {
+		// Slicing a fixed array produces a slice into the array's storage.
+		Type *slice_type = type_of_expr(expr);
+		GB_ASSERT(is_type_slice(slice_type));
+		cgValue len = cg_const_int(p, t_int, type->Array.count);
+		if (high.node == nullptr) high = len;
+
+		// bool low_const = type_and_value_of_expr(se->low).mode == Addressing_Constant;
+		// bool high_const = type_and_value_of_expr(se->high).mode == Addressing_Constant;
+		// if (!low_const || !high_const) {
+		// 	if (!no_indices) {
+		// 		lb_emit_slice_bounds_check(p, se->open, low, high, len, se->low != nullptr);
+		// 	}
+		// }
+		cgValue elem = cg_emit_ptr_offset(p, cg_builtin_raw_data(p, cg_addr_get_ptr(p, addr)), low);
+		cgValue new_len = cg_emit_arith(p, Token_Sub, high, low, t_int);
+
+		cgAddr slice = cg_add_local(p, slice_type, nullptr, true);
+		cg_fill_slice(p, slice, elem, new_len);
+		return slice;
+	}
+
+
+	case Type_Struct:
+		// #soa struct slicing — not ported yet; LLVM reference kept below.
+		// if (is_type_soa_struct(type)) {
+		// 	lbValue len = lb_soa_struct_len(p, lb_addr_get_ptr(p, addr));
+		// 	if (high.value == nullptr) high = len;
+
+		// 	if (!no_indices) {
+		// 		lb_emit_slice_bounds_check(p, se->open, low, high, len, se->low != nullptr);
+		// 	}
+		// 	#if 1
+
+		// 	lbAddr dst = lb_add_local_generated(p, type_of_expr(expr), true);
+		// 	if (type->Struct.soa_kind == StructSoa_Fixed) {
+		// 		i32 field_count = cast(i32)type->Struct.fields.count;
+		// 		for (i32 i = 0; i < field_count; i++) {
+		// 			lbValue field_dst = lb_emit_struct_ep(p, dst.addr, i);
+		// 			lbValue field_src = lb_emit_struct_ep(p, lb_addr_get_ptr(p, addr), i);
+		// 			field_src = lb_emit_array_ep(p, field_src, low);
+		// 			lb_emit_store(p, field_dst, field_src);
+		// 		}
+
+		// 		lbValue len_dst = lb_emit_struct_ep(p, dst.addr, field_count);
+		// 		lbValue new_len = lb_emit_arith(p, Token_Sub, high, low, t_int);
+		// 		lb_emit_store(p, len_dst, new_len);
+		// 	} else if (type->Struct.soa_kind == StructSoa_Slice) {
+		// 		if (no_indices) {
+		// 			lb_addr_store(p, dst, base);
+		// 		} else {
+		// 			i32 field_count = cast(i32)type->Struct.fields.count - 1;
+		// 			for (i32 i = 0; i < field_count; i++) {
+		// 				lbValue field_dst = lb_emit_struct_ep(p, dst.addr, i);
+		// 				lbValue field_src = lb_emit_struct_ev(p, base, i);
+		// 				field_src = lb_emit_ptr_offset(p, field_src, low);
+		// 				lb_emit_store(p, field_dst, field_src);
+		// 			}
+
+
+		// 			lbValue len_dst = lb_emit_struct_ep(p, dst.addr, field_count);
+		// 			lbValue new_len = lb_emit_arith(p, Token_Sub, high, low, t_int);
+		// 			lb_emit_store(p, len_dst, new_len);
+		// 		}
+		// 	} else if (type->Struct.soa_kind == StructSoa_Dynamic) {
+		// 		i32 field_count = cast(i32)type->Struct.fields.count - 3;
+		// 		for (i32 i = 0; i < field_count; i++) {
+		// 			lbValue field_dst = lb_emit_struct_ep(p, dst.addr, i);
+		// 			lbValue field_src = lb_emit_struct_ev(p, base, i);
+		// 			field_src = lb_emit_ptr_offset(p, field_src, low);
+		// 			lb_emit_store(p, field_dst, field_src);
+		// 		}
+
+
+		// 		lbValue len_dst = lb_emit_struct_ep(p, dst.addr, field_count);
+		// 		lbValue new_len = lb_emit_arith(p, Token_Sub, high, low, t_int);
+		// 		lb_emit_store(p, len_dst, new_len);
+		// 	}
+
+		// 	return dst;
+		// 	#endif
+		// }
+		GB_PANIC("cg_build_addr_slice_expr Type_Struct");
+		break;
+
+	}
+
+	GB_PANIC("Unknown slicable type");
+	return {};
+}
+
+// Emits a unary operation `op x` and converts the result to `type`:
+//  - Token_Add is the identity,
+//  - Token_Not is boolean not, Token_Xor is bitwise not, Token_Sub is negation.
+// Array-like, complex, quaternion, simd and matrix operands are not ported to
+// this backend yet and panic; the commented-out LLVM code is the porting guide.
+gb_internal cgValue cg_emit_unary_arith(cgProcedure *p, TokenKind op, cgValue x, Type *type) {
+	switch (op) {
+	case Token_Add:
+		return x;
+	case Token_Not: // Boolean not
+	case Token_Xor: // Bitwise not
+	case Token_Sub: // Number negation
+		break;
+	case Token_Pointer:
+		GB_PANIC("This should be handled elsewhere");
+		break;
+	}
+
+	x = cg_flatten_value(p, x);
+
+	if (is_type_array_like(x.type)) {
+		GB_PANIC("TODO(bill): cg_emit_unary_arith is_type_array_like");
+		// // IMPORTANT TODO(bill): This is very wasteful with regards to stack memory
+		// Type *tl = base_type(x.type);
+		// cgValue val = cg_address_from_load_or_generate_local(p, x);
+		// GB_ASSERT(is_type_array_like(type));
+		// Type *elem_type = base_array_type(type);
+
+		// // NOTE(bill): Doesn't need to be zero because it will be initialized in the loops
+		// cgAddr res_addr = cg_add_local(p, type, nullptr, false);
+		// cgValue res = cg_addr_get_ptr(p, res_addr);
+
+		// bool inline_array_arith = cg_can_try_to_inline_array_arith(type);
+
+		// i32 count = cast(i32)get_array_type_count(tl);
+
+		// LLVMTypeRef vector_type = nullptr;
+		// if (op != Token_Not && cg_try_vector_cast(p->module, val, &vector_type)) {
+		// 	LLVMValueRef vp = LLVMBuildPointerCast(p->builder, val.value, LLVMPointerType(vector_type, 0), "");
+		// 	LLVMValueRef v = LLVMBuildLoad2(p->builder, vector_type, vp, "");
+
+		// 	LLVMValueRef opv = nullptr;
+		// 	switch (op) {
+		// 	case Token_Xor:
+		// 		opv = LLVMBuildNot(p->builder, v, "");
+		// 		break;
+		// 	case Token_Sub:
+		// 		if (is_type_float(elem_type)) {
+		// 			opv = LLVMBuildFNeg(p->builder, v, "");
+		// 		} else {
+		// 			opv = LLVMBuildNeg(p->builder, v, "");
+		// 		}
+		// 		break;
+		// 	}
+
+		// 	if (opv != nullptr) {
+		// 		LLVMSetAlignment(res.value, cast(unsigned)cg_alignof(vector_type));
+		// 		LLVMValueRef res_ptr = LLVMBuildPointerCast(p->builder, res.value, LLVMPointerType(vector_type, 0), "");
+		// 		LLVMBuildStore(p->builder, opv, res_ptr);
+		// 		return cg_emit_conv(p, cg_emit_load(p, res), type);
+		// 	}
+		// }
+
+		// if (inline_array_arith) {
+		// 	// inline
+		// 	for (i32 i = 0; i < count; i++) {
+		// 		cgValue e = cg_emit_load(p, cg_emit_array_epi(p, val, i));
+		// 		cgValue z = cg_emit_unary_arith(p, op, e, elem_type);
+		// 		cg_emit_store(p, cg_emit_array_epi(p, res, i), z);
+		// 	}
+		// } else {
+		// 	auto loop_data = cg_loop_start(p, count, t_i32);
+
+		// 	cgValue e = cg_emit_load(p, cg_emit_array_ep(p, val, loop_data.idx));
+		// 	cgValue z = cg_emit_unary_arith(p, op, e, elem_type);
+		// 	cg_emit_store(p, cg_emit_array_ep(p, res, loop_data.idx), z);
+
+		// 	cg_loop_end(p, loop_data);
+		// }
+		// return cg_emit_load(p, res);
+	}
+
+	if (op == Token_Xor) {
+		GB_ASSERT(x.kind == cgValue_Value);
+		cgValue cmp = cg_value(tb_inst_not(p->func, x.node), x.type);
+		return cg_emit_conv(p, cmp, type);
+	}
+
+	if (op == Token_Not) {
+		// `!x` is lowered as `x != zero-value-of-x`.
+		TB_Node *zero = cg_const_nil(p, x.type).node;
+		cgValue cmp = cg_value(tb_inst_cmp_ne(p->func, x.node, zero), x.type);
+		return cg_emit_conv(p, cmp, type);
+	}
+
+	// Endian-swapped scalar types: negate in platform byte order, then swap
+	// back. (Previously two byte-identical branches for integer and float.)
+	if (op == Token_Sub &&
+	    (is_type_integer(type) || is_type_float(type)) &&
+	    is_type_different_to_arch_endianness(type)) {
+		Type *platform_type = integer_endian_type_to_platform_type(type);
+		cgValue v = cg_emit_byte_swap(p, x, platform_type);
+
+		cgValue res = cg_value(tb_inst_neg(p->func, v.node), platform_type);
+		return cg_emit_byte_swap(p, res, type);
+	}
+
+	cgValue res = {};
+
+	if (op == Token_Sub) { // Number negation
+		if (is_type_integer(x.type) || is_type_float(x.type)) {
+			// tb_inst_neg is used for both integers and floats here
+			// (previously duplicated across two identical branches).
+			res = cg_value(tb_inst_neg(p->func, x.node), x.type);
+		} else if (is_type_complex(x.type)) {
+			GB_PANIC("TODO(bill): neg complex");
+			// LLVMValueRef v0 = LLVMBuildFNeg(p->builder, LLVMBuildExtractValue(p->builder, x.value, 0, ""), "");
+			// LLVMValueRef v1 = LLVMBuildFNeg(p->builder, LLVMBuildExtractValue(p->builder, x.value, 1, ""), "");
+
+			// cgAddr addr = cg_add_local_generated(p, x.type, false);
+			// LLVMTypeRef type = llvm_addr_type(p->module, addr.addr);
+			// LLVMBuildStore(p->builder, v0, LLVMBuildStructGEP2(p->builder, type, addr.addr.value, 0, ""));
+			// LLVMBuildStore(p->builder, v1, LLVMBuildStructGEP2(p->builder, type, addr.addr.value, 1, ""));
+			// return cg_addr_load(p, addr);
+
+		} else if (is_type_quaternion(x.type)) {
+			GB_PANIC("TODO(bill): neg quaternion");
+			// LLVMValueRef v0 = LLVMBuildFNeg(p->builder, LLVMBuildExtractValue(p->builder, x.value, 0, ""), "");
+			// LLVMValueRef v1 = LLVMBuildFNeg(p->builder, LLVMBuildExtractValue(p->builder, x.value, 1, ""), "");
+			// LLVMValueRef v2 = LLVMBuildFNeg(p->builder, LLVMBuildExtractValue(p->builder, x.value, 2, ""), "");
+			// LLVMValueRef v3 = LLVMBuildFNeg(p->builder, LLVMBuildExtractValue(p->builder, x.value, 3, ""), "");
+
+			// cgAddr addr = cg_add_local_generated(p, x.type, false);
+			// LLVMTypeRef type = llvm_addr_type(p->module, addr.addr);
+			// LLVMBuildStore(p->builder, v0, LLVMBuildStructGEP2(p->builder, type, addr.addr.value, 0, ""));
+			// LLVMBuildStore(p->builder, v1, LLVMBuildStructGEP2(p->builder, type, addr.addr.value, 1, ""));
+			// LLVMBuildStore(p->builder, v2, LLVMBuildStructGEP2(p->builder, type, addr.addr.value, 2, ""));
+			// LLVMBuildStore(p->builder, v3, LLVMBuildStructGEP2(p->builder, type, addr.addr.value, 3, ""));
+			// return cg_addr_load(p, addr);
+		} else if (is_type_simd_vector(x.type)) {
+			GB_PANIC("TODO(bill): neg simd");
+			// Type *elem = base_array_type(x.type);
+			// if (is_type_float(elem)) {
+			// 	res.value = LLVMBuildFNeg(p->builder, x.value, "");
+			// } else {
+			// 	res.value = LLVMBuildNeg(p->builder, x.value, "");
+			// }
+		} else if (is_type_matrix(x.type)) {
+			GB_PANIC("TODO(bill): neg matrix");
+			// cgValue zero = {};
+			// zero.value = LLVMConstNull(cg_type(p->module, type));
+			// zero.type = type;
+			// return cg_emit_arith_matrix(p, Token_Sub, zero, x, type, true);
+		} else {
+			GB_PANIC("Unhandled type %s", type_to_string(x.type));
+		}
+		res.type = x.type;
+		return res;
+	}
+
+	return res;
+}
+
+// Emits a conditional branch on `cond` to `true_region` / `false_region`.
+// `cond` must be a plain value (not an address, symbol, or multi-value).
+gb_internal void cg_emit_if(cgProcedure *p, cgValue const &cond, TB_Node *true_region, TB_Node *false_region) {
+	GB_ASSERT(cond.kind == cgValue_Value);
+	tb_inst_if(p->func, cond.node, true_region, false_region);
+}
+
+
+// Bookkeeping for a counted loop built by cg_loop_start/cg_loop_end.
+struct cgLoopData {
+	cgAddr index_addr; // stack slot holding the loop counter
+	cgValue index;     // counter value loaded at the top of each iteration
+	TB_Node *body;     // region executed while index < count
+	TB_Node *done;     // region reached when the loop exits
+	TB_Node *loop;     // loop header region (condition check)
+};
+
+// Opens a `for i in 0..<count` style loop and leaves the insertion point at
+// the start of the loop body. Must be paired with cg_loop_end.
+gb_internal cgLoopData cg_loop_start(cgProcedure *p, isize count, Type *index_type) {
+	cgLoopData data = {};
+
+	cgValue max = cg_const_int(p, index_type, count);
+
+	// zero-initialized counter (the `true` requests zeroing of the local)
+	data.index_addr = cg_add_local(p, index_type, nullptr, true);
+
+	data.body = cg_control_region(p, "loop_body");
+	data.done = cg_control_region(p, "loop_done");
+	data.loop = cg_control_region(p, "loop_loop");
+
+	cg_emit_goto(p, data.loop);
+	tb_inst_set_control(p->func, data.loop);
+
+	data.index = cg_addr_load(p, data.index_addr);
+
+	// loop while index < max, otherwise fall through to `done`
+	cgValue cond = cg_emit_comp(p, Token_Lt, data.index, max);
+	cg_emit_if(p, cond, data.body, data.done);
+	tb_inst_set_control(p->func, data.body);
+
+	return data;
+}
+
+// Closes a loop opened by cg_loop_start: increments the counter, jumps back to
+// the header, and leaves the insertion point in the `done` region.
+gb_internal void cg_loop_end(cgProcedure *p, cgLoopData const &data) {
+	if (data.index_addr.addr.node != nullptr) {
+		cg_emit_increment(p, data.index_addr.addr);
+		cg_emit_goto(p, data.loop);
+		tb_inst_set_control(p->func, data.done);
+	}
+}
+
+
+
+// Splits the value of `arg` for `or_return`/`or_else` handling:
+// `rhs` receives the trailing "ok/error" value, `lhs` everything before it
+// (a single value, or a multi-value wrapper when more than one remains).
+gb_internal void cg_build_try_lhs_rhs(cgProcedure *p, Ast *arg, Type *final_type, cgValue *lhs_, cgValue *rhs_) {
+	cgValue lhs = {};
+	cgValue rhs = {};
+
+	cgValue value = cg_build_expr(p, arg);
+	if (value.kind != cgValue_Multi) {
+		// single value: it IS the ok/error value
+		rhs = value;
+	} else {
+		auto const &values = value.multi->values;
+		isize n = values.count;
+		rhs = values[n-1];
+		if (n == 2) {
+			lhs = values[0];
+		} else if (n > 1) {
+			lhs = cg_value_multi(slice(values, 0, n-1), final_type);
+		}
+	}
+
+	GB_ASSERT(rhs.node != nullptr);
+
+	if (lhs_) *lhs_ = lhs;
+	if (rhs_) *rhs_ = rhs;
+}
+
+// Produces the "has a value" condition for `or_return`/`or_else`:
+// a boolean rhs is used directly, otherwise rhs must be nil-comparable
+// and the condition is `rhs == nil` (i.e. no error present).
+gb_internal cgValue cg_emit_try_has_value(cgProcedure *p, cgValue rhs) {
+	if (is_type_boolean(rhs.type)) {
+		GB_ASSERT(rhs.node != nullptr);
+		return rhs;
+	}
+	GB_ASSERT_MSG(type_has_nil(rhs.type), "%s", type_to_string(rhs.type));
+	cgValue has_value = cg_emit_comp_against_nil(p, Token_CmpEq, rhs);
+	GB_ASSERT(has_value.node != nullptr);
+	return has_value;
+}
+
+// Implements `expr or_return`: evaluates `arg`; if the trailing value signals
+// failure (boolean false / non-nil error) the value is converted to the
+// procedure's final result and an early return is emitted, otherwise control
+// continues and the remaining value(s) are produced as `final_type`.
+gb_internal cgValue cg_build_or_return(cgProcedure *p, Ast *arg, Type *final_type) {
+	cgValue lhs = {};
+	cgValue rhs = {};
+	cg_build_try_lhs_rhs(p, arg, final_type, &lhs, &rhs);
+
+	TB_Node *return_region   = cg_control_region(p, "or_return_return");
+	TB_Node *continue_region = cg_control_region(p, "or_return_continue");
+
+	cgValue cond = cg_emit_try_has_value(p, rhs);
+	cg_emit_if(p, cond, continue_region, return_region);
+	tb_inst_set_control(p->func, return_region);
+	{
+		// Early-return path: route the error value into the last result slot.
+		Type *proc_type = base_type(p->type);
+		Type *results = proc_type->Proc.results;
+		GB_ASSERT(results != nullptr && results->kind == Type_Tuple);
+		TypeTuple *tuple = &results->Tuple;
+
+		GB_ASSERT(tuple->variables.count != 0);
+
+		Entity *end_entity = tuple->variables[tuple->variables.count-1];
+		rhs = cg_emit_conv(p, rhs, end_entity->type);
+		if (p->type->Proc.has_named_results) {
+			GB_ASSERT(end_entity->token.string.len != 0);
+
+			// NOTE(bill): store the named values before returning
+			cgAddr found = map_must_get(&p->variable_map, end_entity);
+			cg_addr_store(p, found, rhs);
+
+			cg_build_return_stmt(p, {});
+		} else {
+			GB_ASSERT(tuple->variables.count == 1);
+			// NOTE: renamed from `results` to avoid shadowing the tuple type
+			// above, and a stray `;;` removed.
+			Slice<cgValue> return_values = {};
+			return_values.data  = &rhs;
+			return_values.count = 1;
+			cg_build_return_stmt_internal(p, return_values);
+		}
+	}
+	tb_inst_set_control(p->func, continue_region);
+	if (final_type != nullptr && !is_type_tuple(final_type)) {
+		return cg_emit_conv(p, lhs, final_type);
+	}
+	return {};
+}
+
+// Implements `arg or_else else_expr`: yields `lhs` when the trailing value of
+// `arg` signals success, otherwise the value of `else_expr`. The merge is done
+// with a phi node unless `else_expr` diverges (panics/returns), in which case
+// no merge region is needed.
+gb_internal cgValue cg_build_or_else(cgProcedure *p, Ast *arg, Ast *else_expr, Type *final_type) {
+	// `#directive or_else x` where the directive evaluated false at compile time
+	if (arg->state_flags & StateFlag_DirectiveWasFalse) {
+		return cg_build_expr(p, else_expr);
+	}
+
+	cgValue lhs = {};
+	cgValue rhs = {};
+	cg_build_try_lhs_rhs(p, arg, final_type, &lhs, &rhs);
+
+	GB_ASSERT(else_expr != nullptr);
+
+	if (is_diverging_expr(else_expr)) {
+		TB_Node *then = cg_control_region(p, "or_else_then");
+		TB_Node *else_ = cg_control_region(p, "or_else_else");
+
+		cg_emit_if(p, cg_emit_try_has_value(p, rhs), then, else_);
+		// NOTE(bill): else block needs to be straight afterwards to make sure that the actual value is used
+		// from the then block
+		tb_inst_set_control(p->func, else_);
+
+		cg_build_expr(p, else_expr);
+
+		tb_inst_set_control(p->func, then);
+		return cg_emit_conv(p, lhs, final_type);
+	} else {
+		TB_Node *incoming_values[2] = {};
+		TB_Node *incoming_regions[2] = {};
+
+		TB_Node *then = cg_control_region(p, "or_else_then");
+		TB_Node *done = cg_control_region(p, "or_else_done"); // NOTE(bill): Append later
+		TB_Node *else_ = cg_control_region(p, "or_else_else");
+
+		cg_emit_if(p, cg_emit_try_has_value(p, rhs), then, else_);
+		tb_inst_set_control(p->func, then);
+
+		// success path: use lhs converted to the final type
+		cgValue x = cg_emit_conv(p, lhs, final_type);
+		incoming_values[0] = x.node;
+		incoming_regions[0] = tb_inst_get_control(p->func);
+
+		tb_inst_goto(p->func, done);
+		tb_inst_set_control(p->func, else_);
+
+		// failure path: use the else expression's value
+		cgValue y = cg_emit_conv(p, cg_build_expr(p, else_expr), final_type);
+		incoming_values[1] = y.node;
+		incoming_regions[1] = tb_inst_get_control(p->func);
+
+		tb_inst_goto(p->func, done);
+		tb_inst_set_control(p->func, done);
+
+		// both edges must agree in kind and data type before building the phi
+		GB_ASSERT(x.kind == y.kind);
+		GB_ASSERT(incoming_values[0]->dt.raw == incoming_values[1]->dt.raw);
+		cgValue res = {};
+		res.kind = x.kind;
+		res.type = final_type;
+
+		res.node = tb_inst_incomplete_phi(p->func, incoming_values[0]->dt, done, 2);
+		tb_inst_add_phi_operand(p->func, res.node, incoming_regions[0], incoming_values[0]);
+		tb_inst_add_phi_operand(p->func, res.node, incoming_regions[1], incoming_values[1]);
+		return res;
+	}
+}
+
+
+// Number of predecessor edges feeding a TB region node.
+gb_internal isize cg_control_region_pred_count(TB_Node *region) {
+	GB_ASSERT(region->type == TB_REGION);
+	isize pred_count = region->input_count;
+	GB_ASSERT(pred_count > 0);
+	return pred_count;
+}
+
+// Builds short-circuiting `&&` / `||`. The left operand branches either to the
+// right-operand region or straight to `done` with the short-circuit constant
+// (`false` for &&, `true` for ||); the results are merged with a phi.
+gb_internal cgValue cg_build_logical_binary_expr(cgProcedure *p, TokenKind op, Ast *left, Ast *right, Type *final_type) {
+	TB_Node *rhs = cg_control_region(p, "logical_cmp_rhs");
+	TB_Node *done = cg_control_region(p, "logical_cmp_done");
+
+	cgValue short_circuit = {};
+	if (op == Token_CmpAnd) {
+		cg_build_cond(p, left, rhs, done);
+		short_circuit = cg_const_bool(p, t_bool, false);
+	} else if (op == Token_CmpOr) {
+		cg_build_cond(p, left, done, rhs);
+		short_circuit = cg_const_bool(p, t_bool, true);
+	}
+
+	// left was a compile-time constant that always short-circuits
+	if (rhs->input_count == 0) {
+		tb_inst_set_control(p->func, done);
+		return cg_emit_conv(p, short_circuit, final_type);
+	}
+
+	// left never short-circuits: result is just the right operand
+	if (done->input_count == 0) {
+		tb_inst_set_control(p->func, rhs);
+		return cg_build_expr(p, right);
+	}
+
+	tb_inst_set_control(p->func, rhs);
+	cgValue edge = cg_build_expr(p, right);
+	TB_Node *edge_region = tb_inst_get_control(p->func);
+
+	tb_inst_goto(p->func, done);
+	tb_inst_set_control(p->func, done);
+
+	// phi: the edge coming from the rhs evaluation carries its value,
+	// every other predecessor carries the short-circuit constant
+	TB_DataType dt = edge.node->dt;
+	TB_Node *phi = tb_inst_incomplete_phi(p->func, dt, done, done->input_count);
+	for (size_t i = 0; i < done->input_count; i++) {
+		TB_Node *val = short_circuit.node;
+		TB_Node *region = done->inputs[i];
+		if (region == edge_region) {
+			val = edge.node;
+		}
+		tb_inst_add_phi_operand(p->func, phi, region, val);
+	}
+	return cg_emit_conv(p, cg_value(phi, t_bool), final_type);
+}
+
+
+
+// Builds an arbitrary binary expression, dispatching on the operator:
+// arithmetic/bitwise ops to cg_emit_arith, shifts (with untyped-constant RHS
+// handling), comparisons (including against nil and typeids), short-circuit
+// logic, and `in`/`not_in` for bit_sets (maps not ported yet).
+gb_internal cgValue cg_build_binary_expr(cgProcedure *p, Ast *expr) {
+	ast_node(be, BinaryExpr, expr);
+
+	TypeAndValue tv = type_and_value_of_expr(expr);
+
+	// matrix arithmetic is not ported to this backend yet
+	if (is_type_matrix(be->left->tav.type) || is_type_matrix(be->right->tav.type)) {
+		cgValue left = cg_build_expr(p, be->left);
+		cgValue right = cg_build_expr(p, be->right);
+		GB_PANIC("TODO(bill): cg_emit_arith_matrix");
+		// return cg_emit_arith_matrix(p, be->op.kind, left, right, default_type(tv.type), false);
+	}
+
+
+	switch (be->op.kind) {
+	case Token_Add:
+	case Token_Sub:
+	case Token_Mul:
+	case Token_Quo:
+	case Token_Mod:
+	case Token_ModMod:
+	case Token_And:
+	case Token_Or:
+	case Token_Xor:
+	case Token_AndNot: {
+		Type *type = default_type(tv.type);
+		cgValue left = cg_build_expr(p, be->left);
+		cgValue right = cg_build_expr(p, be->right);
+		return cg_emit_arith(p, be->op.kind, left, right, type);
+	}
+
+	case Token_Shl:
+	case Token_Shr: {
+		cgValue left, right;
+		Type *type = default_type(tv.type);
+		left = cg_build_expr(p, be->left);
+
+		if (cg_is_expr_untyped_const(be->right)) {
+			// NOTE(bill): RHS shift operands can still be untyped
+			// Just bypass the standard cg_build_expr
+			right = cg_expr_untyped_const_to_typed(p, be->right, type);
+		} else {
+			right = cg_build_expr(p, be->right);
+		}
+		return cg_emit_arith(p, be->op.kind, left, right, type);
+	}
+
+	case Token_CmpEq:
+	case Token_NotEq:
+		if (is_type_untyped_nil(be->right->tav.type)) {
+			// `x == nil` or `x != nil`
+			cgValue left = cg_build_expr(p, be->left);
+			cgValue cmp = cg_emit_comp_against_nil(p, be->op.kind, left);
+			Type *type = default_type(tv.type);
+			return cg_emit_conv(p, cmp, type);
+		} else if (is_type_untyped_nil(be->left->tav.type)) {
+			// `nil == x` or `nil != x`
+			cgValue right = cg_build_expr(p, be->right);
+			cgValue cmp = cg_emit_comp_against_nil(p, be->op.kind, right);
+			Type *type = default_type(tv.type);
+			return cg_emit_conv(p, cmp, type);
+		}/* else if (cg_is_empty_string_constant(be->right)) {
+			// `x == ""` or `x != ""`
+			cgValue s = cg_build_expr(p, be->left);
+			s = cg_emit_conv(p, s, t_string);
+			cgValue len = cg_string_len(p, s);
+			cgValue cmp = cg_emit_comp(p, be->op.kind, len, cg_const_int(p->module, t_int, 0));
+			Type *type = default_type(tv.type);
+			return cg_emit_conv(p, cmp, type);
+		} else if (cg_is_empty_string_constant(be->left)) {
+			// `"" == x` or `"" != x`
+			cgValue s = cg_build_expr(p, be->right);
+			s = cg_emit_conv(p, s, t_string);
+			cgValue len = cg_string_len(p, s);
+			cgValue cmp = cg_emit_comp(p, be->op.kind, len, cg_const_int(p->module, t_int, 0));
+			Type *type = default_type(tv.type);
+			return cg_emit_conv(p, cmp, type);
+		}*/
+		/*fallthrough*/
+	case Token_Lt:
+	case Token_LtEq:
+	case Token_Gt:
+	case Token_GtEq:
+		{
+			cgValue left = {};
+			cgValue right = {};
+
+			// comparing types compares their typeids
+			if (be->left->tav.mode == Addressing_Type) {
+				left = cg_typeid(p, be->left->tav.type);
+			}
+			if (be->right->tav.mode == Addressing_Type) {
+				right = cg_typeid(p, be->right->tav.type);
+			}
+			if (left.node == nullptr)  left  = cg_build_expr(p, be->left);
+			if (right.node == nullptr) right = cg_build_expr(p, be->right);
+			cgValue cmp = cg_emit_comp(p, be->op.kind, left, right);
+			Type *type = default_type(tv.type);
+			return cg_emit_conv(p, cmp, type);
+		}
+
+	case Token_CmpAnd:
+	case Token_CmpOr:
+		return cg_build_logical_binary_expr(p, be->op.kind, be->left, be->right, tv.type);
+
+	case Token_in:
+	case Token_not_in:
+		{
+			cgValue left = cg_build_expr(p, be->left);
+			cgValue right = cg_build_expr(p, be->right);
+			Type *rt = base_type(right.type);
+			if (is_type_pointer(rt)) {
+				right = cg_emit_load(p, right);
+				rt = base_type(type_deref(rt));
+			}
+
+			switch (rt->kind) {
+			case Type_Map:
+				{
+					GB_PANIC("TODO(bill): in/not_in for maps");
+					// cgValue map_ptr = cg_address_from_load_or_generate_local(p, right);
+					// cgValue key = left;
+					// cgValue ptr = cg_internal_dynamic_map_get_ptr(p, map_ptr, key);
+					// if (be->op.kind == Token_in) {
+					// 	return cg_emit_conv(p, cg_emit_comp_against_nil(p, Token_NotEq, ptr), t_bool);
+					// } else {
+					// 	return cg_emit_conv(p, cg_emit_comp_against_nil(p, Token_CmpEq, ptr), t_bool);
+					// }
+				}
+				break;
+			case Type_BitSet:
+				{
+					// `elem in set`: test the bit `1 << (elem - lower)` against
+					// the set's integer representation.
+					Type *key_type = rt->BitSet.elem;
+					GB_ASSERT(are_types_identical(left.type, key_type));
+
+					Type *it = bit_set_to_int(rt);
+					left = cg_emit_conv(p, left, it);
+					if (is_type_different_to_arch_endianness(it)) {
+						left = cg_emit_byte_swap(p, left, integer_endian_type_to_platform_type(it));
+					}
+
+					cgValue lower = cg_const_value(p, left.type, exact_value_i64(rt->BitSet.lower));
+					cgValue key = cg_emit_arith(p, Token_Sub, left, lower, left.type);
+					cgValue bit = cg_emit_arith(p, Token_Shl, cg_const_int(p, left.type, 1), key, left.type);
+					bit = cg_emit_conv(p, bit, it);
+
+					cgValue old_value = cg_emit_transmute(p, right, it);
+					cgValue new_value = cg_emit_arith(p, Token_And, old_value, bit, it);
+
+					GB_PANIC("TODO(bill): cg_emit_comp");
+					// TokenKind op = (be->op.kind == Token_in) ? Token_NotEq : Token_CmpEq;
+					// return cg_emit_conv(p, cg_emit_comp(p, op, new_value, cg_const_int(p, new_value.type, 0)), t_bool);
+				}
+				break;
+			default:
+				GB_PANIC("Invalid 'in' type");
+			}
+			break;
+		}
+		break;
+	default:
+		GB_PANIC("Invalid binary expression");
+		break;
+	}
+	return {};
+}
+
+// Builds `cond` as a branch to `true_block`/`false_block`, recursing through
+// `!`, `&&` and `||` so that short-circuiting produces straight-line branches
+// rather than materialized booleans. Returns the evaluated condition value
+// (or an empty value when short-circuit recursion handled the branching).
+gb_internal cgValue cg_build_cond(cgProcedure *p, Ast *cond, TB_Node *true_block, TB_Node *false_block) {
+	cond = unparen_expr(cond);
+
+	GB_ASSERT(cond != nullptr);
+	GB_ASSERT(true_block != nullptr);
+	GB_ASSERT(false_block != nullptr);
+
+	// Use to signal not to do compile time short circuit for consts
+	cgValue no_comptime_short_circuit = {};
+
+	switch (cond->kind) {
+	case_ast_node(ue, UnaryExpr, cond);
+		if (ue->op.kind == Token_Not) {
+			// `!x`: swap the target blocks
+			cgValue cond_val = cg_build_cond(p, ue->expr, false_block, true_block);
+			return cond_val;
+			// if (cond_val.value && LLVMIsConstant(cond_val.value)) {
+			// 	return cg_const_bool(p->module, cond_val.type, LLVMConstIntGetZExtValue(cond_val.value) == 0);
+			// }
+			// return no_comptime_short_circuit;
+		}
+	case_end;
+
+	case_ast_node(be, BinaryExpr, cond);
+		if (be->op.kind == Token_CmpAnd) {
+			TB_Node *block = cg_control_region(p, "cmp_and");
+			cg_build_cond(p, be->left, block, false_block);
+			tb_inst_set_control(p->func, block);
+			cg_build_cond(p, be->right, true_block, false_block);
+			return no_comptime_short_circuit;
+		} else if (be->op.kind == Token_CmpOr) {
+			TB_Node *block = cg_control_region(p, "cmp_or");
+			cg_build_cond(p, be->left, true_block, block);
+			tb_inst_set_control(p->func, block);
+			cg_build_cond(p, be->right, true_block, false_block);
+			return no_comptime_short_circuit;
+		}
+	case_end;
+	}
+
+	cgValue v = {};
+	if (cg_is_expr_untyped_const(cond)) {
+		v = cg_expr_untyped_const_to_typed(p, cond, t_bool);
+	} else {
+		v = cg_build_expr(p, cond);
+	}
+	cg_emit_if(p, v, true_block, false_block);
+	return v;
+}
+
+gb_internal cgValue cg_build_expr_internal(cgProcedure *p, Ast *expr);
+
+// Entry point for building an expression: maintains the per-expression state
+// flags (bounds/type-assert checking) around the recursive call, sets debug
+// position, and materializes symbol results into addresses.
+gb_internal cgValue cg_build_expr(cgProcedure *p, Ast *expr) {
+	cg_set_debug_pos_from_node(p, expr);
+
+	// Save/restore the procedure's state flags; the expression's own flags
+	// override them for the duration of this build.
+	u16 prev_state_flags = p->state_flags;
+	defer (p->state_flags = prev_state_flags);
+
+	if (expr->state_flags != 0) {
+		u16 in = expr->state_flags;
+		u16 out = p->state_flags;
+
+		if (in & StateFlag_bounds_check) {
+			out |= StateFlag_bounds_check;
+			out &= ~StateFlag_no_bounds_check;
+		} else if (in & StateFlag_no_bounds_check) {
+			out |= StateFlag_no_bounds_check;
+			out &= ~StateFlag_bounds_check;
+		}
+
+		if (in & StateFlag_type_assert) {
+			out |= StateFlag_type_assert;
+			out &= ~StateFlag_no_type_assert;
+		} else if (in & StateFlag_no_type_assert) {
+			out |= StateFlag_no_type_assert;
+			out &= ~StateFlag_type_assert;
+		}
+
+		p->state_flags = out;
+	}
+
+
+	// IMPORTANT NOTE(bill):
+	// Selector Call Expressions (foo->bar(...))
+	// must only evaluate `foo` once as it gets transformed into
+	// `foo.bar(foo, ...)`
+	// And if `foo` is a procedure call or something more complex, storing the value
+	// once is a very good idea
+	// If a stored value is found, it must be removed from the cache
+	if (expr->state_flags & StateFlag_SelectorCallExpr) {
+		// cgValue *pp = map_get(&p->selector_values, expr);
+		// if (pp != nullptr) {
+		// 	cgValue res = *pp;
+		// 	map_remove(&p->selector_values, expr);
+		// 	return res;
+		// }
+		// cgAddr *pa = map_get(&p->selector_addr, expr);
+		// if (pa != nullptr) {
+		// 	cgAddr res = *pa;
+		// 	map_remove(&p->selector_addr, expr);
+		// 	return cg_addr_load(p, res);
+		// }
+	}
+
+	cgValue res = cg_build_expr_internal(p, expr);
+	// Symbols are turned into their addresses so callers always see a value.
+	if (res.kind == cgValue_Symbol) {
+		GB_ASSERT(is_type_internally_pointer_like(res.type));
+		res = cg_value(tb_inst_get_symbol_address(p->func, res.symbol), res.type);
+	}
+
+	if (expr->state_flags & StateFlag_SelectorCallExpr) {
+		// map_set(&p->selector_values, expr, res);
+	}
+	return res;
+}
+
+
+// Resolves an identifier entity to a loaded value, trying in order:
+// procedure-local variables, module-level values (under a shared lock),
+// addressable variables via cg_build_addr, then procedure entities.
+// Panics with diagnostics if nothing matches.
+gb_internal cgValue cg_find_ident(cgProcedure *p, Entity *e, Ast *expr) {
+	cgAddr *found_addr = map_get(&p->variable_map, e);
+	if (found_addr) {
+		return cg_addr_load(p, *found_addr);
+	}
+
+	cgValue *found = nullptr;
+	rw_mutex_shared_lock(&p->module->values_mutex);
+	found = map_get(&p->module->values, e);
+	rw_mutex_shared_unlock(&p->module->values_mutex);
+
+	if (found) {
+
+		auto v = *found;
+		// NOTE(bill): This is because pointers are already pointers in LLVM
+		if (is_type_proc(v.type)) {
+			return v;
+		}
+		return cg_emit_load(p, v);
+	} else if (e != nullptr && e->kind == Entity_Variable) {
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	}
+
+	if (e->kind == Entity_Procedure) {
+		return cg_find_procedure_value_from_entity(p->module, e);
+	}
+
+	String pkg = {};
+	if (e->pkg) {
+		pkg = e->pkg->name;
+	}
+	gb_printf_err("Error in: %s\n", token_pos_to_string(ast_token(expr).pos));
+	GB_PANIC("nullptr value for expression from identifier: %.*s.%.*s (%p) : %s @ %p", LIT(pkg), LIT(e->token.string), e, type_to_string(e->type), expr);
+	return {};
+}
+
+// Builds a compound literal (e.g. `[4]int{1, 2, 3, 4}`, `Foo{x = 1}`) into
+// freshly allocated storage and returns the address of that storage.
+// Storage is a private global when emitted from the startup procedure,
+// otherwise a zero-initialized local; an empty literal returns the zeroed
+// storage untouched.
+// NOTE(review): unlike the neighbouring cg_* procedures this one is not
+// marked `gb_internal` — presumably an oversight; confirm.
+cgAddr cg_build_addr_compound_lit(cgProcedure *p, Ast *expr) {
+	// Scratch entry used to split element handling into phases:
+	// evaluate values first, compute destination pointers (gep), then store.
+	// elem_length > 0 marks a range entry covering
+	// [elem_index, elem_index+elem_length).
+	struct cgCompoundLitElemTempData {
+		Ast * expr;
+		cgValue value;
+		i64 elem_index;
+		i64 elem_length;
+		cgValue gep;
+	};
+
+
+	// Evaluates each literal element into *temp_data together with its
+	// destination element index. Handles positional elements,
+	// `index = value` field values, and constant `lo..=hi` / `lo..<hi`
+	// range keys.
+	auto const &populate = [](cgProcedure *p, Slice<Ast *> const &elems, Array<cgCompoundLitElemTempData> *temp_data, Type *compound_type) {
+		Type *bt = base_type(compound_type);
+		Type *et = nullptr;
+		switch (bt->kind) {
+		case Type_Array: et = bt->Array.elem; break;
+		case Type_EnumeratedArray: et = bt->EnumeratedArray.elem; break;
+		case Type_Slice: et = bt->Slice.elem; break;
+		case Type_BitSet: et = bt->BitSet.elem; break;
+		case Type_DynamicArray: et = bt->DynamicArray.elem; break;
+		case Type_SimdVector: et = bt->SimdVector.elem; break;
+		case Type_Matrix: et = bt->Matrix.elem; break;
+		}
+		GB_ASSERT(et != nullptr);
+
+
+		// NOTE(bill): Separate value, gep, store into their own chunks
+		for_array(i, elems) {
+			Ast *elem = elems[i];
+			if (elem->kind == Ast_FieldValue) {
+				ast_node(fv, FieldValue, elem);
+				if (is_ast_range(fv->field)) {
+					// Range key: both bounds are checker-guaranteed constants.
+					ast_node(ie, BinaryExpr, fv->field);
+					TypeAndValue lo_tav = ie->left->tav;
+					TypeAndValue hi_tav = ie->right->tav;
+					GB_ASSERT(lo_tav.mode == Addressing_Constant);
+					GB_ASSERT(hi_tav.mode == Addressing_Constant);
+
+					TokenKind op = ie->op.kind;
+					i64 lo = exact_value_to_i64(lo_tav.value);
+					i64 hi = exact_value_to_i64(hi_tav.value);
+					// Only `..<` (Token_RangeHalf) excludes the upper bound;
+					// `..=` is made half-open here by bumping hi.
+					if (op != Token_RangeHalf) {
+						hi += 1;
+					}
+
+					cgValue value = cg_emit_conv(p, cg_build_expr(p, fv->value), et);
+
+					GB_ASSERT((hi-lo) > 0);
+
+					if (bt->kind == Type_Matrix) {
+						GB_PANIC("TODO(bill): Type_Matrix");
+						// for (i64 k = lo; k < hi; k++) {
+						// 	cgCompoundLitElemTempData data = {};
+						// 	data.value = value;
+
+						// 	data.elem_index = matrix_row_major_index_to_offset(bt, k);
+						// 	array_add(temp_data, data);
+						// }
+					} else {
+						// Small ranges are unrolled into one entry per index;
+						// larger ones become a single range entry to be stored
+						// with an emitted loop later (see assign_array).
+						enum {MAX_ELEMENT_AMOUNT = 32};
+						if ((hi-lo) <= MAX_ELEMENT_AMOUNT) {
+							for (i64 k = lo; k < hi; k++) {
+								cgCompoundLitElemTempData data = {};
+								data.value = value;
+								data.elem_index = k;
+								array_add(temp_data, data);
+							}
+						} else {
+							cgCompoundLitElemTempData data = {};
+							data.value = value;
+							data.elem_index = lo;
+							data.elem_length = hi-lo;
+							array_add(temp_data, data);
+						}
+					}
+				} else {
+					// Single constant index key: `index = value`.
+					auto tav = fv->field->tav;
+					GB_ASSERT(tav.mode == Addressing_Constant);
+					i64 index = exact_value_to_i64(tav.value);
+
+					cgValue value = cg_emit_conv(p, cg_build_expr(p, fv->value), et);
+					GB_ASSERT(!is_type_tuple(value.type));
+
+					cgCompoundLitElemTempData data = {};
+					data.value = value;
+					data.expr = fv->value;
+					if (bt->kind == Type_Matrix) {
+						GB_PANIC("TODO(bill): Type_Matrix");
+						// data.elem_index = matrix_row_major_index_to_offset(bt, index);
+					} else {
+						data.elem_index = index;
+					}
+					array_add(temp_data, data);
+				}
+
+			} else {
+				// Positional element: destination index is the element's
+				// position in the literal.
+				// if (bt->kind != Type_DynamicArray && lb_is_elem_const(elem, et)) {
+				// 	continue;
+				// }
+
+				cgValue field_expr = cg_build_expr(p, elem);
+				GB_ASSERT(!is_type_tuple(field_expr.type));
+
+				cgValue ev = cg_emit_conv(p, field_expr, et);
+
+				cgCompoundLitElemTempData data = {};
+				data.value = ev;
+				if (bt->kind == Type_Matrix) {
+					GB_PANIC("TODO(bill): Type_Matrix");
+					// data.elem_index = matrix_row_major_index_to_offset(bt, i);
+				} else {
+					data.elem_index = i;
+				}
+				array_add(temp_data, data);
+			}
+		}
+	};
+
+	// Stores each evaluated value through its precomputed destination
+	// pointer (td.gep). Range entries (elem_length > 0) would need an
+	// emitted loop — still TODO for this backend.
+	auto const &assign_array = [](cgProcedure *p, Array<cgCompoundLitElemTempData> const &temp_data) {
+		for (auto const &td : temp_data) if (td.value.node != nullptr) {
+			if (td.elem_length > 0) {
+				GB_PANIC("TODO(bill): range");
+				// auto loop_data = cg_loop_start(p, cast(isize)td.elem_length, t_i32);
+				// {
+				// 	cgValue dst = td.gep;
+				// 	dst = cg_emit_ptr_offset(p, dst, loop_data.idx);
+				// 	cg_emit_store(p, dst, td.value);
+				// }
+				// cg_loop_end(p, loop_data);
+			} else {
+				cg_emit_store(p, td.gep, td.value);
+			}
+		}
+	};
+
+
+
+	ast_node(cl, CompoundLit, expr);
+
+	Type *type = type_of_expr(expr);
+	Type *bt = base_type(type);
+
+	// Startup code persists the literal in a global; regular procedures
+	// use a zero-initialized stack slot.
+	cgAddr v = {};
+	if (p->is_startup) {
+		v = cg_add_global(p, type, nullptr);
+	} else {
+		v = cg_add_local(p, type, nullptr, true);
+	}
+
+	if (cl->elems.count == 0) {
+		// No need to create it
+		return v;
+	}
+
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	// NOTE(review): this `et` appears unused below (each case re-derives
+	// what it needs) and, unlike `populate` above, omits Type_DynamicArray —
+	// likely dead code; confirm before removing.
+	Type *et = nullptr;
+	switch (bt->kind) {
+	case Type_Array: et = bt->Array.elem; break;
+	case Type_EnumeratedArray: et = bt->EnumeratedArray.elem; break;
+	case Type_Slice: et = bt->Slice.elem; break;
+	case Type_BitSet: et = bt->BitSet.elem; break;
+	case Type_SimdVector: et = bt->SimdVector.elem; break;
+	case Type_Matrix: et = bt->Matrix.elem; break;
+	}
+
+	// Procedure name + position are threaded into runtime calls as the
+	// source-code-location argument (dynamic array cases below).
+	String proc_name = {};
+	if (p->entity) {
+		proc_name = p->entity->token.string;
+	}
+	TokenPos pos = ast_token(expr).pos;
+
+
+	switch (bt->kind) {
+	default: GB_PANIC("Unknown CompoundLit type: %s", type_to_string(type)); break;
+
+	case Type_Struct: {
+		// Store each field through a struct element pointer; raw unions
+		// instead bit-cast the base pointer to the field's pointer type.
+		TypeStruct *st = &bt->Struct;
+		cgValue comp_lit_ptr = cg_addr_get_ptr(p, v);
+
+		for_array(field_index, cl->elems) {
+			Ast *elem = cl->elems[field_index];
+
+			cgValue field_expr = {};
+			Entity *field = nullptr;
+			isize index = field_index;
+
+			if (elem->kind == Ast_FieldValue) {
+				ast_node(fv, FieldValue, elem);
+				String name = fv->field->Ident.token.string;
+				Selection sel = lookup_field(bt, name, false);
+				GB_ASSERT(!sel.indirect);
+
+				elem = fv->value;
+				// Nested selection (`using`-style embedded fields):
+				// store through a deep GEP and move on.
+				if (sel.index.count > 1) {
+					cgValue dst = cg_emit_deep_field_gep(p, comp_lit_ptr, sel);
+					field_expr = cg_build_expr(p, elem);
+					field_expr = cg_emit_conv(p, field_expr, sel.entity->type);
+					cg_emit_store(p, dst, field_expr);
+					continue;
+				}
+
+				index = sel.index[0];
+			} else {
+				Selection sel = lookup_field_from_index(bt, st->fields[field_index]->Variable.field_index);
+				GB_ASSERT(sel.index.count == 1);
+				GB_ASSERT(!sel.indirect);
+				index = sel.index[0];
+			}
+
+			field = st->fields[index];
+			Type *ft = field->type;
+
+			field_expr = cg_build_expr(p, elem);
+
+			cgValue gep = {};
+			if (st->is_raw_union) {
+				gep = cg_emit_conv(p, comp_lit_ptr, alloc_type_pointer(ft));
+			} else {
+				gep = cg_emit_struct_ep(p, comp_lit_ptr, cast(i32)index);
+			}
+
+			Type *fet = field_expr.type;
+			GB_ASSERT(fet->kind != Type_Tuple);
+
+			// HACK TODO(bill): THIS IS A MASSIVE HACK!!!!
+			if (is_type_union(ft) && !are_types_identical(fet, ft) && !is_type_untyped(fet)) {
+				GB_ASSERT_MSG(union_variant_index(ft, fet) >= 0, "%s", type_to_string(fet));
+
+				GB_PANIC("TODO(bill): cg_emit_store_union_variant");
+				// cg_emit_store_union_variant(p, gep, field_expr, fet);
+			} else {
+				cgValue fv = cg_emit_conv(p, field_expr, ft);
+				cg_emit_store(p, gep, fv);
+			}
+		}
+		return v;
+	}
+
+	case Type_Map: {
+		// Dynamic map literals need runtime support — not yet ported to
+		// this backend.
+		GB_ASSERT(!build_context.no_dynamic_literals);
+		GB_PANIC("TODO(bill): map literals");
+
+		// cgValue err = cg_dynamic_map_reserve(p, v.addr, 2*cl->elems.count, pos);
+		// gb_unused(err);
+
+		// for (Ast *elem : cl->elems) {
+		// 	ast_node(fv, FieldValue, elem);
+
+		// 	cgValue key   = cg_build_expr(p, fv->field);
+		// 	cgValue value = cg_build_expr(p, fv->value);
+		// 	cg_internal_dynamic_map_set(p, v.addr, type, key, value, elem);
+		// }
+		break;
+	}
+
+	case Type_Array: {
+		auto temp_data = array_make<cgCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
+
+		populate(p, cl->elems, &temp_data, type);
+
+		cgValue dst_ptr = cg_addr_get_ptr(p, v);
+		for_array(i, temp_data) {
+			i32 index = cast(i32)(temp_data[i].elem_index);
+			temp_data[i].gep = cg_emit_array_epi(p, dst_ptr, index);
+		}
+
+		assign_array(p, temp_data);
+		break;
+	}
+	case Type_EnumeratedArray: {
+		auto temp_data = array_make<cgCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
+
+		populate(p, cl->elems, &temp_data, type);
+
+		cgValue dst_ptr = cg_addr_get_ptr(p, v);
+		// Enum values are rebased so the minimum enum value maps to slot 0.
+		i64 index_offset = exact_value_to_i64(*bt->EnumeratedArray.min_value);
+		for_array(i, temp_data) {
+			i32 index = cast(i32)(temp_data[i].elem_index - index_offset);
+			temp_data[i].gep = cg_emit_array_epi(p, dst_ptr, index);
+		}
+
+		assign_array(p, temp_data);
+		break;
+	}
+	case Type_Slice: {
+		// A slice literal needs separate backing storage for its elements;
+		// the slice header in `v` is then pointed at that backing.
+		isize count = gb_max(cl->elems.count, cl->max_count);
+
+		TB_CharUnits backing_size = cast(TB_CharUnits)(type_size_of(bt->Slice.elem) * count);
+		TB_CharUnits align = cast(TB_CharUnits)type_align_of(bt->Slice.elem);
+
+		TB_Node *backing = nullptr;
+		if (p->is_startup) {
+			// Anonymous private global for startup-time literals.
+			TB_Global *global = tb_global_create(p->module->mod, 0, "", nullptr, TB_LINKAGE_PRIVATE);
+			tb_global_set_storage(p->module->mod, tb_module_get_data(p->module->mod), global, backing_size, align, 0);
+			backing = tb_inst_get_symbol_address(p->func, cast(TB_Symbol *)global);
+		} else {
+			backing = tb_inst_local(p->func, backing_size, align);
+		}
+
+		cgValue data = cg_value(backing, alloc_type_multi_pointer(bt->Slice.elem));
+
+		auto temp_data = array_make<cgCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
+		populate(p, cl->elems, &temp_data, type);
+
+
+		for_array(i, temp_data) {
+			temp_data[i].gep = cg_emit_ptr_offset(p, data, cg_const_int(p, t_int, temp_data[i].elem_index));
+		}
+
+		assign_array(p, temp_data);
+		cg_fill_slice(p, v, data, cg_const_int(p, t_int, count));
+		return v;
+	}
+
+	case Type_DynamicArray: {
+		GB_ASSERT(!build_context.no_dynamic_literals);
+
+		Type *et = bt->DynamicArray.elem;
+		cgValue size  = cg_const_int(p, t_int, type_size_of(et));
+		cgValue align = cg_const_int(p, t_int, type_align_of(et));
+
+		i64 item_count = gb_max(cl->max_count, cl->elems.count);
+		{
+			// Reserve capacity up front via the runtime.
+
+			auto args = slice_make<cgValue>(temporary_allocator(), 5);
+			args[0] = cg_emit_conv(p, cg_addr_get_ptr(p, v), t_rawptr);
+			args[1] = size;
+			args[2] = align;
+			args[3] = cg_const_int(p, t_int, item_count);
+			args[4] = cg_emit_source_code_location_as_global(p, proc_name, pos);
+			cg_emit_runtime_call(p, "__dynamic_array_reserve", args);
+		}
+
+		// Materialize the elements in a local fixed array, then append the
+		// whole array in one runtime call.
+		Type *array_type = alloc_type_array(et, item_count);
+		cgAddr items_addr = cg_add_local(p, array_type, nullptr, true);
+		cgValue items = cg_addr_get_ptr(p, items_addr);
+
+		auto temp_data = array_make<cgCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
+		populate(p, cl->elems, &temp_data, type);
+
+		for_array(i, temp_data) {
+			temp_data[i].gep = cg_emit_array_epi(p, items, temp_data[i].elem_index);
+		}
+		assign_array(p, temp_data);
+
+		{
+			auto args = slice_make<cgValue>(temporary_allocator(), 6);
+			args[0] = cg_emit_conv(p, v.addr, t_rawptr);
+			args[1] = size;
+			args[2] = align;
+			args[3] = cg_emit_conv(p, items, t_rawptr);
+			args[4] = cg_const_int(p, t_int, item_count);
+			args[5] = cg_emit_source_code_location_as_global(p, proc_name, pos);
+			cg_emit_runtime_call(p, "__dynamic_array_append", args);
+		}
+		break;
+	}
+
+	case Type_Basic: {
+		// The only basic type with a compound literal is `any`:
+		// two fields, `data` (rawptr) and `id` (typeid).
+		GB_ASSERT(is_type_any(bt));
+		String field_names[2] = {
+			str_lit("data"),
+			str_lit("id"),
+		};
+		Type *field_types[2] = {
+			t_rawptr,
+			t_typeid,
+		};
+
+		for_array(field_index, cl->elems) {
+			Ast *elem = cl->elems[field_index];
+
+			cgValue field_expr = {};
+			isize index = field_index;
+
+			if (elem->kind == Ast_FieldValue) {
+				ast_node(fv, FieldValue, elem);
+				Selection sel = lookup_field(bt, fv->field->Ident.token.string, false);
+				index = sel.index[0];
+				elem = fv->value;
+			} else {
+				TypeAndValue tav = type_and_value_of_expr(elem);
+				Selection sel = lookup_field(bt, field_names[field_index], false);
+				index = sel.index[0];
+			}
+
+			field_expr = cg_build_expr(p, elem);
+
+			GB_ASSERT(field_expr.type->kind != Type_Tuple);
+
+			Type *ft = field_types[index];
+			cgValue fv = cg_emit_conv(p, field_expr, ft);
+			cgValue gep = cg_emit_struct_ep(p, cg_addr_get_ptr(p, v), index);
+			cg_emit_store(p, gep, fv);
+		}
+		break;
+	}
+
+	case Type_BitSet: {
+		// OR together `1 << (elem - lower)` for each element, working in
+		// the bit set's integer representation via transmute.
+		i64 sz = type_size_of(type);
+		if (sz == 0) {
+			return v;
+		}
+		cgValue lower = cg_const_value(p, t_int, exact_value_i64(bt->BitSet.lower));
+		Type *it = bit_set_to_int(bt);
+		cgValue one = cg_const_value(p, it, exact_value_i64(1));
+		for (Ast *elem : cl->elems) {
+			GB_ASSERT(elem->kind != Ast_FieldValue);
+
+			cgValue expr = cg_build_expr(p, elem);
+			GB_ASSERT(expr.type->kind != Type_Tuple);
+
+			cgValue e = cg_emit_conv(p, expr, it);
+			e = cg_emit_arith(p, Token_Sub, e, lower, it);
+			e = cg_emit_arith(p, Token_Shl, one, e, it);
+
+			cgValue old_value = cg_emit_transmute(p, cg_addr_load(p, v), it);
+			cgValue new_value = cg_emit_arith(p, Token_Or, old_value, e, it);
+			new_value = cg_emit_transmute(p, new_value, type);
+			cg_addr_store(p, v, new_value);
+		}
+		return v;
+	}
+
+	case Type_Matrix: {
+		auto temp_data = array_make<cgCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
+
+		populate(p, cl->elems, &temp_data, type);
+
+		cgValue dst_ptr = cg_addr_get_ptr(p, v);
+		for_array(i, temp_data) {
+			temp_data[i].gep = cg_emit_array_epi(p, dst_ptr, temp_data[i].elem_index);
+		}
+
+		assign_array(p, temp_data);
+		break;
+	}
+
+	case Type_SimdVector: {
+		// NOTE(review): element stores are still commented out here, so a
+		// #simd literal currently stays zero-initialized — confirm intent.
+		// auto temp_data = array_make<cgCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
+
+		// populate(p, cl->elems, &temp_data, type);
+
+		// // TODO(bill): reduce the need for individual `insertelement` if a `shufflevector`
+		// // might be a better option
+		// for (auto const &td : temp_data) if (td.value.node != nullptr) {
+		// 	if (td.elem_length > 0) {
+		// 		for (i64 k = 0; k < td.elem_length; k++) {
+		// 			LLVMValueRef index = cg_const_int(p->module, t_u32, td.elem_index + k).value;
+		// 			vector_value.value = LLVMBuildInsertElement(p->builder, vector_value.value, td.value.value, index, "");
+		// 		}
+		// 	} else {
+		// 		LLVMValueRef index = cg_const_int(p->module, t_u32, td.elem_index).value;
+		// 		vector_value.value = LLVMBuildInsertElement(p->builder, vector_value.value, td.value.value, index, "");
+
+		// 	}
+		// }
+		break;
+	}
+	}
+
+	return v;
+}
+
+// Lowers a unary address-of expression `&x`. Most special forms
+// (&m[k] on maps, &soa[i], &v.(T)) are still TODO in this backend and panic;
+// the large commented-out regions are the lb_ (LLVM backend) reference
+// implementations kept for the eventual port. The supported paths are
+// `&CompoundLit{...}` and the generic fallthrough to cg_build_addr_ptr.
+gb_internal cgValue cg_build_unary_and(cgProcedure *p, Ast *expr) {
+	ast_node(ue, UnaryExpr, expr);
+	auto tv = type_and_value_of_expr(expr);
+
+
+	Ast *ue_expr = unparen_expr(ue->expr);
+	if (ue_expr->kind == Ast_IndexExpr && tv.mode == Addressing_OptionalOkPtr && is_type_tuple(tv.type)) {
+		// `&m[k]` on a map yields a (ptr, ok) tuple — not yet ported.
+		GB_PANIC("TODO(bill): &m[k]");
+		// Type *tuple = tv.type;
+
+		// Type *map_type = type_of_expr(ue_expr->IndexExpr.expr);
+		// Type *ot = base_type(map_type);
+		// Type *t = base_type(type_deref(ot));
+		// bool deref = t != ot;
+		// GB_ASSERT(t->kind == Type_Map);
+		// ast_node(ie, IndexExpr, ue_expr);
+
+		// cgValue map_val = cg_build_addr_ptr(p, ie->expr);
+		// if (deref) {
+		// 	map_val = cg_emit_load(p, map_val);
+		// }
+
+		// cgValue key = lb_build_expr(p, ie->index);
+		// key = lb_emit_conv(p, key, t->Map.key);
+
+		// lbAddr addr = lb_addr_map(map_val, key, t, alloc_type_pointer(t->Map.value));
+		// lbValue ptr = lb_addr_get_ptr(p, addr);
+
+		// lbValue ok = lb_emit_comp_against_nil(p, Token_NotEq, ptr);
+		// ok = lb_emit_conv(p, ok, tuple->Tuple.variables[1]->type);
+
+		// lbAddr res = lb_add_local_generated(p, tuple, false);
+		// lbValue gep0 = lb_emit_struct_ep(p, res.addr, 0);
+		// lbValue gep1 = lb_emit_struct_ep(p, res.addr, 1);
+		// lb_emit_store(p, gep0, ptr);
+		// lb_emit_store(p, gep1, ok);
+		// return lb_addr_load(p, res);
+
+	} else if (is_type_soa_pointer(tv.type)) {
+		// `&soa[i]` produces an SOA pointer — not yet ported.
+		GB_PANIC("TODO(bill): &soa[i]");
+		// ast_node(ie, IndexExpr, ue_expr);
+		// lbValue addr = lb_build_addr_ptr(p, ie->expr);
+		// lbValue index = lb_build_expr(p, ie->index);
+
+		// if (!build_context.no_bounds_check) {
+		// 	// TODO(bill): soa bounds checking
+		// }
+
+		// return lb_make_soa_pointer(p, tv.type, addr, index);
+	} else if (ue_expr->kind == Ast_CompoundLit) {
+		// `&T{...}`: materialize the literal, then hand back its address.
+		cgAddr addr = cg_build_addr_compound_lit(p, expr);
+		return addr.addr;
+	} else if (ue_expr->kind == Ast_TypeAssertion) {
+		// `&v.(T)` — not yet ported.
+		GB_PANIC("TODO(bill): &v.(T)");
+		// if (is_type_tuple(tv.type)) {
+		// 	Type *tuple = tv.type;
+		// 	Type *ptr_type = tuple->Tuple.variables[0]->type;
+		// 	Type *ok_type = tuple->Tuple.variables[1]->type;
+
+		// 	ast_node(ta, TypeAssertion, ue_expr);
+		// 	TokenPos pos = ast_token(expr).pos;
+		// 	Type *type = type_of_expr(ue_expr);
+		// 	GB_ASSERT(!is_type_tuple(type));
+
+		// 	lbValue e = lb_build_expr(p, ta->expr);
+		// 	Type *t = type_deref(e.type);
+		// 	if (is_type_union(t)) {
+		// 		lbValue v = e;
+		// 		if (!is_type_pointer(v.type)) {
+		// 			v = lb_address_from_load_or_generate_local(p, v);
+		// 		}
+		// 		Type *src_type = type_deref(v.type);
+		// 		Type *dst_type = type;
+
+		// 		lbValue src_tag = {};
+		// 		lbValue dst_tag = {};
+		// 		if (is_type_union_maybe_pointer(src_type)) {
+		// 			src_tag = lb_emit_comp_against_nil(p, Token_NotEq, v);
+		// 			dst_tag = lb_const_bool(p->module, t_bool, true);
+		// 		} else {
+		// 			src_tag = lb_emit_load(p, lb_emit_union_tag_ptr(p, v));
+		// 			dst_tag = lb_const_union_tag(p->module, src_type, dst_type);
+		// 		}
+
+		// 		lbValue ok = lb_emit_comp(p, Token_CmpEq, src_tag, dst_tag);
+
+		// 		lbValue data_ptr = lb_emit_conv(p, v, ptr_type);
+		// 		lbAddr res = lb_add_local_generated(p, tuple, true);
+		// 		lbValue gep0 = lb_emit_struct_ep(p, res.addr, 0);
+		// 		lbValue gep1 = lb_emit_struct_ep(p, res.addr, 1);
+		// 		lb_emit_store(p, gep0, lb_emit_select(p, ok, data_ptr, lb_const_nil(p->module, ptr_type)));
+		// 		lb_emit_store(p, gep1, lb_emit_conv(p, ok, ok_type));
+		// 		return lb_addr_load(p, res);
+		// 	} else if (is_type_any(t)) {
+		// 		lbValue v = e;
+		// 		if (is_type_pointer(v.type)) {
+		// 			v = lb_emit_load(p, v);
+		// 		}
+
+		// 		lbValue data_ptr = lb_emit_conv(p, lb_emit_struct_ev(p, v, 0), ptr_type);
+		// 		lbValue any_id = lb_emit_struct_ev(p, v, 1);
+		// 		lbValue id = lb_typeid(p->module, type);
+
+		// 		lbValue ok = lb_emit_comp(p, Token_CmpEq, any_id, id);
+
+		// 		lbAddr res = lb_add_local_generated(p, tuple, false);
+		// 		lbValue gep0 = lb_emit_struct_ep(p, res.addr, 0);
+		// 		lbValue gep1 = lb_emit_struct_ep(p, res.addr, 1);
+		// 		lb_emit_store(p, gep0, lb_emit_select(p, ok, data_ptr, lb_const_nil(p->module, ptr_type)));
+		// 		lb_emit_store(p, gep1, lb_emit_conv(p, ok, ok_type));
+		// 		return lb_addr_load(p, res);
+		// 	} else {
+		// 		GB_PANIC("TODO(bill): type assertion %s", type_to_string(type));
+		// 	}
+
+		// } else {
+		// 	GB_ASSERT(is_type_pointer(tv.type));
+
+		// 	ast_node(ta, TypeAssertion, ue_expr);
+		// 	TokenPos pos = ast_token(expr).pos;
+		// 	Type *type = type_of_expr(ue_expr);
+		// 	GB_ASSERT(!is_type_tuple(type));
+
+		// 	lbValue e = lb_build_expr(p, ta->expr);
+		// 	Type *t = type_deref(e.type);
+		// 	if (is_type_union(t)) {
+		// 		lbValue v = e;
+		// 		if (!is_type_pointer(v.type)) {
+		// 			v = lb_address_from_load_or_generate_local(p, v);
+		// 		}
+		// 		Type *src_type = type_deref(v.type);
+		// 		Type *dst_type = type;
+
+
+		// 		if ((p->state_flags & StateFlag_no_type_assert) == 0) {
+		// 			lbValue src_tag = {};
+		// 			lbValue dst_tag = {};
+		// 			if (is_type_union_maybe_pointer(src_type)) {
+		// 				src_tag = lb_emit_comp_against_nil(p, Token_NotEq, v);
+		// 				dst_tag = lb_const_bool(p->module, t_bool, true);
+		// 			} else {
+		// 				src_tag = lb_emit_load(p, lb_emit_union_tag_ptr(p, v));
+		// 				dst_tag = lb_const_union_tag(p->module, src_type, dst_type);
+		// 			}
+
+
+		// 			isize arg_count = 6;
+		// 			if (build_context.no_rtti) {
+		// 				arg_count = 4;
+		// 			}
+
+		// 			lbValue ok = lb_emit_comp(p, Token_CmpEq, src_tag, dst_tag);
+		// 			auto args = array_make<lbValue>(permanent_allocator(), arg_count);
+		// 			args[0] = ok;
+
+		// 			args[1] = lb_find_or_add_entity_string(p->module, get_file_path_string(pos.file_id));
+		// 			args[2] = lb_const_int(p->module, t_i32, pos.line);
+		// 			args[3] = lb_const_int(p->module, t_i32, pos.column);
+
+		// 			if (!build_context.no_rtti) {
+		// 				args[4] = lb_typeid(p->module, src_type);
+		// 				args[5] = lb_typeid(p->module, dst_type);
+		// 			}
+		// 			lb_emit_runtime_call(p, "type_assertion_check", args);
+		// 		}
+
+		// 		lbValue data_ptr = v;
+		// 		return lb_emit_conv(p, data_ptr, tv.type);
+		// 	} else if (is_type_any(t)) {
+		// 		lbValue v = e;
+		// 		if (is_type_pointer(v.type)) {
+		// 			v = lb_emit_load(p, v);
+		// 		}
+		// 		lbValue data_ptr = lb_emit_struct_ev(p, v, 0);
+		// 		if ((p->state_flags & StateFlag_no_type_assert) == 0) {
+		// 			GB_ASSERT(!build_context.no_rtti);
+
+		// 			lbValue any_id = lb_emit_struct_ev(p, v, 1);
+
+		// 			lbValue id = lb_typeid(p->module, type);
+		// 			lbValue ok = lb_emit_comp(p, Token_CmpEq, any_id, id);
+		// 			auto args = array_make<lbValue>(permanent_allocator(), 6);
+		// 			args[0] = ok;
+
+		// 			args[1] = lb_find_or_add_entity_string(p->module, get_file_path_string(pos.file_id));
+		// 			args[2] = lb_const_int(p->module, t_i32, pos.line);
+		// 			args[3] = lb_const_int(p->module, t_i32, pos.column);
+
+		// 			args[4] = any_id;
+		// 			args[5] = id;
+		// 			lb_emit_runtime_call(p, "type_assertion_check", args);
+		// 		}
+
+		// 		return lb_emit_conv(p, data_ptr, tv.type);
+		// 	} else {
+		// 		GB_PANIC("TODO(bill): type assertion %s", type_to_string(type));
+		// 	}
+		// }
+	}
+
+	// Generic case: take the address of the operand directly.
+	return cg_build_addr_ptr(p, ue->expr);
+}
+
+// Lowers a type assertion `v.(T)` where `v` is a union (or pointer to one).
+// When `type` is already a tuple the caller asked for the `val, ok` form and
+// a (value, ok) pair is returned; otherwise the result is the value alone and
+// a runtime `type_assertion_check2` call panics on a failed assertion
+// (unless StateFlag_no_type_assert allows a raw bit cast of the payload).
+// The two stack slots gep0/gep1 act as the merge values across the
+// ok/end control regions, avoiding explicit phi nodes.
+gb_internal cgValue cg_emit_cast_union(cgProcedure *p, cgValue value, Type *type, TokenPos pos) {
+	Type *src_type = value.type;
+	bool is_ptr = is_type_pointer(src_type);
+
+	// Normalize the target to a (value, ok) tuple.
+	bool is_tuple = true;
+	Type *tuple = type;
+	if (type->kind != Type_Tuple) {
+		is_tuple = false;
+		tuple = make_optional_ok_type(type);
+	}
+
+
+	if (is_ptr) {
+		value = cg_emit_load(p, value);
+	}
+	Type *src = base_type(type_deref(src_type));
+	GB_ASSERT_MSG(is_type_union(src), "%s", type_to_string(src_type));
+	Type *dst = tuple->Tuple.variables[0]->type;
+
+	// The union must be addressable to read its tag/payload in place.
+	cgValue value_ = cg_address_from_load_or_generate_local(p, value);
+
+	if ((p->state_flags & StateFlag_no_type_assert) != 0 && !is_tuple) {
+		// just do a bit cast of the data at the front
+		cgValue ptr = cg_emit_conv(p, value_, alloc_type_pointer(type));
+		return cg_emit_load(p, ptr);
+	}
+
+
+	cgValue tag = {};
+	cgValue dst_tag = {};
+	cgValue cond = {};
+	cgValue data = {};
+
+	// Result slots: gep0 = asserted value, gep1 = ok flag (zero-initialized,
+	// so a failed assertion leaves (zero, false)).
+	cgValue gep0 = cg_add_local(p, tuple->Tuple.variables[0]->type, nullptr, true).addr;
+	cgValue gep1 = cg_add_local(p, tuple->Tuple.variables[1]->type, nullptr, true).addr;
+
+	if (is_type_union_maybe_pointer(src)) {
+		// Maybe-pointer unions have no tag: a non-nil payload means "set".
+		data = cg_emit_load(p, cg_emit_conv(p, value_, gep0.type));
+	} else {
+		tag = cg_emit_load(p, cg_emit_union_tag_ptr(p, value_));
+		dst_tag = cg_const_union_tag(p, src, dst);
+	}
+
+	TB_Node *ok_block = cg_control_region(p, "union_cast_ok");
+	TB_Node *end_block = cg_control_region(p, "union_cast_end");
+
+	if (data.node != nullptr) {
+		GB_ASSERT(is_type_union_maybe_pointer(src));
+		cond = cg_emit_comp_against_nil(p, Token_NotEq, data);
+	} else {
+		cond = cg_emit_comp(p, Token_CmpEq, tag, dst_tag);
+	}
+
+	cg_emit_if(p, cond, ok_block, end_block);
+	tb_inst_set_control(p->func, ok_block);
+
+	// On the ok path: read the payload and mark success.
+	if (data.node == nullptr) {
+		data = cg_emit_load(p, cg_emit_conv(p, value_, gep0.type));
+	}
+	cg_emit_store(p, gep0, data);
+	cg_emit_store(p, gep1, cg_const_bool(p, t_bool, true));
+
+	cg_emit_goto(p, end_block);
+	tb_inst_set_control(p->func, end_block);
+
+	if (!is_tuple) {
+		GB_ASSERT((p->state_flags & StateFlag_no_type_assert) == 0);
+		// NOTE(bill): Panic on invalid conversion
+		Type *dst_type = tuple->Tuple.variables[0]->type;
+
+		// Without RTTI only the (ok, file, line, column) prefix is passed.
+		isize arg_count = 7;
+		if (build_context.no_rtti) {
+			arg_count = 4;
+		}
+
+		cgValue ok = cg_emit_load(p, gep1);
+		auto args = slice_make<cgValue>(permanent_allocator(), arg_count);
+		args[0] = ok;
+
+		args[1] = cg_const_string(p, t_string, get_file_path_string(pos.file_id));
+		args[2] = cg_const_int(p, t_i32, pos.line);
+		args[3] = cg_const_int(p, t_i32, pos.column);
+
+		if (!build_context.no_rtti) {
+			args[4] = cg_typeid(p, src_type);
+			args[5] = cg_typeid(p, dst_type);
+			args[6] = cg_emit_conv(p, value_, t_rawptr);
+		}
+		cg_emit_runtime_call(p, "type_assertion_check2", args);
+
+		return cg_emit_load(p, gep0);
+	}
+
+	return cg_value_multi2(cg_emit_load(p, gep0), cg_emit_load(p, gep1), tuple);
+}
+
+// Lowers a type assertion `v.(T)` where `v` is an `any` (or pointer to one).
+// Mirrors cg_emit_cast_union: the stored typeid is compared against the
+// target's typeid; the tuple form returns (value, ok), the plain form calls
+// runtime `type_assertion_check2` to panic on mismatch (or bit-casts the
+// data pointer when StateFlag_no_type_assert is set).
+gb_internal cgValue cg_emit_cast_any(cgProcedure *p, cgValue value, Type *type, TokenPos pos) {
+	Type *src_type = value.type;
+
+	if (is_type_pointer(src_type)) {
+		value = cg_emit_load(p, value);
+	}
+
+	// Normalize the target to a (value, ok) tuple.
+	bool is_tuple = true;
+	Type *tuple = type;
+	if (type->kind != Type_Tuple) {
+		is_tuple = false;
+		tuple = make_optional_ok_type(type);
+	}
+	Type *dst_type = tuple->Tuple.variables[0]->type;
+
+	if ((p->state_flags & StateFlag_no_type_assert) != 0 && !is_tuple) {
+		// just do a bit cast of the data at the front
+		cgValue ptr = cg_emit_struct_ev(p, value, 0);
+		ptr = cg_emit_conv(p, ptr, alloc_type_pointer(type));
+		return cg_emit_load(p, ptr);
+	}
+
+	// `any` layout: field 0 = data pointer, field 1 = typeid.
+	cgValue dst_typeid = cg_typeid(p, dst_type);
+	cgValue any_typeid = cg_emit_struct_ev(p, value, 1);
+
+
+	TB_Node *ok_block = cg_control_region(p, "any_cast_ok");
+	TB_Node *end_block = cg_control_region(p, "any_cast_end");
+	cgValue cond = cg_emit_comp(p, Token_CmpEq, any_typeid, dst_typeid);
+	cg_emit_if(p, cond, ok_block, end_block);
+	tb_inst_set_control(p->func, ok_block);
+
+	// Result slots: gep0 = asserted value, gep1 = ok flag (zero-initialized,
+	// so a failed assertion leaves (zero, false)).
+	cgValue gep0 = cg_add_local(p, tuple->Tuple.variables[0]->type, nullptr, true).addr;
+	cgValue gep1 = cg_add_local(p, tuple->Tuple.variables[1]->type, nullptr, true).addr;
+
+	// On the ok path: copy the pointed-to payload and mark success.
+	cgValue any_data = cg_emit_struct_ev(p, value, 0);
+	cgValue ptr = cg_emit_conv(p, any_data, alloc_type_pointer(dst_type));
+	cg_emit_store(p, gep0, cg_emit_load(p, ptr));
+	cg_emit_store(p, gep1, cg_const_bool(p, t_bool, true));
+
+	cg_emit_goto(p, end_block);
+	tb_inst_set_control(p->func, end_block);
+
+	if (!is_tuple) {
+		// NOTE(bill): Panic on invalid conversion
+		cgValue ok = cg_emit_load(p, gep1);
+
+		// Without RTTI only the (ok, file, line, column) prefix is passed.
+		isize arg_count = 7;
+		if (build_context.no_rtti) {
+			arg_count = 4;
+		}
+		auto args = slice_make<cgValue>(permanent_allocator(), arg_count);
+		args[0] = ok;
+
+		args[1] = cg_const_string(p, t_string, get_file_path_string(pos.file_id));
+		args[2] = cg_const_int(p, t_i32, pos.line);
+		args[3] = cg_const_int(p, t_i32, pos.column);
+
+		if (!build_context.no_rtti) {
+			args[4] = any_typeid;
+			args[5] = dst_typeid;
+			args[6] = cg_emit_struct_ev(p, value, 0);
+		}
+		cg_emit_runtime_call(p, "type_assertion_check2", args);
+
+		return cg_emit_load(p, gep0);
+	}
+
+	return cg_value_multi2(cg_emit_load(p, gep0), cg_emit_load(p, gep1), tuple);
+}
+
+
+// Lowers a type assertion expression `v.(T)` by dispatching on the
+// (dereferenced) operand type: unions and `any` are supported; anything
+// else is still TODO and panics.
+gb_internal cgValue cg_build_type_assertion(cgProcedure *p, Ast *expr, Type *type) {
+	ast_node(ta, TypeAssertion, expr);
+
+	TokenPos pos = ast_token(expr).pos;
+	cgValue operand = cg_build_expr(p, ta->expr);
+	Type *src = type_deref(operand.type);
+
+	if (is_type_union(src)) {
+		return cg_emit_cast_union(p, operand, type, pos);
+	}
+	if (is_type_any(src)) {
+		return cg_emit_cast_any(p, operand, type, pos);
+	}
+	GB_PANIC("TODO(bill): type assertion %s", type_to_string(operand.type));
+	return {};
+}
+
+
+// Core expression lowering for the Tilde backend. Assumes `expr` has been
+// fully type-checked (tv.mode is valid). Constant expressions short-circuit
+// to cg_const_value, type expressions lower to their typeid, and everything
+// else dispatches on the AST node kind; unhandled kinds panic.
+gb_internal cgValue cg_build_expr_internal(cgProcedure *p, Ast *expr) {
+	expr = unparen_expr(expr);
+
+	TokenPos expr_pos = ast_token(expr).pos;
+	TypeAndValue tv = type_and_value_of_expr(expr);
+	Type *type = type_of_expr(expr);
+	GB_ASSERT_MSG(tv.mode != Addressing_Invalid, "invalid expression '%s' (tv.mode = %d, tv.type = %s) @ %s\n Current Proc: %.*s : %s", expr_to_string(expr), tv.mode, type_to_string(tv.type), token_pos_to_string(expr_pos), LIT(p->name), type_to_string(p->type));
+
+	// Constant folding shortcut — but compound literals are always built
+	// structurally, never as a single constant here.
+	if (tv.value.kind != ExactValue_Invalid &&
+	    expr->kind != Ast_CompoundLit) {
+		// NOTE(bill): The commented out code below is just for debug purposes only
+		// if (is_type_untyped(type)) {
+		// 	gb_printf_err("%s %s : %s @ %p\n", token_pos_to_string(expr_pos), expr_to_string(expr), type_to_string(expr->tav.type), expr);
+		// 	GB_PANIC("%s\n", type_to_string(tv.type));
+		// }
+		// NOTE(bill): Short on constant values
+		return cg_const_value(p, type, tv.value);
+	} else if (tv.mode == Addressing_Type) {
+		// NOTE(bill, 2023-01-16): is this correct? I hope so at least
+		return cg_typeid(p, tv.type);
+	}
+
+	switch (expr->kind) {
+	case_ast_node(bl, BasicLit, expr);
+		TokenPos pos = bl->token.pos;
+		GB_PANIC("Non-constant basic literal %s - %.*s", token_pos_to_string(pos), LIT(token_strings[bl->token.kind]));
+	case_end;
+
+	case_ast_node(bd, BasicDirective, expr);
+		TokenPos pos = bd->token.pos;
+		GB_PANIC("Non-constant basic literal %s - %.*s", token_pos_to_string(pos), LIT(bd->name.string));
+	case_end;
+
+	case_ast_node(i, Ident, expr);
+		Entity *e = entity_from_expr(expr);
+		e = strip_entity_wrapping(e);
+
+		GB_ASSERT_MSG(e != nullptr, "%s in %.*s %p", expr_to_string(expr), LIT(p->name), expr);
+
+		if (e->kind == Entity_Builtin) {
+			Token token = ast_token(expr);
+			GB_PANIC("TODO(bill): lb_build_expr Entity_Builtin '%.*s'\n"
+			         "\t at %s", LIT(builtin_procs[e->Builtin.id].name),
+			         token_pos_to_string(token.pos));
+			return {};
+		} else if (e->kind == Entity_Nil) {
+			// TODO(bill): is this correct?
+			return cg_value(cast(TB_Node *)nullptr, e->type);
+		}
+		GB_ASSERT(e->kind != Entity_ProcGroup);
+
+		// Procedure-local variables are looked up first; anything else
+		// (globals, procedures, foreign symbols) goes through cg_find_ident.
+		cgAddr *addr = map_get(&p->variable_map, e);
+		if (addr) {
+			return cg_addr_load(p, *addr);
+		}
+		return cg_find_ident(p, e, expr);
+	case_end;
+
+	case_ast_node(i, Implicit, expr);
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	case_end;
+
+	case_ast_node(u, Uninit, expr);
+		// `---`: typed uses become a poison value; untyped stays nil.
+		if (is_type_untyped(type)) {
+			return cg_value(cast(TB_Node *)nullptr, t_untyped_uninit);
+		}
+		return cg_value(tb_inst_poison(p->func), type);
+	case_end;
+
+	case_ast_node(de, DerefExpr, expr);
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	case_end;
+
+
+	case_ast_node(se, SelectorExpr, expr);
+		TypeAndValue tav = type_and_value_of_expr(expr);
+		GB_ASSERT(tav.mode != Addressing_Invalid);
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	case_end;
+
+	case_ast_node(ise, ImplicitSelectorExpr, expr);
+		// `.Value` selectors are always constant after checking.
+		TypeAndValue tav = type_and_value_of_expr(expr);
+		GB_ASSERT(tav.mode == Addressing_Constant);
+
+		return cg_const_value(p, type, tv.value);
+	case_end;
+
+
+	case_ast_node(se, SelectorCallExpr, expr);
+		// `foo->bar(...)` has been rewritten to `foo.bar(foo, ...)`.
+		GB_ASSERT(se->modified_call);
+		return cg_build_call_expr(p, se->call);
+	case_end;
+
+	case_ast_node(i, CallExpr, expr);
+		return cg_build_call_expr(p, expr);
+	case_end;
+
+	case_ast_node(cl, CompoundLit, expr);
+		cgAddr addr = cg_build_addr_compound_lit(p, expr);
+		return cg_addr_load(p, addr);
+	case_end;
+
+
+	case_ast_node(te, TernaryIfExpr, expr);
+		// `x if cond else y`: build both arms in their own control regions
+		// and merge with a two-operand phi.
+		cgValue incoming_values[2] = {};
+		TB_Node *incoming_regions[2] = {};
+
+		TB_Node *then  = cg_control_region(p, "if_then");
+		TB_Node *done  = cg_control_region(p, "if_done");
+		TB_Node *else_ = cg_control_region(p, "if_else");
+
+		cg_build_cond(p, te->cond, then, else_);
+		tb_inst_set_control(p->func, then);
+
+		Type *type = default_type(type_of_expr(expr));
+
+		incoming_values [0] = cg_emit_conv(p, cg_build_expr(p, te->x), type);
+		incoming_regions[0] = tb_inst_get_control(p->func);
+
+		cg_emit_goto(p, done);
+		tb_inst_set_control(p->func, else_);
+
+		incoming_values [1] = cg_emit_conv(p, cg_build_expr(p, te->y), type);
+		incoming_regions[1] = tb_inst_get_control(p->func);
+
+		cg_emit_goto(p, done);
+		tb_inst_set_control(p->func, done);
+
+		GB_ASSERT(incoming_values[0].kind == cgValue_Value ||
+		          incoming_values[0].kind == cgValue_Addr);
+		GB_ASSERT(incoming_values[0].kind == incoming_values[1].kind);
+
+		cgValue res = {};
+		res.kind = incoming_values[0].kind;
+		res.type = type;
+		TB_DataType dt = cg_data_type(type);
+		// Address-kind values merge as raw pointers.
+		if (res.kind == cgValue_Addr) {
+			dt = TB_TYPE_PTR;
+		}
+		res.node = tb_inst_incomplete_phi(p->func, dt, done, 2);
+		tb_inst_add_phi_operand(p->func, res.node, incoming_regions[0], incoming_values[0].node);
+		tb_inst_add_phi_operand(p->func, res.node, incoming_regions[1], incoming_values[1].node);
+		return res;
+	case_end;
+
+	case_ast_node(te, TernaryWhenExpr, expr);
+		// `x when COND else y`: the condition is a compile-time constant,
+		// so only the selected arm is ever built.
+		TypeAndValue tav = type_and_value_of_expr(te->cond);
+		GB_ASSERT(tav.mode == Addressing_Constant);
+		GB_ASSERT(tav.value.kind == ExactValue_Bool);
+		if (tav.value.value_bool) {
+			return cg_build_expr(p, te->x);
+		} else {
+			return cg_build_expr(p, te->y);
+		}
+	case_end;
+
+	case_ast_node(tc, TypeCast, expr);
+		cgValue e = cg_build_expr(p, tc->expr);
+		switch (tc->token.kind) {
+		case Token_cast:
+			return cg_emit_conv(p, e, type);
+		case Token_transmute:
+			return cg_emit_transmute(p, e, type);
+		}
+		GB_PANIC("Invalid AST TypeCast");
+	case_end;
+
+	case_ast_node(ac, AutoCast, expr);
+		cgValue value = cg_build_expr(p, ac->expr);
+		return cg_emit_conv(p, value, type);
+	case_end;
+
+	case_ast_node(se, SliceExpr, expr);
+		if (is_type_slice(type_of_expr(se->expr))) {
+			// NOTE(bill): Quick optimization
+			if (se->high == nullptr &&
+			    (se->low == nullptr || cg_is_expr_constant_zero(se->low))) {
+				return cg_build_expr(p, se->expr);
+			}
+		}
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	case_end;
+
+	case_ast_node(ie, IndexExpr, expr);
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	case_end;
+
+	case_ast_node(ie, MatrixIndexExpr, expr);
+		return cg_addr_load(p, cg_build_addr(p, expr));
+	case_end;
+
+	case_ast_node(ue, UnaryExpr, expr);
+		// `&x` takes a dedicated path; all other unary operators are
+		// plain arithmetic on the built operand.
+		if (ue->op.kind == Token_And) {
+			return cg_build_unary_and(p, expr);
+		}
+		cgValue v = cg_build_expr(p, ue->expr);
+		return cg_emit_unary_arith(p, ue->op.kind, v, type);
+	case_end;
+	case_ast_node(be, BinaryExpr, expr);
+		return cg_build_binary_expr(p, expr);
+	case_end;
+
+	case_ast_node(oe, OrReturnExpr, expr);
+		return cg_build_or_return(p, oe->expr, tv.type);
+	case_end;
+
+	case_ast_node(oe, OrElseExpr, expr);
+		return cg_build_or_else(p, oe->x, oe->y, tv.type);
+	case_end;
+
+	case_ast_node(ta, TypeAssertion, expr);
+		return cg_build_type_assertion(p, expr, tv.type);
+	case_end;
+
+	case_ast_node(pl, ProcLit, expr);
+		// Anonymous procedure literal: generate the procedure, then take
+		// the address of its symbol.
+		cgProcedure *anon = cg_procedure_generate_anonymous(p->module, expr, p);
+		GB_ASSERT(anon != nullptr);
+		GB_ASSERT(anon->symbol != nullptr);
+		return cg_value(tb_inst_get_symbol_address(p->func, anon->symbol), type);
+	case_end;
+
+	}
+	TokenPos token_pos = ast_token(expr).pos;
+	GB_PANIC("Unexpected expression\n"
+	         "\tAst: %.*s @ "
+	         "%s\n",
+	         LIT(ast_strings[expr->kind]),
+	         token_pos_to_string(token_pos));
+
+	return {};
+
+}
+
+// Convenience wrapper: resolve `expr` to an addressable location and
+// return that location as a raw pointer value.
+gb_internal cgValue cg_build_addr_ptr(cgProcedure *p, Ast *expr) {
+	return cg_addr_get_ptr(p, cg_build_addr(p, expr));
+}
+
+gb_internal cgAddr cg_build_addr_internal(cgProcedure *p, Ast *expr);
+// Resolve `expr` (parens stripped) to an addressable location (lvalue) in `p`.
+gb_internal cgAddr cg_build_addr(cgProcedure *p, Ast *expr) {
+	expr = unparen_expr(expr);
+
+	// IMPORTANT NOTE(bill):
+	// Selector Call Expressions (foo->bar(...))
+	// must only evaluate `foo` once as it gets transformed into
+	// `foo.bar(foo, ...)`
+	// And if `foo` is a procedure call or something more complex, storing the value
+	// once is a very good idea
+	// If a stored value is found, it must be removed from the cache
+	if (expr->state_flags & StateFlag_SelectorCallExpr) {
+		// NOTE(review): the selector-addr cache from the LLVM backend has not
+		// been ported here yet — both branches below are inert placeholders,
+		// so selector-call operands are currently re-evaluated.
+		// lbAddr *pp = map_get(&p->selector_addr, expr);
+		// if (pp != nullptr) {
+		// 	lbAddr res = *pp;
+		// 	map_remove(&p->selector_addr, expr);
+		// 	return res;
+		// }
+	}
+	cgAddr addr = cg_build_addr_internal(p, expr);
+	if (expr->state_flags & StateFlag_SelectorCallExpr) {
+		// map_set(&p->selector_addr, expr, addr);
+	}
+	return addr;
+}
+
+// Build the address of an `IndexExpr` (`x[i]`) for every indexable type.
+// Pointer operands are dereferenced first. Several cases (#soa structures,
+// maps, matrices) are not yet ported to this backend and panic with a TODO;
+// bounds checking is likewise not ported (commented out throughout).
+gb_internal cgAddr cg_build_addr_index_expr(cgProcedure *p, Ast *expr) {
+	ast_node(ie, IndexExpr, expr);
+
+	Type *t = base_type(type_of_expr(ie->expr));
+
+	// `ptr[i]` indexes through the pointer: note it now, then strip it.
+	bool deref = is_type_pointer(t);
+	t = base_type(type_deref(t));
+	if (is_type_soa_struct(t)) {
+		GB_PANIC("TODO(bill): #soa");
+		// // SOA STRUCTURES!!!!
+		// lbValue val = cg_build_addr_ptr(p, ie->expr);
+		// if (deref) {
+		// 	val = cg_emit_load(p, val);
+		// }
+
+		// cgValue index = cg_build_expr(p, ie->index);
+		// return cg_addr_soa_variable(val, index, ie->index);
+	}
+
+	if (ie->expr->tav.mode == Addressing_SoaVariable) {
+		GB_PANIC("TODO(bill): #soa");
+		// // SOA Structures for slices/dynamic arrays
+		// GB_ASSERT(is_type_pointer(type_of_expr(ie->expr)));
+
+		// lbValue field = lb_build_expr(p, ie->expr);
+		// lbValue index = lb_build_expr(p, ie->index);
+
+
+		// if (!build_context.no_bounds_check) {
+		// 	// TODO HACK(bill): Clean up this hack to get the length for bounds checking
+		// 	// GB_ASSERT(LLVMIsALoadInst(field.value));
+
+		// 	// lbValue a = {};
+		// 	// a.value = LLVMGetOperand(field.value, 0);
+		// 	// a.type = alloc_type_pointer(field.type);
+
+		// 	// irInstr *b = &a->Instr;
+		// 	// GB_ASSERT(b->kind == irInstr_StructElementPtr);
+		// 	// lbValue base_struct = b->StructElementPtr.address;
+
+		// 	// GB_ASSERT(is_type_soa_struct(type_deref(ir_type(base_struct))));
+		// 	// lbValue len = ir_soa_struct_len(p, base_struct);
+		// 	// lb_emit_bounds_check(p, ast_token(ie->index), index, len);
+		// }
+		// lbValue val = lb_emit_ptr_offset(p, field, index);
+		// return lb_addr(val);
+	}
+
+	GB_ASSERT_MSG(is_type_indexable(t), "%s %s", type_to_string(t), expr_to_string(expr));
+
+	if (is_type_map(t)) {
+		GB_PANIC("TODO(bill): map indexing");
+		// lbAddr map_addr = lb_build_addr(p, ie->expr);
+		// lbValue key = lb_build_expr(p, ie->index);
+		// key = lb_emit_conv(p, key, t->Map.key);
+
+		// Type *result_type = type_of_expr(expr);
+		// lbValue map_ptr = lb_addr_get_ptr(p, map_addr);
+		// if (is_type_pointer(type_deref(map_ptr.type))) {
+		// 	map_ptr = lb_emit_load(p, map_ptr);
+		// }
+		// return lb_addr_map(map_ptr, key, t, result_type);
+	}
+
+	switch (t->kind) {
+	case Type_Array: {
+		cgValue array = {};
+		array = cg_build_addr_ptr(p, ie->expr);
+		if (deref) {
+			array = cg_emit_load(p, array);
+		}
+		cgValue index = cg_build_expr(p, ie->index);
+		index = cg_emit_conv(p, index, t_int);
+		cgValue elem = cg_emit_array_ep(p, array, index);
+
+		// Constant indices were already range-checked by the checker;
+		// runtime bounds checking is not ported to this backend yet.
+		auto index_tv = type_and_value_of_expr(ie->index);
+		if (index_tv.mode != Addressing_Constant) {
+			// cgValue len = cg_const_int(p->module, t_int, t->Array.count);
+			// cg_emit_bounds_check(p, ast_token(ie->index), index, len);
+		}
+		return cg_addr(elem);
+	}
+
+	case Type_EnumeratedArray: {
+		cgValue array = {};
+		array = cg_build_addr_ptr(p, ie->expr);
+		if (deref) {
+			array = cg_emit_load(p, array);
+		}
+
+		Type *index_type = t->EnumeratedArray.index;
+
+		auto index_tv = type_and_value_of_expr(ie->index);
+
+		// Enumerated arrays whose first enum value is non-zero must have the
+		// minimum value subtracted to get a zero-based element index.
+		cgValue index = {};
+		if (compare_exact_values(Token_NotEq, *t->EnumeratedArray.min_value, exact_value_i64(0))) {
+			if (index_tv.mode == Addressing_Constant) {
+				ExactValue idx = exact_value_sub(index_tv.value, *t->EnumeratedArray.min_value);
+				index = cg_const_value(p, index_type, idx);
+			} else {
+				index = cg_emit_arith(p, Token_Sub,
+				                      cg_build_expr(p, ie->index),
+				                      cg_const_value(p, index_type, *t->EnumeratedArray.min_value),
+				                      index_type);
+				index = cg_emit_conv(p, index, t_int);
+			}
+		} else {
+			index = cg_emit_conv(p, cg_build_expr(p, ie->index), t_int);
+		}
+
+		cgValue elem = cg_emit_array_ep(p, array, index);
+
+		if (index_tv.mode != Addressing_Constant) {
+			// cgValue len = cg_const_int(p->module, t_int, t->EnumeratedArray.count);
+			// cg_emit_bounds_check(p, ast_token(ie->index), index, len);
+		}
+		return cg_addr(elem);
+	}
+
+	case Type_Slice: {
+		cgValue slice = {};
+		slice = cg_build_expr(p, ie->expr);
+		if (deref) {
+			slice = cg_emit_load(p, slice);
+		}
+		cgValue elem = cg_builtin_raw_data(p, slice);
+		cgValue index = cg_emit_conv(p, cg_build_expr(p, ie->index), t_int);
+		// cgValue len = cg_builtin_len(p, slice);
+		// cg_emit_bounds_check(p, ast_token(ie->index), index, len);
+		cgValue v = cg_emit_ptr_offset(p, elem, index);
+		// `true` strips any relative-pointer wrapping before re-pointering.
+		v.type = alloc_type_pointer(type_deref(v.type, true));
+		return cg_addr(v);
+	}
+
+	case Type_MultiPointer: {
+		cgValue multi_ptr = {};
+		multi_ptr = cg_build_expr(p, ie->expr);
+		if (deref) {
+			multi_ptr = cg_emit_load(p, multi_ptr);
+		}
+		cgValue index = cg_build_expr(p, ie->index);
+		index = cg_emit_conv(p, index, t_int);
+
+		cgValue v = cg_emit_ptr_offset(p, multi_ptr, index);
+		v.type = alloc_type_pointer(type_deref(v.type, true));
+		return cg_addr(v);
+	}
+
+	case Type_RelativeMultiPointer: {
+		cgValue multi_ptr = {};
+		multi_ptr = cg_build_expr(p, ie->expr);
+		if (deref) {
+			multi_ptr = cg_emit_load(p, multi_ptr);
+		}
+		cgValue index = cg_build_expr(p, ie->index);
+		index = cg_emit_conv(p, index, t_int);
+
+		cgValue v = cg_emit_ptr_offset(p, multi_ptr, index);
+		v.type = alloc_type_pointer(type_deref(v.type, true));
+		return cg_addr(v);
+	}
+
+	case Type_DynamicArray: {
+		cgValue dynamic_array = {};
+		dynamic_array = cg_build_expr(p, ie->expr);
+		if (deref) {
+			dynamic_array = cg_emit_load(p, dynamic_array);
+		}
+		cgValue elem = cg_builtin_raw_data(p, dynamic_array);
+		cgValue index = cg_emit_conv(p, cg_build_expr(p, ie->index), t_int);
+		// cgValue len = cg_dynamic_array_len(p, dynamic_array);
+		// cg_emit_bounds_check(p, ast_token(ie->index), index, len);
+		cgValue v = cg_emit_ptr_offset(p, elem, index);
+		v.type = alloc_type_pointer(type_deref(v.type, true));
+		return cg_addr(v);
+	}
+
+	case Type_Matrix: {
+		GB_PANIC("TODO(bill): matrix");
+		// lbValue matrix = {};
+		// matrix = lb_build_addr_ptr(p, ie->expr);
+		// if (deref) {
+		// 	matrix = lb_emit_load(p, matrix);
+		// }
+		// lbValue index = lb_build_expr(p, ie->index);
+		// index = lb_emit_conv(p, index, t_int);
+		// lbValue elem = lb_emit_matrix_ep(p, matrix, lb_const_int(p->module, t_int, 0), index);
+		// elem = lb_emit_conv(p, elem, alloc_type_pointer(type_of_expr(expr)));
+
+		// auto index_tv = type_and_value_of_expr(ie->index);
+		// if (index_tv.mode != Addressing_Constant) {
+		// 	lbValue len = lb_const_int(p->module, t_int, t->Matrix.column_count);
+		// 	lb_emit_bounds_check(p, ast_token(ie->index), index, len);
+		// }
+		// return lb_addr(elem);
+	}
+
+
+	case Type_Basic: { // Basic_string
+		cgValue str;
+		cgValue elem;
+		cgValue len;
+		cgValue index;
+
+		str = cg_build_expr(p, ie->expr);
+		if (deref) {
+			str = cg_emit_load(p, str);
+		}
+		elem = cg_builtin_raw_data(p, str);
+		len = cg_builtin_len(p, str);
+
+		index = cg_emit_conv(p, cg_build_expr(p, ie->index), t_int);
+		// cg_emit_bounds_check(p, ast_token(ie->index), index, len);
+
+		cgValue v = cg_emit_ptr_offset(p, elem, index);
+		v.type = alloc_type_pointer(type_deref(v.type, true));
+		return cg_addr(v);
+	}
+	}
+	// NOTE(review): indexable kinds not handled above silently yield a nil
+	// address — presumably unreachable after the is_type_indexable assert;
+	// confirm, or consider a GB_PANIC here.
+	return {};
+}
+
+// Dispatch on the AST node kind and build the address of `expr`.
+// Falls through to a panic for any node kind that is not addressable
+// (or not yet ported to this backend).
+gb_internal cgAddr cg_build_addr_internal(cgProcedure *p, Ast *expr) {
+	switch (expr->kind) {
+	case_ast_node(i, Implicit, expr);
+		// Only the implicit `context` is addressable here.
+		cgAddr v = {};
+		switch (i->kind) {
+		case Token_context:
+			v = cg_find_or_generate_context_ptr(p);
+			break;
+		}
+
+		GB_ASSERT(v.addr.node != nullptr);
+		return v;
+	case_end;
+
+	case_ast_node(i, Ident, expr);
+		// A blank identifier (`_`) has no address; return a nil addr.
+		if (is_blank_ident(expr)) {
+			cgAddr val = {};
+			return val;
+		}
+		// NOTE(review): `name` is unused below — candidate for removal.
+		String name = i->token.string;
+		Entity *e = entity_of_node(expr);
+		return cg_build_addr_from_entity(p, e, expr);
+	case_end;
+
+	case_ast_node(de, DerefExpr, expr);
+		Type *t = type_of_expr(de->expr);
+		if (is_type_relative_pointer(t)) {
+			// Relative pointers are kept as an addr with a deferred deref.
+			cgAddr addr = cg_build_addr(p, de->expr);
+			addr.relative.deref = true;
+			return addr;
+		} else if (is_type_soa_pointer(t)) {
+			// An #soa pointer is a (ptr, index) pair.
+			cgValue value = cg_build_expr(p, de->expr);
+			cgValue ptr = cg_emit_struct_ev(p, value, 0);
+			cgValue idx = cg_emit_struct_ev(p, value, 1);
+			GB_PANIC("TODO(bill): cg_addr_soa_variable");
+			// return cg_addr_soa_variable(ptr, idx, nullptr);
+		}
+		cgValue addr = cg_build_expr(p, de->expr);
+		return cg_addr(addr);
+	case_end;
+
+	case_ast_node(ie, IndexExpr, expr);
+		return cg_build_addr_index_expr(p, expr);
+	case_end;
+
+	case_ast_node(se, SliceExpr, expr);
+		return cg_build_addr_slice_expr(p, expr);
+	case_end;
+
+	case_ast_node(se, SelectorExpr, expr);
+		Ast *sel_node = unparen_expr(se->selector);
+		if (sel_node->kind != Ast_Ident) {
+			GB_PANIC("Unsupported selector expression");
+		}
+		String selector = sel_node->Ident.token.string;
+		TypeAndValue tav = type_and_value_of_expr(se->expr);
+
+		if (tav.mode == Addressing_Invalid) {
+			// NOTE(bill): Imports
+			Entity *imp = entity_of_node(se->expr);
+			if (imp != nullptr) {
+				GB_ASSERT(imp->kind == Entity_ImportName);
+			}
+			return cg_build_addr(p, unparen_expr(se->selector));
+		}
+
+
+		Type *type = base_type(tav.type);
+		if (tav.mode == Addressing_Type) { // Addressing_Type
+			// `Type.proc_name` — only pseudo-fields (procedures) are valid.
+			Selection sel = lookup_field(tav.type, selector, true);
+			if (sel.pseudo_field) {
+				GB_ASSERT(sel.entity->kind == Entity_Procedure);
+				return cg_addr(cg_find_value_from_entity(p->module, sel.entity));
+			}
+			GB_PANIC("Unreachable %.*s", LIT(selector));
+		}
+
+		if (se->swizzle_count > 0) {
+			// Array swizzle selector, e.g. `v.xyzw` — indices are packed
+			// 2 bits each inside `swizzle_indices`.
+			Type *array_type = base_type(type_deref(tav.type));
+			GB_ASSERT(array_type->kind == Type_Array);
+			u8 swizzle_count = se->swizzle_count;
+			u8 swizzle_indices_raw = se->swizzle_indices;
+			u8 swizzle_indices[4] = {};
+			for (u8 i = 0; i < swizzle_count; i++) {
+				u8 index = swizzle_indices_raw>>(i*2) & 3;
+				swizzle_indices[i] = index;
+			}
+			cgValue a = {};
+			if (is_type_pointer(tav.type)) {
+				a = cg_build_expr(p, se->expr);
+			} else {
+				cgAddr addr = cg_build_addr(p, se->expr);
+				a = cg_addr_get_ptr(p, addr);
+			}
+
+			GB_ASSERT(is_type_array(expr->tav.type));
+			GB_PANIC("TODO(bill): cg_addr_swizzle");
+			// return cg_addr_swizzle(a, expr->tav.type, swizzle_count, swizzle_indices);
+		}
+
+		Selection sel = lookup_field(type, selector, false);
+		GB_ASSERT(sel.entity != nullptr);
+		if (sel.pseudo_field) {
+			GB_ASSERT(sel.entity->kind == Entity_Procedure);
+			Entity *e = entity_of_node(sel_node);
+			return cg_addr(cg_find_value_from_entity(p->module, e));
+		}
+
+		{
+			cgAddr addr = cg_build_addr(p, se->expr);
+			if (addr.kind == cgAddr_Map) {
+				// Map values are not addressable in place: load, spill to a
+				// local, then take the field address inside the local copy.
+				cgValue v = cg_addr_load(p, addr);
+				cgValue a = cg_address_from_load_or_generate_local(p, v);
+				a = cg_emit_deep_field_gep(p, a, sel);
+				return cg_addr(a);
+			} else if (addr.kind == cgAddr_Context) {
+				GB_ASSERT(sel.index.count > 0);
+				// NOTE(review): `>= 0` is always true — presumably meant
+				// `> 0`; harmless since combining with an empty selection
+				// is a no-op, but confirm against the LLVM backend.
+				if (addr.ctx.sel.index.count >= 0) {
+					sel = selection_combine(addr.ctx.sel, sel);
+				}
+				addr.ctx.sel = sel;
+				addr.kind = cgAddr_Context;
+				return addr;
+			} else if (addr.kind == cgAddr_SoaVariable) {
+				// Field access on an #soa variable: index into the per-field
+				// array (fixed) or pointer (slice/dynamic) at sel.index[0],
+				// then walk any remaining selection path.
+				cgValue index = addr.soa.index;
+				i64 first_index = sel.index[0];
+				Selection sub_sel = sel;
+				sub_sel.index.data += 1;
+				sub_sel.index.count -= 1;
+
+				cgValue arr = cg_emit_struct_ep(p, addr.addr, first_index);
+
+				Type *t = base_type(type_deref(addr.addr.type));
+				GB_ASSERT(is_type_soa_struct(t));
+
+				// TODO(bill): bounds checking for soa variable
+				// if (addr.soa.index_expr != nullptr && (!cg_is_const(addr.soa.index) || t->Struct.soa_kind != StructSoa_Fixed)) {
+				// 	cgValue len = cg_soa_struct_len(p, addr.addr);
+				// 	cg_emit_bounds_check(p, ast_token(addr.soa.index_expr), addr.soa.index, len);
+				// }
+
+				cgValue item = {};
+
+				if (t->Struct.soa_kind == StructSoa_Fixed) {
+					item = cg_emit_array_ep(p, arr, index);
+				} else {
+					item = cg_emit_ptr_offset(p, cg_emit_load(p, arr), index);
+				}
+				if (sub_sel.index.count > 0) {
+					item = cg_emit_deep_field_gep(p, item, sub_sel);
+				}
+				item.type = alloc_type_pointer(type_deref(item.type, true));
+				return cg_addr(item);
+			} else if (addr.kind == cgAddr_Swizzle) {
+				GB_ASSERT(sel.index.count > 0);
+				// NOTE(bill): just patch the index in place
+				sel.index[0] = addr.swizzle.indices[sel.index[0]];
+			} else if (addr.kind == cgAddr_SwizzleLarge) {
+				GB_ASSERT(sel.index.count > 0);
+				// NOTE(bill): just patch the index in place
+				sel.index[0] = addr.swizzle.indices[sel.index[0]];
+			}
+
+			cgValue a = cg_addr_get_ptr(p, addr);
+			a = cg_emit_deep_field_gep(p, a, sel);
+			return cg_addr(a);
+		}
+	case_end;
+
+	case_ast_node(ce, CallExpr, expr);
+		// Taking the address of a call result: spill a register value to a
+		// local so it has storage; multi-valued results are not addressable.
+		cgValue res = cg_build_expr(p, expr);
+		switch (res.kind) {
+		case cgValue_Value:
+			return cg_addr(cg_address_from_load_or_generate_local(p, res));
+		case cgValue_Addr:
+			return cg_addr(res);
+		case cgValue_Multi:
+			GB_PANIC("cannot address a multi-valued expression");
+			break;
+		}
+	case_end;
+
+	case_ast_node(cl, CompoundLit, expr);
+		return cg_build_addr_compound_lit(p, expr);
+	case_end;
+
+	}
+
+	TokenPos token_pos = ast_token(expr).pos;
+	GB_PANIC("Unexpected address expression\n"
+	         "\tAst: %.*s @ "
+	         "%s\n",
+	         LIT(ast_strings[expr->kind]),
+	         token_pos_to_string(token_pos));
+
+	return {};
+}
diff --git a/src/tilde_proc.cpp b/src/tilde_proc.cpp
new file mode 100644
index 000000000..1981d32ce
--- /dev/null
+++ b/src/tilde_proc.cpp
@@ -0,0 +1,1307 @@
+// Return (building and caching on first use) the TB function prototype for
+// the procedure type `type`. Thread-safe: the per-module cache is guarded
+// by `proc_proto_mutex`.
+gb_internal TB_FunctionPrototype *cg_procedure_type_as_prototype(cgModule *m, Type *type) {
+	GB_ASSERT(is_type_proc(type));
+	mutex_lock(&m->proc_proto_mutex);
+	defer (mutex_unlock(&m->proc_proto_mutex));
+
+	// Named procedure types share the prototype of their base type.
+	if (type->kind == Type_Named) {
+		type = base_type(type);
+	}
+
+	TB_FunctionPrototype **cached = map_get(&m->proc_proto_map, type);
+	if (cached != nullptr) {
+		return *cached;
+	}
+
+	// Derive the prototype from the debug type so the ABI classification
+	// matches what the debug info describes.
+	TB_DebugType *debug_type = cg_debug_type_for_proc(m, type);
+	TB_FunctionPrototype *prototype = tb_prototype_from_dbg(m->mod, debug_type);
+	map_set(&m->proc_proto_map, type, prototype);
+	return prototype;
+}
+
+// Create (or look up) the cgProcedure for `entity`, registering its symbol,
+// value, and member entries on the module. Returns nullptr when the caller
+// wants a body (`!ignore_body`) but the procedure's function has already
+// been created, so no duplicate work is needed.
+gb_internal cgProcedure *cg_procedure_create(cgModule *m, Entity *entity, bool ignore_body) {
+	GB_ASSERT(entity != nullptr);
+	GB_ASSERT(entity->kind == Entity_Procedure);
+	if (!entity->Procedure.is_foreign) {
+		// A non-foreign procedure must have had its body checked by now;
+		// an unchecked body usually means an unspecialized parapoly proc.
+		if ((entity->flags & EntityFlag_ProcBodyChecked) == 0) {
+			GB_PANIC("%.*s :: %s (was parapoly: %d %d)", LIT(entity->token.string), type_to_string(entity->type), is_type_polymorphic(entity->type, true), is_type_polymorphic(entity->type, false));
+		}
+	}
+
+	String link_name = cg_get_entity_name(m, entity);
+
+	// Deduplicate by link name: if a member with this name already exists,
+	// reuse its cgProcedure rather than creating a second one.
+	cgProcedure *p = nullptr;
+	{
+		StringHashKey key = string_hash_string(link_name);
+		cgValue *found = string_map_get(&m->members, key);
+		if (found) {
+			cg_add_entity(m, entity, *found);
+			rw_mutex_lock(&m->values_mutex);
+			p = string_map_must_get(&m->procedures, key);
+			rw_mutex_unlock(&m->values_mutex);
+			if (!ignore_body && p->func != nullptr) {
+				return nullptr;
+			}
+		}
+	}
+
+	if (p == nullptr) {
+		p = gb_alloc_item(permanent_allocator(), cgProcedure);
+	}
+
+	p->module = m;
+	p->entity = entity;
+	p->name = link_name;
+
+	DeclInfo *decl = entity->decl_info;
+
+	ast_node(pl, ProcLit, decl->proc_lit);
+	Type *pt = base_type(entity->type);
+	GB_ASSERT(pt->kind == Type_Proc);
+
+	p->type           = entity->type;
+	p->type_expr      = decl->type_expr;
+	p->body           = pl->body;
+	p->inlining       = pl->inlining;
+	p->is_foreign     = entity->Procedure.is_foreign;
+	p->is_export      = entity->Procedure.is_export;
+	p->is_entry_point = false;
+	p->split_returns_index = -1;
+
+	gbAllocator a = heap_allocator();
+	p->children.allocator      = a;
+
+	p->defer_stack.allocator   = a;
+	p->scope_stack.allocator   = a;
+	p->context_stack.allocator = a;
+
+	p->control_regions.allocator = a;
+	p->branch_regions.allocator  = a;
+
+	map_init(&p->variable_map);
+
+	// Linkage: exported procs are public; foreign procs (and declarations
+	// created with ignore_body) become external symbols instead of bodies.
+	TB_Linkage linkage = TB_LINKAGE_PRIVATE;
+	if (p->is_export) {
+		linkage = TB_LINKAGE_PUBLIC;
+	} else if (p->is_foreign || ignore_body) {
+		if (ignore_body) {
+			linkage = TB_LINKAGE_PUBLIC;
+		}
+		p->symbol = cast(TB_Symbol *)tb_extern_create(m->mod, link_name.len, cast(char const *)link_name.text, TB_EXTERNAL_SO_LOCAL);
+	}
+	if (p->name == "main") {
+		// TODO(bill): figure out when this should be public or not
+		linkage = TB_LINKAGE_PUBLIC;
+	}
+
+	// No external symbol was made above, so create a real function body.
+	if (p->symbol == nullptr) {
+		p->func  = tb_function_create(m->mod, link_name.len, cast(char const *)link_name.text, linkage, TB_COMDAT_NONE);
+
+		p->debug_type = cg_debug_type_for_proc(m, p->type);
+		p->proto = tb_prototype_from_dbg(m->mod, p->debug_type);
+
+		p->symbol = cast(TB_Symbol *)p->func;
+	}
+
+	p->value = cg_value(p->symbol, p->type);
+
+	cg_add_symbol(m, entity, p->symbol);
+	cg_add_entity(m, entity, p->value);
+	cg_add_member(m, p->name, p->value);
+	cg_add_procedure_value(m, p);
+
+
+	return p;
+}
+
+// Create a compiler-internal procedure with no backing Entity or AST body
+// (e.g. startup/helper routines). The link name must be unused; the new
+// procedure is registered as a module member and procedure value.
+gb_internal cgProcedure *cg_procedure_create_dummy(cgModule *m, String const &link_name, Type *type) {
+	auto *prev_found = string_map_get(&m->members, link_name);
+	GB_ASSERT_MSG(prev_found == nullptr, "failed to create dummy procedure for: %.*s", LIT(link_name));
+
+	cgProcedure *p = gb_alloc_item(permanent_allocator(), cgProcedure);
+
+	p->module = m;
+	p->name = link_name;
+
+	p->type           = type;
+	p->type_expr      = nullptr;
+	p->body           = nullptr;
+	p->tags           = 0;
+	p->inlining       = ProcInlining_none;
+	p->is_foreign     = false;
+	p->is_export      = false;
+	p->is_entry_point = false;
+	p->split_returns_index = -1;
+
+	gbAllocator a = heap_allocator();
+	p->children.allocator      = a;
+
+	p->defer_stack.allocator   = a;
+	p->scope_stack.allocator   = a;
+	p->context_stack.allocator = a;
+
+	p->control_regions.allocator = a;
+	p->branch_regions.allocator  = a;
+
+	map_init(&p->variable_map);
+
+
+	// Dummy procedures are never exported.
+	TB_Linkage linkage = TB_LINKAGE_PRIVATE;
+
+	p->func  = tb_function_create(m->mod, link_name.len, cast(char const *)link_name.text, linkage, TB_COMDAT_NONE);
+
+	p->debug_type = cg_debug_type_for_proc(m, p->type);
+	p->proto = tb_prototype_from_dbg(m->mod, p->debug_type);
+
+	p->symbol = cast(TB_Symbol *)p->func;
+
+	cgValue proc_value = cg_value(p->symbol, p->type);
+	cg_add_member(m, p->name, proc_value);
+	cg_add_procedure_value(m, p);
+
+	return p;
+}
+
+// Create (or fetch from the per-module cache) the procedure for an anonymous
+// procedure literal, synthesizing a unique name and a fresh Entity for it.
+// The whole operation is serialized by `anonymous_proc_lits_mutex`.
+gb_internal cgProcedure *cg_procedure_generate_anonymous(cgModule *m, Ast *expr, cgProcedure *parent) {
+	expr = unparen_expr(expr);
+	ast_node(pl, ProcLit, expr);
+
+	mutex_lock(&m->anonymous_proc_lits_mutex);
+	defer (mutex_unlock(&m->anonymous_proc_lits_mutex));
+
+	cgProcedure **found = map_get(&m->anonymous_proc_lits_map, expr);
+	if (found) {
+		return *found;
+	}
+
+	// NOTE(review): `pos` is unused below (ast_token is re-queried for the
+	// entity token) — candidate for removal.
+	TokenPos pos = ast_token(expr).pos;
+
+	// NOTE(bill): Generate a new name
+	// parent$count
+
+	String prefix_name = str_lit("proc_lit");
+	if (parent) {
+		prefix_name = parent->name;
+	}
+
+	// "$anon-" is 6 chars; a 32-bit id needs at most 11 (sign + 10 digits).
+	isize name_len = prefix_name.len + 6 + 11;
+	char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
+
+	static std::atomic<i32> name_id;
+	name_len = gb_snprintf(name_text, name_len, "%.*s$anon-%d", LIT(prefix_name), 1+name_id.fetch_add(1));
+	String name = make_string((u8 *)name_text, name_len-1);
+
+	Type *type = type_of_expr(expr);
+
+	GB_ASSERT(pl->decl->entity == nullptr);
+	Token token = {};
+	token.pos = ast_token(expr).pos;
+	token.kind = Token_Ident;
+	token.string = name;
+	Entity *e = alloc_entity_procedure(nullptr, token, type, pl->tags);
+	e->file = expr->file();
+
+	// NOTE(bill): this is to prevent a race condition since these procedure literals can be created anywhere at any time
+	e->decl_info = pl->decl;
+	pl->decl->entity = e;
+	e->flags |= EntityFlag_ProcBodyChecked;
+
+	cgProcedure *p = cg_procedure_create(m, e);
+
+	// Cache before queueing so concurrent lookups find the same procedure.
+	map_set(&m->anonymous_proc_lits_map, expr, p);
+
+	if (parent != nullptr) {
+		array_add(&parent->children, p);
+	}
+
+	cg_add_procedure_to_queue(p);
+	return p;
+
+}
+
+// Set up `p->func` for body generation: install the prototype, record branch
+// labels, classify each parameter under the target ABI and bind it to a
+// local, push the implicit context pointer (Odin CC), and zero/initialize
+// named results. No-op for symbols without a function body.
+gb_internal void cg_procedure_begin(cgProcedure *p) {
+	if (p == nullptr || p->func == nullptr) {
+		return;
+	}
+
+	tb_function_set_prototype(p->func, p->proto, cg_arena());
+
+	if (p->body == nullptr) {
+		return;
+	}
+
+
+	// Pre-register every labelled block so branch statements can target it.
+	DeclInfo *decl = decl_info_of_entity(p->entity);
+	if (decl != nullptr) {
+		for_array(i, decl->labels) {
+			BlockLabel bl = decl->labels[i];
+			cgBranchRegions bb = {bl.label, nullptr, nullptr};
+			array_add(&p->branch_regions, bb);
+		}
+	}
+
+	GB_ASSERT(p->type->kind == Type_Proc);
+	TypeProc *pt = &p->type->Proc;
+	bool is_odin_like_cc = is_calling_convention_odin(pt->calling_convention);
+	int param_index = 0;
+	int param_count = p->proto->param_count;
+
+	// When the (final) result is returned indirectly, the hidden result
+	// pointer occupies the first parameter slot.
+	if (pt->results) {
+		Type *result_type = nullptr;
+		if (is_odin_like_cc) {
+			// Odin-like CCs split returns: only the last result uses the
+			// normal return channel; the rest become out-pointers later.
+			result_type = pt->results->Tuple.variables[pt->results->Tuple.variables.count-1]->type;
+		} else {
+			result_type = pt->results;
+		}
+		TB_DebugType *debug_type = cg_debug_type(p->module, result_type);
+		TB_PassingRule rule = tb_get_passing_rule_from_dbg(p->module->mod, debug_type, true);
+		if (rule == TB_PASSING_INDIRECT) {
+			p->return_by_ptr = true;
+			param_index++;
+		}
+	}
+
+	if (pt->params != nullptr) for (Entity *e : pt->params->Tuple.variables) {
+		if (e->kind != Entity_Variable) {
+			continue;
+		}
+
+		GB_ASSERT_MSG(param_index < param_count, "%d < %d %.*s :: %s", param_index, param_count, LIT(p->name), type_to_string(p->type));
+
+		TB_Node *param_ptr = nullptr;
+
+		TB_CharUnits size  = cast(TB_CharUnits)type_size_of(e->type);
+		TB_CharUnits align = cast(TB_CharUnits)type_align_of(e->type);
+		TB_DebugType *debug_type = cg_debug_type(p->module, e->type);
+		TB_PassingRule rule = tb_get_passing_rule_from_dbg(p->module->mod, debug_type, false);
+		switch (rule) {
+		case TB_PASSING_DIRECT: {
+			// Register-passed: spill into a fresh local so the body can
+			// treat every parameter uniformly as an address.
+			TB_Node *param = tb_inst_param(p->func, param_index++);
+			param_ptr = tb_inst_local(p->func, size, align);
+			tb_inst_store(p->func, param->dt, param_ptr, param, align, false);
+		} break;
+		case TB_PASSING_INDIRECT:
+			// TODO(bill): does this need a copy? for non-odin calling convention stuff?
+			param_ptr = tb_inst_param(p->func, param_index++);
+			break;
+		case TB_PASSING_IGNORE:
+			// Zero-sized parameter: consumes no slot.
+			continue;
+		}
+
+		GB_ASSERT(param_ptr->dt.type == TB_PTR);
+
+		cgValue local = cg_value(param_ptr, alloc_type_pointer(e->type));
+
+		if (e != nullptr && e->token.string.len > 0 && e->token.string != "_") {
+			// NOTE(bill): for debugging purposes only
+			String name = e->token.string;
+			TB_DebugType *param_debug_type = debug_type;
+			TB_Node *    param_ptr_to_use  = param_ptr;
+			if (rule == TB_PASSING_INDIRECT) {
+				// HACK TODO(bill): this is just to get the debug information
+				TB_CharUnits ptr_size = cast(TB_CharUnits)build_context.ptr_size;
+				TB_Node *dummy_param = tb_inst_local(p->func, ptr_size, ptr_size);
+				tb_inst_store(p->func, TB_TYPE_PTR, dummy_param, param_ptr, ptr_size, false);
+				param_ptr_to_use = dummy_param;
+				param_debug_type = tb_debug_create_ptr(p->module->mod, param_debug_type);
+			}
+			tb_node_append_attrib(
+				param_ptr_to_use,
+				tb_function_attrib_variable(
+					p->func,
+					name.len, cast(char const *)name.text,
+					param_debug_type
+				)
+			);
+		}
+		cgAddr addr = cg_addr(local);
+		if (e) {
+			map_set(&p->variable_map, e, addr);
+		}
+	}
+
+	// Out-pointers for the split returns (Odin-like CCs) start right after
+	// the declared parameters.
+	if (is_odin_like_cc) {
+		p->split_returns_index = param_index;
+	}
+
+	if (pt->calling_convention == ProcCC_Odin) {
+		// NOTE(bill): Push context on to stack from implicit parameter
+		// (always the final prototype parameter).
+
+		String name = str_lit("__.context_ptr");
+
+		Entity *e = alloc_entity_param(nullptr, make_token_ident(name), t_context_ptr, false, false);
+		e->flags |= EntityFlag_NoAlias;
+
+		TB_Node *param_ptr = tb_inst_param(p->func, param_count-1);
+		cgValue local = cg_value(param_ptr, t_context_ptr);
+		cgAddr addr = cg_addr(local);
+		map_set(&p->variable_map, e, addr);
+
+
+		cgContextData *cd = array_add_and_get(&p->context_stack);
+		cd->ctx = addr;
+		cd->scope_index = -1;
+		cd->uses = +1; // make sure it has been used already
+	}
+
+	// Named results get dedicated (zeroed) locals, plus any default
+	// parameter-value initialization.
+	if (pt->has_named_results) {
+		auto const &results = pt->results->Tuple.variables;
+		for_array(i, results) {
+			Entity *e = results[i];
+			GB_ASSERT(e->kind == Entity_Variable);
+
+			if (e->token.string == "") {
+				continue;
+			}
+			GB_ASSERT(!is_blank_ident(e->token));
+
+			cgAddr res = cg_add_local(p, e->type, e, true);
+
+			if (e->Variable.param_value.kind != ParameterValue_Invalid) {
+				cgValue c = cg_handle_param_value(p, e->type, e->Variable.param_value, e->token.pos);
+				cg_addr_store(p, res, c);
+			}
+		}
+	}
+}
+
+
+// Worker task: run (currently disabled) optimization passes on a finished
+// function and codegen it. The `if (false)` blocks are developer toggles
+// for optimization, IR dumping, GraphViz output, and assembly printing.
+gb_internal WORKER_TASK_PROC(cg_procedure_compile_worker_proc) {
+	cgProcedure *p = cast(cgProcedure *)data;
+
+	TB_Passes *opt = tb_pass_enter(p->func, cg_arena());
+	defer (tb_pass_exit(opt));
+
+	// optimization passes (disabled for now)
+	if (false) {
+		tb_pass_peephole(opt);
+		tb_pass_mem2reg(opt);
+		tb_pass_peephole(opt);
+	}
+
+	// Flip the condition (or re-enable the name filter) to dump the
+	// generated assembly for specific procedures while debugging.
+	bool emit_asm = false;
+	if (
+		// string_starts_with(p->name, str_lit("runtime@_windows_default_alloc_or_resize")) ||
+		false
+	) {
+		emit_asm = true;
+	}
+
+	// emit ir
+	if (
+		// string_starts_with(p->name, str_lit("bug@main")) ||
+		// p->name == str_lit("runtime@_windows_default_alloc_or_resize") ||
+		false
+	) { // IR Printing
+		TB_Arena *arena = cg_arena();
+		TB_Passes *passes = tb_pass_enter(p->func, arena);
+		defer (tb_pass_exit(passes));
+
+		tb_pass_print(passes);
+		fprintf(stdout, "\n");
+	}
+	if (false) { // GraphViz printing
+		tb_function_print(p->func, tb_default_print_callback, stdout);
+	}
+
+	// compile
+	TB_FunctionOutput *output = tb_pass_codegen(opt, emit_asm);
+	if (emit_asm) {
+		tb_output_print_asm(output, stdout);
+		fprintf(stdout, "\n");
+	}
+
+	return 0;
+}
+
+// Finish a procedure body: terminate any still-open control region, then
+// hand the function to the compile worker (threaded or inline).
+gb_internal void cg_procedure_end(cgProcedure *p) {
+	if (p == nullptr || p->func == nullptr) {
+		return;
+	}
+	if (tb_inst_get_control(p->func) != nullptr) {
+		// A body with results that reaches here must have returned on every
+		// path already, so the open region is unreachable; a void body just
+		// gets its implicit `return`.
+		bool has_results = p->type->Proc.result_count != 0;
+		if (has_results) {
+			tb_inst_unreachable(p->func);
+		} else {
+			tb_inst_ret(p->func, 0, nullptr);
+		}
+	}
+
+	if (!p->module->do_threading) {
+		cg_procedure_compile_worker_proc(p);
+	} else {
+		thread_pool_add_task(cg_procedure_compile_worker_proc, p);
+	}
+}
+
+// Generate code for a procedure with a body: prologue, statements, epilogue.
+// Procedures without a body (foreign/extern declarations) are skipped.
+gb_internal void cg_procedure_generate(cgProcedure *p) {
+	if (p->body != nullptr) {
+		cg_procedure_begin(p);
+		cg_build_stmt(p, p->body);
+		cg_procedure_end(p);
+	}
+}
+
+// Create and queue a procedure nested inside `p`'s body. Skipped entirely if
+// nothing in the minimum dependency set needs it. The nested procedure gets
+// a synthesized link name of the form `parent::name-guid`.
+gb_internal void cg_build_nested_proc(cgProcedure *p, AstProcLit *pd, Entity *e) {
+	GB_ASSERT(pd->body != nullptr);
+	cgModule *m = p->module;
+	auto *min_dep_set = &m->info->minimum_dependency_set;
+
+	if (ptr_set_exists(min_dep_set, e) == false) {
+		// NOTE(bill): Nothing depends upon it so doesn't need to be built
+		return;
+	}
+
+	// NOTE(bill): Generate a new name
+	// parent.name-guid
+	String original_name = e->token.string;
+	String pd_name = original_name;
+	if (e->Procedure.link_name.len > 0) {
+		pd_name = e->Procedure.link_name;
+	}
+
+
+	// parent + separator + name + '-' + up to 10 digit guid + NUL.
+	isize name_len = p->name.len + 1 + pd_name.len + 1 + 10 + 1;
+	char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
+
+	// The child count doubles as a guid unique within this parent.
+	i32 guid = cast(i32)p->children.count;
+	name_len = gb_snprintf(name_text, name_len, "%.*s" ABI_PKG_NAME_SEPARATOR "%.*s-%d", LIT(p->name), LIT(pd_name), guid);
+	String name = make_string(cast(u8 *)name_text, name_len-1);
+
+	e->Procedure.link_name = name;
+
+	cgProcedure *nested_proc = cg_procedure_create(p->module, e);
+	e->cg_procedure = nested_proc;
+
+	cgValue value = nested_proc->value;
+
+	cg_add_entity(m, e, value);
+	array_add(&p->children, nested_proc);
+	cg_add_procedure_to_queue(nested_proc);
+}
+
+
+
+
+
+// Look up the already-registered value for procedure entity `e` (after
+// stripping any entity wrapping). Panics if the procedure was never
+// registered with the module.
+gb_internal cgValue cg_find_procedure_value_from_entity(cgModule *m, Entity *e) {
+	GB_ASSERT(is_type_proc(e->type));
+	e = strip_entity_wrapping(e);
+	GB_ASSERT(e != nullptr);
+	GB_ASSERT(e->kind == Entity_Procedure);
+
+	// Reader lock only: the values map may be written concurrently.
+	rw_mutex_shared_lock(&m->values_mutex);
+	cgValue *value_ptr = map_get(&m->values, e);
+	rw_mutex_shared_unlock(&m->values_mutex);
+
+	if (value_ptr != nullptr) {
+		GB_ASSERT(value_ptr->node != nullptr);
+		return *value_ptr;
+	}
+
+	GB_PANIC("Error in: %s, missing procedure %.*s\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
+	return {};
+}
+
+
+
+gb_internal cgValue cg_build_call_expr_internal(cgProcedure *p, Ast *expr);
+// Build a call expression, collapsing `#optional_ok` two-value results
+// down to just the value when only the value was requested.
+gb_internal cgValue cg_build_call_expr(cgProcedure *p, Ast *expr) {
+	expr = unparen_expr(expr);
+	ast_node(ce, CallExpr, expr);
+
+	cgValue result = cg_build_call_expr_internal(p, expr);
+	if (!ce->optional_ok_one) {
+		return result;
+	}
+
+	// TODO(bill): Minor hack for #optional_ok procedures — the call yields
+	// (value, ok) but only the value is wanted here.
+	GB_ASSERT(result.kind == cgValue_Multi);
+	GB_ASSERT(result.multi->values.count == 2);
+	return result.multi->values[0];
+}
+
+gb_internal cgValue cg_emit_call(cgProcedure * p, cgValue value, Slice<cgValue> const &args) {
+ if (value.kind == cgValue_Symbol) {
+ value = cg_value(tb_inst_get_symbol_address(p->func, value.symbol), value.type);
+ }
+ GB_ASSERT(value.kind == cgValue_Value);
+ TEMPORARY_ALLOCATOR_GUARD();
+
+ TB_Module *m = p->module->mod;
+
+
+ Type *type = base_type(value.type);
+ GB_ASSERT(type->kind == Type_Proc);
+ TypeProc *pt = &type->Proc;
+ gb_unused(pt);
+
+ TB_FunctionPrototype *proto = cg_procedure_type_as_prototype(p->module, type);
+ TB_Node *target = value.node;
+ auto params = slice_make<TB_Node *>(temporary_allocator(), proto->param_count);
+
+
+ GB_ASSERT(build_context.metrics.os == TargetOs_windows);
+ // TODO(bill): Support more than Win64 ABI
+
+ bool is_odin_like_cc = is_calling_convention_odin(pt->calling_convention);
+
+ bool return_is_indirect = false;
+
+ Slice<Entity *> result_entities = {};
+ Slice<Entity *> param_entities = {};
+ if (pt->results) {
+ result_entities = pt->results->Tuple.variables;
+ }
+ if (pt->params) {
+ param_entities = pt->params->Tuple.variables;
+ }
+
+ isize param_index = 0;
+ if (pt->result_count != 0) {
+ Type *return_type = nullptr;
+ if (is_odin_like_cc) {
+ return_type = result_entities[result_entities.count-1]->type;
+ } else {
+ return_type = pt->results;
+ }
+ TB_DebugType *dbg = cg_debug_type(p->module, return_type);
+ TB_PassingRule rule = tb_get_passing_rule_from_dbg(m, dbg, true);
+ if (rule == TB_PASSING_INDIRECT) {
+ return_is_indirect = true;
+ TB_CharUnits size = cast(TB_CharUnits)type_size_of(return_type);
+ TB_CharUnits align = cast(TB_CharUnits)gb_max(type_align_of(return_type), 16);
+ TB_Node *local = tb_inst_local(p->func, size, align);
+ tb_inst_memzero(p->func, local, tb_inst_uint(p->func, TB_TYPE_INT, size), align, false);
+ params[param_index++] = local;
+ }
+ }
+ isize param_entity_index = 0;
+ for_array(i, args) {
+ Entity *param_entity = nullptr;
+ do {
+ param_entity = param_entities[param_entity_index++];
+ } while (param_entity->kind != Entity_Variable);
+ Type *param_type = param_entity->type;
+ cgValue arg = args[i];
+ arg = cg_emit_conv(p, arg, param_type);
+ arg = cg_flatten_value(p, arg);
+
+ TB_Node *param = nullptr;
+
+ TB_DebugType *dbg = cg_debug_type(p->module, param_type);
+ TB_PassingRule rule = tb_get_passing_rule_from_dbg(m, dbg, false);
+ switch (rule) {
+ case TB_PASSING_DIRECT:
+ GB_ASSERT(arg.kind == cgValue_Value);
+ param = arg.node;
+ break;
+ case TB_PASSING_INDIRECT:
+ {
+ cgValue arg_ptr = {};
+ // indirect
+ if (is_odin_like_cc) {
+ arg_ptr = cg_address_from_load_or_generate_local(p, arg);
+ } else {
+ arg_ptr = cg_copy_value_to_ptr(p, arg, param_type, 16);
+ }
+ GB_ASSERT(arg_ptr.kind == cgValue_Value);
+ param = arg_ptr.node;
+ }
+ break;
+ case TB_PASSING_IGNORE:
+ continue;
+ }
+
+ params[param_index++] = param;
+ }
+
+ // Split returns
+ isize split_offset = -1;
+ if (is_odin_like_cc) {
+ split_offset = param_index;
+ for (isize i = 0; i < pt->result_count-1; i++) {
+ Type *result = result_entities[i]->type;
+ TB_CharUnits size = cast(TB_CharUnits)type_size_of(result);
+ TB_CharUnits align = cast(TB_CharUnits)gb_max(type_align_of(result), 16);
+ TB_Node *local = tb_inst_local(p->func, size, align);
+ // TODO(bill): Should this need to be zeroed any way?
+ tb_inst_memzero(p->func, local, tb_inst_uint(p->func, TB_TYPE_INT, size), align, false);
+ params[param_index++] = local;
+ }
+ }
+
+ if (pt->calling_convention == ProcCC_Odin) {
+ cgValue ctx_ptr = cg_find_or_generate_context_ptr(p).addr;
+ GB_ASSERT(ctx_ptr.kind == cgValue_Value);
+ params[param_index++] = ctx_ptr.node;
+ }
+ GB_ASSERT_MSG(param_index == params.count, "%td vs %td\n %s %u %u",
+ param_index, params.count,
+ type_to_string(type),
+ proto->return_count,
+ proto->param_count);
+
+ for (TB_Node *param : params) {
+ GB_ASSERT(param != nullptr);
+ }
+
+ GB_ASSERT(target != nullptr);
+ TB_MultiOutput multi_output = tb_inst_call(p->func, proto, target, params.count, params.data);
+ gb_unused(multi_output);
+
+ switch (pt->result_count) {
+ case 0:
+ return {};
+ case 1:
+ if (return_is_indirect) {
+ return cg_lvalue_addr(params[0], pt->results->Tuple.variables[0]->type);
+ } else {
+ GB_ASSERT(multi_output.count == 1);
+ TB_Node *node = multi_output.single;
+ return cg_value(node, pt->results->Tuple.variables[0]->type);
+ }
+ }
+
+ cgValueMulti *multi = gb_alloc_item(permanent_allocator(), cgValueMulti);
+ multi->values = slice_make<cgValue>(permanent_allocator(), pt->result_count);
+
+ if (is_odin_like_cc) {
+ GB_ASSERT(split_offset >= 0);
+ for (isize i = 0; i < pt->result_count-1; i++) {
+ multi->values[i] = cg_lvalue_addr(params[split_offset+i], result_entities[i]->type);
+ }
+
+ Type *end_type = result_entities[pt->result_count-1]->type;
+ if (return_is_indirect) {
+ multi->values[pt->result_count-1] = cg_lvalue_addr(params[0], end_type);
+ } else {
+ GB_ASSERT(multi_output.count == 1);
+ TB_DataType dt = cg_data_type(end_type);
+ TB_Node *res = multi_output.single;
+ if (res->dt.raw != dt.raw) {
+ // struct-like returns passed in registers
+ TB_CharUnits size = cast(TB_CharUnits)type_size_of(end_type);
+ TB_CharUnits align = cast(TB_CharUnits)type_align_of(end_type);
+ TB_Node *addr = tb_inst_local(p->func, size, align);
+ tb_inst_store(p->func, res->dt, addr, res, align, false);
+ multi->values[pt->result_count-1] = cg_lvalue_addr(addr, end_type);
+ } else {
+ multi->values[pt->result_count-1] = cg_value(res, end_type);
+ }
+ }
+ } else {
+ TB_Node *the_tuple = {};
+ if (return_is_indirect) {
+ the_tuple = params[0];
+ } else {
+ GB_ASSERT(multi_output.count == 1);
+ TB_Node *res = multi_output.single;
+
+ // struct-like returns passed in registers
+ TB_CharUnits size = cast(TB_CharUnits)type_size_of(pt->results);
+ TB_CharUnits align = cast(TB_CharUnits)type_align_of(pt->results);
+ the_tuple = tb_inst_local(p->func, size, align);
+ tb_inst_store(p->func, res->dt, the_tuple, res, align, false);
+ }
+ for (isize i = 0; i < pt->result_count; i++) {
+ i64 offset = type_offset_of(pt->results, i, nullptr);
+ TB_Node *ptr = tb_inst_member_access(p->func, the_tuple, offset);
+ multi->values[i] = cg_lvalue_addr(ptr, result_entities[i]->type);
+ }
+ }
+
+ return cg_value_multi(multi, pt->results);
+}
+
+// Look up procedure `name` in the runtime package's current scope and emit a
+// call to it with `args`. The entity lookup is unchecked here; downstream
+// code asserts on a null procedure value.
+gb_internal cgValue cg_emit_runtime_call(cgProcedure *p, char const *name, Slice<cgValue> const &args) {
+	AstPackage *pkg = p->module->info->runtime_package;
+	Entity *e = scope_lookup_current(pkg->scope, make_string_c(name));
+	cgValue value = cg_find_procedure_value_from_entity(p->module, e);
+	return cg_emit_call(p, value, args);
+}
+
+// Materialise the value of a defaulted procedure parameter at a call site.
+// `param_value` is the checker's description of the default; `pos` is the
+// call position (used for #caller_location-style defaults).
+gb_internal cgValue cg_handle_param_value(cgProcedure *p, Type *parameter_type, ParameterValue const &param_value, TokenPos const &pos) {
+	switch (param_value.kind) {
+	case ParameterValue_Constant:
+		if (is_type_constant_type(parameter_type)) {
+			auto res = cg_const_value(p, parameter_type, param_value.value);
+			return res;
+		} else {
+			// Parameter type cannot hold a constant directly (e.g. `any`):
+			// build the constant at its original expression type when known,
+			// then convert to the parameter type.
+			ExactValue ev = param_value.value;
+			cgValue arg = {};
+			Type *type = type_of_expr(param_value.original_ast_expr);
+			if (type != nullptr) {
+				arg = cg_const_value(p, type, ev);
+			} else {
+				arg = cg_const_value(p, parameter_type, param_value.value);
+			}
+			return cg_emit_conv(p, arg, parameter_type);
+		}
+
+	case ParameterValue_Nil:
+		return cg_const_nil(p, parameter_type);
+	case ParameterValue_Location:
+		{
+			// Synthesise a source-code-location global for the call site.
+			String proc_name = {};
+			if (p->entity != nullptr) {
+				proc_name = p->entity->token.string;
+			}
+			return cg_emit_source_code_location_as_global(p, proc_name, pos);
+		}
+	case ParameterValue_Value:
+		// Default expression evaluated at the call site.
+		return cg_build_expr(p, param_value.ast_value);
+	}
+	// Fallback for an unknown kind: a zero value of the parameter type.
+	return cg_const_nil(p, parameter_type);
+}
+
+// Lower a call expression to IR. Handles, in order: casts written in call
+// syntax `T(x)`, builtin procedures, disabled procedures, procedure values
+// known as constant integers/pointers, then regular calls — collecting
+// positional, variadic (both Odin-style slice packing and C varargs) and
+// named arguments, filling in default parameter values, and finally
+// dispatching to cg_emit_call.
+gb_internal cgValue cg_build_call_expr_internal(cgProcedure *p, Ast *expr) {
+	ast_node(ce, CallExpr, expr);
+
+	TypeAndValue tv = type_and_value_of_expr(expr);
+
+	TypeAndValue proc_tv = type_and_value_of_expr(ce->proc);
+	AddressingMode proc_mode = proc_tv.mode;
+	// `T(x)` — a conversion written in call syntax.
+	if (proc_mode == Addressing_Type) {
+		GB_ASSERT(ce->args.count == 1);
+		cgValue x = cg_build_expr(p, ce->args[0]);
+		return cg_emit_conv(p, x, tv.type);
+	}
+
+	Ast *proc_expr = unparen_expr(ce->proc);
+	if (proc_mode == Addressing_Builtin) {
+		Entity *e = entity_of_node(proc_expr);
+		BuiltinProcId id = BuiltinProc_Invalid;
+		if (e != nullptr) {
+			id = cast(BuiltinProcId)e->Builtin.id;
+		} else {
+			id = BuiltinProc_DIRECTIVE;
+		}
+		if (id == BuiltinProc___entry_point) {
+			// Call the program entry point if one exists; otherwise a no-op.
+			if (p->module->info->entry_point) {
+				cgValue entry_point = cg_find_procedure_value_from_entity(p->module, p->module->info->entry_point);
+				GB_ASSERT(entry_point.node != nullptr);
+				cg_emit_call(p, entry_point, {});
+			}
+			return {};
+		}
+
+		return cg_build_builtin(p, id, expr);
+	}
+
+	// NOTE(bill): Regular call
+	cgValue value = {};
+
+	Entity *proc_entity = entity_of_node(proc_expr);
+	if (proc_entity != nullptr) {
+		// Calls to procedures disabled by attribute compile to nothing.
+		if (proc_entity->flags & EntityFlag_Disabled) {
+			GB_ASSERT(tv.type == nullptr);
+			return {};
+		}
+	}
+
+	// Procedure value known at compile time as a raw address
+	// (integer or pointer constant cast to a procedure type).
+	if (proc_expr->tav.mode == Addressing_Constant) {
+		ExactValue v = proc_expr->tav.value;
+		switch (v.kind) {
+		case ExactValue_Integer:
+			{
+				u64 u = big_int_to_u64(&v.value_integer);
+				cgValue x = cg_value(tb_inst_uint(p->func, TB_TYPE_PTR, u), t_rawptr);
+				value = cg_emit_conv(p, x, proc_expr->tav.type);
+				break;
+			}
+		case ExactValue_Pointer:
+			{
+				u64 u = cast(u64)v.value_pointer;
+				cgValue x = cg_value(tb_inst_uint(p->func, TB_TYPE_PTR, u), t_rawptr);
+				value = cg_emit_conv(p, x, proc_expr->tav.type);
+				break;
+			}
+		}
+	}
+
+	if (value.node == nullptr) {
+		value = cg_build_expr(p, proc_expr);
+	}
+	// The callee must end up as a direct value of procedure type.
+	if (value.kind == cgValue_Addr) {
+		value = cg_emit_load(p, value);
+	}
+	GB_ASSERT(value.kind == cgValue_Value);
+	GB_ASSERT(value.node != nullptr);
+	GB_ASSERT(is_type_proc(value.type));
+
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	Type *proc_type_ = base_type(value.type);
+	GB_ASSERT(proc_type_->kind == Type_Proc);
+	TypeProc *pt = &proc_type_->Proc;
+
+	GB_ASSERT(ce->split_args != nullptr);
+
+	// Count only Entity_Variable parameters (skips $T type and constant params).
+	isize internal_param_count = 0;
+	if (pt->params) for (Entity *e : pt->params->Tuple.variables) {
+		if (e->kind == Entity_Variable) {
+			internal_param_count += 1;
+		}
+	}
+	GB_ASSERT(internal_param_count <= pt->param_count);
+
+	auto args = array_make<cgValue>(temporary_allocator(), 0, internal_param_count);
+
+	bool vari_expand = (ce->ellipsis.pos.line != 0);
+	bool is_c_vararg = pt->c_vararg;
+
+	// Positional arguments (tuples from multi-return calls are flattened).
+	for_array(i, ce->split_args->positional) {
+		Entity *e = pt->params->Tuple.variables[i];
+		if (e->kind == Entity_TypeName) {
+			continue;
+		} else if (e->kind == Entity_Constant) {
+			continue;
+		}
+
+		GB_ASSERT(e->kind == Entity_Variable);
+
+		if (pt->variadic && pt->variadic_index == i) {
+			cgValue variadic_args = cg_const_nil(p, e->type);
+			auto variadic = slice(ce->split_args->positional, pt->variadic_index, ce->split_args->positional.count);
+			if (variadic.count != 0) {
+				// variadic call argument generation
+				Type *slice_type = e->type;
+				GB_ASSERT(slice_type->kind == Type_Slice);
+
+				if (is_c_vararg) {
+					// C varargs: pass each argument individually, converted to
+					// the element type (or its default type when the element is `any`).
+					GB_ASSERT(!vari_expand);
+
+					Type *elem_type = slice_type->Slice.elem;
+
+					for (Ast *var_arg : variadic) {
+						cgValue arg = cg_build_expr(p, var_arg);
+						if (is_type_any(elem_type)) {
+							array_add(&args, cg_emit_conv(p, arg, default_type(arg.type)));
+						} else {
+							array_add(&args, cg_emit_conv(p, arg, elem_type));
+						}
+					}
+					break;
+				} else if (vari_expand) {
+					// `xs..` — a single slice expanded in place.
+					GB_ASSERT(variadic.count == 1);
+					variadic_args = cg_build_expr(p, variadic[0]);
+					variadic_args = cg_emit_conv(p, variadic_args, slice_type);
+				} else {
+					// Pack the remaining arguments into a stack-allocated
+					// backing array and build a slice over it.
+					Type *elem_type = slice_type->Slice.elem;
+
+					auto var_args = array_make<cgValue>(temporary_allocator(), 0, variadic.count);
+					for (Ast *var_arg : variadic) {
+						cgValue v = cg_build_expr(p, var_arg);
+						cg_append_tuple_values(p, &var_args, v);
+					}
+					isize slice_len = var_args.count;
+					if (slice_len > 0) {
+						cgAddr slice = cg_add_local(p, slice_type, nullptr, true);
+						cgAddr base_array = cg_add_local(p, alloc_type_array(elem_type, slice_len), nullptr, true);
+
+						for (isize i = 0; i < var_args.count; i++) {
+							cgValue addr = cg_emit_array_epi(p, base_array.addr, cast(i32)i);
+							cgValue var_arg = var_args[i];
+							var_arg = cg_emit_conv(p, var_arg, elem_type);
+							cg_emit_store(p, addr, var_arg);
+						}
+
+						cgValue base_elem = cg_emit_array_epi(p, base_array.addr, 0);
+						cgValue len = cg_const_int(p, t_int, slice_len);
+						cg_fill_slice(p, slice, base_elem, len);
+
+						variadic_args = cg_addr_load(p, slice);
+					}
+				}
+			}
+			array_add(&args, variadic_args);
+
+			break;
+		} else {
+			cgValue value = cg_build_expr(p, ce->split_args->positional[i]);
+			cg_append_tuple_values(p, &args, value);
+		}
+	}
+
+	// Grow to the full parameter count; unset slots have .node == nullptr
+	// and are filled from named args or parameter defaults below.
+	if (!is_c_vararg) {
+		array_resize(&args, internal_param_count);
+	}
+
+	for (Ast *arg : ce->split_args->named) {
+		ast_node(fv, FieldValue, arg);
+		GB_ASSERT(fv->field->kind == Ast_Ident);
+		String name = fv->field->Ident.token.string;
+		gb_unused(name);
+		isize param_index = lookup_procedure_parameter(pt, name);
+		GB_ASSERT(param_index >= 0);
+
+		cgValue value = cg_build_expr(p, fv->value);
+		GB_ASSERT(!is_type_tuple(value.type));
+		args[param_index] = value;
+	}
+
+	TokenPos pos = ast_token(ce->proc).pos;
+
+
+	// Fill remaining empty slots with parameter default values and convert
+	// every argument to its declared parameter type.
+	if (pt->params != nullptr) {
+		isize min_count = internal_param_count;
+		if (is_c_vararg) {
+			min_count -= 1;
+		}
+		GB_ASSERT_MSG(args.count >= min_count, "in %.*s", LIT(p->name));
+		isize arg_index = 0;
+		for_array(param_index, pt->params->Tuple.variables) {
+			Entity *e = pt->params->Tuple.variables[param_index];
+			if (e->kind == Entity_TypeName) {
+				continue;
+			} else if (e->kind == Entity_Constant) {
+				continue;
+			}
+			GB_ASSERT(e->kind == Entity_Variable);
+
+			if (pt->variadic && param_index == pt->variadic_index) {
+				// NOTE(review): arg_index is only advanced when the variadic
+				// slot was empty; presumably safe because the variadic
+				// parameter is the last variable parameter — confirm.
+				if (!is_c_vararg && args[arg_index].node == nullptr) {
+					args[arg_index++] = cg_const_nil(p, e->type);
+				}
+				continue;
+			}
+
+			cgValue arg = args[arg_index];
+			if (arg.node == nullptr) {
+				GB_ASSERT(e->kind == Entity_Variable);
+				args[arg_index++] = cg_handle_param_value(p, e->type, e->Variable.param_value, pos);
+			} else {
+				args[arg_index++] = cg_emit_conv(p, arg, e->type);
+			}
+		}
+	}
+
+	isize final_count = is_c_vararg ? args.count : internal_param_count;
+	auto call_args = slice(args, 0, final_count);
+
+	return cg_emit_call(p, value, call_args);
+}
+
+
+
+// Procedure value (symbol address) of the hasher procedure for `type`,
+// generating it on first use.
+gb_internal cgValue cg_hasher_proc_value_for_type(cgProcedure *p, Type *type) {
+	cgProcedure *found = cg_hasher_proc_for_type(p->module, type);
+	return cg_value(tb_inst_get_symbol_address(p->func, found->symbol), found->type);
+}
+
+// Procedure value (symbol address) of the equality procedure for `type`,
+// generating it on first use.
+gb_internal cgValue cg_equal_proc_value_for_type(cgProcedure *p, Type *type) {
+	cgProcedure *found = cg_equal_proc_for_type(p->module, type);
+	return cg_value(tb_inst_get_symbol_address(p->func, found->symbol), found->type);
+}
+
+
+
+// Return (generating and memoizing on first use) an equality procedure for
+// `type` with the `t_equal_proc` signature: it takes two pointers and returns
+// a boolean. Generated procedures are cached in m->equal_procs under the
+// module's generated-procs mutex.
+gb_internal cgProcedure *cg_equal_proc_for_type(cgModule *m, Type *type) {
+	type = base_type(type);
+	GB_ASSERT(is_type_comparable(type));
+
+	mutex_lock(&m->generated_procs_mutex);
+	defer (mutex_unlock(&m->generated_procs_mutex));
+
+	cgProcedure **found = map_get(&m->equal_procs, type);
+	if (found) {
+		return *found;
+	}
+
+	// Process-wide counter for unique generated names.
+	static std::atomic<u32> proc_index;
+
+	char buf[32] = {};
+	isize n = gb_snprintf(buf, 32, "__$equal%u", 1+proc_index.fetch_add(1));
+	char *str = gb_alloc_str_len(permanent_allocator(), buf, n-1);
+	String proc_name = make_string_c(str);
+
+
+	cgProcedure *p = cg_procedure_create_dummy(m, proc_name, t_equal_proc);
+	// Cache before generating the body so recursive types terminate.
+	map_set(&m->equal_procs, type, p);
+
+	cg_procedure_begin(p);
+
+	TB_Node *x = tb_inst_param(p->func, 0);
+	TB_Node *y = tb_inst_param(p->func, 1);
+	GB_ASSERT(x->dt.type == TB_PTR);
+	GB_ASSERT(y->dt.type == TB_PTR);
+
+	TB_DataType ret_dt = TB_PROTOTYPE_RETURNS(p->proto)->dt;
+
+	TB_Node *node_true  = tb_inst_uint(p->func, ret_dt, true);
+	TB_Node *node_false = tb_inst_uint(p->func, ret_dt, false);
+
+	// Fast path: identical pointers compare equal without inspecting data.
+	TB_Node *same_ptr_region = cg_control_region(p, "same_ptr");
+	TB_Node *diff_ptr_region = cg_control_region(p, "diff_ptr");
+
+	TB_Node *is_same_ptr = tb_inst_cmp_eq(p->func, x, y);
+	tb_inst_if(p->func, is_same_ptr, same_ptr_region, diff_ptr_region);
+
+	tb_inst_set_control(p->func, same_ptr_region);
+	tb_inst_ret(p->func, 1, &node_true);
+
+	tb_inst_set_control(p->func, diff_ptr_region);
+
+	Type *pt = alloc_type_pointer(type);
+	cgValue lhs = cg_value(x, pt);
+	cgValue rhs = cg_value(y, pt);
+
+	if (type->kind == Type_Struct) {
+		// Field-by-field comparison: any unequal field jumps to `bfalse`.
+		type_set_offsets(type);
+
+		TB_Node *false_region = cg_control_region(p, "bfalse");
+		// NOTE(review): `res` appears unused below — presumably leftover; confirm.
+		cgValue res = cg_const_bool(p, t_bool, true);
+
+		for_array(i, type->Struct.fields) {
+			TB_Node *next_region = cg_control_region(p, "btrue");
+
+			cgValue plhs  = cg_emit_struct_ep(p, lhs, i);
+			cgValue prhs  = cg_emit_struct_ep(p, rhs, i);
+			cgValue left  = cg_emit_load(p, plhs);
+			cgValue right = cg_emit_load(p, prhs);
+			cgValue ok = cg_emit_comp(p, Token_CmpEq, left, right);
+
+			cg_emit_if(p, ok, next_region, false_region);
+
+			// NOTE(review): this goto looks redundant after cg_emit_if has
+			// terminated the current block — presumably a no-op; confirm.
+			cg_emit_goto(p, next_region);
+			tb_inst_set_control(p->func, next_region);
+		}
+
+		tb_inst_ret(p->func, 1, &node_true);
+		tb_inst_set_control(p->func, false_region);
+		tb_inst_ret(p->func, 1, &node_false);
+
+	} else if (type->kind == Type_Union) {
+		if (type_size_of(type) == 0) {
+			// Zero-sized unions are trivially equal.
+			tb_inst_ret(p->func, 1, &node_true);
+		} else if (is_type_union_maybe_pointer(type)) {
+			// Pointer-like union: compare the single variant directly.
+			Type *v = type->Union.variants[0];
+			Type *pv = alloc_type_pointer(v);
+
+			cgValue left  = cg_emit_load(p, cg_emit_conv(p, lhs, pv));
+			cgValue right = cg_emit_load(p, cg_emit_conv(p, rhs, pv));
+			cgValue ok = cg_emit_comp(p, Token_CmpEq, left, right);
+			cg_build_return_stmt_internal_single(p, ok);
+		} else {
+			// General union: tags must match, then switch on the tag and
+			// compare the active variant's payload.
+			TB_Node *false_region  = cg_control_region(p, "bfalse");
+			TB_Node *switch_region = cg_control_region(p, "bswitch");
+
+			cgValue lhs_tag = cg_emit_load(p, cg_emit_union_tag_ptr(p, lhs));
+			cgValue rhs_tag = cg_emit_load(p, cg_emit_union_tag_ptr(p, rhs));
+
+			cgValue tag_eq = cg_emit_comp(p, Token_CmpEq, lhs_tag, rhs_tag);
+			cg_emit_if(p, tag_eq, switch_region, false_region);
+
+			size_t entry_count = type->Union.variants.count;
+			TB_SwitchEntry *keys = gb_alloc_array(temporary_allocator(), TB_SwitchEntry, entry_count);
+			for (size_t i = 0; i < entry_count; i++) {
+				TB_Node *region = cg_control_region(p, "bcase");
+				Type *variant = type->Union.variants[i];
+				keys[i].key   = union_variant_index(type, variant);
+				keys[i].value = region;
+
+				tb_inst_set_control(p->func, region);
+				Type *vp = alloc_type_pointer(variant);
+				cgValue left  = cg_emit_load(p, cg_emit_conv(p, lhs, vp));
+				cgValue right = cg_emit_load(p, cg_emit_conv(p, rhs, vp));
+				cgValue ok = cg_emit_comp(p, Token_CmpEq, left, right);
+				cg_build_return_stmt_internal_single(p, ok);
+			}
+
+
+			tb_inst_set_control(p->func, switch_region);
+			TB_DataType tag_dt = cg_data_type(lhs_tag.type);
+			GB_ASSERT(lhs_tag.kind == cgValue_Value);
+			// Unknown tag falls back to `bfalse`.
+			tb_inst_branch(p->func, tag_dt, lhs_tag.node, false_region, entry_count, keys);
+
+			tb_inst_set_control(p->func, false_region);
+			tb_inst_ret(p->func, 1, &node_false);
+		}
+	} else {
+		// Everything else: compare through lvalues of the full type.
+		cgValue left  = cg_lvalue_addr(x, type);
+		cgValue right = cg_lvalue_addr(y, type);
+		cgValue ok = cg_emit_comp(p, Token_CmpEq, left, right);
+		cg_build_return_stmt_internal_single(p, ok);
+	}
+
+	cg_procedure_end(p);
+
+	return p;
+}
+
+
+// Hash `type_size_of(type)` raw bytes at `data` with the runtime's
+// `default_hasher`, folding in `seed`. Only valid for types whose equality
+// is a plain byte-wise comparison (asserted).
+gb_internal cgValue cg_simple_compare_hash(cgProcedure *p, Type *type, cgValue data, cgValue seed) {
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	GB_ASSERT_MSG(is_type_simple_compare(type), "%s", type_to_string(type));
+
+	auto args = slice_make<cgValue>(temporary_allocator(), 3);
+	args[0] = data;
+	args[1] = seed;
+	args[2] = cg_const_int(p, t_int, type_size_of(type));
+	return cg_emit_runtime_call(p, "default_hasher", args);
+}
+
+
+
+
+
+// Return (generating and memoizing on first use) a hasher procedure for
+// `type` with the `t_hasher_proc` signature: (data: rawptr, seed: uintptr)
+// -> uintptr. Generated procedures are cached in m->hasher_procs under the
+// module's generated-procs mutex.
+//
+// BUG FIX: the maybe-pointer union, cstring, and string branches previously
+// returned `seed` instead of the computed hash `res`, so those hashers were
+// identity functions on the seed. They now return `res`.
+gb_internal cgProcedure *cg_hasher_proc_for_type(cgModule *m, Type *type) {
+	type = base_type(type);
+	GB_ASSERT(is_type_valid_for_keys(type));
+
+	mutex_lock(&m->generated_procs_mutex);
+	defer (mutex_unlock(&m->generated_procs_mutex));
+
+	cgProcedure **found = map_get(&m->hasher_procs, type);
+	if (found) {
+		return *found;
+	}
+
+	// Process-wide counter for unique generated names.
+	static std::atomic<u32> proc_index;
+
+	char buf[32] = {};
+	isize n = gb_snprintf(buf, 32, "__$hasher%u", 1+proc_index.fetch_add(1));
+	char *str = gb_alloc_str_len(permanent_allocator(), buf, n-1);
+	String proc_name = make_string_c(str);
+
+
+	cgProcedure *p = cg_procedure_create_dummy(m, proc_name, t_hasher_proc);
+	// Cache before generating the body so recursive types terminate.
+	map_set(&m->hasher_procs, type, p);
+
+	cg_procedure_begin(p);
+	defer (cg_procedure_end(p));
+
+	TB_Node *x = tb_inst_param(p->func, 0); // data
+	TB_Node *y = tb_inst_param(p->func, 1); // seed
+
+	cgValue data = cg_value(x, t_rawptr);
+	cgValue seed = cg_value(y, t_uintptr);
+
+	// Byte-wise comparable types hash their raw bytes directly.
+	if (is_type_simple_compare(type)) {
+		cgValue res = cg_simple_compare_hash(p, type, data, seed);
+		cg_build_return_stmt_internal_single(p, res);
+		return p;
+	}
+
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	auto args = slice_make<cgValue>(temporary_allocator(), 2);
+
+	if (type->kind == Type_Struct) {
+		// Chain the field hashers: each one consumes the previous seed.
+		type_set_offsets(type);
+		for_array(i, type->Struct.fields) {
+			i64 offset = type->Struct.offsets[i];
+			Entity *field = type->Struct.fields[i];
+			cgValue field_hasher = cg_hasher_proc_value_for_type(p, field->type);
+
+			TB_Node *ptr = tb_inst_member_access(p->func, data.node, offset);
+
+			args[0] = cg_value(ptr, alloc_type_pointer(field->type));
+			args[1] = seed;
+			seed = cg_emit_call(p, field_hasher, args);
+		}
+
+		cg_build_return_stmt_internal_single(p, seed);
+	} else if (type->kind == Type_Union) {
+		if (type_size_of(type) == 0) {
+			// Zero-sized union: nothing to hash.
+			cg_build_return_stmt_internal_single(p, seed);
+		} else if (is_type_union_maybe_pointer(type)) {
+			// Pointer-like union: hash the single variant's payload.
+			Type *v = type->Union.variants[0];
+			cgValue variant_hasher = cg_hasher_proc_value_for_type(p, v);
+
+			args[0] = data;
+			args[1] = seed;
+			cgValue res = cg_emit_call(p, variant_hasher, args);
+			cg_build_return_stmt_internal_single(p, res); // was `seed`: discarded the hash
+		} else {
+			// General union: switch on the tag and hash the active variant.
+			TB_Node *end_region    = cg_control_region(p, "bend");
+			TB_Node *switch_region = cg_control_region(p, "bswitch");
+
+			cg_emit_goto(p, switch_region);
+
+			size_t entry_count = type->Union.variants.count;
+			TB_SwitchEntry *keys = gb_alloc_array(temporary_allocator(), TB_SwitchEntry, entry_count);
+			for (size_t i = 0; i < entry_count; i++) {
+				TB_Node *region = cg_control_region(p, "bcase");
+				Type *variant = type->Union.variants[i];
+				keys[i].key   = union_variant_index(type, variant);
+				keys[i].value = region;
+
+				tb_inst_set_control(p->func, region);
+
+				cgValue variant_hasher = cg_hasher_proc_value_for_type(p, variant);
+
+				args[0] = data;
+				args[1] = seed;
+				cgValue res = cg_emit_call(p, variant_hasher, args);
+				cg_build_return_stmt_internal_single(p, res);
+			}
+
+			tb_inst_set_control(p->func, switch_region);
+
+			cgValue tag_ptr = cg_emit_union_tag_ptr(p, data);
+			cgValue tag = cg_emit_load(p, tag_ptr);
+
+			TB_DataType tag_dt = cg_data_type(tag.type);
+			GB_ASSERT(tag.kind == cgValue_Value);
+			// Unknown tag falls through to `bend`, returning the seed unchanged.
+			tb_inst_branch(p->func, tag_dt, tag.node, end_region, entry_count, keys);
+
+			tb_inst_set_control(p->func, end_region);
+			cg_build_return_stmt_internal_single(p, seed);
+		}
+	} else if (type->kind == Type_Array) {
+		// Fold the element hasher over every array element.
+		cgAddr pres = cg_add_local(p, t_uintptr, nullptr, false);
+		cg_addr_store(p, pres, seed);
+
+		cgValue elem_hasher = cg_hasher_proc_value_for_type(p, type->Array.elem);
+
+		auto loop_data = cg_loop_start(p, type->Array.count, t_int);
+
+		i64 stride = type_size_of(type->Array.elem);
+		TB_Node *ptr = tb_inst_array_access(p->func, data.node, loop_data.index.node, stride);
+		args[0] = cg_value(ptr, alloc_type_pointer(type->Array.elem));
+		args[1] = cg_addr_load(p, pres);
+
+		cgValue new_seed = cg_emit_call(p, elem_hasher, args);
+		cg_addr_store(p, pres, new_seed);
+
+		cg_loop_end(p, loop_data);
+
+		cgValue res = cg_addr_load(p, pres);
+		cg_build_return_stmt_internal_single(p, res);
+	} else if (type->kind == Type_EnumeratedArray) {
+		// Same loop as Type_Array, using the enumerated array's element type.
+		cgAddr pres = cg_add_local(p, t_uintptr, nullptr, false);
+		cg_addr_store(p, pres, seed);
+
+		cgValue elem_hasher = cg_hasher_proc_value_for_type(p, type->EnumeratedArray.elem);
+
+		auto loop_data = cg_loop_start(p, type->EnumeratedArray.count, t_int);
+
+		i64 stride = type_size_of(type->EnumeratedArray.elem);
+		TB_Node *ptr = tb_inst_array_access(p->func, data.node, loop_data.index.node, stride);
+		args[0] = cg_value(ptr, alloc_type_pointer(type->EnumeratedArray.elem));
+		args[1] = cg_addr_load(p, pres);
+
+		cgValue new_seed = cg_emit_call(p, elem_hasher, args);
+		cg_addr_store(p, pres, new_seed);
+
+		cg_loop_end(p, loop_data);
+
+		cgValue res = cg_addr_load(p, pres);
+		cg_build_return_stmt_internal_single(p, res);
+	} else if (is_type_cstring(type)) {
+		args[0] = data;
+		args[1] = seed;
+		cgValue res = cg_emit_runtime_call(p, "default_hasher_cstring", args);
+		cg_build_return_stmt_internal_single(p, res); // was `seed`: discarded the hash
+	} else if (is_type_string(type)) {
+		args[0] = data;
+		args[1] = seed;
+		cgValue res = cg_emit_runtime_call(p, "default_hasher_string", args);
+		cg_build_return_stmt_internal_single(p, res); // was `seed`: discarded the hash
+	} else {
+		GB_PANIC("Unhandled type for hasher: %s", type_to_string(type));
+	}
+	return p;
+} \ No newline at end of file
diff --git a/src/tilde_stmt.cpp b/src/tilde_stmt.cpp
new file mode 100644
index 000000000..2a2aa31aa
--- /dev/null
+++ b/src/tilde_stmt.cpp
@@ -0,0 +1,2614 @@
+// Emit an unconditional branch to `control_region` if the current block is
+// still open (has active control). Returns whether a goto was emitted;
+// a no-op after a terminator.
+gb_internal bool cg_emit_goto(cgProcedure *p, TB_Node *control_region) {
+	if (tb_inst_get_control(p->func)) {
+		tb_inst_goto(p->func, control_region);
+		return true;
+	}
+	return false;
+}
+
+// Create a new control region named `name` suffixed with a per-procedure
+// counter for uniqueness, and record it with the current scope index so
+// scope-based cleanup can find it.
+gb_internal TB_Node *cg_control_region(cgProcedure *p, char const *name) {
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	isize n = gb_strlen(name);
+
+	// "<name>_<count>"; gb_snprintf's count includes the NUL, hence the -1.
+	char *new_name = gb_alloc_array(temporary_allocator(), char, n+12);
+	n = -1 + gb_snprintf(new_name, n+11, "%.*s_%u", cast(int)n, name, p->control_regions.count);
+
+	TB_Node *region = tb_inst_region(p->func);
+	tb_inst_set_region_name(region, n, new_name);
+
+	GB_ASSERT(p->scope_index >= 0);
+	array_add(&p->control_regions, cgControlRegion{region, p->scope_index});
+
+	return region;
+}
+
+// Load the value pointed to by `ptr`. Types whose TB data type is "void"
+// (i.e. too large/aggregate to live in a register) are not loaded at all:
+// they stay in memory and an addr-kind value is returned instead.
+gb_internal cgValue cg_emit_load(cgProcedure *p, cgValue const &ptr, bool is_volatile) {
+	GB_ASSERT(is_type_pointer(ptr.type));
+	Type *type = type_deref(ptr.type);
+	TB_DataType dt = cg_data_type(type);
+
+	if (TB_IS_VOID_TYPE(dt)) {
+		switch (ptr.kind) {
+		case cgValue_Value:
+			return cg_lvalue_addr(ptr.node, type);
+		case cgValue_Addr:
+			GB_PANIC("NOT POSSIBLE - Cannot load an lvalue to begin with");
+			break;
+		case cgValue_Multi:
+			GB_PANIC("NOT POSSIBLE - Cannot load multiple values at once");
+			break;
+		case cgValue_Symbol:
+			return cg_lvalue_addr(tb_inst_get_symbol_address(p->func, ptr.symbol), type);
+		}
+	}
+
+	// use the natural alignment
+	// if people need a special alignment, they can use `intrinsics.unaligned_load`
+	TB_CharUnits alignment = cast(TB_CharUnits)type_align_of(type);
+
+	// Resolve the pointer operand to a plain TB node first; an Addr-kind
+	// value needs one extra load to obtain the pointer itself.
+	TB_Node *the_ptr = nullptr;
+	switch (ptr.kind) {
+	case cgValue_Value:
+		the_ptr = ptr.node;
+		break;
+	case cgValue_Addr:
+		the_ptr = tb_inst_load(p->func, TB_TYPE_PTR, ptr.node, alignment, is_volatile);
+		break;
+	case cgValue_Multi:
+		GB_PANIC("NOT POSSIBLE - Cannot load multiple values at once");
+		break;
+	case cgValue_Symbol:
+		the_ptr = tb_inst_get_symbol_address(p->func, ptr.symbol);
+		break;
+	}
+	return cg_value(tb_inst_load(p->func, dt, the_ptr, alignment, is_volatile), type);
+}
+
+// Store `src` through the pointer `dst`. Register-sized types use a plain
+// store; "void"-typed aggregates are copied byte-wise with memcpy. Types of
+// dst's pointee and src must be identical.
+gb_internal void cg_emit_store(cgProcedure *p, cgValue dst, cgValue src, bool is_volatile) {
+	GB_ASSERT_MSG(dst.kind != cgValue_Multi, "cannot store to multiple values at once");
+
+	// Normalise dst to a Value-kind pointer.
+	if (dst.kind == cgValue_Addr) {
+		dst = cg_emit_load(p, dst, is_volatile);
+	} else if (dst.kind == cgValue_Symbol) {
+		dst = cg_value(tb_inst_get_symbol_address(p->func, dst.symbol), dst.type);
+	}
+
+	GB_ASSERT(is_type_pointer(dst.type));
+	Type *dst_type = type_deref(dst.type);
+
+	GB_ASSERT_MSG(are_types_identical(dst_type, src.type), "%s vs %s", type_to_string(dst_type), type_to_string(src.type));
+
+	TB_DataType dt = cg_data_type(dst_type);
+	TB_DataType st = cg_data_type(src.type);
+	GB_ASSERT(dt.raw == st.raw);
+
+	// use the natural alignment
+	// if people need a special alignment, they can use `intrinsics.unaligned_store`
+	TB_CharUnits alignment = cast(TB_CharUnits)type_align_of(dst_type);
+
+	if (TB_IS_VOID_TYPE(dt)) {
+		// Aggregate path: both sides must be memory; copy bytes.
+		TB_Node *dst_ptr = nullptr;
+		TB_Node *src_ptr = nullptr;
+
+		switch (dst.kind) {
+		case cgValue_Value:
+			dst_ptr = dst.node;
+			break;
+		case cgValue_Addr:
+			GB_PANIC("DST cgValue_Addr should be handled above");
+			break;
+		case cgValue_Symbol:
+			dst_ptr = tb_inst_get_symbol_address(p->func, dst.symbol);
+			break;
+		}
+
+		switch (src.kind) {
+		case cgValue_Value:
+			GB_PANIC("SRC cgValue_Value should be handled above");
+			break;
+		case cgValue_Symbol:
+			GB_PANIC("SRC cgValue_Symbol should be handled above");
+			break;
+		case cgValue_Addr:
+			src_ptr = src.node;
+			break;
+		}
+
+		// IMPORTANT TODO(bill): needs to be memmove
+		i64 sz = type_size_of(dst_type);
+		TB_Node *count = tb_inst_uint(p->func, TB_TYPE_INT, cast(u64)sz);
+		tb_inst_memcpy(p->func, dst_ptr, src_ptr, count, alignment, is_volatile);
+		return;
+	}
+
+
+	switch (dst.kind) {
+	// NOTE(review): every inner case returns, so the missing `break` after
+	// this case cannot fall through for the kinds handled here.
+	case cgValue_Value:
+		switch (src.kind) {
+		case cgValue_Value:
+			// i1 values are widened to the destination's width before storing.
+			if (src.node->dt.type == TB_INT && src.node->dt.data == 1) {
+				src.node = tb_inst_zxt(p->func, src.node, dt);
+			}
+			tb_inst_store(p->func, dt, dst.node, src.node, alignment, is_volatile);
+			return;
+		case cgValue_Addr:
+			tb_inst_store(p->func, dt, dst.node,
+			              tb_inst_load(p->func, st, src.node, alignment, is_volatile),
+			              alignment, is_volatile);
+			return;
+		case cgValue_Symbol:
+			tb_inst_store(p->func, dt, dst.node,
+			              tb_inst_get_symbol_address(p->func, src.symbol),
+			              alignment, is_volatile);
+			return;
+		}
+	case cgValue_Addr:
+		GB_PANIC("cgValue_Addr should be handled above");
+		break;
+	case cgValue_Symbol:
+		GB_PANIC(" cgValue_Symbol should be handled above");
+		break;
+	}
+}
+
+
+// Recover the address a value was loaded from. Value-kind inputs must be a
+// TB_LOAD node, whose input[1] is the source pointer; Addr-kind inputs
+// already hold the pointer.
+gb_internal cgValue cg_address_from_load(cgProcedure *p, cgValue value) {
+	switch (value.kind) {
+	case cgValue_Value:
+		{
+			TB_Node *load_inst = value.node;
+			GB_ASSERT_MSG(load_inst->type == TB_LOAD, "expected a load instruction");
+			TB_Node *ptr = load_inst->inputs[1];
+			return cg_value(ptr, alloc_type_pointer(value.type));
+		}
+	case cgValue_Addr:
+		return cg_value(value.node, alloc_type_pointer(value.type));
+	case cgValue_Symbol:
+		GB_PANIC("Symbol is an invalid use case for cg_address_from_load");
+		return {};
+	case cgValue_Multi:
+		GB_PANIC("Multi is an invalid use case for cg_address_from_load");
+		break;
+	}
+	GB_PANIC("Invalid cgValue for cg_address_from_load");
+	return {};
+
+}
+
+// True when the address holds no payload for its kind (null node / symbol /
+// multi). Unknown kinds are treated as empty.
+gb_internal bool cg_addr_is_empty(cgAddr const &addr) {
+	switch (addr.kind) {
+	case cgValue_Value:
+	case cgValue_Addr:
+		return addr.addr.node == nullptr;
+	case cgValue_Symbol:
+		return addr.addr.symbol == nullptr;
+	case cgValue_Multi:
+		return addr.addr.multi == nullptr;
+	}
+	return true;
+}
+
+// Type of the value stored at `addr`. Special addr kinds (map entries,
+// swizzles, context fields) carry their own element type; the default is
+// the pointee of the underlying address.
+gb_internal Type *cg_addr_type(cgAddr const &addr) {
+	if (cg_addr_is_empty(addr)) {
+		return nullptr;
+	}
+	switch (addr.kind) {
+	case cgAddr_Map:
+		{
+			Type *t = base_type(addr.map.type);
+			GB_ASSERT(is_type_map(t));
+			return t->Map.value;
+		}
+	case cgAddr_Swizzle:
+		return addr.swizzle.type;
+	case cgAddr_SwizzleLarge:
+		return addr.swizzle_large.type;
+	case cgAddr_Context:
+		// Walk the selection path down from `context`'s struct type.
+		if (addr.ctx.sel.index.count > 0) {
+			Type *t = t_context;
+			for_array(i, addr.ctx.sel.index) {
+				GB_ASSERT(is_type_struct(t));
+				t = base_type(t)->Struct.fields[addr.ctx.sel.index[i]]->type;
+			}
+			return t;
+		}
+		break;
+	}
+	return type_deref(addr.addr.type);
+}
+
+// Load the value stored at `addr`. Only the default addr kind is implemented
+// so far; other kinds (map, swizzle, ...) panic as TODO.
+gb_internal cgValue cg_addr_load(cgProcedure *p, cgAddr addr) {
+	if (addr.addr.node == nullptr) {
+		return {};
+	}
+	switch (addr.kind) {
+	case cgAddr_Default:
+		return cg_emit_load(p, addr.addr);
+	}
+	GB_PANIC("TODO(bill): cg_addr_load %p", addr.addr.node);
+	return {};
+}
+
+
+// Store `value` into `addr`, converting to the addr's element type first.
+// Untyped `---` becomes poison, untyped `nil` becomes the typed zero value.
+// Context stores implement copy-on-write semantics for the implicit
+// `context` variable; several addr kinds are still TODO.
+gb_internal void cg_addr_store(cgProcedure *p, cgAddr addr, cgValue value) {
+	if (cg_addr_is_empty(addr)) {
+		return;
+	}
+	GB_ASSERT(value.type != nullptr);
+	if (is_type_untyped_uninit(value.type)) {
+		Type *t = cg_addr_type(addr);
+		value = cg_value(tb_inst_poison(p->func), t);
+		// TODO(bill): IS THIS EVEN A GOOD IDEA?
+	} else if (is_type_untyped_nil(value.type)) {
+		Type *t = cg_addr_type(addr);
+		value = cg_const_nil(p, t);
+	}
+
+	// Auto-deref relative pointers before storing through them.
+	if (addr.kind == cgAddr_RelativePointer && addr.relative.deref) {
+		addr = cg_addr(cg_address_from_load(p, cg_addr_load(p, addr)));
+	}
+
+	if (addr.kind == cgAddr_RelativePointer) {
+		GB_PANIC("TODO(bill): cgAddr_RelativePointer");
+	} else if (addr.kind == cgAddr_RelativeSlice) {
+		GB_PANIC("TODO(bill): cgAddr_RelativeSlice");
+	} else if (addr.kind == cgAddr_Map) {
+		GB_PANIC("TODO(bill): cgAddr_Map");
+	} else if (addr.kind == cgAddr_Context) {
+		cgAddr old_addr = cg_find_or_generate_context_ptr(p);
+
+		// Copy-on-write: a fresh context copy is needed unless the current
+		// one is unused and belongs to this exact scope.
+		bool create_new = true;
+		for_array(i, p->context_stack) {
+			cgContextData *ctx_data = &p->context_stack[i];
+			if (ctx_data->ctx.addr.node == old_addr.addr.node) {
+				if (ctx_data->uses > 0) {
+					create_new = true;
+				} else if (p->scope_index > ctx_data->scope_index) {
+					create_new = true;
+				} else {
+					// gb_printf_err("%.*s (curr:%td) (ctx:%td) (uses:%td)\n", LIT(p->name), p->scope_index, ctx_data->scope_index, ctx_data->uses);
+					create_new = false;
+				}
+				break;
+			}
+		}
+
+		cgValue next = {};
+		if (create_new) {
+			cgValue old = cg_addr_load(p, old_addr);
+			cgAddr next_addr = cg_add_local(p, t_context, nullptr, true);
+			cg_addr_store(p, next_addr, old);
+			cg_push_context_onto_stack(p, next_addr);
+			next = next_addr.addr;
+		} else {
+			next = old_addr.addr;
+		}
+
+		// Either store into a selected field of the context or replace it whole.
+		if (addr.ctx.sel.index.count > 0) {
+			cgValue lhs = cg_emit_deep_field_gep(p, next, addr.ctx.sel);
+			cgValue rhs = cg_emit_conv(p, value, type_deref(lhs.type));
+			cg_emit_store(p, lhs, rhs);
+		} else {
+			cgValue lhs = next;
+			cgValue rhs = cg_emit_conv(p, value, cg_addr_type(addr));
+			cg_emit_store(p, lhs, rhs);
+		}
+		return;
+	} else if (addr.kind == cgAddr_SoaVariable) {
+		GB_PANIC("TODO(bill): cgAddr_SoaVariable");
+	} else if (addr.kind == cgAddr_Swizzle) {
+		GB_ASSERT(addr.swizzle.count <= 4);
+		GB_PANIC("TODO(bill): cgAddr_Swizzle");
+	} else if (addr.kind == cgAddr_SwizzleLarge) {
+		GB_PANIC("TODO(bill): cgAddr_SwizzleLarge");
+	}
+
+	// Default: plain conversion + store.
+	value = cg_emit_conv(p, value, cg_addr_type(addr));
+	cg_emit_store(p, addr.addr, value);
+}
+
+// Obtain a pointer value for `addr`. Most kinds just return the underlying
+// address; map and relative-pointer kinds need dedicated lowering that is
+// still TODO (they currently panic).
+gb_internal cgValue cg_addr_get_ptr(cgProcedure *p, cgAddr const &addr) {
+	if (cg_addr_is_empty(addr)) {
+		GB_PANIC("Illegal addr -> nullptr");
+		return {};
+	}
+
+	switch (addr.kind) {
+	case cgAddr_Map:
+		GB_PANIC("TODO(bill): cg_addr_get_ptr cgAddr_Map");
+		// return cg_internal_dynamic_map_get_ptr(p, addr.addr, addr.map.key);
+		break;
+
+	case cgAddr_RelativePointer: {
+		// Relative pointer: base address + signed offset stored at the base.
+		Type *rel_ptr = base_type(cg_addr_type(addr));
+		GB_ASSERT(rel_ptr->kind == Type_RelativePointer);
+
+		cgValue ptr = cg_emit_conv(p, addr.addr, t_uintptr);
+		cgValue offset = cg_emit_conv(p, ptr, alloc_type_pointer(rel_ptr->RelativePointer.base_integer));
+		offset = cg_emit_load(p, offset);
+
+		// Sign-extend signed base integers before widening to uintptr.
+		if (!is_type_unsigned(rel_ptr->RelativePointer.base_integer)) {
+			offset = cg_emit_conv(p, offset, t_i64);
+		}
+		offset = cg_emit_conv(p, offset, t_uintptr);
+
+		cgValue absolute_ptr = cg_emit_arith(p, Token_Add, ptr, offset, t_uintptr);
+		absolute_ptr = cg_emit_conv(p, absolute_ptr, rel_ptr->RelativePointer.pointer_type);
+
+		GB_PANIC("TODO(bill): cg_addr_get_ptr cgAddr_RelativePointer");
+		// cgValue cond = cg_emit_comp(p, Token_CmpEq, offset, cg_const_nil(p->module, rel_ptr->RelativePointer.base_integer));
+
+		// NOTE(bill): nil check
+		// cgValue nil_ptr = cg_const_nil(p->module, rel_ptr->RelativePointer.pointer_type);
+		// cgValue final_ptr = cg_emit_select(p, cond, nil_ptr, absolute_ptr);
+		// return final_ptr;
+		break;
+	}
+
+	case cgAddr_SoaVariable:
+		// TODO(bill): FIX THIS HACK
+		return cg_address_from_load(p, cg_addr_load(p, addr));
+
+	case cgAddr_Context:
+		GB_PANIC("cgAddr_Context should be handled elsewhere");
+		break;
+
+	case cgAddr_Swizzle:
+	case cgAddr_SwizzleLarge:
+		// TOOD(bill): is this good enough logic?
+		break;
+	}
+
+	return addr.addr;
+}
+
+// Pointer arithmetic: returns ptr + index, scaled by the element size.
+// ptr must be a (multi-)pointer value and index an integer value; the result
+// is typed as a single pointer to the element type.
+gb_internal cgValue cg_emit_ptr_offset(cgProcedure *p, cgValue ptr, cgValue index) {
+ GB_ASSERT(ptr.kind == cgValue_Value);
+ GB_ASSERT(index.kind == cgValue_Value);
+ GB_ASSERT(is_type_pointer(ptr.type) || is_type_multi_pointer(ptr.type));
+ GB_ASSERT(is_type_integer(index.type));
+
+ Type *elem = type_deref(ptr.type, true);
+ i64 stride = type_size_of(elem);
+ return cg_value(tb_inst_array_access(p->func, ptr.node, index.node, stride), alloc_type_pointer(elem));
+}
+// Element pointer into an array-like aggregate: s must be a pointer to an
+// array, enumerated array, or matrix; returns a pointer to element [index].
+gb_internal cgValue cg_emit_array_ep(cgProcedure *p, cgValue s, cgValue index) {
+ GB_ASSERT(s.kind == cgValue_Value);
+ GB_ASSERT(index.kind == cgValue_Value);
+
+ Type *t = s.type;
+ GB_ASSERT_MSG(is_type_pointer(t), "%s", type_to_string(t));
+ Type *st = base_type(type_deref(t));
+ GB_ASSERT_MSG(is_type_array(st) || is_type_enumerated_array(st) || is_type_matrix(st), "%s", type_to_string(st));
+ GB_ASSERT_MSG(is_type_integer(core_type(index.type)), "%s", type_to_string(index.type));
+
+
+ Type *elem = base_array_type(st);
+ i64 stride = type_size_of(elem);
+ return cg_value(tb_inst_array_access(p->func, s.node, index.node, stride), alloc_type_pointer(elem));
+}
+// Convenience overload of cg_emit_array_ep for a compile-time constant index.
+gb_internal cgValue cg_emit_array_epi(cgProcedure *p, cgValue s, i64 index) {
+ return cg_emit_array_ep(p, s, cg_const_int(p, t_int, index));
+}
+
+
+// Struct "element pointer": given a pointer value s, computes a pointer to
+// logical field `index` of the pointee, by byte offset. Besides real structs
+// this also understands the fixed internal layouts of slices, strings, any,
+// complex/quaternion, dynamic arrays, maps, and SoA pointers.
+gb_internal cgValue cg_emit_struct_ep(cgProcedure *p, cgValue s, i64 index) {
+ s = cg_flatten_value(p, s);
+
+ GB_ASSERT(is_type_pointer(s.type));
+ Type *t = base_type(type_deref(s.type));
+ Type *result_type = nullptr;
+
+ // Relative pointers are first resolved to an absolute pointer.
+ if (is_type_relative_pointer(t)) {
+ s = cg_addr_get_ptr(p, cg_addr(s));
+ }
+ i64 offset = -1; // byte offset of the field; must be set below
+ i64 int_size = build_context.int_size;
+ i64 ptr_size = build_context.ptr_size;
+
+ switch (t->kind) {
+ case Type_Struct:
+ type_set_offsets(t); // ensure offsets are computed before reading them
+ result_type = t->Struct.fields[index]->type;
+ offset = t->Struct.offsets[index];
+ break;
+ case Type_Union:
+ // index == -1 conventionally means "the union tag".
+ GB_ASSERT(index == -1);
+ GB_PANIC("TODO(bill): cg_emit_union_tag_ptr");
+ break;
+ // return cg_emit_union_tag_ptr(p, s);
+ case Type_Tuple:
+ type_set_offsets(t);
+ result_type = t->Tuple.variables[index]->type;
+ offset = t->Tuple.offsets[index];
+ GB_PANIC("TODO(bill): cg_emit_tuple_ep %d", s.kind);
+ break;
+ // return cg_emit_tuple_ep(p, s, index);
+ case Type_Slice:
+ // Layout: {data: [^]elem, len: int}
+ switch (index) {
+ case 0:
+ result_type = alloc_type_multi_pointer(t->Slice.elem);
+ offset = 0;
+ break;
+ case 1:
+ result_type = t_int;
+ offset = int_size;
+ break;
+ }
+ break;
+ case Type_Basic:
+ switch (t->Basic.kind) {
+ case Basic_string:
+ // Layout: {data: [^]u8, len: int}
+ switch (index) {
+ case 0:
+ result_type = t_u8_multi_ptr;
+ offset = 0;
+ break;
+ case 1:
+ result_type = t_int;
+ offset = int_size;
+ break;
+ }
+ break;
+ case Basic_any:
+ // Layout: {data: rawptr, id: typeid}
+ switch (index) {
+ case 0:
+ result_type = t_rawptr;
+ offset = 0;
+ break;
+ case 1:
+ result_type = t_typeid;
+ offset = ptr_size;
+ break;
+ }
+ break;
+
+ case Basic_complex32:
+ case Basic_complex64:
+ case Basic_complex128:
+ {
+ // Two float components laid out contiguously: real, imag.
+ Type *ft = base_complex_elem_type(t);
+ i64 sz = type_size_of(ft);
+ switch (index) {
+ case 0: case 1:
+ result_type = ft; offset = sz * index; break;
+ default: goto error_case;
+ }
+ break;
+ }
+ case Basic_quaternion64:
+ case Basic_quaternion128:
+ case Basic_quaternion256:
+ {
+ // Four float components laid out contiguously.
+ Type *ft = base_complex_elem_type(t);
+ i64 sz = type_size_of(ft);
+ switch (index) {
+ case 0: case 1: case 2: case 3:
+ result_type = ft; offset = sz * index; break;
+ default: goto error_case;
+ }
+ }
+ break;
+ default:
+ goto error_case;
+ }
+ break;
+ case Type_DynamicArray:
+ // Layout: {data, len, cap, allocator}, each slot int_size apart.
+ switch (index) {
+ case 0:
+ result_type = alloc_type_multi_pointer(t->DynamicArray.elem);
+ offset = index*int_size;
+ break;
+ case 1: case 2:
+ result_type = t_int;
+ offset = index*int_size;
+ break;
+ case 3:
+ result_type = t_allocator;
+ offset = index*int_size;
+ break;
+ default: goto error_case;
+ }
+ break;
+ case Type_Map:
+ {
+ // Maps are accessed through the shared runtime raw-map struct.
+ init_map_internal_types(t);
+ Type *itp = alloc_type_pointer(t_raw_map);
+ s = cg_emit_transmute(p, s, itp);
+
+ Type *rms = base_type(t_raw_map);
+ GB_ASSERT(rms->kind == Type_Struct);
+
+ if (0 <= index && index < 3) {
+ result_type = rms->Struct.fields[index]->type;
+ offset = rms->Struct.offsets[index];
+ } else {
+ goto error_case;
+ }
+ break;
+ }
+ case Type_Array:
+ // Arrays delegate to normal element-pointer arithmetic.
+ return cg_emit_array_epi(p, s, index);
+ case Type_SoaPointer:
+ // Layout: {ptr to SoA struct, index: int}
+ // NOTE(review): offset is not assigned here — the assert below will fire
+ // for SoaPointer; presumably unfinished. TODO confirm intended offsets.
+ switch (index) {
+ case 0: result_type = alloc_type_pointer(t->SoaPointer.elem); break;
+ case 1: result_type = t_int; break;
+ }
+ break;
+ default:
+ error_case:;
+ GB_PANIC("TODO(bill): struct_gep type: %s, %d", type_to_string(s.type), index);
+ break;
+ }
+
+ GB_ASSERT_MSG(result_type != nullptr, "%s %d", type_to_string(t), index);
+ GB_ASSERT(offset >= 0);
+
+ GB_ASSERT(s.kind == cgValue_Value);
+ return cg_value(
+ tb_inst_member_access(p->func, s.node, offset),
+ alloc_type_pointer(result_type)
+ );
+}
+
+
+// Struct "element value": like cg_emit_struct_ep but loads the field.
+// The value is first given an address (spilling to a local if needed).
+gb_internal cgValue cg_emit_struct_ev(cgProcedure *p, cgValue s, i64 index) {
+ s = cg_address_from_load_or_generate_local(p, s);
+ cgValue ptr = cg_emit_struct_ep(p, s, index);
+ return cg_flatten_value(p, cg_emit_load(p, ptr));
+}
+
+
+// Walks a multi-level Selection (e.g. a.b.c) from pointer e, emitting one
+// field/element access per index and auto-dereferencing pointers in between.
+// Returns a pointer to the final selected field.
+gb_internal cgValue cg_emit_deep_field_gep(cgProcedure *p, cgValue e, Selection const &sel) {
+ GB_ASSERT(sel.index.count > 0);
+ Type *type = type_deref(e.type);
+
+ for_array(i, sel.index) {
+ i64 index = sel.index[i];
+ // Implicit dereference: step through a pointer before selecting into it.
+ if (is_type_pointer(type)) {
+ type = type_deref(type);
+ e = cg_emit_load(p, e);
+ }
+ type = core_type(type);
+
+ switch (type->kind) {
+ case Type_SoaPointer: {
+ // An SoA pointer is {addr, index}: select the field array via the rest
+ // of the selection, then index into it with the stored element index.
+ cgValue addr = cg_emit_struct_ep(p, e, 0);
+ cgValue index = cg_emit_struct_ep(p, e, 1);
+ addr = cg_emit_load(p, addr);
+ index = cg_emit_load(p, index);
+
+ i64 first_index = sel.index[0];
+ Selection sub_sel = sel;
+ sub_sel.index.data += 1;
+ sub_sel.index.count -= 1;
+
+ cgValue arr = cg_emit_struct_ep(p, addr, first_index);
+
+ Type *t = base_type(type_deref(addr.type));
+ GB_ASSERT(is_type_soa_struct(t));
+
+ // Fixed SoA stores arrays inline; dynamic/slice SoA stores pointers.
+ if (t->Struct.soa_kind == StructSoa_Fixed) {
+ e = cg_emit_array_ep(p, arr, index);
+ } else {
+ e = cg_emit_ptr_offset(p, cg_emit_load(p, arr), index);
+ }
+ break;
+ }
+ case Type_Basic:
+ switch (type->Basic.kind) {
+ case Basic_any:
+ // `any` has fixed members: 0 = data pointer, 1 = type info.
+ if (index == 0) {
+ type = t_rawptr;
+ } else if (index == 1) {
+ type = t_type_info_ptr;
+ }
+ e = cg_emit_struct_ep(p, e, index);
+ break;
+ default:
+ e = cg_emit_struct_ep(p, e, index);
+ break;
+ }
+ break;
+ case Type_Struct:
+ if (type->Struct.is_raw_union) {
+ // raw_union fields all live at offset 0: reinterpret the pointer.
+ type = get_struct_field_type(type, index);
+ GB_ASSERT(is_type_pointer(e.type));
+ e = cg_emit_transmute(p, e, alloc_type_pointer(type));
+ } else {
+ type = get_struct_field_type(type, index);
+ e = cg_emit_struct_ep(p, e, index);
+ }
+ break;
+ case Type_Union:
+ // index == -1 selects the union tag.
+ GB_ASSERT(index == -1);
+ type = t_type_info_ptr;
+ e = cg_emit_struct_ep(p, e, index);
+ break;
+ case Type_Tuple:
+ type = type->Tuple.variables[index]->type;
+ e = cg_emit_struct_ep(p, e, index);
+ break;
+ case Type_Slice:
+ case Type_DynamicArray:
+ case Type_Map:
+ case Type_RelativePointer:
+ e = cg_emit_struct_ep(p, e, index);
+ break;
+ case Type_Array:
+ e = cg_emit_array_epi(p, e, index);
+ break;
+ default:
+ GB_PANIC("un-gep-able type %s", type_to_string(type));
+ break;
+ }
+ }
+
+ return e;
+}
+
+
+
+
+
+
+
+
+// Looks up the break/continue regions registered for a labelled statement.
+// `ident` must resolve to an Entity_Label already present in p->branch_regions;
+// a missing label is a compiler bug, hence the panic.
+gb_internal cgBranchRegions cg_lookup_branch_regions(cgProcedure *p, Ast *ident) {
+ GB_ASSERT(ident->kind == Ast_Ident);
+ Entity *e = entity_of_node(ident);
+ GB_ASSERT(e->kind == Entity_Label);
+ for (cgBranchRegions const &b : p->branch_regions) {
+ if (b.label == e->Label.node) {
+ return b;
+ }
+ }
+
+ GB_PANIC("Unreachable");
+ cgBranchRegions empty = {};
+ return empty;
+}
+
+// Pushes a new break/continue/fallthrough target frame onto p->target_list.
+// If `label` is given, the matching pre-registered branch region is updated
+// so labelled break/continue can find these regions later.
+gb_internal cgTargetList *cg_push_target_list(cgProcedure *p, Ast *label, TB_Node *break_, TB_Node *continue_, TB_Node *fallthrough_) {
+ cgTargetList *tl = gb_alloc_item(permanent_allocator(), cgTargetList);
+ tl->prev = p->target_list;
+ tl->break_ = break_;
+ tl->continue_ = continue_;
+ tl->fallthrough_ = fallthrough_;
+ p->target_list = tl;
+
+ if (label != nullptr) { // Set label blocks
+ GB_ASSERT(label->kind == Ast_Label);
+
+ for (cgBranchRegions &b : p->branch_regions) {
+ GB_ASSERT(b.label != nullptr && label != nullptr);
+ GB_ASSERT(b.label->kind == Ast_Label);
+ if (b.label == label) {
+ b.break_ = break_;
+ b.continue_ = continue_;
+ return tl;
+ }
+ }
+
+ // Every label must have been registered up front; not finding it is a bug.
+ GB_PANIC("Unreachable");
+ }
+
+ return tl;
+}
+
+// Pops the innermost break/continue target frame (counterpart of push above).
+gb_internal void cg_pop_target_list(cgProcedure *p) {
+ p->target_list = p->target_list->prev;
+}
+// Allocates a stack slot for `type`, optionally zero-initialized, and (when a
+// named entity is supplied) records it in the procedure's variable map and
+// attaches debug info. Returns the slot as a cgAddr.
+gb_internal cgAddr cg_add_local(cgProcedure *p, Type *type, Entity *e, bool zero_init) {
+ GB_ASSERT(type != nullptr);
+
+ isize size = type_size_of(type);
+ TB_CharUnits alignment = cast(TB_CharUnits)type_align_of(type);
+ if (is_type_matrix(type)) {
+ alignment *= 2; // NOTE(bill): Just in case
+ }
+
+ TB_Node *local = tb_inst_local(p->func, cast(u32)size, alignment);
+
+ // Skip debug info for unnamed or blank ("_") entities.
+ if (e != nullptr && e->token.string.len > 0 && e->token.string != "_") {
+ // NOTE(bill): for debugging purposes only
+ String name = e->token.string;
+ TB_DebugType *debug_type = cg_debug_type(p->module, type);
+ tb_node_append_attrib(local, tb_function_attrib_variable(p->func, name.len, cast(char const *)name.text, debug_type));
+ }
+
+ if (zero_init) {
+ // memset(local, 0, size)
+ bool is_volatile = false;
+ TB_Node *zero = tb_inst_uint(p->func, TB_TYPE_I8, 0);
+ TB_Node *count = tb_inst_uint(p->func, TB_TYPE_I32, cast(u64)size);
+ tb_inst_memset(p->func, local, zero, count, alignment, is_volatile);
+ }
+
+ cgAddr addr = cg_addr(cg_value(local, alloc_type_pointer(type)));
+ if (e) {
+ map_set(&p->variable_map, e, addr);
+ }
+ return addr;
+}
+
+// Like cg_add_local but the storage is a module-level (private, anonymous)
+// global rather than a stack slot; the procedure takes its symbol address.
+// No zero_init parameter: the global's storage is set up via the data section.
+gb_internal cgAddr cg_add_global(cgProcedure *p, Type *type, Entity *e) {
+ GB_ASSERT(type != nullptr);
+
+ isize size = type_size_of(type);
+ TB_CharUnits alignment = cast(TB_CharUnits)type_align_of(type);
+ if (is_type_matrix(type)) {
+ alignment *= 2; // NOTE(bill): Just in case
+ }
+
+ TB_Global *global = tb_global_create(p->module->mod, 0, "", nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(p->module->mod, tb_module_get_data(p->module->mod), global, size, alignment, 0);
+ TB_Node *local = tb_inst_get_symbol_address(p->func, cast(TB_Symbol *)global);
+
+ if (e != nullptr && e->token.string.len > 0 && e->token.string != "_") {
+ // NOTE(bill): for debugging purposes only
+ String name = e->token.string;
+ TB_DebugType *debug_type = cg_debug_type(p->module, type);
+ tb_node_append_attrib(local, tb_function_attrib_variable(p->func, name.len, cast(char const *)name.text, debug_type));
+ }
+
+ cgAddr addr = cg_addr(cg_value(local, alloc_type_pointer(type)));
+ if (e) {
+ map_set(&p->variable_map, e, addr);
+ }
+ return addr;
+}
+
+
+// Spills `value` into a fresh stack slot of `original_type` (aligned to at
+// least min_alignment) and returns a pointer to the copy. Register-class
+// values are stored; address-class values are memcpy'd.
+gb_internal cgValue cg_copy_value_to_ptr(cgProcedure *p, cgValue value, Type *original_type, isize min_alignment) {
+ TB_CharUnits size = cast(TB_CharUnits)type_size_of(original_type);
+ TB_CharUnits align = cast(TB_CharUnits)gb_max(type_align_of(original_type), min_alignment);
+ TB_Node *copy = tb_inst_local(p->func, size, align);
+ if (value.kind == cgValue_Value) {
+ tb_inst_store(p->func, cg_data_type(original_type), copy, value.node, align, false);
+ } else {
+ GB_ASSERT(value.kind == cgValue_Addr);
+ tb_inst_memcpy(p->func, copy, value.node, tb_inst_uint(p->func, TB_TYPE_INT, size), align, false);
+ }
+
+ return cg_value(copy, alloc_type_pointer(original_type));
+}
+
+// Produces a pointer to `value`. If the value is itself the result of a load,
+// the load's source pointer is reused; an Addr value already is a pointer;
+// otherwise the value is stored to a new local and that local is returned.
+gb_internal cgValue cg_address_from_load_or_generate_local(cgProcedure *p, cgValue value) {
+ switch (value.kind) {
+ case cgValue_Value:
+ if (value.node->type == TB_LOAD) {
+ // inputs[1] of a TB_LOAD node is the address operand.
+ TB_Node *ptr = value.node->inputs[1];
+ return cg_value(ptr, alloc_type_pointer(value.type));
+ }
+ break;
+ case cgValue_Addr:
+ return cg_value(value.node, alloc_type_pointer(value.type));
+ case cgValue_Multi:
+ GB_PANIC("cgValue_Multi not allowed");
+ }
+
+ // Fallback: materialize the value in a fresh (uninitialized) local.
+ cgAddr res = cg_add_local(p, value.type, nullptr, false);
+ cg_addr_store(p, res, value);
+ return res.addr;
+}
+
+
+// Emits the body of one recorded defer (either a deferred statement or a
+// deferred procedure call) in a fresh "defer" control region, restoring the
+// context stack depth that was live when the defer was registered.
+gb_internal void cg_build_defer_stmt(cgProcedure *p, cgDefer const &d) {
+ TB_Node *curr_region = tb_inst_get_control(p->func);
+ if (curr_region == nullptr) {
+ // No active control region (e.g. already terminated): nothing to emit.
+ return;
+ }
+
+ // NOTE(bill): The prev block may defer injection before it's terminator
+ // NOTE(review): last_inst is currently always nullptr — the terminator
+ // detection below is disabled, so the goto is always emitted. TODO confirm.
+ TB_Node *last_inst = nullptr;
+ // if (curr_region->input_count) {
+ // last_inst = *(curr_region->inputs + curr_region->input_count);
+ // }
+ // if (last_inst && TB_IS_NODE_TERMINATOR(last_inst->type)) {
+ // // NOTE(bill): ReturnStmt defer stuff will be handled previously
+ // return;
+ // }
+
+ // Temporarily rewind the context stack to the defer's registration depth.
+ isize prev_context_stack_count = p->context_stack.count;
+ GB_ASSERT(prev_context_stack_count <= p->context_stack.capacity);
+ defer (p->context_stack.count = prev_context_stack_count);
+ p->context_stack.count = d.context_stack_count;
+
+ TB_Node *b = cg_control_region(p, "defer");
+ if (last_inst == nullptr) {
+ cg_emit_goto(p, b);
+ }
+
+ tb_inst_set_control(p->func, b);
+ if (d.kind == cgDefer_Node) {
+ cg_build_stmt(p, d.stmt);
+ } else if (d.kind == cgDefer_Proc) {
+ cg_emit_call(p, d.proc.deferred, d.proc.result_as_args);
+ }
+}
+
+
+// Runs pending defers in LIFO order according to the exit kind:
+// - Default: pop and run defers belonging to the current scope only.
+// - Return: run every pending defer (without popping; the proc is ending).
+// - Branch: run defers for scopes deeper than the branch target's scope.
+gb_internal void cg_emit_defer_stmts(cgProcedure *p, cgDeferExitKind kind, TB_Node *control_region) {
+ isize count = p->defer_stack.count;
+ isize i = count;
+ while (i --> 0) {
+ cgDefer const &d = p->defer_stack[i];
+
+ if (kind == cgDeferExit_Default) {
+ if (p->scope_index == d.scope_index &&
+ d.scope_index > 0) {
+ cg_build_defer_stmt(p, d);
+ array_pop(&p->defer_stack);
+ continue;
+ } else {
+ // Remaining defers belong to outer scopes; stop here.
+ break;
+ }
+ } else if (kind == cgDeferExit_Return) {
+ cg_build_defer_stmt(p, d);
+ } else if (kind == cgDeferExit_Branch) {
+ GB_ASSERT(control_region != nullptr);
+ // Find the scope index the branch target was opened in.
+ isize lower_limit = -1;
+ for (auto const &cr : p->control_regions) {
+ if (cr.control_region == control_region) {
+ lower_limit = cr.scope_index;
+ break;
+ }
+ }
+ GB_ASSERT(lower_limit >= 0);
+ if (lower_limit < d.scope_index) {
+ cg_build_defer_stmt(p, d);
+ }
+ }
+ }
+}
+
+// Enters a lexical scope: bumps the scope index and pushes the checker Scope.
+gb_internal void cg_scope_open(cgProcedure *p, Scope *scope) {
+ // TODO(bill): debug scope information
+
+ p->scope_index += 1;
+ array_add(&p->scope_stack, scope);
+}
+
+// Leaves a lexical scope: runs the scope's defers, pops context-stack entries
+// registered at (or deeper than) this scope, and pops the scope itself.
+gb_internal void cg_scope_close(cgProcedure *p, cgDeferExitKind kind, TB_Node *control_region) {
+ cg_emit_defer_stmts(p, kind, control_region);
+ GB_ASSERT(p->scope_index > 0);
+
+ while (p->context_stack.count > 0) {
+ auto *ctx = &p->context_stack[p->context_stack.count-1];
+ if (ctx->scope_index < p->scope_index) {
+ break;
+ }
+ array_pop(&p->context_stack);
+ }
+
+ p->scope_index -= 1;
+ array_pop(&p->scope_stack);
+}
+
+
+// Appends src_value to dst_values, flattening a tuple-typed Multi value into
+// its component values. Returns how many values were appended.
+gb_internal isize cg_append_tuple_values(cgProcedure *p, Array<cgValue> *dst_values, cgValue src_value) {
+ isize init_count = dst_values->count;
+ Type *t = src_value.type;
+ if (t && t->kind == Type_Tuple) {
+ GB_ASSERT(src_value.kind == cgValue_Multi);
+ GB_ASSERT(src_value.multi != nullptr);
+ GB_ASSERT(src_value.multi->values.count == t->Tuple.variables.count);
+ for (cgValue const &value : src_value.multi->values) {
+ array_add(dst_values, value);
+ }
+ } else {
+ array_add(dst_values, src_value);
+ }
+ return dst_values->count - init_count;
+}
+// Emits a (possibly multi-valued) `lhs... = rhs...` assignment. All RHS
+// expressions are evaluated first (tuples flattened), then — for multi
+// assignments — Addr-class values are copied to temporaries so swaps like
+// `a, b = b, a` read the old values, and finally everything is stored.
+gb_internal void cg_build_assignment(cgProcedure *p, Array<cgAddr> const &lvals, Slice<Ast *> const &values) {
+ if (values.count == 0) {
+ return;
+ }
+
+ auto inits = array_make<cgValue>(permanent_allocator(), 0, lvals.count);
+
+ // Evaluate every RHS before any store happens.
+ for (Ast *rhs : values) {
+ cgValue init = cg_build_expr(p, rhs);
+ cg_append_tuple_values(p, &inits, init);
+ }
+
+ bool prev_in_assignment = p->in_multi_assignment;
+
+ isize lval_count = 0;
+ for (cgAddr const &lval : lvals) {
+ if (!cg_addr_is_empty(lval)) {
+ // check if it is not a blank identifier
+ lval_count += 1;
+ }
+ }
+ p->in_multi_assignment = lval_count > 1;
+
+ GB_ASSERT(lvals.count == inits.count);
+
+
+ if (inits.count > 1) for_array(i, inits) {
+ cgAddr lval = lvals[i];
+ cgValue init = cg_flatten_value(p, inits[i]);
+
+ GB_ASSERT(init.kind != cgValue_Multi);
+ if (init.type == nullptr) {
+ continue;
+ }
+
+ Type *type = cg_addr_type(lval);
+ if (!cg_addr_is_empty(lval)) {
+ GB_ASSERT_MSG(are_types_identical(init.type, type), "%s = %s", type_to_string(init.type), type_to_string(type));
+ }
+
+ if (init.kind == cgValue_Addr &&
+ !cg_addr_is_empty(lval)) {
+ // NOTE(bill): This is needed for certain constructs such as this:
+ // a, b = b, a
+ // NOTE(bill): This is a bodge and not necessarily a good way of doing things whatsoever
+ TB_CharUnits size = cast(TB_CharUnits)type_size_of(type);
+ TB_CharUnits align = cast(TB_CharUnits)type_align_of(type);
+ TB_Node *copy = tb_inst_local(p->func, size, align);
+ tb_inst_memcpy(p->func, copy, init.node, tb_inst_uint(p->func, TB_TYPE_INT, size), align, false);
+ // use the copy instead
+ init.node = copy;
+ }
+ inits[i] = init;
+ }
+
+ // Second pass: perform the stores (blank lvals have empty addrs).
+ for_array(i, inits) {
+ cgAddr lval = lvals[i];
+ cgValue init = inits[i];
+ GB_ASSERT(init.kind != cgValue_Multi);
+ if (init.type == nullptr) {
+ continue;
+ }
+ cg_addr_store(p, lval, init);
+ }
+
+ p->in_multi_assignment = prev_in_assignment;
+}
+
+// Emits an assignment statement. Plain `=` goes through cg_build_assignment
+// (handles tuples and blanks); compound operators (`+=` etc.) are rewritten
+// to load-op-store on a single lhs/rhs pair.
+gb_internal void cg_build_assign_stmt(cgProcedure *p, AstAssignStmt *as) {
+ if (as->op.kind == Token_Eq) {
+ auto lvals = array_make<cgAddr>(permanent_allocator(), 0, as->lhs.count);
+
+ for (Ast *lhs : as->lhs) {
+ cgAddr lval = {};
+ if (!is_blank_ident(lhs)) {
+ lval = cg_build_addr(p, lhs);
+ }
+ array_add(&lvals, lval);
+ }
+ cg_build_assignment(p, lvals, as->rhs);
+ return;
+ }
+
+ GB_ASSERT(as->lhs.count == 1);
+ GB_ASSERT(as->rhs.count == 1);
+ // NOTE(bill): Only 1 += 1 is allowed, no tuples
+ // +=, -=, etc
+
+ // Map the compound token (e.g. Token_AddEq) back to its plain operator.
+ i32 op_ = cast(i32)as->op.kind;
+ op_ += Token_Add - Token_AddEq; // Convert += to +
+ TokenKind op = cast(TokenKind)op_;
+
+ if (op == Token_CmpAnd || op == Token_CmpOr) {
+ // &&= / ||= need short-circuit evaluation, not a plain arith op.
+ GB_PANIC("TODO(bill): cg_emit_logical_binary_expr");
+ // Type *type = as->lhs[0]->tav.type;
+ // cgValue new_value = cg_emit_logical_binary_expr(p, op, as->lhs[0], as->rhs[0], type);
+
+ // cgAddr lhs = cg_build_addr(p, as->lhs[0]);
+ // cg_addr_store(p, lhs, new_value);
+ } else {
+ cgAddr lhs = cg_build_addr(p, as->lhs[0]);
+ cgValue value = cg_build_expr(p, as->rhs[0]);
+ Type *lhs_type = cg_addr_type(lhs);
+
+ // NOTE(bill): Allow for the weird edge case of:
+ // array *= matrix
+ if (op == Token_Mul && is_type_matrix(value.type) && is_type_array(lhs_type)) {
+ GB_PANIC("TODO(bill): array *= matrix");
+ // cgValue old_value = cg_addr_load(p, lhs);
+ // Type *type = old_value.type;
+ // cgValue new_value = cg_emit_vector_mul_matrix(p, old_value, value, type);
+ // cg_addr_store(p, lhs, new_value);
+ // return;
+ }
+
+ if (is_type_array(lhs_type)) {
+ GB_PANIC("TODO(bill): cg_build_assign_stmt_array");
+ // cg_build_assign_stmt_array(p, op, lhs, value);
+ // return;
+ } else {
+ // Standard scalar path: load, convert rhs, apply op, store back.
+ cgValue old_value = cg_addr_load(p, lhs);
+ Type *type = old_value.type;
+
+ cgValue change = cg_emit_conv(p, value, type);
+ cgValue new_value = cg_emit_arith(p, op, old_value, change, type);
+ cg_addr_store(p, lhs, new_value);
+ }
+ }
+}
+
+// Convenience wrapper: returns a single value by slicing over its address.
+gb_internal void cg_build_return_stmt_internal_single(cgProcedure *p, cgValue result) {
+ Slice<cgValue> results = {};
+ results.data = &result;
+ results.count = 1;
+ cg_build_return_stmt_internal(p, results);
+}
+
+
+// Lowers fully-evaluated return values to actual TB return instructions,
+// according to the procedure's ABI shape:
+// - split returns (Odin CC): all but the last value are stored through hidden
+//   out-pointer parameters at p->split_returns_index;
+// - return-by-ptr: the final value is stored through parameter 0;
+// - otherwise the final value is returned in a register, bitcast/zero-extended
+//   to the prototype's data type as needed.
+gb_internal void cg_build_return_stmt_internal(cgProcedure *p, Slice<cgValue> const &results) {
+ TypeTuple *tuple = &p->type->Proc.results->Tuple;
+ isize return_count = p->type->Proc.result_count;
+
+ if (return_count == 0) {
+ tb_inst_ret(p->func, 0, nullptr);
+ return;
+ }
+
+ if (p->split_returns_index >= 0) {
+ GB_ASSERT(is_calling_convention_odin(p->type->Proc.calling_convention));
+
+ // Store all but the last result through the hidden out-pointer params.
+ for (isize i = 0; i < return_count-1; i++) {
+ Entity *e = tuple->variables[i];
+ TB_Node *ret_ptr = tb_inst_param(p->func, cast(int)(p->split_returns_index+i));
+ cgValue ptr = cg_value(ret_ptr, alloc_type_pointer(e->type));
+ cg_emit_store(p, ptr, results[i]);
+ }
+
+ if (p->return_by_ptr) {
+ // Last result also goes through memory (parameter 0).
+ Entity *e = tuple->variables[return_count-1];
+ TB_Node *ret_ptr = tb_inst_param(p->func, 0);
+ cgValue ptr = cg_value(ret_ptr, alloc_type_pointer(e->type));
+ cg_emit_store(p, ptr, results[return_count-1]);
+
+ tb_inst_ret(p->func, 0, nullptr);
+ return;
+ } else {
+ GB_ASSERT(p->proto->return_count == 1);
+ TB_DataType dt = TB_PROTOTYPE_RETURNS(p->proto)->dt;
+
+ // Last result is returned in a register; coerce it to the ABI type.
+ cgValue result = results[return_count-1];
+ result = cg_flatten_value(p, result);
+ TB_Node *final_res = nullptr;
+ if (result.kind == cgValue_Addr) {
+ TB_CharUnits align = cast(TB_CharUnits)type_align_of(result.type);
+ final_res = tb_inst_load(p->func, dt, result.node, align, false);
+ } else {
+ GB_ASSERT(result.kind == cgValue_Value);
+ TB_DataType st = result.node->dt;
+ GB_ASSERT(st.type == dt.type);
+ if (st.raw == dt.raw) {
+ final_res = result.node;
+ } else if (st.type == TB_INT && st.data == 1) {
+ // i1 (bool) widened by zero-extension.
+ final_res = tb_inst_zxt(p->func, result.node, dt);
+ } else {
+ final_res = tb_inst_bitcast(p->func, result.node, dt);
+ }
+ }
+ GB_ASSERT(final_res != nullptr);
+
+ tb_inst_ret(p->func, 1, &final_res);
+ return;
+ }
+
+ } else {
+ GB_ASSERT(!is_calling_convention_odin(p->type->Proc.calling_convention));
+
+ if (p->return_by_ptr) {
+ Entity *e = tuple->variables[return_count-1];
+ TB_Node *ret_ptr = tb_inst_param(p->func, 0);
+ cgValue ptr = cg_value(ret_ptr, alloc_type_pointer(e->type));
+ cg_emit_store(p, ptr, results[return_count-1]);
+
+ tb_inst_ret(p->func, 0, nullptr);
+ return;
+ } else {
+ GB_ASSERT(p->proto->return_count == 1);
+ TB_DataType dt = TB_PROTOTYPE_RETURNS(p->proto)->dt;
+ if (results.count == 1) {
+ // Same register-return coercion as the split-returns branch above.
+ cgValue result = results[0];
+ result = cg_flatten_value(p, result);
+
+ TB_Node *final_res = nullptr;
+ if (result.kind == cgValue_Addr) {
+ TB_CharUnits align = cast(TB_CharUnits)type_align_of(result.type);
+ final_res = tb_inst_load(p->func, dt, result.node, align, false);
+ } else {
+ GB_ASSERT(result.kind == cgValue_Value);
+ TB_DataType st = result.node->dt;
+ GB_ASSERT(st.type == dt.type);
+ if (st.raw == dt.raw) {
+ final_res = result.node;
+ } else if (st.type == TB_INT && st.data == 1) {
+ final_res = tb_inst_zxt(p->func, result.node, dt);
+ } else {
+ final_res = tb_inst_bitcast(p->func, result.node, dt);
+ }
+ }
+
+ GB_ASSERT(final_res != nullptr);
+
+ tb_inst_ret(p->func, 1, &final_res);
+ return;
+ } else {
+ // NOTE(review): unreachable today — the assert repeats results.count == 1.
+ GB_ASSERT_MSG(results.count == 1, "TODO(bill): multi-return values for the return");
+ return;
+ }
+ }
+
+ }
+}
+
+
+// Builds a `return` statement: evaluates the result expressions (or, for a
+// bare return with named results, loads the named-result locals), writes the
+// named-result locals back, converts each value to its declared result type,
+// then hands off to cg_build_return_stmt_internal for ABI lowering.
+gb_internal void cg_build_return_stmt(cgProcedure *p, Slice<Ast *> const &return_results) {
+ TypeTuple *tuple = &p->type->Proc.results->Tuple;
+ isize return_count = p->type->Proc.result_count;
+
+ if (return_count == 0) {
+ tb_inst_ret(p->func, 0, nullptr);
+ return;
+ }
+ TEMPORARY_ALLOCATOR_GUARD();
+
+ auto results = array_make<cgValue>(temporary_allocator(), 0, return_count);
+
+ if (return_results.count != 0) {
+ // Explicit `return a, b, ...` — evaluate and flatten tuples.
+ for (isize i = 0; i < return_results.count; i++) {
+ cgValue res = cg_build_expr(p, return_results[i]);
+ cg_append_tuple_values(p, &results, res);
+ }
+ } else {
+ // Bare `return` with named results — read the current values.
+ for_array(i, tuple->variables) {
+ Entity *e = tuple->variables[i];
+ cgAddr addr = map_must_get(&p->variable_map, e);
+ cgValue res = cg_addr_load(p, addr);
+ array_add(&results, res);
+ }
+ }
+ GB_ASSERT(results.count == return_count);
+
+ if (return_results.count != 0 && p->type->Proc.has_named_results) {
+ // NOTE(bill): store the named values before returning
+ // (deferred procedures may observe the named result variables)
+ for_array(i, tuple->variables) {
+ Entity *e = tuple->variables[i];
+ cgAddr addr = map_must_get(&p->variable_map, e);
+ cg_addr_store(p, addr, results[i]);
+ }
+ }
+ // Convert each result to its declared type before ABI lowering.
+ for_array(i, tuple->variables) {
+ Entity *e = tuple->variables[i];
+ results[i] = cg_emit_conv(p, results[i], e->type);
+ }
+
+ cg_build_return_stmt_internal(p, slice_from_array(results));
+}
+
+// Builds an `if` statement: optional init statement, conditional branch into
+// then/else regions, bodies, and a common "done" join region. A labelled `if`
+// pushes a block-style target list so `break label` can jump to done.
+gb_internal void cg_build_if_stmt(cgProcedure *p, Ast *node) {
+ ast_node(is, IfStmt, node);
+ cg_scope_open(p, is->scope); // Scope #1
+ defer (cg_scope_close(p, cgDeferExit_Default, nullptr));
+
+ if (is->init != nullptr) {
+ TB_Node *init = cg_control_region(p, "if_init");
+ cg_emit_goto(p, init);
+ tb_inst_set_control(p->func, init);
+ cg_build_stmt(p, is->init);
+ }
+
+ TB_Node *then = cg_control_region(p, "if_then");
+ TB_Node *done = cg_control_region(p, "if_done");
+ TB_Node *else_ = done; // no else clause: false edge goes straight to done
+ if (is->else_stmt != nullptr) {
+ else_ = cg_control_region(p, "if_else");
+ }
+
+ cgValue cond = cg_build_cond(p, is->cond, then, else_);
+ gb_unused(cond);
+
+ if (is->label != nullptr) {
+ // Labelled if behaves as a breakable block (no continue target).
+ cgTargetList *tl = cg_push_target_list(p, is->label, done, nullptr, nullptr);
+ tl->is_block = true;
+ }
+
+ // TODO(bill): should we do a constant check?
+ // Which philosophy are we following?
+ // - IR represents what the code represents (probably this)
+ // - IR represents what the code executes
+
+ tb_inst_set_control(p->func, then);
+
+ cg_build_stmt(p, is->body);
+
+ cg_emit_goto(p, done);
+
+ if (is->else_stmt != nullptr) {
+ tb_inst_set_control(p->func, else_);
+
+ cg_scope_open(p, scope_of_node(is->else_stmt));
+ cg_build_stmt(p, is->else_stmt);
+ cg_scope_close(p, cgDeferExit_Default, nullptr);
+
+ cg_emit_goto(p, done);
+ }
+
+ tb_inst_set_control(p->func, done);
+}
+
+// Builds a C-style `for` loop. Region layout:
+//   init? -> loop (cond check, if present) -> body -> post? -> loop ... done
+// `break` targets done, `continue` targets post (or the loop head when there
+// is no post statement).
+gb_internal void cg_build_for_stmt(cgProcedure *p, Ast *node) {
+ ast_node(fs, ForStmt, node);
+
+ cg_scope_open(p, fs->scope);
+ defer (cg_scope_close(p, cgDeferExit_Default, nullptr));
+
+ if (fs->init != nullptr) {
+ TB_Node *init = cg_control_region(p, "for_init");
+ cg_emit_goto(p, init);
+ tb_inst_set_control(p->func, init);
+ cg_build_stmt(p, fs->init);
+ }
+ TB_Node *body = cg_control_region(p, "for_body");
+ TB_Node *done = cg_control_region(p, "for_done");
+ TB_Node *loop = body; // condition-less loop re-enters the body directly
+ if (fs->cond != nullptr) {
+ loop = cg_control_region(p, "for_loop");
+ }
+ TB_Node *post = loop;
+ if (fs->post != nullptr) {
+ post = cg_control_region(p, "for_post");
+ }
+
+ cg_emit_goto(p, loop);
+ tb_inst_set_control(p->func, loop);
+
+ if (loop != body) {
+ // Conditional loop head: test, then fall into the body or exit.
+ cg_build_cond(p, fs->cond, body, done);
+ tb_inst_set_control(p->func, body);
+ }
+
+ cg_push_target_list(p, fs->label, done, post, nullptr);
+ cg_build_stmt(p, fs->body);
+ cg_pop_target_list(p);
+
+ cg_emit_goto(p, post);
+
+ if (fs->post != nullptr) {
+ tb_inst_set_control(p->func, post);
+ cg_build_stmt(p, fs->post);
+ cg_emit_goto(p, loop);
+ }
+ tb_inst_set_control(p->func, done);
+}
+
+
+// Strips a leading `&` (address-of) from a range-statement value, e.g. the
+// `&x` in `for &x in ...`, returning the underlying identifier.
+gb_internal Ast *cg_strip_and_prefix(Ast *ident) {
+ if (ident != nullptr) {
+ if (ident->kind == Ast_UnaryExpr && ident->UnaryExpr.op.kind == Token_And) {
+ ident = ident->UnaryExpr.expr;
+ }
+ GB_ASSERT(ident->kind == Ast_Ident);
+ }
+ return ident;
+}
+
+// Emits `*addr += 1` for the pointee's own type (load, add one, store).
+gb_internal void cg_emit_increment(cgProcedure *p, cgValue addr) {
+ GB_ASSERT(is_type_pointer(addr.type));
+ Type *type = type_deref(addr.type);
+ cgValue v_one = cg_const_value(p, type, exact_value_i64(1));
+ cg_emit_store(p, addr, cg_emit_arith(p, Token_Add, cg_emit_load(p, addr), v_one, type));
+
+}
+
+// Binds one range-statement iteration variable. By-reference entities
+// (EntityFlag_Value with an addressable value) are bound directly to the
+// element's address; otherwise the value is copied into a fresh local.
+gb_internal void cg_range_stmt_store_val(cgProcedure *p, Ast *stmt_val, cgValue const &value) {
+ Entity *e = entity_of_node(stmt_val);
+ if (e == nullptr) {
+ // Blank identifier or no entity: nothing to bind.
+ return;
+ }
+
+ if (e->flags & EntityFlag_Value) {
+ if (value.kind == cgValue_Addr) {
+ cgValue ptr = cg_address_from_load_or_generate_local(p, value);
+ cg_add_entity(p->module, e, ptr);
+ return;
+ }
+ }
+
+ cgAddr addr = cg_add_local(p, e->type, e, false);
+ cg_addr_store(p, addr, value);
+ return;
+}
+
+// Builds `for v, i in lo..<hi` / `lo..=hi` interval loops. A value local and
+// an index local are advanced together; for inclusive (`..=`) ranges an extra
+// `curr != upper` check runs after the body so a value at the numeric maximum
+// cannot wrap around and loop forever.
+gb_internal void cg_build_range_stmt_interval(cgProcedure *p, AstBinaryExpr *node,
+ AstRangeStmt *rs, Scope *scope) {
+ bool ADD_EXTRA_WRAPPING_CHECK = true;
+
+ cg_scope_open(p, scope);
+
+ Ast *val0 = rs->vals.count > 0 ? cg_strip_and_prefix(rs->vals[0]) : nullptr;
+ Ast *val1 = rs->vals.count > 1 ? cg_strip_and_prefix(rs->vals[1]) : nullptr;
+ Type *val0_type = nullptr;
+ Type *val1_type = nullptr;
+ if (val0 != nullptr && !is_blank_ident(val0)) {
+ val0_type = type_of_expr(val0);
+ }
+ if (val1 != nullptr && !is_blank_ident(val1)) {
+ val1_type = type_of_expr(val1);
+ }
+
+ // `..=`/`..` are inclusive (<=); `..<` is exclusive (<).
+ TokenKind op = Token_Lt;
+ switch (node->op.kind) {
+ case Token_Ellipsis: op = Token_LtEq; break;
+ case Token_RangeFull: op = Token_LtEq; break;
+ case Token_RangeHalf: op = Token_Lt; break;
+ default: GB_PANIC("Invalid interval operator"); break;
+ }
+
+
+ cgValue lower = cg_build_expr(p, node->left);
+ cgValue upper = {}; // initialized each time in the loop
+
+ // Current interval value, seeded with the lower bound.
+ cgAddr value;
+ if (val0_type != nullptr) {
+ value = cg_add_local(p, val0_type, entity_of_node(val0), false);
+ } else {
+ value = cg_add_local(p, lower.type, nullptr, false);
+ }
+ cg_addr_store(p, value, lower);
+
+ // Zero-based iteration counter (the optional second range variable).
+ cgAddr index;
+ if (val1_type != nullptr) {
+ index = cg_add_local(p, val1_type, entity_of_node(val1), false);
+ } else {
+ index = cg_add_local(p, t_int, nullptr, false);
+ }
+ cg_addr_store(p, index, cg_const_int(p, t_int, 0));
+
+ TB_Node *loop = cg_control_region(p, "for_interval_loop");
+ TB_Node *body = cg_control_region(p, "for_interval_body");
+ TB_Node *done = cg_control_region(p, "for_interval_done");
+
+ cg_emit_goto(p, loop);
+ tb_inst_set_control(p->func, loop);
+
+ // NOTE: the upper bound is re-evaluated on every iteration.
+ upper = cg_build_expr(p, node->right);
+ cgValue curr_value = cg_addr_load(p, value);
+ cgValue cond = cg_emit_comp(p, op, curr_value, upper);
+ cg_emit_if(p, cond, body, done);
+ tb_inst_set_control(p->func, body);
+
+ cgValue val = cg_addr_load(p, value);
+ cgValue idx = cg_addr_load(p, index);
+
+ if (val0_type) cg_range_stmt_store_val(p, val0, val);
+ if (val1_type) cg_range_stmt_store_val(p, val1, idx);
+
+
+ {
+ // NOTE: this check block will most likely be optimized out, and is here
+ // to make this code easier to read
+ TB_Node *check = nullptr;
+ TB_Node *post = cg_control_region(p, "for_interval_post");
+
+ TB_Node *continue_block = post;
+
+ if (ADD_EXTRA_WRAPPING_CHECK &&
+ op == Token_LtEq) {
+ // Inclusive ranges exit via `curr != upper` to avoid overflow wrap.
+ check = cg_control_region(p, "for_interval_check");
+ continue_block = check;
+ }
+
+ cg_push_target_list(p, rs->label, done, continue_block, nullptr);
+
+ cg_build_stmt(p, rs->body);
+
+ cg_scope_close(p, cgDeferExit_Default, nullptr);
+ cg_pop_target_list(p);
+
+ if (check != nullptr) {
+ cg_emit_goto(p, check);
+ tb_inst_set_control(p->func, check);
+
+ cgValue check_cond = cg_emit_comp(p, Token_NotEq, curr_value, upper);
+ cg_emit_if(p, check_cond, post, done);
+ } else {
+ cg_emit_goto(p, post);
+ }
+
+ // Advance both the interval value and the counter, then loop.
+ tb_inst_set_control(p->func, post);
+ cg_emit_increment(p, value.addr);
+ cg_emit_increment(p, index.addr);
+ cg_emit_goto(p, loop);
+ }
+
+ tb_inst_set_control(p->func, done);
+}
+
+// Emits the loop skeleton for `for v, i in aggregate` over an indexable
+// aggregate (fixed array, enumerated array, slice, dynamic array, or #soa
+// struct). `count_ptr` points at the element count and is loaded only when
+// the count is not a compile-time array constant. On return (all outputs
+// optional): *val_ holds the loaded element (only when val_type != nullptr),
+// *idx_ the index value, and *loop_/*done_ the control regions the caller
+// uses to place the body and wire up break/continue. `is_reverse` selects
+// the reversed iteration scheme shown in the inline pseudo-code below.
+gb_internal void cg_build_range_stmt_indexed(cgProcedure *p, cgValue expr, Type *val_type, cgValue count_ptr,
+                                             cgValue *val_, cgValue *idx_, TB_Node **loop_, TB_Node **done_,
+                                             bool is_reverse) {
+	cgValue count = {};
+	Type *expr_type = base_type(type_deref(expr.type));
+	switch (expr_type->kind) {
+	case Type_Array:
+		// Fixed arrays have a compile-time count; avoid the count_ptr load.
+		count = cg_const_int(p, t_int, expr_type->Array.count);
+		break;
+	}
+
+	cgValue val = {};
+	cgValue idx = {};
+	TB_Node *loop = nullptr;
+	TB_Node *done = nullptr;
+	TB_Node *body = nullptr;
+
+	loop = cg_control_region(p, "for_index_loop");
+	body = cg_control_region(p, "for_index_body");
+	done = cg_control_region(p, "for_index_done");
+
+	// Loop induction variable, stored in a local so the body may observe it.
+	cgAddr index = cg_add_local(p, t_int, nullptr, false);
+
+	if (!is_reverse) {
+		/*
+			for x, i in array {
+				...
+			}
+
+			i := -1
+			for {
+				i += 1
+				if !(i < len(array)) {
+					break
+				}
+				#no_bounds_check x := array[i]
+				...
+			}
+		*/
+
+		// Start at -1: the increment at the top of the loop yields 0 first.
+		cg_addr_store(p, index, cg_const_int(p, t_int, cast(u64)-1));
+
+		cg_emit_goto(p, loop);
+		tb_inst_set_control(p->func, loop);
+
+		cgValue incr = cg_emit_arith(p, Token_Add, cg_addr_load(p, index), cg_const_int(p, t_int, 1), t_int);
+		cg_addr_store(p, index, incr);
+
+		if (count.node == nullptr) {
+			GB_ASSERT(count_ptr.node != nullptr);
+			count = cg_emit_load(p, count_ptr);
+		}
+		cgValue cond = cg_emit_comp(p, Token_Lt, incr, count);
+		cg_emit_if(p, cond, body, done);
+	} else {
+		// NOTE(bill): REVERSED LOGIC
+		/*
+			#reverse for x, i in array {
+				...
+			}
+
+			i := len(array)
+			for {
+				i -= 1
+				if i < 0 {
+					break
+				}
+				#no_bounds_check x := array[i]
+				...
+			}
+		*/
+
+		if (count.node == nullptr) {
+			GB_ASSERT(count_ptr.node != nullptr);
+			count = cg_emit_load(p, count_ptr);
+		}
+		count = cg_emit_conv(p, count, t_int);
+		// Start at len: the decrement at the top of the loop yields len-1 first.
+		cg_addr_store(p, index, count);
+
+		cg_emit_goto(p, loop);
+		tb_inst_set_control(p->func, loop);
+
+		cgValue incr = cg_emit_arith(p, Token_Sub, cg_addr_load(p, index), cg_const_int(p, t_int, 1), t_int);
+		cg_addr_store(p, index, incr);
+
+		// Exit when the index has gone below zero.
+		cgValue anti_cond = cg_emit_comp(p, Token_Lt, incr, cg_const_int(p, t_int, 0));
+		cg_emit_if(p, anti_cond, done, body);
+	}
+
+	tb_inst_set_control(p->func, body);
+
+	// Load the element for the current index, per aggregate kind.
+	idx = cg_addr_load(p, index);
+	switch (expr_type->kind) {
+	case Type_Array: {
+		if (val_type != nullptr) {
+			val = cg_emit_load(p, cg_emit_array_ep(p, expr, idx));
+		}
+		break;
+	}
+	case Type_EnumeratedArray: {
+		if (val_type != nullptr) {
+			val = cg_emit_load(p, cg_emit_array_ep(p, expr, idx));
+			// NOTE(bill): Override the idx value for the enumeration
+			Type *index_type = expr_type->EnumeratedArray.index;
+			if (compare_exact_values(Token_NotEq, *expr_type->EnumeratedArray.min_value, exact_value_u64(0))) {
+				idx = cg_emit_arith(p, Token_Add, idx, cg_const_value(p, index_type, *expr_type->EnumeratedArray.min_value), index_type);
+			}
+		}
+		break;
+	}
+	case Type_Slice: {
+		if (val_type != nullptr) {
+			// Index off the slice's raw data pointer.
+			cgValue elem = cg_builtin_raw_data(p, expr);
+			val = cg_emit_load(p, cg_emit_ptr_offset(p, elem, idx));
+		}
+		break;
+	}
+	case Type_DynamicArray: {
+		if (val_type != nullptr) {
+			// Field 0 of a dynamic array is its data pointer.
+			cgValue elem = cg_emit_struct_ep(p, expr, 0);
+			elem = cg_emit_load(p, elem);
+			val = cg_emit_load(p, cg_emit_ptr_offset(p, elem, idx));
+		}
+		break;
+	}
+	case Type_Struct: {
+		// #soa structs are handled by the caller; nothing to load here.
+		GB_ASSERT(is_type_soa_struct(expr_type));
+		break;
+	}
+
+	default:
+		GB_PANIC("Cannot do range_indexed of %s", type_to_string(expr_type));
+		break;
+	}
+
+	if (val_) *val_ = val;
+	if (idx_) *idx_ = idx;
+	if (loop_) *loop_ = loop;
+	if (done_) *done_ = done;
+
+}
+
+// Lowers a `for ... in ...` range statement: dispatches on the ranged
+// expression's type (interval, map, array, enumerated array, dynamic array,
+// slice, string, tuple), binds the per-iteration values, then emits the body
+// between the loop/done regions produced by the chosen helper.
+gb_internal void cg_build_range_stmt(cgProcedure *p, Ast *node) {
+	ast_node(rs, RangeStmt, node);
+
+	Ast *expr = unparen_expr(rs->expr);
+
+	// `for x in lo..<hi` style interval — separate, simpler lowering.
+	if (is_ast_range(expr)) {
+		cg_build_range_stmt_interval(p, &expr->BinaryExpr, rs, rs->scope);
+		return;
+	}
+
+	Type *expr_type = type_of_expr(expr);
+	if (expr_type != nullptr) {
+		Type *et = base_type(type_deref(expr_type));
+		if (is_type_soa_struct(et)) {
+			GB_PANIC("TODO(bill): #soa array range statements");
+			// cg_build_range_stmt_struct_soa(p, rs, scope);
+			return;
+		}
+	}
+
+	cg_scope_open(p, rs->scope);
+
+
+	// Up to two iteration variables; blank identifiers get no type/binding.
+	Ast *val0 = rs->vals.count > 0 ? cg_strip_and_prefix(rs->vals[0]) : nullptr;
+	Ast *val1 = rs->vals.count > 1 ? cg_strip_and_prefix(rs->vals[1]) : nullptr;
+	Type *val0_type = nullptr;
+	Type *val1_type = nullptr;
+	if (val0 != nullptr && !is_blank_ident(val0)) {
+		val0_type = type_of_expr(val0);
+	}
+	if (val1 != nullptr && !is_blank_ident(val1)) {
+		val1_type = type_of_expr(val1);
+	}
+
+	cgValue val = {};
+	cgValue key = {};
+	TB_Node *loop = nullptr;
+	TB_Node *done = nullptr;
+	bool is_map = false;
+	TypeAndValue tav = type_and_value_of_expr(expr);
+
+	if (tav.mode == Addressing_Type) {
+		GB_PANIC("TODO(bill): range statement over enum type");
+	} else {
+		Type *expr_type = type_of_expr(expr);
+		Type *et = base_type(type_deref(expr_type));
+		switch (et->kind) {
+		case Type_Map: {
+			is_map = true;
+			cgValue map = cg_build_addr_ptr(p, expr);
+			if (is_type_pointer(type_deref(map.type))) {
+				map = cg_emit_load(p, map);
+			}
+			GB_PANIC("TODO(bill): cg_build_range_map");
+			// cg_build_range_map(p, map, val1_type, &val, &key, &loop, &done);
+			break;
+		}
+		case Type_Array: {
+			cgValue array = cg_build_addr_ptr(p, expr);
+			if (is_type_pointer(type_deref(array.type))) {
+				array = cg_emit_load(p, array);
+			}
+			// Constant count, but spilled to a local so the indexed helper
+			// has a uniform count pointer to work with.
+			cgAddr count_ptr = cg_add_local(p, t_int, nullptr, false);
+			cg_addr_store(p, count_ptr, cg_const_int(p, t_int, et->Array.count));
+			cg_build_range_stmt_indexed(p, array, val0_type, count_ptr.addr, &val, &key, &loop, &done, rs->reverse);
+			break;
+		}
+		case Type_EnumeratedArray: {
+			cgValue array = cg_build_addr_ptr(p, expr);
+			if (is_type_pointer(type_deref(array.type))) {
+				array = cg_emit_load(p, array);
+			}
+			cgAddr count_ptr = cg_add_local(p, t_int, nullptr, false);
+			cg_addr_store(p, count_ptr, cg_const_int(p, t_int, et->EnumeratedArray.count));
+			cg_build_range_stmt_indexed(p, array, val0_type, count_ptr.addr, &val, &key, &loop, &done, rs->reverse);
+			break;
+		}
+		case Type_DynamicArray: {
+			cgValue count_ptr = {};
+			cgValue array = cg_build_addr_ptr(p, expr);
+			if (is_type_pointer(type_deref(array.type))) {
+				array = cg_emit_load(p, array);
+			}
+			// Field 1 of a dynamic array is its length.
+			count_ptr = cg_emit_struct_ep(p, array, 1);
+			cg_build_range_stmt_indexed(p, array, val0_type, count_ptr, &val, &key, &loop, &done, rs->reverse);
+			break;
+		}
+		case Type_Slice: {
+			cgValue count_ptr = {};
+			cgValue slice = cg_build_expr(p, expr);
+			if (is_type_pointer(slice.type)) {
+				count_ptr = cg_emit_struct_ep(p, slice, 1);
+				slice = cg_emit_load(p, slice);
+			} else {
+				// Slice rvalue: spill its length into a local.
+				count_ptr = cg_add_local(p, t_int, nullptr, false).addr;
+				cg_emit_store(p, count_ptr, cg_builtin_len(p, slice));
+			}
+			cg_build_range_stmt_indexed(p, slice, val0_type, count_ptr, &val, &key, &loop, &done, rs->reverse);
+			break;
+		}
+		case Type_Basic: {
+			// String iteration (rune by rune).
+			cgValue string = cg_build_expr(p, expr);
+			if (is_type_pointer(string.type)) {
+				string = cg_emit_load(p, string);
+			}
+			if (is_type_untyped(expr_type)) {
+				// Materialize an untyped constant string as a typed local.
+				cgAddr s = cg_add_local(p, default_type(string.type), nullptr, false);
+				cg_addr_store(p, s, string);
+				string = cg_addr_load(p, s);
+			}
+			Type *t = base_type(string.type);
+			GB_ASSERT(!is_type_cstring(t));
+			GB_PANIC("TODO(bill): cg_build_range_string");
+			// cg_build_range_string(p, string, val0_type, &val, &key, &loop, &done, rs->reverse);
+			break;
+		}
+		case Type_Tuple:
+			GB_PANIC("TODO(bill): cg_build_range_tuple");
+			// cg_build_range_tuple(p, expr, val0_type, val1_type, &val, &key, &loop, &done);
+			break;
+		default:
+			GB_PANIC("Cannot range over %s", type_to_string(expr_type));
+			break;
+		}
+	}
+
+	// Map iteration yields (key, value); everything else yields (value, index).
+	if (is_map) {
+		if (val0_type) cg_range_stmt_store_val(p, val0, key);
+		if (val1_type) cg_range_stmt_store_val(p, val1, val);
+	} else {
+		if (val0_type) cg_range_stmt_store_val(p, val0, val);
+		if (val1_type) cg_range_stmt_store_val(p, val1, key);
+	}
+
+	// break -> done, continue -> loop (re-runs the increment/condition).
+	cg_push_target_list(p, rs->label, done, loop, nullptr);
+
+	cg_build_stmt(p, rs->body);
+
+	cg_scope_close(p, cgDeferExit_Default, nullptr);
+	cg_pop_target_list(p);
+	cg_emit_goto(p, loop);
+	tb_inst_set_control(p->func, done);
+}
+
+// Reports whether a switch statement can be lowered to a single TB branch
+// (jump table). Requirements: an explicit tag expression whose type is either
+// an integer of at most 8 bytes or a typeid, and every case expression must
+// be a constant integer (or a type, for typeid switches) — no range cases.
+gb_internal bool cg_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss) {
+	if (ss->tag == nullptr) {
+		return false;
+	}
+
+	TypeAndValue tag_tv = type_and_value_of_expr(ss->tag);
+	bool tag_is_typeid = is_type_typeid(tag_tv.type);
+	if (!tag_is_typeid) {
+		if (!is_type_integer(core_type(tag_tv.type))) {
+			return false;
+		}
+		// Keys wider than 64 bits cannot be jump-table entries.
+		if (type_size_of(tag_tv.type) > 8) {
+			return false;
+		}
+	}
+
+	ast_node(body, BlockStmt, ss->body);
+	for (Ast *clause : body->stmts) {
+		ast_node(cc, CaseClause, clause);
+		// A default clause (empty list) imposes no constraints.
+		for (Ast *case_expr : cc->list) {
+			case_expr = unparen_expr(case_expr);
+			if (is_ast_range(case_expr)) {
+				return false;
+			}
+			if (case_expr->tav.mode == Addressing_Type) {
+				// Type cases are only legal for a typeid tag.
+				GB_ASSERT(tag_is_typeid);
+				continue;
+			}
+			TypeAndValue case_tv = type_and_value_of_expr(case_expr);
+			if (case_tv.mode != Addressing_Constant) {
+				return false;
+			}
+			if (!is_type_integer(core_type(case_tv.type))) {
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+
+// Lowers a `switch` statement. When every case key is a small constant
+// integer/typeid (see cg_switch_stmt_can_be_trivial_jump_table) it emits a
+// single TB multi-way branch; otherwise it emits a chain of compare+branch
+// regions per case expression. The default clause body is emitted last.
+gb_internal void cg_build_switch_stmt(cgProcedure *p, Ast *node) {
+	ast_node(ss, SwitchStmt, node);
+	cg_scope_open(p, ss->scope);
+
+	if (ss->init != nullptr) {
+		cg_build_stmt(p, ss->init);
+	}
+	cgValue tag = {};
+	if (ss->tag != nullptr) {
+		tag = cg_build_expr(p, ss->tag);
+	} else {
+		// `switch {}` with no tag compares each case condition against true.
+		tag = cg_const_bool(p, t_bool, true);
+	}
+
+	TB_Node *done = cg_control_region(p, "switch_done");
+
+	ast_node(body, BlockStmt, ss->body);
+
+	isize case_count = body->stmts.count;
+	Slice<Ast *> default_stmts = {};
+	TB_Node *default_fall  = nullptr;
+	TB_Node *default_block = nullptr;
+	Scope *  default_scope = nullptr;
+	TB_Node *fall = nullptr;
+
+
+	// Pre-create one body region per clause so fallthrough and the jump
+	// table can reference later clauses before they are emitted.
+	auto body_regions = slice_make<TB_Node *>(permanent_allocator(), body->stmts.count);
+	auto body_scopes = slice_make<Scope *>(permanent_allocator(), body->stmts.count);
+	for_array(i, body->stmts) {
+		Ast *clause = body->stmts[i];
+		ast_node(cc, CaseClause, clause);
+
+		body_regions[i] = cg_control_region(p, cc->list.count == 0 ? "switch_default_body" : "switch_case_body");
+		body_scopes[i] = cc->scope;
+		if (cc->list.count == 0) {
+			default_block = body_regions[i];
+			default_scope = cc->scope;
+		}
+	}
+
+	bool is_trivial = cg_switch_stmt_can_be_trivial_jump_table(ss);
+	if (is_trivial) {
+		// Jump-table path: collect (constant key, region) pairs for every
+		// case expression and emit one multi-way branch.
+		isize key_count = 0;
+		for (Ast *clause : body->stmts) {
+			ast_node(cc, CaseClause, clause);
+			key_count += cc->list.count;
+		}
+		TB_SwitchEntry *keys = gb_alloc_array(temporary_allocator(), TB_SwitchEntry, key_count);
+		isize key_index = 0;
+		for_array(i, body->stmts) {
+			Ast *clause = body->stmts[i];
+			ast_node(cc, CaseClause, clause);
+
+			TB_Node *region = body_regions[i];
+			for (Ast *expr : cc->list) {
+				i64 key = 0;
+				expr = unparen_expr(expr);
+				GB_ASSERT(!is_ast_range(expr));
+				if (expr->tav.mode == Addressing_Type) {
+					// Type case: key is the type's typeid encoded as u64.
+					Type *type = expr->tav.value.value_typeid;
+					if (type == nullptr || type == t_invalid) {
+						type = expr->tav.type;
+					}
+					key = cg_typeid_as_u64(p->module, type);
+				} else {
+					auto tv = type_and_value_of_expr(expr);
+					GB_ASSERT(tv.mode == Addressing_Constant);
+					key = exact_value_to_i64(tv.value);
+				}
+				keys[key_index++] = {key, region};
+			}
+		}
+		GB_ASSERT(key_index == key_count);
+
+		// Unmatched keys go to the default clause if any, else past the switch.
+		TB_Node *end_block = done;
+		if (default_block) {
+			end_block = default_block;
+		}
+
+		TB_DataType dt = cg_data_type(tag.type);
+		GB_ASSERT(tag.kind == cgValue_Value);
+		GB_ASSERT(!TB_IS_VOID_TYPE(dt));
+
+		tb_inst_branch(p->func, dt, tag.node, end_block, key_count, keys);
+	}
+
+	// Emit each non-default clause body (and its compare chain when not
+	// using the jump table).
+	for_array(i, body->stmts) {
+		Ast *clause = body->stmts[i];
+		ast_node(cc, CaseClause, clause);
+
+		TB_Node *body_region = body_regions[i];
+		Scope *body_scope = body_scopes[i];
+		// `fallthrough` targets the next clause's body, or `done` for the last.
+		fall = done;
+		if (i+1 < case_count) {
+			fall = body_regions[i+1];
+		}
+
+		if (cc->list.count == 0) {
+			// default case
+			default_stmts = cc->stmts;
+			default_fall = fall;
+			GB_ASSERT(default_block == body_region);
+			continue;
+		}
+
+		TB_Node *next_cond = nullptr;
+		if (!is_trivial) for (Ast *expr : cc->list) {
+			expr = unparen_expr(expr);
+
+			next_cond = cg_control_region(p, "switch_case_next");
+
+			cgValue cond = {};
+			if (is_ast_range(expr)) {
+				// Range case: lower `lo op hi` to (lo <= tag) && (tag op hi).
+				ast_node(ie, BinaryExpr, expr);
+				TokenKind op = Token_Invalid;
+				switch (ie->op.kind) {
+				case Token_Ellipsis:  op = Token_LtEq; break;
+				case Token_RangeFull: op = Token_LtEq; break;
+				case Token_RangeHalf: op = Token_Lt;   break;
+				default: GB_PANIC("Invalid interval operator"); break;
+				}
+				cgValue lhs = cg_build_expr(p, ie->left);
+				cgValue rhs = cg_build_expr(p, ie->right);
+
+				cgValue cond_lhs = cg_emit_comp(p, Token_LtEq, lhs, tag);
+				cgValue cond_rhs = cg_emit_comp(p, op, tag, rhs);
+				cond = cg_emit_arith(p, Token_And, cond_lhs, cond_rhs, t_bool);
+			} else {
+				if (expr->tav.mode == Addressing_Type) {
+					GB_ASSERT(is_type_typeid(tag.type));
+					cgValue e = cg_typeid(p, expr->tav.type);
+					e = cg_emit_conv(p, e, tag.type);
+					cond = cg_emit_comp(p, Token_CmpEq, tag, e);
+				} else {
+					cond = cg_emit_comp(p, Token_CmpEq, tag, cg_build_expr(p, expr));
+				}
+			}
+
+			GB_ASSERT(cond.kind == cgValue_Value);
+			tb_inst_if(p->func, cond.node, body_region, next_cond);
+			tb_inst_set_control(p->func, next_cond);
+		}
+
+		tb_inst_set_control(p->func, body_region);
+
+		cg_push_target_list(p, ss->label, done, nullptr, fall);
+		cg_scope_open(p, body_scope);
+		cg_build_stmt_list(p, cc->stmts);
+		cg_scope_close(p, cgDeferExit_Default, body_region);
+		cg_pop_target_list(p);
+
+		cg_emit_goto(p, done);
+		// NOTE(review): when is_trivial, next_cond is still nullptr here —
+		// confirm tb_inst_set_control(p->func, nullptr) is intended/benign.
+		tb_inst_set_control(p->func, next_cond);
+	}
+
+	// Default clause body is emitted after all explicit cases.
+	if (default_block != nullptr) {
+		if (!is_trivial) {
+			cg_emit_goto(p, default_block);
+		}
+		tb_inst_set_control(p->func, default_block);
+
+		cg_push_target_list(p, ss->label, done, nullptr, default_fall);
+		cg_scope_open(p, default_scope);
+		cg_build_stmt_list(p, default_stmts);
+		cg_scope_close(p, cgDeferExit_Default, default_block);
+		cg_pop_target_list(p);
+	}
+
+
+	cg_emit_goto(p, done);
+	tb_inst_set_control(p->func, done);
+
+	cg_scope_close(p, cgDeferExit_Default, done);
+}
+
+// Lowers a type switch (`switch v in any_or_union_value`). The discriminant
+// is the union tag (for unions) or the typeid (for `any`); each case region
+// binds the case entity either by value (copied into a shared backing slot)
+// or by reference (pointer into the original storage), then dispatches via a
+// single TB multi-way branch.
+gb_internal void cg_build_type_switch_stmt(cgProcedure *p, Ast *node) {
+	ast_node(ss, TypeSwitchStmt, node);
+
+	TB_Node *done_region    = cg_control_region(p, "typeswitch_done");
+	TB_Node *else_region    = done_region;
+	TB_Node *default_region = nullptr;
+	isize num_cases = 0;
+
+	cg_scope_open(p, ss->scope);
+	defer (cg_scope_close(p, cgDeferExit_Default, done_region));
+
+	// The tag is checked as `x := expr` — a single-assign statement.
+	ast_node(as, AssignStmt, ss->tag);
+	GB_ASSERT(as->lhs.count == 1);
+	GB_ASSERT(as->rhs.count == 1);
+
+	cgValue parent = cg_build_expr(p, as->rhs[0]);
+	bool is_parent_ptr = is_type_pointer(parent.type);
+	Type *parent_base_type = type_deref(parent.type);
+	gb_unused(parent_base_type);
+
+	TypeSwitchKind switch_kind = check_valid_type_switch_type(parent.type);
+	GB_ASSERT(switch_kind != TypeSwitch_Invalid);
+
+
+	cgValue parent_value = parent;
+
+	// Ensure we have an addressable copy to take tag/data pointers from.
+	cgValue parent_ptr = parent;
+	if (!is_parent_ptr) {
+		parent_ptr = cg_address_from_load_or_generate_local(p, parent);
+	}
+
+	// Extract the discriminant used for dispatch.
+	cgValue tag = {};
+	cgValue union_data = {};
+	if (switch_kind == TypeSwitch_Union) {
+		union_data = cg_emit_conv(p, parent_ptr, t_rawptr);
+		Type *union_type = type_deref(parent_ptr.type);
+		if (is_type_union_maybe_pointer(union_type)) {
+			// Pointer-like unions encode nil/non-nil as tag 0/1.
+			tag = cg_emit_conv(p, cg_emit_comp_against_nil(p, Token_NotEq, union_data), t_int);
+		} else if (union_tag_size(union_type) == 0) {
+			tag = {}; // there is no tag for a zero sized union
+		} else {
+			cgValue tag_ptr = cg_emit_union_tag_ptr(p, parent_ptr);
+			tag = cg_emit_load(p, tag_ptr);
+		}
+	} else if (switch_kind == TypeSwitch_Any) {
+		// Field 1 of `any` is the typeid.
+		tag = cg_emit_load(p, cg_emit_struct_ep(p, parent_ptr, 1));
+	} else {
+		GB_PANIC("Unknown switch kind");
+	}
+
+	ast_node(body, BlockStmt, ss->body);
+
+	// Count case keys and pre-create the default region if present.
+	for (Ast *clause : body->stmts) {
+		ast_node(cc, CaseClause, clause);
+		num_cases += cc->list.count;
+		if (cc->list.count == 0) {
+			GB_ASSERT(default_region == nullptr);
+			default_region = cg_control_region(p, "typeswitch_default_body");
+			else_region = default_region;
+		}
+	}
+
+	// NOTE(review): only the first single-type clause is inspected here
+	// (loop breaks after one entity) — confirm that "all by reference" is
+	// meant to be decided by that first clause alone.
+	bool all_by_reference = false;
+	for (Ast *clause : body->stmts) {
+		ast_node(cc, CaseClause, clause);
+		if (cc->list.count != 1) {
+			continue;
+		}
+		Entity *case_entity = implicit_entity_of_node(clause);
+		all_by_reference |= (case_entity->flags & EntityFlag_Value) == 0;
+		break;
+	}
+
+	// One stack slot big/aligned enough for any by-value case variant.
+	TB_Node *backing_ptr = nullptr;
+	if (!all_by_reference) {
+		bool variants_found = false;
+		i64 max_size = 0;
+		i64 max_align = 1;
+		for (Ast *clause : body->stmts) {
+			ast_node(cc, CaseClause, clause);
+			if (cc->list.count != 1) {
+				continue;
+			}
+			Entity *case_entity = implicit_entity_of_node(clause);
+			if (!is_type_untyped_nil(case_entity->type)) {
+				max_size = gb_max(max_size, type_size_of(case_entity->type));
+				max_align = gb_max(max_align, type_align_of(case_entity->type));
+				variants_found = true;
+			}
+		}
+		if (variants_found) {
+			backing_ptr = tb_inst_local(p->func, cast(TB_CharUnits)max_size, cast(TB_CharUnits)max_align);
+		}
+	}
+
+	TEMPORARY_ALLOCATOR_GUARD();
+	TB_Node **control_regions     = gb_alloc_array(temporary_allocator(), TB_Node *,      body->stmts.count);
+	TB_SwitchEntry *switch_entries = gb_alloc_array(temporary_allocator(), TB_SwitchEntry, num_cases);
+
+	// Build (key -> region) entries: union variant index or typeid value;
+	// a nil case maps to key 0 in both kinds.
+	isize case_index = 0;
+	for_array(i, body->stmts) {
+		Ast *clause = body->stmts[i];
+		ast_node(cc, CaseClause, clause);
+		if (cc->list.count == 0) {
+			control_regions[i] = default_region;
+			continue;
+		}
+
+		TB_Node *region = cg_control_region(p, "typeswitch_body");
+		control_regions[i] = region;
+
+		for (Ast *type_expr : cc->list) {
+			Type *case_type = type_of_expr(type_expr);
+			i64 key = -1;
+			if (switch_kind == TypeSwitch_Union) {
+				Type *ut = base_type(type_deref(parent.type));
+				if (is_type_untyped_nil(case_type)) {
+					key = 0;
+				} else {
+					key = union_variant_index(ut, case_type);
+				}
+			} else if (switch_kind == TypeSwitch_Any) {
+				if (is_type_untyped_nil(case_type)) {
+					key = 0;
+				} else {
+					key = cast(i64)cg_typeid_as_u64(p->module, case_type);
+				}
+			}
+			GB_ASSERT(key >= 0);
+
+			switch_entries[case_index++] = TB_SwitchEntry{key, region};
+		}
+	}
+
+	GB_ASSERT(case_index == num_cases);
+
+	// Dispatch: one multi-way branch on the discriminant (or a dummy false
+	// key for zero-sized parents, which can only hit the else region).
+	{
+		TB_DataType dt = {};
+		TB_Node *key = nullptr;
+		if (type_size_of(parent_base_type) == 0) {
+			GB_ASSERT(tag.node == nullptr);
+			key = tb_inst_bool(p->func, false);
+			dt = cg_data_type(t_bool);
+		} else {
+			GB_ASSERT(tag.kind == cgValue_Value && tag.node != nullptr);
+			dt = cg_data_type(tag.type);
+			key = tag.node;
+		}
+
+		GB_ASSERT(!TB_IS_VOID_TYPE(dt));
+		tb_inst_branch(p->func, dt, key, else_region, num_cases, switch_entries);
+	}
+
+
+	// Emit each clause body, binding the implicit case entity.
+	for_array(i, body->stmts) {
+		Ast *clause = body->stmts[i];
+		ast_node(cc, CaseClause, clause);
+
+		bool saw_nil = false;
+		for (Ast *type_expr : cc->list) {
+			Type *case_type = type_of_expr(type_expr);
+			if (is_type_untyped_nil(case_type)) {
+				saw_nil = true;
+			}
+		}
+
+		Entity *case_entity = implicit_entity_of_node(clause);
+		bool by_reference = (case_entity->flags & EntityFlag_Value) == 0;
+
+		cg_scope_open(p, cc->scope);
+
+		TB_Node *body_region = control_regions[i];
+		tb_inst_set_control(p->func, body_region);
+
+		if (cc->list.count == 1 && !saw_nil) {
+			// Single concrete type: bind directly to the variant storage.
+			cgValue data = {};
+			if (switch_kind == TypeSwitch_Union) {
+				data = union_data;
+			} else if (switch_kind == TypeSwitch_Any) {
+				// Field 0 of `any` is the data pointer.
+				data = cg_emit_load(p, cg_emit_struct_ep(p, parent_ptr, 0));
+			}
+			GB_ASSERT(data.kind == cgValue_Value);
+
+			Type *ct = case_entity->type;
+			Type *ct_ptr = alloc_type_pointer(ct);
+
+			cgValue ptr = {};
+
+			if (backing_ptr) { // by value
+				GB_ASSERT(!by_reference);
+
+				i64 size  = type_size_of(case_entity->type);
+				i64 align = type_align_of(case_entity->type);
+
+				// make a copy of the case value
+				tb_inst_memcpy(p->func,
+				               backing_ptr, // dst
+				               data.node,   // src
+				               tb_inst_uint(p->func, TB_TYPE_INT, size),
+				               cast(TB_CharUnits)align,
+				               false
+				);
+
+				ptr = cg_value(backing_ptr, ct_ptr);
+
+			} else { // by reference
+				GB_ASSERT(by_reference);
+				ptr = cg_emit_conv(p, data, ct_ptr);
+			}
+			GB_ASSERT(are_types_identical(case_entity->type, type_deref(ptr.type)));
+
+			cg_add_entity(p->module, case_entity, ptr);
+			// Debug info so the binding shows up with its source name.
+			String name = case_entity->token.string;
+			TB_Attrib *dbg = tb_function_attrib_variable(p->func, name.len, cast(char const *)name.text, cg_debug_type(p->module, ct));
+			tb_node_append_attrib(ptr.node, dbg);
+		} else {
+			// Multi-type or nil clause: entity keeps the parent's type.
+			if (case_entity->flags & EntityFlag_Value) {
+				// by value
+				cgAddr x = cg_add_local(p, case_entity->type, case_entity, false);
+				cg_addr_store(p, x, parent_value);
+			} else {
+				// by reference
+				cg_add_entity(p->module, case_entity, parent_value);
+			}
+		}
+
+		cg_push_target_list(p, ss->label, done_region, nullptr, nullptr);
+		cg_build_stmt_list(p, cc->stmts);
+		cg_scope_close(p, cgDeferExit_Default, body_region);
+		cg_pop_target_list(p);
+
+		cg_emit_goto(p, done_region);
+	}
+
+	cg_emit_goto(p, done_region);
+	tb_inst_set_control(p->func, done_region);
+}
+
+
+// Lowers a mutable variable declaration inside a procedure. `@static`
+// variables become module-level TB globals (optionally thread-local) with a
+// mangled name; ordinary variables become stack locals, with an optimization
+// that adopts a compound literal's address directly instead of copying it.
+gb_internal void cg_build_mutable_value_decl(cgProcedure *p, Ast *node) {
+	ast_node(vd, ValueDecl, node);
+	if (!vd->is_mutable) {
+		return;
+	}
+
+	bool is_static = false;
+	for (Ast *name : vd->names) if (!is_blank_ident(name)) {
+		// NOTE(bill): Sanity check to check for the existence of the variable's Entity
+		GB_ASSERT(name->kind == Ast_Ident);
+		Entity *e = entity_of_node(name);
+		TokenPos pos = ast_token(name).pos;
+		GB_ASSERT_MSG(e != nullptr, "\n%s missing entity for %.*s", token_pos_to_string(pos), LIT(name->Ident.token.string));
+		if (e->flags & EntityFlag_Static) {
+			// NOTE(bill): If one of the entities is static, they all are
+			is_static = true;
+		}
+	}
+
+	if (is_static) {
+		for_array(i, vd->names) {
+			Ast *ident = vd->names[i];
+			GB_ASSERT(!is_blank_ident(ident));
+			Entity *e = entity_of_node(ident);
+			GB_ASSERT(e->flags & EntityFlag_Static);
+			String name = e->token.string;
+
+			// Mangle as "<proc>-.<name>-<entity id>" to keep statics from
+			// different procedures distinct in the module namespace.
+			String mangled_name = {};
+			{
+				gbString str = gb_string_make_length(permanent_allocator(), p->name.text, p->name.len);
+				str = gb_string_appendc(str, "-");
+				// NOTE(review): `%llu` paired with `cast(long long)` is a
+				// signed/unsigned format mismatch — confirm intent.
+				str = gb_string_append_fmt(str, ".%.*s-%llu", LIT(name), cast(long long)e->id);
+				mangled_name.text = cast(u8 *)str;
+				mangled_name.len = gb_string_length(str);
+			}
+
+			cgModule *m = p->module;
+
+			TB_DebugType *debug_type = cg_debug_type(m, e->type);
+			TB_Global *global = tb_global_create(m->mod, mangled_name.len, cast(char const *)mangled_name.text, debug_type, TB_LINKAGE_PRIVATE);
+
+			// Thread-local statics live in the TLS section instead of data.
+			TB_ModuleSection *section = tb_module_get_data(m->mod);
+			if (e->Variable.thread_local_model != "") {
+				section = tb_module_get_tls(m->mod);
+				String model = e->Variable.thread_local_model;
+				if (model == "default") {
+					// TODO(bill): Thread Local Storage models
+				} else if (model == "localdynamic") {
+					// TODO(bill): Thread Local Storage models
+				} else if (model == "initialexec") {
+					// TODO(bill): Thread Local Storage models
+				} else if (model == "localexec") {
+					// TODO(bill): Thread Local Storage models
+				} else {
+					GB_PANIC("Unhandled thread local mode %.*s", LIT(model));
+				}
+			}
+
+			// Constant initializer (if any) is written into the global's
+			// storage regions; otherwise the global stays zero-initialized.
+			i64 max_objects = 0;
+			ExactValue value = {};
+
+			if (vd->values.count > 0)  {
+				GB_ASSERT(vd->names.count == vd->values.count);
+				Ast *ast_value = vd->values[i];
+				GB_ASSERT(ast_value->tav.mode == Addressing_Constant ||
+				          ast_value->tav.mode == Addressing_Invalid);
+
+				value = ast_value->tav.value;
+				max_objects = cg_global_const_calculate_region_count(value, e->type);
+			}
+			tb_global_set_storage(m->mod, section, global, type_size_of(e->type), type_align_of(e->type), max_objects);
+
+			cg_global_const_add_region(m, value, e->type, global, 0);
+
+			// Expose the global both as the entity's value and as a member.
+			TB_Node *node = tb_inst_get_symbol_address(p->func, cast(TB_Symbol *)global);
+			cgValue global_val = cg_value(node, alloc_type_pointer(e->type));
+			cg_add_entity(p->module, e, global_val);
+			cg_add_member(p->module, mangled_name, global_val);
+		}
+		return;
+	}
+
+	TEMPORARY_ALLOCATOR_GUARD();
+
+	// Evaluate all RHS values first (tuples may expand to several values).
+	auto inits = array_make<cgValue>(temporary_allocator(), 0, vd->values.count != 0 ? vd->names.count : 0);
+	for (Ast *rhs : vd->values) {
+		cgValue init = cg_build_expr(p, rhs);
+		cg_append_tuple_values(p, &inits, init);
+	}
+
+
+	auto lvals = slice_make<cgAddr>(temporary_allocator(), vd->names.count);
+	for_array(i, vd->names) {
+		Ast *name = vd->names[i];
+		if (!is_blank_ident(name)) {
+			Entity *e = entity_of_node(name);
+			bool zero_init = vd->values.count == 0;
+			if (vd->names.count == vd->values.count) {
+				Ast *expr = unparen_expr(vd->values[i]);
+				// A compound literal already materialized in memory can be
+				// adopted as the variable's storage — no local, no copy.
+				if (expr->kind == Ast_CompoundLit &&
+				    inits[i].kind == cgValue_Addr) {
+					TB_Node *ptr = inits[i].node;
+
+					if (e != nullptr && e->token.string.len > 0 && e->token.string != "_") {
+						// NOTE(bill): for debugging purposes only
+						String name = e->token.string;
+						TB_DebugType *debug_type = cg_debug_type(p->module, e->type);
+						tb_node_append_attrib(ptr, tb_function_attrib_variable(p->func, name.len, cast(char const *)name.text, debug_type));
+					}
+
+					cgAddr addr = cg_addr(inits[i]);
+					map_set(&p->variable_map, e, addr);
+					continue;
+				}
+			}
+
+			lvals[i] = cg_add_local(p, e->type, e, zero_init);
+		}
+	}
+
+
+	// Store each evaluated value into its local (skipped/adopted slots have
+	// a zero lval and a matching consumed init).
+	GB_ASSERT(vd->values.count == 0 || lvals.count == inits.count);
+	for_array(i, inits) {
+		cgAddr lval = lvals[i];
+		cgValue init = inits[i];
+		cg_addr_store(p, lval, init);
+	}
+}
+
+
+// Lowers a single statement node: saves/restores the current-statement and
+// state-flag context, applies per-node bounds-check/type-assert overrides,
+// then dispatches on the AST node kind to the specific statement builders.
+gb_internal void cg_build_stmt(cgProcedure *p, Ast *node) {
+	Ast *prev_stmt = p->curr_stmt;
+	defer (p->curr_stmt = prev_stmt);
+	p->curr_stmt = node;
+
+	// TODO(bill): check if last instruction was a terminating one or not
+
+	cg_set_debug_pos_from_node(p, node);
+
+	u16 prev_state_flags = p->state_flags;
+	defer (p->state_flags = prev_state_flags);
+
+	// Per-statement directives (#no_bounds_check etc.) override the current
+	// flags; each pair is mutually exclusive, so set one bit and clear the other.
+	if (node->state_flags != 0) {
+		u16 in = node->state_flags;
+		u16 out = p->state_flags;
+
+		if (in & StateFlag_bounds_check) {
+			out |= StateFlag_bounds_check;
+			out &= ~StateFlag_no_bounds_check;
+		} else if (in & StateFlag_no_bounds_check) {
+			out |= StateFlag_no_bounds_check;
+			out &= ~StateFlag_bounds_check;
+		}
+		if (in & StateFlag_no_type_assert) {
+			out |= StateFlag_no_type_assert;
+			out &= ~StateFlag_type_assert;
+		} else if (in & StateFlag_type_assert) {
+			out |= StateFlag_type_assert;
+			out &= ~StateFlag_no_type_assert;
+		}
+
+		p->state_flags = out;
+	}
+
+	switch (node->kind) {
+	case_ast_node(bs, EmptyStmt, node);
+	case_end;
+
+	// `using` only affects name resolution; nothing to emit.
+	case_ast_node(us, UsingStmt, node);
+	case_end;
+
+	case_ast_node(ws, WhenStmt, node);
+		cg_build_when_stmt(p, ws);
+	case_end;
+
+	case_ast_node(bs, BlockStmt, node);
+		// A labelled block gets a done region so `break label` can target it.
+		TB_Node *done = nullptr;
+		if (bs->label != nullptr) {
+			done = cg_control_region(p, "block_done");
+			cgTargetList *tl = cg_push_target_list(p, bs->label, done, nullptr, nullptr);
+			tl->is_block = true;
+		}
+
+		cg_scope_open(p, bs->scope);
+		cg_build_stmt_list(p, bs->stmts);
+		cg_scope_close(p, cgDeferExit_Default, nullptr);
+
+		if (done != nullptr) {
+			cg_emit_goto(p, done);
+			tb_inst_set_control(p->func, done);
+		}
+
+		if (bs->label != nullptr) {
+			cg_pop_target_list(p);
+		}
+	case_end;
+
+	case_ast_node(vd, ValueDecl, node);
+		cg_build_mutable_value_decl(p, node);
+	case_end;
+
+	case_ast_node(bs, BranchStmt, node);
+		// break/continue/fallthrough: resolve the target region either from
+		// an explicit label or from the innermost non-block target list.
+		TB_Node *block = nullptr;
+
+		if (bs->label != nullptr) {
+			cgBranchRegions bb = cg_lookup_branch_regions(p, bs->label);
+			switch (bs->token.kind) {
+			case Token_break:    block = bb.break_;    break;
+			case Token_continue: block = bb.continue_; break;
+			case Token_fallthrough:
+				GB_PANIC("fallthrough cannot have a label");
+				break;
+			}
+		} else {
+			for (cgTargetList *t = p->target_list; t != nullptr && block == nullptr; t = t->prev) {
+				if (t->is_block) {
+					continue;
+				}
+
+				switch (bs->token.kind) {
+				case Token_break:       block = t->break_;       break;
+				case Token_continue:    block = t->continue_;    break;
+				case Token_fallthrough: block = t->fallthrough_; break;
+				}
+			}
+		}
+		GB_ASSERT(block != nullptr);
+
+		// Run any defers scheduled between here and the branch target.
+		cg_emit_defer_stmts(p, cgDeferExit_Branch, block);
+		cg_emit_goto(p, block);
+	case_end;
+
+	case_ast_node(es, ExprStmt, node);
+		cg_build_expr(p, es->expr);
+	case_end;
+
+	case_ast_node(as, AssignStmt, node);
+		cg_build_assign_stmt(p, as);
+	case_end;
+
+	case_ast_node(rs, ReturnStmt, node);
+		cg_build_return_stmt(p, rs->results);
+	case_end;
+
+	case_ast_node(is, IfStmt, node);
+		cg_build_if_stmt(p, node);
+	case_end;
+
+	case_ast_node(fs, ForStmt, node);
+		cg_build_for_stmt(p, node);
+	case_end;
+
+	case_ast_node(rs, RangeStmt, node);
+		cg_build_range_stmt(p, node);
+	case_end;
+
+	case_ast_node(rs, UnrollRangeStmt, node);
+		GB_PANIC("TODO(bill): lb_build_unroll_range_stmt");
+		// cg_build_range_stmt(p, rs, rs->scope);
+	case_end;
+
+	case_ast_node(fs, SwitchStmt, node);
+		cg_build_switch_stmt(p, node);
+	case_end;
+
+	case_ast_node(ts, TypeSwitchStmt, node);
+		cg_build_type_switch_stmt(p, node);
+	case_end;
+
+	case_ast_node(ds, DeferStmt, node);
+		// Record the deferred statement with enough context (scope depth,
+		// context stack depth, current control region) to replay it on exit.
+		Type *pt = base_type(p->type);
+		GB_ASSERT(pt->kind == Type_Proc);
+		if (pt->Proc.calling_convention == ProcCC_Odin) {
+			GB_ASSERT(p->context_stack.count != 0);
+		}
+
+		cgDefer *d = array_add_and_get(&p->defer_stack);
+		d->kind = cgDefer_Node;
+		d->scope_index = p->scope_index;
+		d->context_stack_count = p->context_stack.count;
+		d->control_region = tb_inst_get_control(p->func);
+		GB_ASSERT(d->control_region != nullptr);
+		d->stmt = ds->stmt;
+	case_end;
+
+
+
+	default:
+		GB_PANIC("TODO cg_build_stmt %.*s", LIT(ast_strings[node->kind]));
+		break;
+	}
+}
+
+// Lowers the constant part of a value declaration: assigns IR mangled names
+// to referenced nested type declarations, then emits nested procedure
+// literals (including polymorphic specializations) and registers foreign
+// (body-less) procedure declarations. Mutable declarations are handled by
+// cg_build_mutable_value_decl instead.
+gb_internal void cg_build_constant_value_decl(cgProcedure *p, AstValueDecl *vd) {
+	if (vd == nullptr || vd->is_mutable) {
+		return;
+	}
+
+	auto *min_dep_set = &p->module->info->minimum_dependency_set;
+
+	// Pass 1: give nested type names their IR mangled names.
+	for (Ast *ident : vd->names) {
+		GB_ASSERT(ident->kind == Ast_Ident);
+		Entity *e = entity_of_node(ident);
+		GB_ASSERT(e != nullptr);
+		if (e->kind != Entity_TypeName) {
+			continue;
+		}
+
+		// Polymorphic structs are mangled even when not in the minimum
+		// dependency set, since their instantiations may still be needed.
+		bool polymorphic_struct = false;
+		if (e->type != nullptr && e->kind == Entity_TypeName) {
+			Type *bt = base_type(e->type);
+			if (bt->kind == Type_Struct) {
+				polymorphic_struct = bt->Struct.is_polymorphic;
+			}
+		}
+
+		if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) {
+			continue;
+		}
+
+		if (e->TypeName.ir_mangled_name.len != 0) {
+			// NOTE(bill): Already set
+			continue;
+		}
+
+		cg_set_nested_type_name_ir_mangled_name(e, p);
+	}
+
+	// Pass 2: emit nested procedures and foreign declarations.
+	for_array(i, vd->names) {
+		Ast *ident = vd->names[i];
+		GB_ASSERT(ident->kind == Ast_Ident);
+		Entity *e = entity_of_node(ident);
+		GB_ASSERT(e != nullptr);
+		if (e->kind != Entity_Procedure) {
+			continue;
+		}
+		GB_ASSERT (vd->values[i] != nullptr);
+
+		Ast *value = unparen_expr(vd->values[i]);
+		if (value->kind != Ast_ProcLit) {
+			continue; // It's an alias
+		}
+
+		DeclInfo *decl = decl_info_of_entity(e);
+		ast_node(pl, ProcLit, decl->proc_lit);
+		if (pl->body != nullptr) {
+			// Polymorphic procedures: emit one nested procedure per
+			// specialization present in the minimum dependency set.
+			GenProcsData *gpd = e->Procedure.gen_procs;
+			if (gpd) {
+				rw_mutex_shared_lock(&gpd->mutex);
+				for (Entity *e : gpd->procs) {
+					if (!ptr_set_exists(min_dep_set, e)) {
+						continue;
+					}
+					DeclInfo *d = decl_info_of_entity(e);
+					cg_build_nested_proc(p, &d->proc_lit->ProcLit, e);
+				}
+				rw_mutex_shared_unlock(&gpd->mutex);
+			} else {
+				cg_build_nested_proc(p, pl, e);
+			}
+		} else {
+
+			// FFI - Foreign function interface
+			String original_name = e->token.string;
+			String name = original_name;
+
+			if (e->Procedure.is_foreign) {
+				GB_PANIC("cg_add_foreign_library_path");
+				// cg_add_foreign_library_path(p->module, e->Procedure.foreign_library);
+			}
+
+			if (e->Procedure.link_name.len > 0) {
+				name = e->Procedure.link_name;
+			}
+
+			cgValue *prev_value = string_map_get(&p->module->members, name);
+			if (prev_value != nullptr) {
+				// NOTE(bill): Don't do multiple declarations in the IR
+				return;
+			}
+
+			e->Procedure.link_name = name;
+
+			cgProcedure *nested_proc = cg_procedure_create(p->module, e);
+
+			// Register the *nested* procedure's value under `name`.
+			// (Previously this used `p->value`, which published the
+			// enclosing procedure's symbol under the foreign link name.)
+			cgValue value = nested_proc->value;
+
+			array_add(&p->children, nested_proc);
+			string_map_set(&p->module->members, name, value);
+			cg_add_procedure_to_queue(nested_proc);
+		}
+	}
+}
+
+
+// Lowers a statement list in two passes: first all constant declarations
+// (so procedures/types declared later in the block are already available),
+// then every statement in source order. Foreign blocks are flattened
+// recursively during the first pass.
+gb_internal void cg_build_stmt_list(cgProcedure *p, Slice<Ast *> const &stmts) {
+	// Pass 1: pre-declare constants, including those inside foreign blocks.
+	for (Ast *stmt : stmts) {
+		if (stmt->kind == Ast_ValueDecl) {
+			cg_build_constant_value_decl(p, &stmt->ValueDecl);
+		} else if (stmt->kind == Ast_ForeignBlockDecl) {
+			ast_node(block, BlockStmt, stmt->ForeignBlockDecl.body);
+			cg_build_stmt_list(p, block->stmts);
+		}
+	}
+	// Pass 2: emit each statement.
+	for (Ast *stmt : stmts) {
+		cg_build_stmt(p, stmt);
+	}
+}
+
+
+// Emits only the statically-selected arm of a `when` statement; the checker
+// guarantees the condition is a compile-time boolean constant, so no runtime
+// branch is generated.
+gb_internal void cg_build_when_stmt(cgProcedure *p, AstWhenStmt *ws) {
+	TypeAndValue tv = type_and_value_of_expr(ws->cond);
+	GB_ASSERT(is_type_boolean(tv.type));
+	GB_ASSERT(tv.value.kind == ExactValue_Bool);
+
+	if (tv.value.value_bool) {
+		cg_build_stmt_list(p, ws->body->BlockStmt.stmts);
+		return;
+	}
+	if (ws->else_stmt == nullptr) {
+		return;
+	}
+	// `else` may be a plain block or a chained `when`.
+	if (ws->else_stmt->kind == Ast_BlockStmt) {
+		cg_build_stmt_list(p, ws->else_stmt->BlockStmt.stmts);
+	} else if (ws->else_stmt->kind == Ast_WhenStmt) {
+		cg_build_when_stmt(p, &ws->else_stmt->WhenStmt);
+	} else {
+		GB_PANIC("Invalid 'else' statement in 'when' statement");
+	}
+}
+
diff --git a/src/tilde_type_info.cpp b/src/tilde_type_info.cpp
new file mode 100644
index 000000000..16fe5fd3e
--- /dev/null
+++ b/src/tilde_type_info.cpp
@@ -0,0 +1,983 @@
+// Writes a constant ^Type_Info pointer into 'global' at byte 'offset',
+// pointing at the entry for 'type' inside the global type-info backing array.
+// The pointer is emitted as (byte offset of the element) + (relocation against
+// the array's symbol), which the object writer combines into the final address.
+gb_internal void cg_global_const_type_info_ptr(cgModule *m, Type *type, TB_Global *global, i64 offset) {
+	GB_ASSERT(type != nullptr);
+	TB_Symbol *type_table_array = cg_find_symbol_from_entity(m, cg_global_type_info_data_entity);
+
+
+	// Element index within the table, scaled to a byte offset.
+	i64 index_in_bytes = cast(i64)cg_type_info_index(m->info, type);
+	index_in_bytes *= type_size_of(t_type_info);
+
+	void *ti_ptr_ptr = tb_global_add_region(m->mod, global, offset, build_context.ptr_size);
+	// NOTE(bill): define the byte offset for the pointer
+	cg_write_int_at_ptr(ti_ptr_ptr, index_in_bytes, t_uintptr);
+
+	// NOTE(bill): this will add to the byte offset set previously
+	tb_global_add_symbol_reloc(m->mod, global, offset, type_table_array);
+}
+
+// Returns the value for the global type-info backing array (as registered
+// under cg_global_type_info_data_entity), flattened for use within 'p'.
+gb_internal cgValue cg_global_type_info_data_ptr(cgProcedure *p) {
+	cgValue v = cg_find_value_from_entity(p->module, cg_global_type_info_data_entity);
+	return cg_flatten_value(p, v);
+}
+
+// Maps 'type' to its slot in the minimum-dependency type-info table.
+// Returns the stored index + 1 (slot 0 is reserved), or -1 when the type has
+// no entry and err_on_not_found is false; panics when it is true.
+// NOTE(review): the panic text says 'lb_type_info_index' — presumably a
+// copy-paste from the LLVM backend; confirm before renaming.
+gb_internal isize cg_type_info_index(CheckerInfo *info, Type *type, bool err_on_not_found) {
+	auto *set = &info->minimum_dependency_type_info_set;
+	isize index = type_info_index(info, type, err_on_not_found);
+	if (index >= 0) {
+		auto *found = map_get(set, index);
+		if (found) {
+			GB_ASSERT(*found >= 0);
+			// +1 because slot 0 of the table is reserved (invalid/nil entry).
+			return *found + 1;
+		}
+	}
+	if (err_on_not_found) {
+		GB_PANIC("NOT FOUND lb_type_info_index '%s' @ index %td", type_to_string(type), index);
+	}
+	return -1;
+}
+
+// Returns a runtime pointer to the Type_Info entry for 'type': computes the
+// element pointer (array element pointer-indexing) into the global type-info
+// backing array. Requires RTTI to be enabled.
+gb_internal cgValue cg_type_info(cgProcedure *p, Type *type) {
+	GB_ASSERT(!build_context.no_rtti);
+
+	type = default_type(type);
+
+	isize index = cg_type_info_index(p->module->info, type);
+	GB_ASSERT(index >= 0);
+
+	cgValue data = cg_global_type_info_data_ptr(p);
+	return cg_emit_array_epi(p, data, index);
+}
+
+
+// Packs the 'typeid' bit pattern for 'type' into a u64.
+// Layout (ptr_size == 8): bits 0..55 index, 56..60 kind, 61 named,
+// 62 special, 63 reserved. For ptr_size == 4 the index field is 24 bits and
+// the kind/named/special/reserved fields start at bits 24/29/30/31.
+gb_internal u64 cg_typeid_as_u64(cgModule *m, Type *type) {
+	GB_ASSERT(!build_context.no_rtti);
+
+	type = default_type(type);
+
+	u64 id = cast(u64)cg_type_info_index(m->info, type);
+	// NOTE(review): 'id >= 0' is always true for an unsigned value; a failed
+	// lookup would have wrapped to a huge number instead — verify intent.
+	GB_ASSERT(id >= 0);
+
+	u64 kind = Typeid_Invalid;
+	// 'named' is set for distinct named types; basic types are excluded.
+	u64 named = is_type_named(type) && type->kind != Type_Basic;
+	u64 special = 0;
+	u64 reserved = 0;
+
+	// Classify by the base (unnamed) type.
+	Type *bt = base_type(type);
+	TypeKind tk = bt->kind;
+	switch (tk) {
+	case Type_Basic: {
+		u32 flags = bt->Basic.flags;
+		if (flags & BasicFlag_Boolean)  kind = Typeid_Boolean;
+		if (flags & BasicFlag_Integer)  kind = Typeid_Integer;
+		if (flags & BasicFlag_Unsigned) kind = Typeid_Integer;
+		if (flags & BasicFlag_Float)    kind = Typeid_Float;
+		if (flags & BasicFlag_Complex)  kind = Typeid_Complex;
+		if (flags & BasicFlag_Pointer)  kind = Typeid_Pointer;
+		if (flags & BasicFlag_String)   kind = Typeid_String;
+		if (flags & BasicFlag_Rune)     kind = Typeid_Rune;
+	} break;
+	case Type_Pointer:              kind = Typeid_Pointer;                break;
+	case Type_MultiPointer:         kind = Typeid_Multi_Pointer;          break;
+	case Type_Array:                kind = Typeid_Array;                  break;
+	case Type_Matrix:               kind = Typeid_Matrix;                 break;
+	case Type_EnumeratedArray:      kind = Typeid_Enumerated_Array;       break;
+	case Type_Slice:                kind = Typeid_Slice;                  break;
+	case Type_DynamicArray:         kind = Typeid_Dynamic_Array;          break;
+	case Type_Map:                  kind = Typeid_Map;                    break;
+	case Type_Struct:               kind = Typeid_Struct;                 break;
+	case Type_Enum:                 kind = Typeid_Enum;                   break;
+	case Type_Union:                kind = Typeid_Union;                  break;
+	case Type_Tuple:                kind = Typeid_Tuple;                  break;
+	case Type_Proc:                 kind = Typeid_Procedure;              break;
+	case Type_BitSet:               kind = Typeid_Bit_Set;                break;
+	case Type_SimdVector:           kind = Typeid_Simd_Vector;            break;
+	case Type_RelativePointer:      kind = Typeid_Relative_Pointer;       break;
+	case Type_RelativeMultiPointer: kind = Typeid_Relative_Multi_Pointer; break;
+	case Type_SoaPointer:           kind = Typeid_SoaPointer;             break;
+	}
+
+	// 'special' flags cstring (vs string) and signed (vs unsigned) integers.
+	if (is_type_cstring(type)) {
+		special = 1;
+	} else if (is_type_integer(type) && !is_type_unsigned(type)) {
+		special = 1;
+	}
+
+	// NOTE(review): '&~ (1<<N)' clears only the single bit N, not the whole
+	// field above it — presumably a range mask was intended; the preceding
+	// asserts keep 'id' in range so this is benign today. Confirm upstream.
+	u64 data = 0;
+	if (build_context.ptr_size == 4) {
+		GB_ASSERT(id <= (1u<<24u));
+		data |= (id       &~ (1u<<24)) << 0u;  // index
+		data |= (kind     &~ (1u<<5))  << 24u; // kind
+		data |= (named    &~ (1u<<1))  << 29u; // named
+		data |= (special  &~ (1u<<1))  << 30u; // special
+		data |= (reserved &~ (1u<<1))  << 31u; // reserved
+	} else {
+		GB_ASSERT(build_context.ptr_size == 8);
+		GB_ASSERT(id <= (1ull<<56u));
+		data |= (id       &~ (1ull<<56)) << 0ul;   // index
+		data |= (kind     &~ (1ull<<5))  << 56ull; // kind
+		data |= (named    &~ (1ull<<1))  << 61ull; // named
+		data |= (special  &~ (1ull<<1))  << 62ull; // special
+		data |= (reserved &~ (1ull<<1))  << 63ull; // reserved
+	}
+	return data;
+}
+
+// Emits the typeid of 't' as an unsigned integer constant in 'p'.
+gb_internal cgValue cg_typeid(cgProcedure *p, Type *t) {
+	u64 x = cg_typeid_as_u64(p->module, t);
+	return cg_value(tb_inst_uint(p->func, cg_data_type(t_typeid), x), t_typeid);
+}
+
+
+
+
+// Fills a []^Type_Info slice field of a Type_Info variant. Reserves 'count'
+// consecutive slots in the shared member-types backing array, writes a
+// ^Type_Info for type_proc(i, userdata) into each slot, and stores the slice
+// header {data, len} into 'global' at 'offset' (data = byte offset plus a
+// relocation against the backing array; len at offset+int_size).
+gb_internal void cg_set_type_info_member_types(cgModule *m, TB_Global *global, isize offset, isize count, void *userdata, Type *(*type_proc)(isize index, void *userdata)) {
+	if (count == 0) {
+		return;
+	}
+
+	// Claim 'count' slots from the shared backing array (bump-allocated).
+	void *data_ptr = tb_global_add_region(m->mod, global, offset+0, build_context.ptr_size);
+	i64 offset_in_bytes = cg_global_type_info_member_types.index * type_size_of(cg_global_type_info_member_types.elem_type);
+	cg_global_type_info_member_types.index += count;
+
+	// Slice data pointer = backing-array symbol + byte offset of first slot.
+	cg_write_int_at_ptr(data_ptr, offset_in_bytes, t_uintptr);
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)cg_global_type_info_member_types.global);
+
+	for (isize i = 0; i < count; i++) {
+		i64 elem_size = type_size_of(cg_global_type_info_member_types.elem_type);
+		Type *type = type_proc(i, userdata);
+		i64 offset_for_elem = offset_in_bytes + i*elem_size;
+		cg_global_const_type_info_ptr(m, type, cg_global_type_info_member_types.global, offset_for_elem);
+	}
+
+	// Slice length.
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, count, t_int);
+}
+
+
+// Fills a []string slice field of a Type_Info variant. Reserves 'count' slots
+// in the shared member-names backing array, emits a constant string for
+// name_proc(i, userdata) into each, and writes the slice header {data, len}
+// into 'global' at 'offset'.
+gb_internal void cg_set_type_info_member_names(cgModule *m, TB_Global *global, isize offset, isize count, void *userdata, String (*name_proc)(isize index, void *userdata)) {
+	if (count == 0) {
+		return;
+	}
+	// Claim 'count' slots from the shared backing array (bump-allocated).
+	void *data_ptr = tb_global_add_region(m->mod, global, offset+0, build_context.ptr_size);
+	i64 offset_in_bytes = cg_global_type_info_member_names.index * type_size_of(cg_global_type_info_member_names.elem_type);
+	cg_global_type_info_member_names.index += count;
+
+	// Slice data pointer = backing-array symbol + byte offset of first slot.
+	cg_write_int_at_ptr(data_ptr, offset_in_bytes, t_uintptr);
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)cg_global_type_info_member_names.global);
+
+	for (isize i = 0; i < count; i++) {
+		i64 elem_size = type_size_of(cg_global_type_info_member_names.elem_type);
+		String name = name_proc(i, userdata);
+		i64 offset_for_elem = offset_in_bytes + i*elem_size;
+		cg_global_const_string(m, name, cg_global_type_info_member_names.elem_type, cg_global_type_info_member_names.global, offset_for_elem);
+
+	}
+
+	// Slice length.
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, count, t_int);
+}
+
+
+// Fills a []uintptr slice field of a Type_Info variant with field byte
+// offsets. Reserves 'count' slots in the shared member-offsets backing array,
+// writes offset_proc(i, userdata) into each, and stores the slice header
+// {data, len} into 'global' at 'offset'.
+gb_internal void cg_set_type_info_member_offsets(cgModule *m, TB_Global *global, isize offset, isize count, void *userdata, i64 (*offset_proc)(isize index, void *userdata)) {
+	if (count == 0) {
+		return;
+	}
+	// Claim 'count' slots from the shared backing array (bump-allocated).
+	void *data_ptr = tb_global_add_region(m->mod, global, offset+0, build_context.ptr_size);
+	i64 offset_in_bytes = cg_global_type_info_member_offsets.index * type_size_of(cg_global_type_info_member_offsets.elem_type);
+	cg_global_type_info_member_offsets.index += count;
+
+	// Slice data pointer = backing-array symbol + byte offset of first slot.
+	cg_write_int_at_ptr(data_ptr, offset_in_bytes, t_uintptr);
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)cg_global_type_info_member_offsets.global);
+
+	for (isize i = 0; i < count; i++) {
+		i64 elem_size = type_size_of(cg_global_type_info_member_offsets.elem_type);
+		i64 the_offset = offset_proc(i, userdata);
+		i64 offset_for_elem = offset_in_bytes + i*elem_size;
+
+		// Each element is written directly into the backing array.
+		void *offset_ptr = tb_global_add_region(m->mod, cg_global_type_info_member_offsets.global, offset_for_elem, elem_size);
+		cg_write_uint_at_ptr(offset_ptr, the_offset, t_uintptr);
+	}
+
+	// Slice length.
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, count, t_int);
+}
+
+// Fills a []bool slice field of a Type_Info variant marking 'using' fields.
+// Reserves 'count' single-byte slots in the shared member-usings backing
+// array, writes usings_proc(i, userdata) into each, and stores the slice
+// header {data, len} into 'global' at 'offset'.
+gb_internal void cg_set_type_info_member_usings(cgModule *m, TB_Global *global, isize offset, isize count, void *userdata, bool (*usings_proc)(isize index, void *userdata)) {
+	if (count == 0) {
+		return;
+	}
+	// Claim 'count' slots from the shared backing array (bump-allocated).
+	void *data_ptr = tb_global_add_region(m->mod, global, offset+0, build_context.ptr_size);
+	i64 offset_in_bytes = cg_global_type_info_member_usings.index * type_size_of(cg_global_type_info_member_usings.elem_type);
+	cg_global_type_info_member_usings.index += count;
+
+	// Slice data pointer = backing-array symbol + byte offset of first slot.
+	cg_write_int_at_ptr(data_ptr, offset_in_bytes, t_uintptr);
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)cg_global_type_info_member_usings.global);
+
+	for (isize i = 0; i < count; i++) {
+		i64 elem_size = type_size_of(cg_global_type_info_member_usings.elem_type);
+		GB_ASSERT(elem_size == 1); // element type is bool, one byte each
+		bool the_usings = usings_proc(i, userdata);
+		i64 offset_for_elem = offset_in_bytes + i*elem_size;
+
+		bool *usings_ptr = cast(bool *)tb_global_add_region(m->mod, cg_global_type_info_member_usings.global, offset_for_elem, 1);
+		*usings_ptr = the_usings;
+	}
+
+	// Slice length.
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, count, t_int);
+}
+
+
+
+// Fills a []string slice field of a Type_Info variant with struct field tags.
+// Reserves 'count' slots in the shared member-tags backing array, emits a
+// constant string for tag_proc(i, userdata) into each, and stores the slice
+// header {data, len} into 'global' at 'offset'.
+gb_internal void cg_set_type_info_member_tags(cgModule *m, TB_Global *global, isize offset, isize count, void *userdata, String (*tag_proc)(isize index, void *userdata)) {
+	if (count == 0) {
+		return;
+	}
+	// Claim 'count' slots from the shared backing array (bump-allocated).
+	void *data_ptr = tb_global_add_region(m->mod, global, offset+0, build_context.ptr_size);
+	i64 offset_in_bytes = cg_global_type_info_member_tags.index * type_size_of(cg_global_type_info_member_tags.elem_type);
+	cg_global_type_info_member_tags.index += count;
+
+	// Slice data pointer = backing-array symbol + byte offset of first slot.
+	cg_write_int_at_ptr(data_ptr, offset_in_bytes, t_uintptr);
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)cg_global_type_info_member_tags.global);
+
+	for (isize i = 0; i < count; i++) {
+		i64 elem_size = type_size_of(cg_global_type_info_member_tags.elem_type);
+		String tag = tag_proc(i, userdata);
+		i64 offset_for_elem = offset_in_bytes + i*elem_size;
+		cg_global_const_string(m, tag, cg_global_type_info_member_tags.elem_type, cg_global_type_info_member_tags.global, offset_for_elem);
+
+	}
+
+	// Slice length.
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, count, t_int);
+}
+
+// Fills a []Type_Info_Enum_Value slice field of a Type_Info variant.
+// Reserves 'count' 8-byte slots in the shared enum-values backing array,
+// writes value_proc(i, userdata) into each (i64 stored via the uint writer —
+// two's complement, so negatives round-trip), and stores the slice header
+// {data, len} into 'global' at 'offset'.
+gb_internal void cg_set_type_info_member_enum_values(cgModule *m, TB_Global *global, isize offset, isize count, void *userdata, i64 (*value_proc)(isize index, void *userdata)) {
+	if (count == 0) {
+		return;
+	}
+	// Claim 'count' slots from the shared backing array (bump-allocated).
+	void *data_ptr = tb_global_add_region(m->mod, global, offset+0, build_context.ptr_size);
+	i64 offset_in_bytes = cg_global_type_info_member_enum_values.index * type_size_of(cg_global_type_info_member_enum_values.elem_type);
+	cg_global_type_info_member_enum_values.index += count;
+
+	// Slice data pointer = backing-array symbol + byte offset of first slot.
+	cg_write_int_at_ptr(data_ptr, offset_in_bytes, t_uintptr);
+	tb_global_add_symbol_reloc(m->mod, global, offset+0, cast(TB_Symbol *)cg_global_type_info_member_enum_values.global);
+
+	for (isize i = 0; i < count; i++) {
+		i64 elem_size = type_size_of(cg_global_type_info_member_enum_values.elem_type);
+		GB_ASSERT(elem_size == 8); // element type is i64
+		i64 the_value = value_proc(i, userdata);
+		i64 offset_for_elem = offset_in_bytes + i*elem_size;
+
+		void *offset_ptr = tb_global_add_region(m->mod, cg_global_type_info_member_enum_values.global, offset_for_elem, elem_size);
+		cg_write_uint_at_ptr(offset_ptr, the_value, cg_global_type_info_member_enum_values.elem_type);
+	}
+
+	// Slice length.
+	void *len_ptr = tb_global_add_region(m->mod, global, offset+build_context.int_size, build_context.int_size);
+	cg_write_int_at_ptr(len_ptr, count, t_int);
+}
+
+
+
+gb_internal void cg_setup_type_info_data(cgModule *m) {
+ if (build_context.no_rtti) {
+ return;
+ }
+
+ CheckerInfo *info = m->info;
+ { // Add type info data
+ isize max_type_info_count = info->minimum_dependency_type_info_set.count+1;
+ // gb_printf_err("max_type_info_count: %td\n", max_type_info_count);
+ Type *t = alloc_type_array(t_type_info, max_type_info_count);
+
+ i64 max_objects = cast(i64)max_type_info_count * cg_global_const_calculate_region_count_from_basic_type(t_type_info);
+
+ TB_Global *g = tb_global_create(m->mod, -1, CG_TYPE_INFO_DATA_NAME, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, max_objects);
+
+ cgValue value = cg_value(g, alloc_type_pointer(t));
+ cg_global_type_info_data_entity = alloc_entity_variable(nullptr, make_token_ident(CG_TYPE_INFO_DATA_NAME), t, EntityState_Resolved);
+ cg_add_symbol(m, cg_global_type_info_data_entity, cast(TB_Symbol *)g);
+ cg_add_entity(m, cg_global_type_info_data_entity, value);
+ }
+
+ { // Type info member buffer
+ // NOTE(bill): Removes need for heap allocation by making it global memory
+ isize count = 0;
+ isize enum_count = 0;
+
+ for (Type *t : m->info->type_info_types) {
+ isize index = cg_type_info_index(m->info, t, false);
+ if (index < 0) {
+ continue;
+ }
+
+ switch (t->kind) {
+ case Type_Union:
+ count += t->Union.variants.count;
+ break;
+ case Type_Struct:
+ count += t->Struct.fields.count;
+ break;
+ case Type_Tuple:
+ count += t->Tuple.variables.count;
+ break;
+ case Type_Enum:
+ enum_count += t->Enum.fields.count;
+ break;
+ }
+ }
+
+ if (count > 0) {
+ {
+ char const *name = CG_TYPE_INFO_TYPES_NAME;
+ Type *t = alloc_type_array(t_type_info_ptr, count);
+ TB_Global *g = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, count*2);
+ cg_global_type_info_member_types = GlobalTypeInfoData{g, t, t_type_info_ptr, 0};
+ }
+ {
+ char const *name = CG_TYPE_INFO_NAMES_NAME;
+ Type *t = alloc_type_array(t_string, count);
+ TB_Global *g = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, count*2);
+ cg_global_type_info_member_names = GlobalTypeInfoData{g, t, t_string, 0};
+ }
+ {
+ char const *name = CG_TYPE_INFO_OFFSETS_NAME;
+ Type *t = alloc_type_array(t_uintptr, count);
+ TB_Global *g = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, count);
+ cg_global_type_info_member_offsets = GlobalTypeInfoData{g, t, t_uintptr, 0};
+ }
+
+ {
+ char const *name = CG_TYPE_INFO_USINGS_NAME;
+ Type *t = alloc_type_array(t_bool, count);
+ TB_Global *g = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, count);
+ cg_global_type_info_member_usings = GlobalTypeInfoData{g, t, t_bool, 0};
+ }
+
+ {
+ char const *name = CG_TYPE_INFO_TAGS_NAME;
+ Type *t = alloc_type_array(t_string, count);
+ TB_Global *g = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, count*2);
+ cg_global_type_info_member_tags = GlobalTypeInfoData{g, t, t_string, 0};
+ }
+ }
+
+ if (enum_count > 0) {
+ char const *name = CG_TYPE_INFO_ENUM_VALUES_NAME;
+ Type *t = alloc_type_array(t_i64, enum_count);
+ TB_Global *g = tb_global_create(m->mod, -1, name, nullptr, TB_LINKAGE_PRIVATE);
+ tb_global_set_storage(m->mod, tb_module_get_rdata(m->mod), g, type_size_of(t), 16, enum_count);
+ cg_global_type_info_member_enum_values = GlobalTypeInfoData{g, t, t_i64, 0};
+ }
+ }
+ gb_unused(info);
+
+
+ i64 global_type_info_data_entity_count = 0;
+
+ // NOTE(bill): Set the type_table slice with the global backing array
+ TB_Global *type_table_slice = cast(TB_Global *)cg_find_symbol_from_entity(m, scope_lookup_current(m->info->runtime_package->scope, str_lit("type_table")));
+ GB_ASSERT(type_table_slice != nullptr);
+
+ TB_Global *type_table_array = cast(TB_Global *)cg_find_symbol_from_entity(m, cg_global_type_info_data_entity);
+ GB_ASSERT(type_table_array != nullptr);
+
+ Type *type = base_type(cg_global_type_info_data_entity->type);
+ GB_ASSERT(is_type_array(type));
+ global_type_info_data_entity_count = type->Array.count;
+
+ tb_global_add_symbol_reloc(m->mod, type_table_slice, 0, cast(TB_Symbol *)type_table_array);
+
+ void *len_ptr = tb_global_add_region(m->mod, type_table_slice, build_context.int_size, build_context.int_size);
+ cg_write_int_at_ptr(len_ptr, type->Array.count, t_int);
+
+ // Useful types
+ Entity *type_info_flags_entity = find_core_entity(info->checker, str_lit("Type_Info_Flags"));
+ Type *t_type_info_flags = type_info_flags_entity->type;
+ GB_ASSERT(type_size_of(t_type_info_flags) == 4);
+
+ auto entries_handled = slice_make<bool>(heap_allocator(), cast(isize)global_type_info_data_entity_count);
+ defer (gb_free(heap_allocator(), entries_handled.data));
+ entries_handled[0] = true;
+
+
+ i64 type_info_size = type_size_of(t_type_info);
+ i64 size_offset = type_offset_of(t_type_info, 0);
+ i64 align_offset = type_offset_of(t_type_info, 1);
+ i64 flags_offset = type_offset_of(t_type_info, 2);
+ i64 id_offset = type_offset_of(t_type_info, 3);
+ i64 variant_offset = type_offset_of(t_type_info, 4);
+
+ Type *type_info_union = base_type(t_type_info)->Struct.fields[4]->type;
+ GB_ASSERT(type_info_union->kind == Type_Union);
+
+ i64 union_tag_offset = type_info_union->Union.variant_block_size;
+ Type *ti_union_tag_type = union_tag_type(type_info_union);
+ u64 union_tag_type_size = type_size_of(ti_union_tag_type);
+
+ auto const &set_bool = [](cgModule *m, TB_Global *global, i64 offset, bool value) {
+ bool *ptr = cast(bool *)tb_global_add_region(m->mod, global, offset, 1);
+ *ptr = value;
+ };
+
+
+ for_array(type_info_type_index, info->type_info_types) {
+ Type *t = info->type_info_types[type_info_type_index];
+ if (t == nullptr || t == t_invalid) {
+ continue;
+ }
+
+ isize entry_index = cg_type_info_index(info, t, false);
+ if (entry_index <= 0) {
+ continue;
+ }
+
+ if (entries_handled[entry_index]) {
+ continue;
+ }
+ entries_handled[entry_index] = true;
+
+ TB_Global *global = type_table_array;
+
+ i64 offset = entry_index * type_info_size;
+
+ i64 size = type_size_of(t);
+ i64 align = type_align_of(t);
+ u32 flags = type_info_flags_of_type(t);
+ u64 id = cg_typeid_as_u64(m, t);
+
+ void *size_ptr = tb_global_add_region(m->mod, global, offset+size_offset, build_context.int_size);
+ void *align_ptr = tb_global_add_region(m->mod, global, offset+align_offset, build_context.int_size);
+ void *flags_ptr = tb_global_add_region(m->mod, global, offset+flags_offset, 4);
+ void *id_ptr = tb_global_add_region(m->mod, global, offset+id_offset, build_context.ptr_size);
+ cg_write_int_at_ptr (size_ptr, size, t_int);
+ cg_write_int_at_ptr (align_ptr, align, t_int);
+ cg_write_int_at_ptr (flags_ptr, flags, t_u32);
+ cg_write_uint_at_ptr(id_ptr, id, t_typeid);
+
+
+ // add data to the offset to make it easier to deal with later on
+ offset += variant_offset;
+
+ Type *tag_type = nullptr;
+
+ switch (t->kind) {
+ case Type_Named: {
+ // Type_Info_Named :: struct {
+ // name: string,
+ // base: ^Type_Info,
+ // pkg: string,
+ // loc: Source_Code_Location,
+ // }
+ tag_type = t_type_info_named;
+
+ if (t->Named.type_name->pkg) {
+ i64 pkg_offset = type_offset_of(tag_type, 2);
+ String pkg_name = t->Named.type_name->pkg->name;
+ cg_global_const_string(m, pkg_name, t_string, global, offset+pkg_offset);
+ }
+
+ String proc_name = {};
+ if (t->Named.type_name->parent_proc_decl) {
+ DeclInfo *decl = t->Named.type_name->parent_proc_decl;
+ if (decl->entity && decl->entity->kind == Entity_Procedure) {
+ i64 name_offset = type_offset_of(tag_type, 0);
+ proc_name = decl->entity->token.string;
+ cg_global_const_string(m, proc_name, t_string, global, offset+name_offset);
+ }
+ }
+
+ i64 loc_offset = type_offset_of(tag_type, 3);
+ TokenPos pos = t->Named.type_name->token.pos;
+ cg_global_source_code_location_const(m, proc_name, pos, global, offset+loc_offset);
+
+ i64 base_offset = type_offset_of(tag_type, 1);
+ cg_global_const_type_info_ptr(m, t->Named.base, global, offset+base_offset);
+ break;
+ }
+
+ case Type_Basic:
+ switch (t->Basic.kind) {
+ case Basic_bool:
+ case Basic_b8:
+ case Basic_b16:
+ case Basic_b32:
+ case Basic_b64:
+ tag_type = t_type_info_boolean;
+ break;
+
+ case Basic_i8:
+ case Basic_u8:
+ case Basic_i16:
+ case Basic_u16:
+ case Basic_i32:
+ case Basic_u32:
+ case Basic_i64:
+ case Basic_u64:
+ case Basic_i128:
+ case Basic_u128:
+
+ case Basic_i16le:
+ case Basic_u16le:
+ case Basic_i32le:
+ case Basic_u32le:
+ case Basic_i64le:
+ case Basic_u64le:
+ case Basic_i128le:
+ case Basic_u128le:
+ case Basic_i16be:
+ case Basic_u16be:
+ case Basic_i32be:
+ case Basic_u32be:
+ case Basic_i64be:
+ case Basic_u64be:
+ case Basic_i128be:
+ case Basic_u128be:
+
+ case Basic_int:
+ case Basic_uint:
+ case Basic_uintptr: {
+ tag_type = t_type_info_integer;
+
+ bool is_signed = (t->Basic.flags & BasicFlag_Unsigned) == 0;
+			// NOTE(bill): This matches the runtime layout
+ u8 endianness_value = 0;
+ if (t->Basic.flags & BasicFlag_EndianLittle) {
+ endianness_value = 1;
+ } else if (t->Basic.flags & BasicFlag_EndianBig) {
+ endianness_value = 2;
+ }
+ u8 *signed_ptr = cast(u8 *)tb_global_add_region(m->mod, global, offset+0, 1);
+ u8 *endianness_ptr = cast(u8 *)tb_global_add_region(m->mod, global, offset+1, 1);
+ *signed_ptr = is_signed;
+ *endianness_ptr = endianness_value;
+ break;
+ }
+
+ case Basic_rune:
+ tag_type = t_type_info_rune;
+ break;
+
+ case Basic_f16:
+ case Basic_f32:
+ case Basic_f64:
+ case Basic_f16le:
+ case Basic_f32le:
+ case Basic_f64le:
+ case Basic_f16be:
+ case Basic_f32be:
+ case Basic_f64be:
+ {
+ tag_type = t_type_info_float;
+
+			// NOTE(bill): This matches the runtime layout
+ u8 endianness_value = 0;
+ if (t->Basic.flags & BasicFlag_EndianLittle) {
+ endianness_value = 1;
+ } else if (t->Basic.flags & BasicFlag_EndianBig) {
+ endianness_value = 2;
+ }
+
+ u8 *ptr = cast(u8 *)tb_global_add_region(m->mod, global, offset+0, 1);
+ *ptr = endianness_value;
+ }
+ break;
+
+ case Basic_complex32:
+ case Basic_complex64:
+ case Basic_complex128:
+ tag_type = t_type_info_complex;
+ break;
+
+ case Basic_quaternion64:
+ case Basic_quaternion128:
+ case Basic_quaternion256:
+ tag_type = t_type_info_quaternion;
+ break;
+
+ case Basic_rawptr:
+ tag_type = t_type_info_pointer;
+ break;
+
+ case Basic_string:
+ tag_type = t_type_info_string;
+ break;
+
+ case Basic_cstring:
+ tag_type = t_type_info_string;
+ set_bool(m, global, offset+0, true);
+ break;
+
+ case Basic_any:
+ tag_type = t_type_info_any;
+ break;
+
+ case Basic_typeid:
+ tag_type = t_type_info_typeid;
+ break;
+ }
+ break;
+
+ case Type_Pointer:
+ tag_type = t_type_info_pointer;
+ cg_global_const_type_info_ptr(m, t->Pointer.elem, global, offset+0);
+ break;
+ case Type_MultiPointer:
+ tag_type = t_type_info_multi_pointer;
+ cg_global_const_type_info_ptr(m, t->MultiPointer.elem, global, offset+0);
+ break;
+ case Type_SoaPointer:
+ tag_type = t_type_info_soa_pointer;
+ cg_global_const_type_info_ptr(m, t->SoaPointer.elem, global, offset+0);
+ break;
+
+ case Type_Array:
+ {
+ tag_type = t_type_info_array;
+
+ cg_global_const_type_info_ptr(m, t->Array.elem, global, offset+0);
+ void *elem_size_ptr = tb_global_add_region(m->mod, global, offset+1*build_context.int_size, build_context.int_size);
+ void *count_ptr = tb_global_add_region(m->mod, global, offset+2*build_context.int_size, build_context.int_size);
+
+ cg_write_int_at_ptr(elem_size_ptr, type_size_of(t->Array.elem), t_int);
+ cg_write_int_at_ptr(count_ptr, t->Array.count, t_int);
+ }
+ break;
+
+ case Type_EnumeratedArray:
+ {
+ tag_type = t_type_info_enumerated_array;
+
+ i64 elem_offset = type_offset_of(tag_type, 0);
+ i64 index_offset = type_offset_of(tag_type, 1);
+ i64 elem_size_offset = type_offset_of(tag_type, 2);
+ i64 count_offset = type_offset_of(tag_type, 3);
+ i64 min_value_offset = type_offset_of(tag_type, 4);
+ i64 max_value_offset = type_offset_of(tag_type, 5);
+ i64 is_sparse_offset = type_offset_of(tag_type, 6);
+
+ cg_global_const_type_info_ptr(m, t->EnumeratedArray.elem, global, offset+elem_offset);
+ cg_global_const_type_info_ptr(m, t->EnumeratedArray.index, global, offset+index_offset);
+
+ void *elem_size_ptr = tb_global_add_region(m->mod, global, offset+elem_size_offset, build_context.int_size);
+ void *count_ptr = tb_global_add_region(m->mod, global, offset+count_offset, build_context.int_size);
+
+ void *min_value_ptr = tb_global_add_region(m->mod, global, offset+min_value_offset, type_size_of(t_type_info_enum_value));
+ void *max_value_ptr = tb_global_add_region(m->mod, global, offset+max_value_offset, type_size_of(t_type_info_enum_value));
+
+ cg_write_int_at_ptr(elem_size_ptr, type_size_of(t->EnumeratedArray.elem), t_int);
+ cg_write_int_at_ptr(count_ptr, t->EnumeratedArray.count, t_int);
+
+ cg_write_int_at_ptr(min_value_ptr, exact_value_to_i64(*t->EnumeratedArray.min_value), t_type_info_enum_value);
+ cg_write_int_at_ptr(max_value_ptr, exact_value_to_i64(*t->EnumeratedArray.max_value), t_type_info_enum_value);
+ set_bool(m, global, offset+is_sparse_offset, t->EnumeratedArray.is_sparse);
+ }
+ break;
+
+ case Type_DynamicArray:
+ {
+ tag_type = t_type_info_dynamic_array;
+
+ cg_global_const_type_info_ptr(m, t->DynamicArray.elem, global, offset+0);
+ void *elem_size_ptr = tb_global_add_region(m->mod, global, offset+1*build_context.int_size, build_context.int_size);
+ cg_write_int_at_ptr(elem_size_ptr, type_size_of(t->DynamicArray.elem), t_int);
+ }
+ break;
+ case Type_Slice:
+ {
+ tag_type = t_type_info_slice;
+
+ cg_global_const_type_info_ptr(m, t->Slice.elem, global, offset+0);
+ void *elem_size_ptr = tb_global_add_region(m->mod, global, offset+1*build_context.int_size, build_context.int_size);
+ cg_write_int_at_ptr(elem_size_ptr, type_size_of(t->Slice.elem), t_int);
+ }
+ break;
+
+ case Type_Proc:
+ {
+ tag_type = t_type_info_procedure;
+
+ i64 params_offset = type_offset_of(tag_type, 0);
+ i64 results_offset = type_offset_of(tag_type, 1);
+ i64 variadic_offset = type_offset_of(tag_type, 2);
+ i64 convention_offset = type_offset_of(tag_type, 3);
+
+ if (t->Proc.params) {
+ cg_global_const_type_info_ptr(m, t->Proc.params, global, offset+params_offset);
+ }
+ if (t->Proc.results) {
+ cg_global_const_type_info_ptr(m, t->Proc.results, global, offset+results_offset);
+ }
+
+ set_bool(m, global, offset+variadic_offset, t->Proc.variadic);
+
+ u8 *convention_ptr = cast(u8 *)tb_global_add_region(m->mod, global, offset+convention_offset, 1);
+ *convention_ptr = cast(u8)t->Proc.calling_convention;
+ }
+ break;
+
+ case Type_Tuple:
+ {
+ tag_type = t_type_info_parameters;
+
+ i64 types_offset = type_offset_of(tag_type, 0);
+ i64 names_offset = type_offset_of(tag_type, 1);
+
+ i64 count = t->Tuple.variables.count;
+
+ cg_set_type_info_member_types(m, global, offset+types_offset, count, t, [](isize i, void *userdata) -> Type * {
+ Type *t = cast(Type *)userdata;
+ return t->Tuple.variables[i]->type;
+ });
+
+ cg_set_type_info_member_names(m, global, offset+names_offset, count, t, [](isize i, void *userdata) -> String {
+ Type *t = cast(Type *)userdata;
+ return t->Tuple.variables[i]->token.string;
+ });
+ }
+ break;
+
+ case Type_Enum:
+ {
+ tag_type = t_type_info_enum;
+
+ i64 base_offset = type_offset_of(tag_type, 0);
+ i64 names_offset = type_offset_of(tag_type, 1);
+ i64 values_offset = type_offset_of(tag_type, 2);
+
+ cg_global_const_type_info_ptr(m, t->Enum.base_type, global, offset+base_offset);
+
+ i64 count = t->Enum.fields.count;
+
+ cg_set_type_info_member_names(m, global, offset+names_offset, count, t, [](isize i, void *userdata) -> String {
+ Type *t = cast(Type *)userdata;
+ return t->Enum.fields[i]->token.string;
+ });
+
+ cg_set_type_info_member_enum_values(m, global, offset+values_offset, count, t, [](isize i, void *userdata) -> i64 {
+ Type *t = cast(Type *)userdata;
+ Entity *e = t->Enum.fields[i];
+ GB_ASSERT(e->kind == Entity_Constant);
+ return exact_value_to_i64(e->Constant.value);
+ });
+ }
+ break;
+ case Type_Struct:
+ {
+ tag_type = t_type_info_struct;
+
+ i64 types_offset = type_offset_of(tag_type, 0);
+ i64 names_offset = type_offset_of(tag_type, 1);
+ i64 offsets_offset = type_offset_of(tag_type, 2);
+ i64 usings_offset = type_offset_of(tag_type, 3);
+ i64 tags_offset = type_offset_of(tag_type, 4);
+
+ i64 is_packed_offset = type_offset_of(tag_type, 5);
+ i64 is_raw_union_offset = type_offset_of(tag_type, 6);
+ i64 is_no_copy_offset = type_offset_of(tag_type, 7);
+ i64 custom_align_offset = type_offset_of(tag_type, 8);
+
+ i64 equal_offset = type_offset_of(tag_type, 9);
+
+ i64 soa_kind_offset = type_offset_of(tag_type, 10);
+ i64 soa_base_type_offset = type_offset_of(tag_type, 11);
+ i64 soa_len_offset = type_offset_of(tag_type, 12);
+
+ // TODO(bill): equal proc stuff
+ gb_unused(equal_offset);
+
+ i64 count = t->Struct.fields.count;
+
+ cg_set_type_info_member_types(m, global, offset+types_offset, count, t, [](isize i, void *userdata) -> Type * {
+ Type *t = cast(Type *)userdata;
+ return t->Struct.fields[i]->type;
+ });
+
+ cg_set_type_info_member_names(m, global, offset+names_offset, count, t, [](isize i, void *userdata) -> String {
+ Type *t = cast(Type *)userdata;
+ return t->Struct.fields[i]->token.string;
+ });
+
+ cg_set_type_info_member_offsets(m, global, offset+offsets_offset, count, t, [](isize i, void *userdata) -> i64 {
+ Type *t = cast(Type *)userdata;
+ return t->Struct.offsets[i];
+ });
+
+ cg_set_type_info_member_usings(m, global, offset+usings_offset, count, t, [](isize i, void *userdata) -> bool {
+ Type *t = cast(Type *)userdata;
+ return (t->Struct.fields[i]->flags & EntityFlag_Using) != 0;
+ });
+
+ cg_set_type_info_member_tags(m, global, offset+tags_offset, count, t, [](isize i, void *userdata) -> String {
+ Type *t = cast(Type *)userdata;
+ return t->Struct.tags[i];
+ });
+
+
+ set_bool(m, global, offset+is_packed_offset, t->Struct.is_packed);
+ set_bool(m, global, offset+is_raw_union_offset, t->Struct.is_raw_union);
+ set_bool(m, global, offset+is_no_copy_offset, t->Struct.is_no_copy);
+ set_bool(m, global, offset+custom_align_offset, t->Struct.custom_align != 0);
+
+ if (t->Struct.soa_kind != StructSoa_None) {
+ u8 *kind_ptr = cast(u8 *)tb_global_add_region(m->mod, global, offset+soa_kind_offset, 1);
+ *kind_ptr = cast(u8)t->Struct.soa_kind;
+
+ cg_global_const_type_info_ptr(m, t->Struct.soa_elem, global, offset+soa_base_type_offset);
+
+ void *soa_len_ptr = tb_global_add_region(m->mod, global, offset+soa_len_offset, build_context.int_size);
+ cg_write_int_at_ptr(soa_len_ptr, t->Struct.soa_count, t_int);
+ }
+ }
+ break;
+ case Type_Union:
+ {
+ tag_type = t_type_info_union;
+
+ i64 variants_offset = type_offset_of(tag_type, 0);
+ i64 tag_offset_offset = type_offset_of(tag_type, 1);
+ i64 tag_type_offset = type_offset_of(tag_type, 2);
+
+ i64 equal_offset = type_offset_of(tag_type, 3);
+
+ i64 custom_align_offset = type_offset_of(tag_type, 4);
+ i64 no_nil_offset = type_offset_of(tag_type, 5);
+ i64 shared_nil_offset = type_offset_of(tag_type, 6);
+
+ // TODO(bill): equal procs
+ gb_unused(equal_offset);
+
+ i64 count = t->Union.variants.count;
+
+ cg_set_type_info_member_types(m, global, offset+variants_offset, count, t, [](isize i, void *userdata) -> Type * {
+ Type *t = cast(Type *)userdata;
+ return t->Union.variants[i];
+ });
+
+ void *tag_offset_ptr = tb_global_add_region(m->mod, global, offset+tag_offset_offset, build_context.ptr_size);
+ cg_write_uint_at_ptr(tag_offset_ptr, t->Union.variant_block_size, t_uintptr);
+
+ cg_global_const_type_info_ptr(m, union_tag_type(t), global, offset+tag_type_offset);
+
+ set_bool(m, global, offset+custom_align_offset, t->Union.custom_align != 0);
+ set_bool(m, global, offset+no_nil_offset, t->Union.kind == UnionType_no_nil);
+ set_bool(m, global, offset+shared_nil_offset, t->Union.kind == UnionType_shared_nil);
+ }
+ break;
+ case Type_Map:
+ {
+ tag_type = t_type_info_map;
+
+ i64 key_offset = type_offset_of(tag_type, 0);
+ i64 value_offset = type_offset_of(tag_type, 1);
+ i64 map_info_offset = type_offset_of(tag_type, 2);
+
+ // TODO(bill): map info
+ gb_unused(map_info_offset);
+
+ cg_global_const_type_info_ptr(m, t->Map.key, global, offset+key_offset);
+ cg_global_const_type_info_ptr(m, t->Map.value, global, offset+value_offset);
+
+ }
+ break;
+ case Type_BitSet:
+ {
+ tag_type = t_type_info_bit_set;
+
+ i64 elem_offset = type_offset_of(tag_type, 0);
+ i64 underlying_offset = type_offset_of(tag_type, 1);
+ i64 lower_offset = type_offset_of(tag_type, 2);
+ i64 upper_offset = type_offset_of(tag_type, 3);
+
+ cg_global_const_type_info_ptr(m, t->BitSet.elem, global, offset+elem_offset);
+ if (t->BitSet.underlying) {
+ cg_global_const_type_info_ptr(m, t->BitSet.underlying, global, offset+underlying_offset);
+ }
+
+ void *lower_ptr = tb_global_add_region(m->mod, global, offset+lower_offset, 8);
+ void *upper_ptr = tb_global_add_region(m->mod, global, offset+upper_offset, 8);
+
+ cg_write_int_at_ptr(lower_ptr, t->BitSet.lower, t_i64);
+ cg_write_int_at_ptr(upper_ptr, t->BitSet.upper, t_i64);
+ }
+ break;
+ case Type_SimdVector:
+ {
+ tag_type = t_type_info_simd_vector;
+
+ i64 elem_offset = type_offset_of(tag_type, 0);
+ i64 elem_size_offset = type_offset_of(tag_type, 1);
+ i64 count_offset = type_offset_of(tag_type, 2);
+
+ cg_global_const_type_info_ptr(m, t->SimdVector.elem, global, offset+elem_offset);
+
+ void *elem_size_ptr = tb_global_add_region(m->mod, global, offset+elem_size_offset, build_context.int_size);
+ void *count_ptr = tb_global_add_region(m->mod, global, offset+count_offset, build_context.int_size);
+
+ cg_write_int_at_ptr(elem_size_ptr, type_size_of(t->SimdVector.elem), t_int);
+ cg_write_int_at_ptr(count_ptr, t->SimdVector.count, t_int);
+ }
+ break;
+
+ case Type_RelativePointer:
+ {
+ tag_type = t_type_info_relative_pointer;
+
+ i64 pointer_offset = type_offset_of(tag_type, 0);
+ i64 base_integer_offset = type_offset_of(tag_type, 1);
+
+ cg_global_const_type_info_ptr(m, t->RelativePointer.pointer_type, global, offset+pointer_offset);
+ cg_global_const_type_info_ptr(m, t->RelativePointer.base_integer, global, offset+base_integer_offset);
+ }
+ break;
+ case Type_RelativeMultiPointer:
+ {
+ tag_type = t_type_info_relative_multi_pointer;
+
+ i64 pointer_offset = type_offset_of(tag_type, 0);
+ i64 base_integer_offset = type_offset_of(tag_type, 1);
+
+ cg_global_const_type_info_ptr(m, t->RelativePointer.pointer_type, global, offset+pointer_offset);
+ cg_global_const_type_info_ptr(m, t->RelativePointer.base_integer, global, offset+base_integer_offset);
+ }
+ break;
+ case Type_Matrix:
+ {
+ tag_type = t_type_info_matrix;
+
+ i64 elem_offset = type_offset_of(tag_type, 0);
+ i64 elem_size_offset = type_offset_of(tag_type, 1);
+ i64 elem_stride_offset = type_offset_of(tag_type, 2);
+ i64 row_count_offset = type_offset_of(tag_type, 3);
+ i64 column_count_offset = type_offset_of(tag_type, 4);
+
+ cg_global_const_type_info_ptr(m, t->Matrix.elem, global, offset+elem_offset);
+
+ void *elem_size_ptr = tb_global_add_region(m->mod, global, offset+elem_size_offset, build_context.int_size);
+ void *elem_stride_ptr = tb_global_add_region(m->mod, global, offset+elem_stride_offset, build_context.int_size);
+ void *row_count_ptr = tb_global_add_region(m->mod, global, offset+row_count_offset, build_context.int_size);
+ void *column_count_ptr = tb_global_add_region(m->mod, global, offset+column_count_offset, build_context.int_size);
+
+ cg_write_int_at_ptr(elem_size_ptr, type_size_of(t->Matrix.elem), t_int);
+ cg_write_int_at_ptr(elem_stride_ptr, matrix_type_stride_in_elems(t), t_int);
+ cg_write_int_at_ptr(row_count_ptr, t->Matrix.row_count, t_int);
+ cg_write_int_at_ptr(column_count_ptr, t->Matrix.column_count, t_int);
+
+ }
+ break;
+ }
+
+ if (tag_type != nullptr) {
+ i64 union_index = union_variant_index(type_info_union, tag_type);
+ GB_ASSERT(union_index != 0);
+ void *tag_ptr = tb_global_add_region(m->mod, global, offset+union_tag_offset, union_tag_type_size);
+ cg_write_int_at_ptr(tag_ptr, union_index, ti_union_tag_type);
+ }
+
+ }
+} \ No newline at end of file
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 17a396b9f..ad7aa81de 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -696,8 +696,8 @@ gb_internal void tokenizer_get_token(Tokenizer *t, Token *token, int repeat=0) {
if (entry->kind != Token_Invalid && entry->hash == hash) {
if (str_eq(entry->text, token->string)) {
token->kind = entry->kind;
- if (token->kind == Token_not_in && entry->text == "notin") {
- syntax_warning(*token, "'notin' is deprecated in favour of 'not_in'");
+ if (token->kind == Token_not_in && entry->text.len == 5) {
+ syntax_error(*token, "Did you mean 'not_in'?");
}
}
}
diff --git a/src/types.cpp b/src/types.cpp
index 385ca926d..22deca1dc 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -143,6 +143,7 @@ struct TypeStruct {
Type * soa_elem;
i32 soa_count;
StructSoaKind soa_kind;
+	BlockingMutex mutex; // for setting offsets
bool is_polymorphic;
bool are_offsets_set : 1;
@@ -244,6 +245,7 @@ struct TypeProc {
TYPE_KIND(Tuple, struct { \
Slice<Entity *> variables; /* Entity_Variable */ \
i64 * offsets; \
+		BlockingMutex mutex; /* for setting offsets */ \
bool are_offsets_being_processed; \
bool are_offsets_set; \
bool is_packed; \
@@ -265,8 +267,8 @@ struct TypeProc {
Type *pointer_type; \
Type *base_integer; \
}) \
- TYPE_KIND(RelativeSlice, struct { \
- Type *slice_type; \
+ TYPE_KIND(RelativeMultiPointer, struct { \
+ Type *pointer_type; \
Type *base_integer; \
}) \
TYPE_KIND(Matrix, struct { \
@@ -347,7 +349,7 @@ enum Typeid_Kind : u8 {
Typeid_Bit_Set,
Typeid_Simd_Vector,
Typeid_Relative_Pointer,
- Typeid_Relative_Slice,
+ Typeid_Relative_Multi_Pointer,
Typeid_Matrix,
Typeid_SoaPointer,
};
@@ -594,6 +596,7 @@ gb_global Type *t_untyped_uninit = &basic_types[Basic_UntypedUninit];
gb_global Type *t_u8_ptr = nullptr;
+gb_global Type *t_u8_multi_ptr = nullptr;
gb_global Type *t_int_ptr = nullptr;
gb_global Type *t_i64_ptr = nullptr;
gb_global Type *t_f64_ptr = nullptr;
@@ -632,7 +635,7 @@ gb_global Type *t_type_info_map = nullptr;
gb_global Type *t_type_info_bit_set = nullptr;
gb_global Type *t_type_info_simd_vector = nullptr;
gb_global Type *t_type_info_relative_pointer = nullptr;
-gb_global Type *t_type_info_relative_slice = nullptr;
+gb_global Type *t_type_info_relative_multi_pointer = nullptr;
gb_global Type *t_type_info_matrix = nullptr;
gb_global Type *t_type_info_soa_pointer = nullptr;
@@ -661,7 +664,7 @@ gb_global Type *t_type_info_map_ptr = nullptr;
gb_global Type *t_type_info_bit_set_ptr = nullptr;
gb_global Type *t_type_info_simd_vector_ptr = nullptr;
gb_global Type *t_type_info_relative_pointer_ptr = nullptr;
-gb_global Type *t_type_info_relative_slice_ptr = nullptr;
+gb_global Type *t_type_info_relative_multi_pointer_ptr = nullptr;
gb_global Type *t_type_info_matrix_ptr = nullptr;
gb_global Type *t_type_info_soa_pointer_ptr = nullptr;
@@ -725,7 +728,7 @@ struct TypePath;
gb_internal i64 type_size_of (Type *t);
gb_internal i64 type_align_of (Type *t);
-gb_internal i64 type_offset_of (Type *t, i32 index);
+gb_internal i64 type_offset_of (Type *t, i64 index, Type **field_type_=nullptr);
gb_internal gbString type_to_string (Type *type, bool shorthand=true);
gb_internal gbString type_to_string (Type *type, gbAllocator allocator, bool shorthand=true);
gb_internal i64 type_size_of_internal(Type *t, TypePath *path);
@@ -734,6 +737,7 @@ gb_internal Type * bit_set_to_int(Type *t);
gb_internal bool are_types_identical(Type *x, Type *y);
gb_internal bool is_type_pointer(Type *t);
+gb_internal bool is_type_multi_pointer(Type *t);
gb_internal bool is_type_soa_pointer(Type *t);
gb_internal bool is_type_proc(Type *t);
gb_internal bool is_type_slice(Type *t);
@@ -821,6 +825,9 @@ gb_internal void type_path_pop(TypePath *tp) {
#define FAILURE_ALIGNMENT 0
gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
+ if (t == nullptr) {
+ return true;
+ }
if (ptr_set_exists(s, t)) {
return true;
}
@@ -829,6 +836,10 @@ gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
}
gb_internal bool type_ptr_set_exists(PtrSet<Type *> *s, Type *t) {
+ if (t == nullptr) {
+ return true;
+ }
+
if (ptr_set_exists(s, t)) {
return true;
}
@@ -988,7 +999,7 @@ gb_internal Type *alloc_type_enumerated_array(Type *elem, Type *index, ExactValu
gb_internal Type *alloc_type_slice(Type *elem) {
Type *t = alloc_type(Type_Slice);
- t->Array.elem = elem;
+ t->Slice.elem = elem;
return t;
}
@@ -1025,12 +1036,12 @@ gb_internal Type *alloc_type_relative_pointer(Type *pointer_type, Type *base_int
return t;
}
-gb_internal Type *alloc_type_relative_slice(Type *slice_type, Type *base_integer) {
- GB_ASSERT(is_type_slice(slice_type));
+gb_internal Type *alloc_type_relative_multi_pointer(Type *pointer_type, Type *base_integer) {
+ GB_ASSERT(is_type_multi_pointer(pointer_type));
GB_ASSERT(is_type_integer(base_integer));
- Type *t = alloc_type(Type_RelativeSlice);
- t->RelativeSlice.slice_type = slice_type;
- t->RelativeSlice.base_integer = base_integer;
+ Type *t = alloc_type(Type_RelativeMultiPointer);
+ t->RelativeMultiPointer.pointer_type = pointer_type;
+ t->RelativeMultiPointer.base_integer = base_integer;
return t;
}
@@ -1541,9 +1552,9 @@ gb_internal bool is_type_relative_pointer(Type *t) {
t = base_type(t);
return t->kind == Type_RelativePointer;
}
-gb_internal bool is_type_relative_slice(Type *t) {
+gb_internal bool is_type_relative_multi_pointer(Type *t) {
t = base_type(t);
- return t->kind == Type_RelativeSlice;
+ return t->kind == Type_RelativeMultiPointer;
}
gb_internal bool is_type_u8_slice(Type *t) {
@@ -1960,7 +1971,7 @@ gb_internal bool is_type_indexable(Type *t) {
return true;
case Type_EnumeratedArray:
return true;
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
return true;
case Type_Matrix:
return true;
@@ -1979,7 +1990,7 @@ gb_internal bool is_type_sliceable(Type *t) {
return true;
case Type_EnumeratedArray:
return false;
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
return true;
case Type_Matrix:
return false;
@@ -2185,12 +2196,12 @@ gb_internal bool is_type_polymorphic(Type *t, bool or_specialized=false) {
}
break;
- case Type_RelativeSlice:
- if (is_type_polymorphic(t->RelativeSlice.slice_type, or_specialized)) {
+ case Type_RelativeMultiPointer:
+ if (is_type_polymorphic(t->RelativeMultiPointer.pointer_type, or_specialized)) {
return true;
}
- if (t->RelativeSlice.base_integer != nullptr &&
- is_type_polymorphic(t->RelativeSlice.base_integer, or_specialized)) {
+ if (t->RelativeMultiPointer.base_integer != nullptr &&
+ is_type_polymorphic(t->RelativeMultiPointer.base_integer, or_specialized)) {
return true;
}
break;
@@ -2248,7 +2259,7 @@ gb_internal bool type_has_nil(Type *t) {
return false;
case Type_RelativePointer:
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
return true;
}
return false;
@@ -2415,7 +2426,7 @@ gb_internal bool is_type_load_safe(Type *type) {
return true;
case Type_RelativePointer:
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
return true;
case Type_Pointer:
@@ -2666,7 +2677,6 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
x->Struct.soa_kind == y->Struct.soa_kind &&
x->Struct.soa_count == y->Struct.soa_count &&
are_types_identical(x->Struct.soa_elem, y->Struct.soa_elem)) {
- // TODO(bill); Fix the custom alignment rule
for_array(i, x->Struct.fields) {
Entity *xf = x->Struct.fields[i];
Entity *yf = y->Struct.fields[i];
@@ -2807,7 +2817,6 @@ gb_internal i64 union_tag_size(Type *u) {
return 0;
}
- // TODO(bill): Is this an okay approach?
i64 max_align = 1;
if (u->Union.variants.count < 1ull<<8) {
@@ -2817,7 +2826,7 @@ gb_internal i64 union_tag_size(Type *u) {
} else if (u->Union.variants.count < 1ull<<32) {
max_align = 4;
} else {
- GB_PANIC("how many variants do you have?!");
+ compiler_error("how many variants do you have?! %lld", cast(long long)u->Union.variants.count);
}
for_array(i, u->Union.variants) {
@@ -3081,7 +3090,7 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
mutex_lock(md->mutex);
defer (mutex_unlock(md->mutex));
for (TypeNameObjCMetadataEntry const &entry : md->value_entries) {
- GB_ASSERT(entry.entity->kind == Entity_Procedure);
+ GB_ASSERT(entry.entity->kind == Entity_Procedure || entry.entity->kind == Entity_ProcGroup);
if (entry.name == field_name) {
sel.entity = entry.entity;
sel.pseudo_field = true;
@@ -3136,8 +3145,6 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
switch (type->Basic.kind) {
case Basic_any: {
#if 1
- // IMPORTANT TODO(bill): Should these members be available to should I only allow them with
- // `Raw_Any` type?
String data_str = str_lit("data");
String id_str = str_lit("id");
gb_local_persist Entity *entity__any_data = alloc_entity_field(nullptr, make_token_ident(data_str), t_rawptr, false, 0);
@@ -3623,8 +3630,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
case Type_RelativePointer:
return type_align_of_internal(t->RelativePointer.base_integer, path);
- case Type_RelativeSlice:
- return type_align_of_internal(t->RelativeSlice.base_integer, path);
+ case Type_RelativeMultiPointer:
+ return type_align_of_internal(t->RelativeMultiPointer.base_integer, path);
case Type_SoaPointer:
return build_context.int_size;
@@ -3645,28 +3652,35 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
}
} else if (is_packed) {
for_array(i, fields) {
- i64 size = type_size_of(fields[i]->type);
- offsets[i] = curr_offset;
- curr_offset += size;
+ if (fields[i]->kind != Entity_Variable) {
+ offsets[i] = -1;
+ } else {
+ i64 size = type_size_of(fields[i]->type);
+ offsets[i] = curr_offset;
+ curr_offset += size;
+ }
}
} else {
for_array(i, fields) {
- Type *t = fields[i]->type;
- i64 align = gb_max(type_align_of(t), 1);
- i64 size = gb_max(type_size_of( t), 0);
- curr_offset = align_formula(curr_offset, align);
- offsets[i] = curr_offset;
- curr_offset += size;
+ if (fields[i]->kind != Entity_Variable) {
+ offsets[i] = -1;
+ } else {
+ Type *t = fields[i]->type;
+ i64 align = gb_max(type_align_of(t), 1);
+ i64 size = gb_max(type_size_of( t), 0);
+ curr_offset = align_formula(curr_offset, align);
+ offsets[i] = curr_offset;
+ curr_offset += size;
+ }
}
}
return offsets;
}
gb_internal bool type_set_offsets(Type *t) {
- MUTEX_GUARD(&g_type_mutex); // TODO(bill): only per struct
-
t = base_type(t);
if (t->kind == Type_Struct) {
+ MUTEX_GUARD(&t->Struct.mutex);
if (!t->Struct.are_offsets_set) {
t->Struct.are_offsets_being_processed = true;
t->Struct.offsets = type_set_offsets_of(t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union);
@@ -3675,6 +3689,7 @@ gb_internal bool type_set_offsets(Type *t) {
return true;
}
} else if (is_type_tuple(t)) {
+ MUTEX_GUARD(&t->Tuple.mutex);
if (!t->Tuple.are_offsets_set) {
t->Tuple.are_offsets_being_processed = true;
t->Tuple.offsets = type_set_offsets_of(t->Tuple.variables, t->Tuple.is_packed, false);
@@ -3849,7 +3864,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
max = size;
}
}
- // TODO(bill): Is this how it should work?
return align_formula(max, align);
} else {
i64 count = 0, size = 0, align = 0;
@@ -3899,59 +3913,100 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
case Type_RelativePointer:
return type_size_of_internal(t->RelativePointer.base_integer, path);
- case Type_RelativeSlice:
- return 2*type_size_of_internal(t->RelativeSlice.base_integer, path);
+ case Type_RelativeMultiPointer:
+ return type_size_of_internal(t->RelativeMultiPointer.base_integer, path);
}
// Catch all
return build_context.ptr_size;
}
-gb_internal i64 type_offset_of(Type *t, i32 index) {
+gb_internal i64 type_offset_of(Type *t, i64 index, Type **field_type_) {
t = base_type(t);
- if (t->kind == Type_Struct) {
+ switch (t->kind) {
+ case Type_Struct:
type_set_offsets(t);
if (gb_is_between(index, 0, t->Struct.fields.count-1)) {
GB_ASSERT(t->Struct.offsets != nullptr);
+ if (field_type_) *field_type_ = t->Struct.fields[index]->type;
return t->Struct.offsets[index];
}
- } else if (t->kind == Type_Tuple) {
+ break;
+ case Type_Tuple:
type_set_offsets(t);
if (gb_is_between(index, 0, t->Tuple.variables.count-1)) {
GB_ASSERT(t->Tuple.offsets != nullptr);
- return t->Tuple.offsets[index];
+ if (field_type_) *field_type_ = t->Tuple.variables[index]->type;
+ i64 offset = t->Tuple.offsets[index];
+ GB_ASSERT(offset >= 0);
+ return offset;
}
- } else if (t->kind == Type_Basic) {
+ break;
+
+ case Type_Array:
+ GB_ASSERT(0 <= index && index < t->Array.count);
+ return index * type_size_of(t->Array.elem);
+
+ case Type_Basic:
if (t->Basic.kind == Basic_string) {
switch (index) {
- case 0: return 0; // data
- case 1: return build_context.int_size; // len
+ case 0:
+ if (field_type_) *field_type_ = t_u8_ptr;
+ return 0; // data
+ case 1:
+ if (field_type_) *field_type_ = t_int;
+ return build_context.int_size; // len
}
} else if (t->Basic.kind == Basic_any) {
switch (index) {
- case 0: return 0; // type_info
- case 1: return build_context.ptr_size; // data
+ case 0:
+ if (field_type_) *field_type_ = t_rawptr;
+ return 0; // data
+ case 1:
+ if (field_type_) *field_type_ = t_typeid;
+ return build_context.ptr_size; // id
}
}
- } else if (t->kind == Type_Slice) {
+ break;
+ case Type_Slice:
switch (index) {
- case 0: return 0; // data
- case 1: return 1*build_context.int_size; // len
- case 2: return 2*build_context.int_size; // cap
+ case 0:
+ if (field_type_) *field_type_ = alloc_type_multi_pointer(t->Slice.elem);
+ return 0; // data
+ case 1:
+ if (field_type_) *field_type_ = t_int;
+ return 1*build_context.int_size; // len
}
- } else if (t->kind == Type_DynamicArray) {
+ break;
+ case Type_DynamicArray:
switch (index) {
- case 0: return 0; // data
- case 1: return 1*build_context.int_size; // len
- case 2: return 2*build_context.int_size; // cap
- case 3: return 3*build_context.int_size; // allocator
+ case 0:
+ if (field_type_) *field_type_ = alloc_type_multi_pointer(t->DynamicArray.elem);
+ return 0; // data
+ case 1:
+ if (field_type_) *field_type_ = t_int;
+ return 1*build_context.int_size; // len
+ case 2:
+ if (field_type_) *field_type_ = t_int;
+ return 2*build_context.int_size; // cap
+ case 3:
+ if (field_type_) *field_type_ = t_allocator;
+ return 3*build_context.int_size; // allocator
}
- } else if (t->kind == Type_Union) {
- /* i64 s = */ type_size_of(t);
- switch (index) {
- case -1: return align_formula(t->Union.variant_block_size, build_context.ptr_size); // __type_info
+ break;
+ case Type_Union:
+ if (!is_type_union_maybe_pointer(t)) {
+ /* i64 s = */ type_size_of(t);
+ switch (index) {
+ case -1:
+ if (field_type_) *field_type_ = union_tag_type(t);
+ union_tag_size(t);
+ return t->Union.variant_block_size;
+ }
}
+ break;
}
+ GB_ASSERT(index == 0);
return 0;
}
@@ -3965,8 +4020,10 @@ gb_internal i64 type_offset_of_from_selection(Type *type, Selection sel) {
i32 index = sel.index[i];
t = base_type(t);
offset += type_offset_of(t, index);
- if (t->kind == Type_Struct && !t->Struct.is_raw_union) {
+ if (t->kind == Type_Struct) {
t = t->Struct.fields[index]->type;
+ } else if (t->kind == Type_Array) {
+ t = t->Array.elem;
} else {
// NOTE(bill): No need to worry about custom types, just need the alignment
switch (t->kind) {
@@ -4410,11 +4467,11 @@ gb_internal gbString write_type_to_string(gbString str, Type *type, bool shortha
str = gb_string_append_fmt(str, ") ");
str = write_type_to_string(str, type->RelativePointer.pointer_type);
break;
- case Type_RelativeSlice:
+ case Type_RelativeMultiPointer:
str = gb_string_append_fmt(str, "#relative(");
- str = write_type_to_string(str, type->RelativeSlice.base_integer);
+ str = write_type_to_string(str, type->RelativePointer.base_integer);
str = gb_string_append_fmt(str, ") ");
- str = write_type_to_string(str, type->RelativeSlice.slice_type);
+ str = write_type_to_string(str, type->RelativePointer.pointer_type);
break;
case Type_Matrix: