| author | gingerBill <gingerBill@users.noreply.github.com> | 2025-01-06 13:43:01 +0000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-01-06 13:43:01 +0000 |
| commit | 6e49bbb66853b5d824ac5bbd534ae3e81c4f39aa (patch) | |
| tree | 50886a3be8f2fcfab053e07cfe9e15f50fa5f9f6 /src/llvm_backend_stmt.cpp | |
| parent | bd96cd0af761994210018ca647eb843dfeb71494 (diff) | |
| parent | 98efb03934b464a1b23759b5695a12ff37588357 (diff) | |
Merge branch 'master' into d3d11-annotations
Diffstat (limited to 'src/llvm_backend_stmt.cpp')
| -rw-r--r-- | src/llvm_backend_stmt.cpp | 376 |
1 file changed, 286 insertions, 90 deletions
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 9d688be6a..a2f0d2f4a 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -201,13 +201,15 @@ gb_internal void lb_open_scope(lbProcedure *p, Scope *s) {
 		}
 	}
 
+	GB_ASSERT(s != nullptr);
+
 	p->curr_scope = s;
 	p->scope_index += 1;
 	array_add(&p->scope_stack, s);
 }
 
-gb_internal void lb_close_scope(lbProcedure *p, lbDeferExitKind kind, lbBlock *block, bool pop_stack=true) {
-	lb_emit_defer_stmts(p, kind, block);
+gb_internal void lb_close_scope(lbProcedure *p, lbDeferExitKind kind, lbBlock *block, Ast *node, bool pop_stack=true) {
+	lb_emit_defer_stmts(p, kind, block, node);
 	GB_ASSERT(p->scope_index > 0);
 
 	// NOTE(bill): Remove `context`s made in that scope
@@ -221,6 +223,10 @@ gb_internal void lb_close_scope(lbProcedure *p, lbDeferExitKind kind, lbBlock *b
 	}
 
+	if (p->curr_scope) {
+		p->curr_scope = p->curr_scope->parent;
+	}
+
 	p->scope_index -= 1;
 	array_pop(&p->scope_stack);
 }
@@ -715,7 +721,7 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
 
 	lb_build_stmt(p, rs->body);
 
-	lb_close_scope(p, lbDeferExit_Default, nullptr);
+	lb_close_scope(p, lbDeferExit_Default, nullptr, node->left);
 	lb_pop_target_list(p);
 
 	if (check != nullptr) {
@@ -737,6 +743,22 @@ gb_internal void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
 	lb_start_block(p, done);
 }
 
+gb_internal lbValue lb_enum_values_slice(lbProcedure *p, Type *enum_type, i64 *enum_count_) {
+	Type *t = enum_type;
+	GB_ASSERT(is_type_enum(t));
+	t = base_type(t);
+	GB_ASSERT(t->kind == Type_Enum);
+	i64 enum_count = t->Enum.fields.count;
+
+	if (enum_count_) *enum_count_ = enum_count;
+
+	lbValue ti = lb_type_info(p, t);
+	lbValue variant = lb_emit_struct_ep(p, ti, 4);
+	lbValue eti_ptr = lb_emit_conv(p, variant, t_type_info_enum_ptr);
+	lbValue values = lb_emit_load(p, lb_emit_struct_ep(p, eti_ptr, 2));
+	return values;
+}
+
 gb_internal void lb_build_range_enum(lbProcedure *p, Type *enum_type, Type *val_type,
                                      lbValue *val_, lbValue *idx_, lbBlock **loop_, lbBlock **done_) {
 	lbModule *m = p->module;
@@ -744,15 +766,11 @@ gb_internal void lb_build_range_enum(lbProcedure *p, Type *enum_type, Type *val_
 	GB_ASSERT(is_type_enum(t));
 	t = base_type(t);
 	Type *core_elem = core_type(t);
-	GB_ASSERT(t->kind == Type_Enum);
-	i64 enum_count = t->Enum.fields.count;
-	lbValue max_count = lb_const_int(m, t_int, enum_count);
+	i64 enum_count = 0;
 
-	lbValue ti = lb_type_info(m, t);
-	lbValue variant = lb_emit_struct_ep(p, ti, 4);
-	lbValue eti_ptr = lb_emit_conv(p, variant, t_type_info_enum_ptr);
-	lbValue values = lb_emit_load(p, lb_emit_struct_ep(p, eti_ptr, 2));
+	lbValue values = lb_enum_values_slice(p, enum_type, &enum_count);
 	lbValue values_data = lb_slice_elem(p, values);
+	lbValue max_count = lb_const_int(m, t_int, enum_count);
 
 	lbAddr offset_ = lb_add_local_generated(p, t_int, false);
 	lb_addr_store(p, offset_, lb_const_int(m, t_int, 0));
@@ -790,8 +808,19 @@ gb_internal void lb_build_range_enum(lbProcedure *p, Type *enum_type, Type *val_
 	if (done_) *done_ = done;
 }
 
-gb_internal void lb_build_range_tuple(lbProcedure *p, Ast *expr, Type *val0_type, Type *val1_type,
-                                      lbValue *val0_, lbValue *val1_, lbBlock **loop_, lbBlock **done_) {
+gb_internal void lb_build_range_tuple(lbProcedure *p, AstRangeStmt *rs, Scope *scope) {
+	Ast *expr = unparen_expr(rs->expr);
+
+	Type *expr_type = type_of_expr(expr);
+	Type *et = base_type(type_deref(expr_type));
+	GB_ASSERT(et->kind == Type_Tuple);
+
+	i32 value_count = cast(i32)et->Tuple.variables.count;
+
+	lbValue *values = gb_alloc_array(permanent_allocator(), lbValue, value_count);
+
+	lb_open_scope(p, scope);
+
 	lbBlock *loop = lb_create_block(p, "for.tuple.loop");
 	lb_emit_jump(p, loop);
 	lb_start_block(p, loop);
@@ -809,11 +838,26 @@ gb_internal void lb_build_range_tuple(lbProcedure *p, Ast *expr, Type *val0_type
 	lb_emit_if(p, cond, body, done);
 	lb_start_block(p, body);
 
+	for (i32 i = 0; i < value_count; i++) {
+		values[i] = lb_emit_tuple_ev(p, tuple_value, i);
+	}
 
-	if (val0_) *val0_ = lb_emit_tuple_ev(p, tuple_value, 0);
-	if (val1_) *val1_ = lb_emit_tuple_ev(p, tuple_value, 1);
-	if (loop_) *loop_ = loop;
-	if (done_) *done_ = done;
+	GB_ASSERT(rs->vals.count <= value_count);
+	for (isize i = 0; i < rs->vals.count; i++) {
+		Ast *val = rs->vals[i];
+		if (val != nullptr) {
+			lb_store_range_stmt_val(p, val, values[i]);
+		}
+	}
+
+	lb_push_target_list(p, rs->label, done, loop, nullptr);
+
+	lb_build_stmt(p, rs->body);
+
+	lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
+	lb_pop_target_list(p);
+	lb_emit_jump(p, loop);
+	lb_start_block(p, done);
 }
 
 gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs, Scope *scope) {
@@ -932,7 +976,7 @@ gb_internal void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs
 
 	lb_build_stmt(p, rs->body);
 
-	lb_close_scope(p, lbDeferExit_Default, nullptr);
+	lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
 	lb_pop_target_list(p);
 	lb_emit_jump(p, loop);
 	lb_start_block(p, done);
@@ -956,6 +1000,17 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
 		}
 	}
 
+	TypeAndValue tav = type_and_value_of_expr(expr);
+	if (tav.mode != Addressing_Type) {
+		Type *expr_type = type_of_expr(expr);
+		Type *et = base_type(type_deref(expr_type));
+		if (et->kind == Type_Tuple) {
+			lb_build_range_tuple(p, rs, scope);
+			return;
+		}
+	}
+
+	lb_open_scope(p, scope);
 
 	Ast *val0 = rs->vals.count > 0 ? lb_strip_and_prefix(rs->vals[0]) : nullptr;
@@ -974,7 +1029,6 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
 	lbBlock *loop = nullptr;
 	lbBlock *done = nullptr;
 	bool is_map = false;
-	TypeAndValue tav = type_and_value_of_expr(expr);
 
 	if (tav.mode == Addressing_Type) {
 		lb_build_range_enum(p, type_deref(tav.type), val0_type, &val, &key, &loop, &done);
@@ -1050,8 +1104,75 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
 		break;
 	}
 	case Type_Tuple:
-		lb_build_range_tuple(p, expr, val0_type, val1_type, &val, &key, &loop, &done);
+		GB_PANIC("Should be handled already");
+
+	case Type_BitSet: {
+		lbModule *m = p->module;
+
+		lbValue the_set = lb_build_expr(p, expr);
+		if (is_type_pointer(type_deref(the_set.type))) {
+			the_set = lb_emit_load(p, the_set);
+		}
+
+		Type *elem = et->BitSet.elem;
+		if (is_type_enum(elem)) {
+			i64 enum_count = 0;
+			lbValue values = lb_enum_values_slice(p, elem, &enum_count);
+			lbValue values_data = lb_slice_elem(p, values);
+			lbValue max_count = lb_const_int(m, t_int, enum_count);
+
+			lbAddr offset_ = lb_add_local_generated(p, t_int, false);
+			lb_addr_store(p, offset_, lb_const_int(m, t_int, 0));
+
+			loop = lb_create_block(p, "for.bit_set.enum.loop");
+			lb_emit_jump(p, loop);
+			lb_start_block(p, loop);
+
+			lbBlock *body_check = lb_create_block(p, "for.bit_set.enum.body-check");
+			lbBlock *body = lb_create_block(p, "for.bit_set.enum.body");
+			done = lb_create_block(p, "for.bit_set.enum.done");
+
+			lbValue offset = lb_addr_load(p, offset_);
+			lbValue cond = lb_emit_comp(p, Token_Lt, offset, max_count);
+			lb_emit_if(p, cond, body_check, done);
+			lb_start_block(p, body_check);
+
+			lbValue val_ptr = lb_emit_ptr_offset(p, values_data, offset);
+			lb_emit_increment(p, offset_.addr);
+			val = lb_emit_load(p, val_ptr);
+			val = lb_emit_conv(p, val, elem);
+
+			lbValue check = lb_build_binary_in(p, val, the_set, Token_in);
+			lb_emit_if(p, check, body, loop);
+			lb_start_block(p, body);
+		} else {
+			lbAddr offset_ = lb_add_local_generated(p, t_int, false);
+			lb_addr_store(p, offset_, lb_const_int(m, t_int, et->BitSet.lower));
+
+			lbValue max_count = lb_const_int(m, t_int, et->BitSet.upper);
+
+			loop = lb_create_block(p, "for.bit_set.range.loop");
+			lb_emit_jump(p, loop);
+			lb_start_block(p, loop);
+
+			lbBlock *body_check = lb_create_block(p, "for.bit_set.range.body-check");
+			lbBlock *body = lb_create_block(p, "for.bit_set.range.body");
+			done = lb_create_block(p, "for.bit_set.range.done");
+
+			lbValue offset = lb_addr_load(p, offset_);
+			lbValue cond = lb_emit_comp(p, Token_LtEq, offset, max_count);
+			lb_emit_if(p, cond, body_check, done);
+			lb_start_block(p, body_check);
+
+			val = lb_emit_conv(p, offset, elem);
+			lb_emit_increment(p, offset_.addr);
+
+			lbValue check = lb_build_binary_in(p, val, the_set, Token_in);
+			lb_emit_if(p, check, body, loop);
+			lb_start_block(p, body);
+		}
 		break;
+	}
 	default:
 		GB_PANIC("Cannot range over %s", type_to_string(expr_type));
 		break;
 	}
@@ -1071,7 +1192,7 @@ gb_internal void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *sc
 
 	lb_build_stmt(p, rs->body);
 
-	lb_close_scope(p, lbDeferExit_Default, nullptr);
+	lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
 	lb_pop_target_list(p);
 	lb_emit_jump(p, loop);
 	lb_start_block(p, done);
@@ -1242,7 +1363,7 @@ gb_internal void lb_build_unroll_range_stmt(lbProcedure *p, AstUnrollRangeStmt *
 		}
 	}
 
-	lb_close_scope(p, lbDeferExit_Default, nullptr);
+	lb_close_scope(p, lbDeferExit_Default, nullptr, rs->body);
 }
 
 gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, bool *default_found_) {
@@ -1289,6 +1410,10 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo
 		}
 	}
 
+	if (is_typeid) {
+		return false;
+	}
+
 	return true;
 }
 
@@ -1308,6 +1433,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
 	ast_node(body, BlockStmt, ss->body);
 	isize case_count = body->stmts.count;
 
+	Ast *default_clause = nullptr;
 	Slice<Ast *> default_stmts = {};
 	lbBlock *default_fall = nullptr;
 	lbBlock *default_block = nullptr;
@@ -1357,6 +1483,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
 
 		if (cc->list.count == 0) {
 			// default case
+			default_clause = clause;
 			default_stmts = cc->stmts;
 			default_fall = fall;
 			if (switch_instr == nullptr) {
@@ -1427,7 +1554,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
 		lb_push_target_list(p, ss->label, done, nullptr, fall);
 		lb_open_scope(p, body->scope);
 		lb_build_stmt_list(p, cc->stmts);
-		lb_close_scope(p, lbDeferExit_Default, body);
+		lb_close_scope(p, lbDeferExit_Default, body, clause);
 		lb_pop_target_list(p);
 
 		lb_emit_jump(p, done);
@@ -1445,26 +1572,33 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
 		lb_push_target_list(p, ss->label, done, nullptr, default_fall);
 		lb_open_scope(p, default_block->scope);
 		lb_build_stmt_list(p, default_stmts);
-		lb_close_scope(p, lbDeferExit_Default, default_block);
+		lb_close_scope(p, lbDeferExit_Default, default_block, default_clause);
 		lb_pop_target_list(p);
 	}
 
 	lb_emit_jump(p, done);
 	lb_start_block(p, done);
-	lb_close_scope(p, lbDeferExit_Default, done);
+	lb_close_scope(p, lbDeferExit_Default, done, ss->body);
 }
 
-gb_internal void lb_store_type_case_implicit(lbProcedure *p, Ast *clause, lbValue value) {
+gb_internal void lb_store_type_case_implicit(lbProcedure *p, Ast *clause, lbValue value, bool is_default_case) {
 	Entity *e = implicit_entity_of_node(clause);
 	GB_ASSERT(e != nullptr);
 	if (e->flags & EntityFlag_Value) {
 		// by value
-		GB_ASSERT(are_types_identical(e->type, value.type));
-		lbAddr x = lb_add_local(p, e->type, e, false);
-		lb_addr_store(p, x, value);
+		if (are_types_identical(e->type, value.type)) {
+			lbAddr x = lb_add_local(p, e->type, e, false);
+			lb_addr_store(p, x, value);
+		} else {
+			GB_ASSERT_MSG(are_types_identical(e->type, type_deref(value.type)), "%s", type_to_string(value.type));
+			lbAddr x = lb_add_local(p, e->type, e, false);
+			lb_addr_store(p, x, lb_emit_load(p, value));
+		}
 	} else {
-		// by reference
-		GB_ASSERT(are_types_identical(e->type, type_deref(value.type)));
+		if (!is_default_case) {
+			Type *clause_type = e->type;
+			GB_ASSERT_MSG(are_types_identical(type_deref(clause_type), type_deref(value.type)), "%s %s", type_to_string(clause_type), type_to_string(value.type));
+		}
 		lb_add_entity(p->module, e, value);
 	}
 }
@@ -1495,7 +1629,7 @@ gb_internal void lb_type_case_body(lbProcedure *p, Ast *label, Ast *clause, lbBl
 
 	lb_push_target_list(p, label, done, nullptr, nullptr);
 	lb_build_stmt_list(p, cc->stmts);
-	lb_close_scope(p, lbDeferExit_Default, body);
+	lb_close_scope(p, lbDeferExit_Default, body, clause);
 	lb_pop_target_list(p);
 
 	lb_emit_jump(p, done);
@@ -1529,7 +1663,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 		union_data = lb_emit_conv(p, parent_ptr, t_rawptr);
 		Type *union_type = type_deref(parent_ptr.type);
 		if (is_type_union_maybe_pointer(union_type)) {
-			tag = lb_emit_conv(p, lb_emit_comp_against_nil(p, Token_NotEq, union_data), t_int);
+			tag = lb_emit_conv(p, lb_emit_comp_against_nil(p, Token_NotEq, parent_value), t_int);
 		} else if (union_tag_size(union_type) == 0) {
 			tag = {}; // there is no tag for a zero sized union
 		} else {
@@ -1619,10 +1753,17 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 
 	for (Ast *clause : body->stmts) {
 		ast_node(cc, CaseClause, clause);
+
+		Entity *case_entity = implicit_entity_of_node(clause);
 		lb_open_scope(p, cc->scope);
+
 		if (cc->list.count == 0) {
 			lb_start_block(p, default_block);
-			lb_store_type_case_implicit(p, clause, parent_value);
+			if (case_entity->flags & EntityFlag_Value) {
+				lb_store_type_case_implicit(p, clause, parent_value, true);
+			} else {
+				lb_store_type_case_implicit(p, clause, parent_ptr, true);
+			}
 			lb_type_case_body(p, ss->label, clause, p->curr_block, done);
 			continue;
 		}
@@ -1652,7 +1793,6 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 			LLVMAddCase(switch_instr, on_val.value, body->block);
 		}
 
-		Entity *case_entity = implicit_entity_of_node(clause);
 
 		lb_start_block(p, body);
 
@@ -1665,6 +1805,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 		} else if (switch_kind == TypeSwitch_Any) {
 			data = lb_emit_load(p, lb_emit_struct_ep(p, parent_ptr, 0));
 		}
+		GB_ASSERT(is_type_pointer(data.type));
 
 		Type *ct = case_entity->type;
 		Type *ct_ptr = alloc_type_pointer(ct);
@@ -1688,7 +1829,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 			lb_add_entity(p->module, case_entity, ptr);
 			lb_add_debug_local_variable(p, ptr.value, case_entity->type, case_entity->token);
 		} else {
-			lb_store_type_case_implicit(p, clause, parent_value);
+			lb_store_type_case_implicit(p, clause, parent_value, false);
 		}
 		lb_type_case_body(p, ss->label, clause, body, done);
 	}
@@ -1696,7 +1837,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
 
 	lb_emit_jump(p, done);
 	lb_start_block(p, done);
-	lb_close_scope(p, lbDeferExit_Default, done);
+	lb_close_scope(p, lbDeferExit_Default, done, ss->body);
 }
 
 
@@ -1734,7 +1875,9 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) {
 		LLVMSetInitializer(global, LLVMConstNull(lb_type(p->module, e->type)));
 		if (value.value != nullptr) {
 			LLVMSetInitializer(global, value.value);
-		} else {
+		}
+		if (e->Variable.is_rodata) {
+			LLVMSetGlobalConstant(global, true);
 		}
 		if (e->Variable.thread_local_model != "") {
 			LLVMSetThreadLocal(global, true);
@@ -1818,7 +1961,7 @@ gb_internal void lb_build_assignment(lbProcedure *p, Array<lbAddr> &lvals, Slice
 	p->in_multi_assignment = prev_in_assignment;
 }
 
-gb_internal void lb_build_return_stmt_internal(lbProcedure *p, lbValue res) {
+gb_internal void lb_build_return_stmt_internal(lbProcedure *p, lbValue res, TokenPos pos) {
 	lbFunctionType *ft = lb_get_function_type(p->module, p->type);
 	bool return_by_pointer = ft->ret.kind == lbArg_Indirect;
 	bool split_returns = ft->multiple_return_original_type != nullptr;
@@ -1841,40 +1984,72 @@ gb_internal void lb_build_return_stmt_internal(lbProcedure *p, lbValue res) {
 			LLVMBuildStore(p->builder, LLVMConstNull(p->abi_function_type->ret.type), p->return_ptr.addr.value);
 		}
 
-		lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
+		lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr, pos);
 
-		LLVMBuildRetVoid(p->builder);
+		// Check for terminator in the defer stmts
+		LLVMValueRef instr = LLVMGetLastInstruction(p->curr_block->block);
+		if (!lb_is_instr_terminating(instr)) {
+			LLVMBuildRetVoid(p->builder);
+		}
 	} else {
 		LLVMValueRef ret_val = res.value;
-		ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.type);
-		if (p->abi_function_type->ret.cast_type != nullptr) {
-			ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.cast_type);
+		LLVMTypeRef ret_type = p->abi_function_type->ret.type;
+		if (LLVMTypeRef cast_type = p->abi_function_type->ret.cast_type) {
+			ret_type = cast_type;
+		}
+
+		if (LLVMGetTypeKind(ret_type) == LLVMStructTypeKind) {
+			LLVMTypeRef src_type = LLVMTypeOf(ret_val);
+
+			if (p->temp_callee_return_struct_memory == nullptr) {
+				i64 max_align = gb_max(lb_alignof(ret_type), lb_alignof(src_type));
+				p->temp_callee_return_struct_memory = llvm_alloca(p, ret_type, max_align);
+			}
+			// reuse the temp return value memory where possible
+			LLVMValueRef ptr = p->temp_callee_return_struct_memory;
+			LLVMValueRef nptr = LLVMBuildPointerCast(p->builder, ptr, LLVMPointerType(src_type, 0), "");
+			LLVMBuildStore(p->builder, ret_val, nptr);
+			ret_val = OdinLLVMBuildLoad(p, ret_type, ptr);
+		} else {
+			ret_val = OdinLLVMBuildTransmute(p, ret_val, ret_type);
 		}
 
-		lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
-		LLVMBuildRet(p->builder, ret_val);
+		lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr, pos);
+
+		// Check for terminator in the defer stmts
+		LLVMValueRef instr = LLVMGetLastInstruction(p->curr_block->block);
+		if (!lb_is_instr_terminating(instr)) {
+			LLVMBuildRet(p->builder, ret_val);
+		}
 	}
 }
 
-gb_internal void lb_build_return_stmt(lbProcedure *p, Slice<Ast *> const &return_results) {
+gb_internal void lb_build_return_stmt(lbProcedure *p, Slice<Ast *> const &return_results, TokenPos pos) {
 	lb_ensure_abi_function_type(p->module, p);
 
+	isize return_count = p->type->Proc.result_count;
+
+	if (return_count == 0) {
+		// No return values
+
+		lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr, pos);
+
+		// Check for terminator in the defer stmts
+		LLVMValueRef instr = LLVMGetLastInstruction(p->curr_block->block);
+		if (!lb_is_instr_terminating(instr)) {
+			LLVMBuildRetVoid(p->builder);
+		}
		return;
+	}
+
 	lbValue res = {};
 
-	TypeTuple *tuple = &p->type->Proc.results->Tuple;
-	isize return_count = p->type->Proc.result_count;
+	TypeTuple *tuple = &p->type->Proc.results->Tuple;
 	isize res_count = return_results.count;
 
 	lbFunctionType *ft = lb_get_function_type(p->module, p->type);
 	bool return_by_pointer = ft->ret.kind == lbArg_Indirect;
 
-	if (return_count == 0) {
-		// No return values
-
-		lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
-
-		LLVMBuildRetVoid(p->builder);
-		return;
-	} else if (return_count == 1) {
+	if (return_count == 1) {
 		Entity *e = tuple->variables[0];
 		if (res_count == 0) {
 			rw_mutex_shared_lock(&p->module->values_mutex);
@@ -1965,11 +2140,11 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice<Ast *> const &return
 			GB_ASSERT(result_values.count-1 == result_eps.count);
 			lb_addr_store(p, p->return_ptr, result_values[result_values.count-1]);
 
-			lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
+			lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr, pos);
 			LLVMBuildRetVoid(p->builder);
 			return;
 		} else {
-			return lb_build_return_stmt_internal(p, result_values[result_values.count-1]);
+			return lb_build_return_stmt_internal(p, result_values[result_values.count-1], pos);
 		}
 
 	} else {
@@ -1996,7 +2171,7 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice<Ast *> const &return
 		}
 
 		if (return_by_pointer) {
-			lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
+			lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr, pos);
 			LLVMBuildRetVoid(p->builder);
 			return;
 		}
@@ -2004,14 +2179,24 @@ gb_internal void lb_build_return_stmt(lbProcedure *p, Slice<Ast *> const &return
 			res = lb_emit_load(p, res);
 		}
 	}
-	lb_build_return_stmt_internal(p, res);
+	lb_build_return_stmt_internal(p, res, pos);
 }
 
 gb_internal void lb_build_if_stmt(lbProcedure *p, Ast *node) {
 	ast_node(is, IfStmt, node);
 	lb_open_scope(p, is->scope); // Scope #1
-	defer (lb_close_scope(p, lbDeferExit_Default, nullptr));
+	defer (lb_close_scope(p, lbDeferExit_Default, nullptr, node));
+	lbBlock *then = lb_create_block(p, "if.then");
+	lbBlock *done = lb_create_block(p, "if.done");
+	lbBlock *else_ = done;
+	if (is->else_stmt != nullptr) {
+		else_ = lb_create_block(p, "if.else");
+	}
+	if (is->label != nullptr) {
+		lbTargetList *tl = lb_push_target_list(p, is->label, done, nullptr, nullptr);
+		tl->is_block = true;
+	}
 
 	if (is->init != nullptr) {
 		lbBlock *init = lb_create_block(p, "if.init");
 		lb_emit_jump(p, init);
 		lb_start_block(p, init);
 		lb_build_stmt(p, is->init);
 	}
 
-	lbBlock *then = lb_create_block(p, "if.then");
-	lbBlock *done = lb_create_block(p, "if.done");
-	lbBlock *else_ = done;
-	if (is->else_stmt != nullptr) {
-		else_ = lb_create_block(p, "if.else");
-	}
 	lbValue cond = lb_build_cond(p, is->cond, then, else_);
 	// Note `cond.value` only set for non-and/or conditions and const negs so that the `LLVMIsConstant()`
 	// and `LLVMConstIntGetZExtValue()` calls below will be valid and `LLVMInstructionEraseFromParent()`
 	// will target the correct (& only) branch statement
 
-	if (is->label != nullptr) {
-		lbTargetList *tl = lb_push_target_list(p, is->label, done, nullptr, nullptr);
-		tl->is_block = true;
-	}
-
-	if (cond.value && LLVMIsConstant(cond.value)) {
+	if (cond.value && LLVMIsAConstantInt(cond.value)) {
 		// NOTE(bill): Do a compile time short circuit for when the condition is constantly known.
 		// This done manually rather than relying on the SSA passes because sometimes the SSA passes
 		// miss some even if they are constantly known, especially with few optimization passes.
@@ -2062,7 +2236,7 @@ gb_internal void lb_build_if_stmt(lbProcedure *p, Ast *node) {
 
 			lb_open_scope(p, scope_of_node(is->else_stmt));
 			lb_build_stmt(p, is->else_stmt);
-			lb_close_scope(p, lbDeferExit_Default, nullptr);
+			lb_close_scope(p, lbDeferExit_Default, nullptr, is->else_stmt);
 		}
 
 		lb_emit_jump(p, done);
@@ -2079,7 +2253,7 @@ gb_internal void lb_build_if_stmt(lbProcedure *p, Ast *node) {
 
 		lb_open_scope(p, scope_of_node(is->else_stmt));
 		lb_build_stmt(p, is->else_stmt);
-		lb_close_scope(p, lbDeferExit_Default, nullptr);
+		lb_close_scope(p, lbDeferExit_Default, nullptr, is->else_stmt);
 
 		lb_emit_jump(p, done);
 	}
@@ -2099,15 +2273,6 @@ gb_internal void lb_build_for_stmt(lbProcedure *p, Ast *node) {
 	if (p->debug_info != nullptr) {
 		LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, node));
 	}
-
-	if (fs->init != nullptr) {
-	#if 1
-		lbBlock *init = lb_create_block(p, "for.init");
-		lb_emit_jump(p, init);
-		lb_start_block(p, init);
-	#endif
-		lb_build_stmt(p, fs->init);
-	}
 	lbBlock *body = lb_create_block(p, "for.body");
 	lbBlock *done = lb_create_block(p, "for.done"); // NOTE(bill): Append later
 	lbBlock *loop = body;
@@ -2119,6 +2284,17 @@ gb_internal void lb_build_for_stmt(lbProcedure *p, Ast *node) {
 		post = lb_create_block(p, "for.post");
 	}
 
+	lb_push_target_list(p, fs->label, done, post, nullptr);
+
+	if (fs->init != nullptr) {
+	#if 1
+		lbBlock *init = lb_create_block(p, "for.init");
+		lb_emit_jump(p, init);
+		lb_start_block(p, init);
+	#endif
+		lb_build_stmt(p, fs->init);
+	}
+
 	lb_emit_jump(p, loop);
 	lb_start_block(p, loop);
 
@@ -2131,7 +2307,6 @@ gb_internal void lb_build_for_stmt(lbProcedure *p, Ast *node) {
 		lb_start_block(p, body);
 	}
 
-	lb_push_target_list(p, fs->label, done, post, nullptr);
 
 	lb_build_stmt(p, fs->body);
 
@@ -2149,7 +2324,7 @@ gb_internal void lb_build_for_stmt(lbProcedure *p, Ast *node) {
 	}
 
 	lb_start_block(p, done);
-	lb_close_scope(p, lbDeferExit_Default, nullptr);
+	lb_close_scope(p, lbDeferExit_Default, nullptr, node);
 }
 
 gb_internal void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs, lbValue const &value) {
@@ -2415,7 +2590,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
 
 		lb_open_scope(p, bs->scope);
 		lb_build_stmt_list(p, bs->stmts);
-		lb_close_scope(p, lbDeferExit_Default, nullptr);
+		lb_close_scope(p, lbDeferExit_Default, nullptr, node);
 
 		if (done != nullptr) {
 			lb_emit_jump(p, done);
@@ -2529,7 +2704,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
 	case_end;
 
 	case_ast_node(rs, ReturnStmt, node);
-		lb_build_return_stmt(p, rs->results);
+		lb_build_return_stmt(p, rs->results, ast_token(node).pos);
 	case_end;
 
 	case_ast_node(is, IfStmt, node);
@@ -2582,7 +2757,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
 			}
 		}
 		if (block != nullptr) {
-			lb_emit_defer_stmts(p, lbDeferExit_Branch, block);
+			lb_emit_defer_stmts(p, lbDeferExit_Branch, block, node);
 		}
 		lb_emit_jump(p, block);
 		lb_start_block(p, lb_create_block(p, "unreachable"));
@@ -2622,7 +2797,13 @@ gb_internal void lb_build_defer_stmt(lbProcedure *p, lbDefer const &d) {
 	}
 }
 
-gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlock *block) {
+gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlock *block, TokenPos pos) {
+	TokenPos prev_token_pos = p->branch_location_pos;
+	if (p->uses_branch_location) {
+		p->branch_location_pos = pos;
+	}
+	defer (p->branch_location_pos = prev_token_pos);
+
 	isize count = p->defer_stmts.count;
 	isize i = count;
 	while (i --> 0) {
@@ -2649,6 +2830,21 @@ gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlo
 	}
 }
 
+gb_internal void lb_emit_defer_stmts(lbProcedure *p, lbDeferExitKind kind, lbBlock *block, Ast *node) {
+	TokenPos pos = {};
+	if (node) {
+		if (node->kind == Ast_BlockStmt) {
+			pos = ast_end_token(node).pos;
+		} else if (node->kind == Ast_CaseClause) {
+			pos = ast_end_token(node).pos;
+		} else {
+			pos = ast_token(node).pos;
+		}
+	}
+	return lb_emit_defer_stmts(p, kind, block, pos);
+}
+
+
 gb_internal void lb_add_defer_node(lbProcedure *p, isize scope_index, Ast *stmt) {
 	Type *pt = base_type(p->type);
 	GB_ASSERT(pt->kind == Type_Proc);
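A note on the terminator checks introduced above: the return paths now consult `lb_is_instr_terminating` before calling `LLVMBuildRet`/`LLVMBuildRetVoid`, because the deferred statements emitted at a return site can themselves close the current basic block, and LLVM IR allows only one terminator per block. The sketch below illustrates that guard using only the plain LLVM-C API; the helper names `block_is_terminated` and `emit_ret_void_if_needed` are hypothetical stand-ins for the procedure state the backend actually uses.

```cpp
// Minimal sketch (not part of the commit): emit `ret` only when the current
// block has not already been terminated, e.g. by code generated for `defer`.
#include <llvm-c/Core.h>

static bool block_is_terminated(LLVMBasicBlockRef bb) {
	LLVMValueRef last = LLVMGetLastInstruction(bb);
	// LLVMIsATerminatorInst returns non-null only for ret/br/switch/unreachable/etc.
	return last != nullptr && LLVMIsATerminatorInst(last) != nullptr;
}

static void emit_ret_void_if_needed(LLVMBuilderRef builder, LLVMBasicBlockRef bb) {
	if (!block_is_terminated(bb)) {
		LLVMBuildRetVoid(builder); // block is still open, so a terminator is required
	}
}
```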