author    gingerBill <bill@gingerbill.org>  2021-10-02 15:45:46 +0100
committer gingerBill <bill@gingerbill.org>  2021-10-02 15:45:46 +0100
commit    444fedd8d43b647040993d8cf1fac6a0c690e1a2 (patch)
tree      d5cdb8301887a245fd16dacd26f802d8c34aef8d /src
parent    f7137bf3676d4d853556796cb7cb4a14f0d4fb35 (diff)
Heavily improve the LLVM struct type generation to improve ABI
Diffstat (limited to 'src')
-rw-r--r--  src/llvm_backend.hpp          100
-rw-r--r--  src/llvm_backend_const.cpp     24
-rw-r--r--  src/llvm_backend_general.cpp  115
-rw-r--r--  src/llvm_backend_utility.cpp   47
4 files changed, 150 insertions(+), 136 deletions(-)
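
For context before the hunks: this commit replaces the old hard-coded LLVM struct layout (an optional alignment prefix plus one padding slot per field, addressed as field_index*2 + 1) with a per-type remapping table, lbStructFieldRemapping, that records which LLVM struct element each source field landed in and emits padding fillers only where the field offsets require them. The following is a minimal standalone sketch of that idea; the names are hypothetical stand-ins, and the real backend uses lb_type_padding_filler, the struct_field_remapping map, and LLVMStructTypeInContext instead.

// layout_sketch.cpp -- standalone illustration of the field-remapping idea.
// Hypothetical stand-ins; not the compiler's actual helpers.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Field { int64_t size, align, offset; };

// Mirrors lb_type_padding_filler: pick an element width (clamped to 8 bytes
// to avoid ABI issues) and compute how many of them cover `padding` bytes.
static void padding_filler(int64_t padding, int64_t align, int64_t *elem, int64_t *len) {
    align = align < 1 ? 1 : (align > 8 ? 8 : align);
    if (padding % align == 0) { *elem = align; *len = padding / align; }
    else                      { *elem = 1;     *len = padding;         }
}

int main(void) {
    // Example source-level struct {u8, i64, u16} with C-like offsets and size 24.
    std::vector<Field> fields = {{1, 1, 0}, {8, 8, 8}, {2, 2, 16}};
    int64_t struct_size = 24;

    std::vector<int64_t> llvm_elems; // byte sizes of the generated LLVM elements
    std::vector<int32_t> remapping;  // source field index -> LLVM element index

    int64_t cursor = 0;
    for (const Field &f : fields) {
        int64_t pad = f.offset - cursor;
        if (pad != 0) {                        // padding element only when needed
            int64_t elem = 0, len = 0;
            padding_filler(pad, f.align, &elem, &len);
            llvm_elems.push_back(elem * len);
        }
        remapping.push_back((int32_t)llvm_elems.size());
        llvm_elems.push_back(f.size);
        cursor = f.offset + f.size;
    }
    int64_t tail = struct_size - cursor;       // trailing padding, align 1
    if (tail > 0) llvm_elems.push_back(tail);

    for (size_t i = 0; i < remapping.size(); i++) {
        std::printf("field %zu -> LLVM element %d\n", i, remapping[i]);
    }
    return 0;
}
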
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index d0ba29964..3ae4e24b2 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -91,6 +91,8 @@ struct lbIncompleteDebugType {
LLVMMetadataRef metadata;
};
+typedef Slice<i32> lbStructFieldRemapping;
+
struct lbModule {
LLVMModuleRef mod;
LLVMContextRef ctx;
@@ -100,8 +102,9 @@ struct lbModule {
CheckerInfo *info;
AstPackage *pkg; // associated
- Map<LLVMTypeRef> types; // Key: Type *
- Map<Type *> llvm_types; // Key: LLVMTypeRef
+ Map<LLVMTypeRef> types; // Key: Type *
+ Map<lbStructFieldRemapping> struct_field_remapping; // Key: LLVMTypeRef or Type *
+ Map<Type *> llvm_types; // Key: LLVMTypeRef
i32 internal_type_level;
Map<lbValue> values; // Key: Entity *
@@ -447,7 +450,10 @@ lbCopyElisionHint lb_set_copy_elision_hint(lbProcedure *p, lbAddr const &addr, A
void lb_reset_copy_elision_hint(lbProcedure *p, lbCopyElisionHint prev_hint);
lbValue lb_consume_copy_elision_hint(lbProcedure *p);
+
bool lb_struct_has_padding_prefix(Type *t);
+lbStructFieldRemapping lb_get_struct_remapping(lbModule *m, Type *t);
+LLVMTypeRef lb_type_padding_filler(lbModule *m, i64 padding, i64 padding_align);
#define LB_STARTUP_RUNTIME_PROC_NAME "__$startup_runtime"
#define LB_STARTUP_TYPE_INFO_PROC_NAME "__$startup_type_info"
@@ -461,51 +467,51 @@ bool lb_struct_has_padding_prefix(Type *t);
enum lbCallingConventionKind {
- lbCallingConvention_C = 0,
- lbCallingConvention_Fast = 8,
- lbCallingConvention_Cold = 9,
- lbCallingConvention_GHC = 10,
- lbCallingConvention_HiPE = 11,
- lbCallingConvention_WebKit_JS = 12,
- lbCallingConvention_AnyReg = 13,
- lbCallingConvention_PreserveMost = 14,
- lbCallingConvention_PreserveAll = 15,
- lbCallingConvention_Swift = 16,
- lbCallingConvention_CXX_FAST_TLS = 17,
- lbCallingConvention_FirstTargetCC = 64,
- lbCallingConvention_X86_StdCall = 64,
- lbCallingConvention_X86_FastCall = 65,
- lbCallingConvention_ARM_APCS = 66,
- lbCallingConvention_ARM_AAPCS = 67,
- lbCallingConvention_ARM_AAPCS_VFP = 68,
- lbCallingConvention_MSP430_INTR = 69,
- lbCallingConvention_X86_ThisCall = 70,
- lbCallingConvention_PTX_Kernel = 71,
- lbCallingConvention_PTX_Device = 72,
- lbCallingConvention_SPIR_FUNC = 75,
- lbCallingConvention_SPIR_KERNEL = 76,
- lbCallingConvention_Intel_OCL_BI = 77,
- lbCallingConvention_X86_64_SysV = 78,
- lbCallingConvention_Win64 = 79,
- lbCallingConvention_X86_VectorCall = 80,
- lbCallingConvention_HHVM = 81,
- lbCallingConvention_HHVM_C = 82,
- lbCallingConvention_X86_INTR = 83,
- lbCallingConvention_AVR_INTR = 84,
- lbCallingConvention_AVR_SIGNAL = 85,
- lbCallingConvention_AVR_BUILTIN = 86,
- lbCallingConvention_AMDGPU_VS = 87,
- lbCallingConvention_AMDGPU_GS = 88,
- lbCallingConvention_AMDGPU_PS = 89,
- lbCallingConvention_AMDGPU_CS = 90,
- lbCallingConvention_AMDGPU_KERNEL = 91,
- lbCallingConvention_X86_RegCall = 92,
- lbCallingConvention_AMDGPU_HS = 93,
- lbCallingConvention_MSP430_BUILTIN = 94,
- lbCallingConvention_AMDGPU_LS = 95,
- lbCallingConvention_AMDGPU_ES = 96,
- lbCallingConvention_AArch64_VectorCall = 97,
- lbCallingConvention_MaxID = 1023,
+ lbCallingConvention_C = 0,
+ lbCallingConvention_Fast = 8,
+ lbCallingConvention_Cold = 9,
+ lbCallingConvention_GHC = 10,
+ lbCallingConvention_HiPE = 11,
+ lbCallingConvention_WebKit_JS = 12,
+ lbCallingConvention_AnyReg = 13,
+ lbCallingConvention_PreserveMost = 14,
+ lbCallingConvention_PreserveAll = 15,
+ lbCallingConvention_Swift = 16,
+ lbCallingConvention_CXX_FAST_TLS = 17,
+ lbCallingConvention_FirstTargetCC = 64,
+ lbCallingConvention_X86_StdCall = 64,
+ lbCallingConvention_X86_FastCall = 65,
+ lbCallingConvention_ARM_APCS = 66,
+ lbCallingConvention_ARM_AAPCS = 67,
+ lbCallingConvention_ARM_AAPCS_VFP = 68,
+ lbCallingConvention_MSP430_INTR = 69,
+ lbCallingConvention_X86_ThisCall = 70,
+ lbCallingConvention_PTX_Kernel = 71,
+ lbCallingConvention_PTX_Device = 72,
+ lbCallingConvention_SPIR_FUNC = 75,
+ lbCallingConvention_SPIR_KERNEL = 76,
+ lbCallingConvention_Intel_OCL_BI = 77,
+ lbCallingConvention_X86_64_SysV = 78,
+ lbCallingConvention_Win64 = 79,
+ lbCallingConvention_X86_VectorCall = 80,
+ lbCallingConvention_HHVM = 81,
+ lbCallingConvention_HHVM_C = 82,
+ lbCallingConvention_X86_INTR = 83,
+ lbCallingConvention_AVR_INTR = 84,
+ lbCallingConvention_AVR_SIGNAL = 85,
+ lbCallingConvention_AVR_BUILTIN = 86,
+ lbCallingConvention_AMDGPU_VS = 87,
+ lbCallingConvention_AMDGPU_GS = 88,
+ lbCallingConvention_AMDGPU_PS = 89,
+ lbCallingConvention_AMDGPU_CS = 90,
+ lbCallingConvention_AMDGPU_KERNEL = 91,
+ lbCallingConvention_X86_RegCall = 92,
+ lbCallingConvention_AMDGPU_HS = 93,
+ lbCallingConvention_MSP430_BUILTIN = 94,
+ lbCallingConvention_AMDGPU_LS = 95,
+ lbCallingConvention_AMDGPU_ES = 96,
+ lbCallingConvention_AArch64_VectorCall = 97,
+ lbCallingConvention_MaxID = 1023,
};
lbCallingConventionKind const lb_calling_convention_map[ProcCC_MAX] = {
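
One detail of the header change worth calling out: the remapping table is stored under two keys, the generated LLVMTypeRef and the Odin Type *, and the lookup in lb_get_struct_remapping (later in this diff) tries the LLVM type first and falls back to the Odin type. A stand-in sketch of that dual-keyed lookup, using std::unordered_map in place of the gb Map:

// dual_key_sketch.cpp -- stand-in for the struct_field_remapping lookup.
#include <cstdint>
#include <unordered_map>
#include <vector>

using Remapping = std::vector<int32_t>;

struct RemappingTable {
    // Keyed by opaque pointers: either the LLVMTypeRef or the Odin Type *.
    std::unordered_map<const void *, Remapping> by_key;

    const Remapping *get(const void *llvm_type, const void *odin_type) const {
        auto it = by_key.find(llvm_type);   // preferred key: the LLVM type
        if (it == by_key.end() && odin_type != nullptr) {
            it = by_key.find(odin_type);    // fallback key: the Odin type
        }
        return it == by_key.end() ? nullptr : &it->second;
    }
};

int main() {
    RemappingTable table;
    int llvm_type = 0, odin_type = 0;             // dummy handles for the example
    table.by_key[&odin_type] = Remapping{0, 2, 3};
    const Remapping *r = table.get(&llvm_type, &odin_type);
    return (r != nullptr && (*r)[1] == 2) ? 0 : 1; // found via the fallback key
}
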
diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp
index cb9369c72..2797f7317 100644
--- a/src/llvm_backend_const.cpp
+++ b/src/llvm_backend_const.cpp
@@ -139,15 +139,12 @@ LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values,
GB_ASSERT(value_count_ == bt->Struct.fields.count);
- unsigned field_offset = 0;
- if (lb_struct_has_padding_prefix(bt)) {
- field_offset = 1;
- }
-
- unsigned values_with_padding_count = field_offset + cast(unsigned)(bt->Struct.fields.count*2 + 1);
+ auto field_remapping = lb_get_struct_remapping(m, t);
+ unsigned values_with_padding_count = LLVMCountStructElementTypes(struct_type);
+
LLVMValueRef *values_with_padding = gb_alloc_array(permanent_allocator(), LLVMValueRef, values_with_padding_count);
for (unsigned i = 0; i < value_count; i++) {
- values_with_padding[field_offset + i*2 + 1] = values[i];
+ values_with_padding[field_remapping[i]] = values[i];
}
for (unsigned i = 0; i < values_with_padding_count; i++) {
if (values_with_padding[i] == nullptr) {
@@ -832,16 +829,11 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
if (is_type_raw_union(type)) {
return lb_const_nil(m, original_type);
}
-
- isize offset = 0;
- if (lb_struct_has_padding_prefix(type)) {
- offset = 1;
- }
LLVMTypeRef struct_type = lb_type(m, original_type);
- unsigned value_count = cast(unsigned)(offset + type->Struct.fields.count*2 + 1);
- GB_ASSERT(LLVMCountStructElementTypes(struct_type) == value_count);
+ auto field_remapping = lb_get_struct_remapping(m, type);
+ unsigned value_count = LLVMCountStructElementTypes(struct_type);
LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, value_count);
bool *visited = gb_alloc_array(temporary_allocator(), bool, value_count);
@@ -859,7 +851,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
Selection sel = lookup_field(type, name, false);
Entity *f = type->Struct.fields[sel.index[0]];
- isize index = offset + f->Variable.field_index*2 + 1;
+ i32 index = field_remapping[f->Variable.field_index];
if (elem_type_can_be_constant(f->type)) {
values[index] = lb_const_value(m, f->type, tav.value, allow_local).value;
visited[index] = true;
@@ -874,7 +866,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
val = tav.value;
}
- isize index = offset + f->Variable.field_index*2 + 1;
+ i32 index = field_remapping[f->Variable.field_index];
if (elem_type_can_be_constant(f->type)) {
values[index] = lb_const_value(m, f->type, val, allow_local).value;
visited[index] = true;
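
The constant-builder changes in llvm_backend_const.cpp follow the same pattern: allocate one slot per LLVM struct element, scatter the field constants through the remapping table, and zero-fill whatever remains (the padding elements). A condensed sketch under those assumptions, with Value standing in for LLVMValueRef and zero_of standing in for LLVMConstNull:

// const_scatter_sketch.cpp -- placing constant field values via the remapping.
#include <cstdint>
#include <vector>

struct Value { long long payload; bool is_zero_filler; };

static Value zero_of(size_t /*elem_index*/) { return Value{0, true}; }

static std::vector<Value> build_const_struct(const std::vector<Value> &field_values,
                                             const std::vector<int32_t> &remapping,
                                             size_t llvm_elem_count) {
    std::vector<Value> out(llvm_elem_count, Value{0, false});
    std::vector<bool>  filled(llvm_elem_count, false);

    // Scatter the real field constants into their remapped element slots.
    for (size_t i = 0; i < field_values.size(); i++) {
        out[remapping[i]]    = field_values[i];
        filled[remapping[i]] = true;
    }
    // Everything left over is a padding element; zero-initialize it,
    // mirroring the nullptr check in llvm_const_named_struct above.
    for (size_t i = 0; i < llvm_elem_count; i++) {
        if (!filled[i]) out[i] = zero_of(i);
    }
    return out;
}

int main() {
    // Fields of the {u8, i64, u16} example land at elements 0, 2 and 3 of a
    // five-element LLVM struct (see the layout sketch near the top).
    std::vector<Value> vals = {{1, false}, {2, false}, {3, false}};
    std::vector<Value> c = build_const_struct(vals, {0, 2, 3}, 5);
    return c.size() == 5 ? 0 : 1;
}
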
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index 8366e0b82..01699c978 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -56,6 +56,7 @@ void lb_init_module(lbModule *m, Checker *c) {
gbAllocator a = heap_allocator();
map_init(&m->types, a);
+ map_init(&m->struct_field_remapping, a);
map_init(&m->llvm_types, a);
map_init(&m->values, a);
map_init(&m->soa_values, a);
@@ -1109,10 +1110,10 @@ lbValue lb_emit_union_tag_ptr(lbProcedure *p, lbValue u) {
LLVMTypeRef uvt = LLVMGetElementType(LLVMTypeOf(u.value));
unsigned element_count = LLVMCountStructElementTypes(uvt);
- GB_ASSERT_MSG(element_count == 3, "element_count=%u (%s) != (%s)", element_count, type_to_string(ut), LLVMPrintTypeToString(uvt));
+ GB_ASSERT_MSG(element_count == 2, "element_count=%u (%s) != (%s)", element_count, type_to_string(ut), LLVMPrintTypeToString(uvt));
lbValue tag_ptr = {};
- tag_ptr.value = LLVMBuildStructGEP(p->builder, u.value, 2, "");
+ tag_ptr.value = LLVMBuildStructGEP(p->builder, u.value, 1, "");
tag_ptr.type = alloc_type_pointer(tag_type);
return tag_ptr;
}
@@ -1646,22 +1647,25 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
GB_ASSERT(field_count == 2);
LLVMTypeRef *fields = gb_alloc_array(temporary_allocator(), LLVMTypeRef, field_count);
- LLVMTypeRef padding_type = LLVMArrayType(lb_type(m, t_uintptr), 0);
LLVMTypeRef entries_fields[] = {
- padding_type,
lb_type(m, t_rawptr), // data
- padding_type,
lb_type(m, t_int), // len
- padding_type,
lb_type(m, t_int), // cap
- padding_type,
lb_type(m, t_allocator), // allocator
- padding_type,
};
fields[0] = lb_type(m, internal_type->Struct.fields[0]->type);
fields[1] = LLVMStructTypeInContext(ctx, entries_fields, gb_count_of(entries_fields), false);
-
+
+ { // Add this to simplify things
+ lbStructFieldRemapping entries_field_remapping = {};
+ slice_init(&entries_field_remapping, permanent_allocator(), gb_count_of(entries_fields));
+ for_array(i, entries_field_remapping) {
+ entries_field_remapping[i] = cast(i32)i;
+ }
+ map_set(&m->struct_field_remapping, hash_pointer(fields[1]), entries_field_remapping);
+ }
+
return LLVMStructTypeInContext(ctx, fields, field_count, false);
}
@@ -1678,23 +1682,20 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
fields[1] = LLVMArrayType(lb_type(m, t_u8), size_of_union);
return LLVMStructTypeInContext(ctx, fields, field_count, false);
}
-
- isize offset = 0;
- if (lb_struct_has_padding_prefix(type)) {
- offset = 1;
- }
+
+ lbStructFieldRemapping field_remapping = {};
+ slice_init(&field_remapping, permanent_allocator(), type->Struct.fields.count);
m->internal_type_level += 1;
defer (m->internal_type_level -= 1);
- unsigned field_count = cast(unsigned)(offset + type->Struct.fields.count*2 + 1);
- LLVMTypeRef *fields = gb_alloc_array(temporary_allocator(), LLVMTypeRef, field_count);
-
- LLVMTypeRef type_u8 = lb_type(m, t_u8);
- LLVMTypeRef type_u16 = lb_type(m, t_u16);
- LLVMTypeRef type_u32 = lb_type(m, t_u32);
- LLVMTypeRef type_u64 = lb_type(m, t_u64);
+ auto fields = array_make<LLVMTypeRef>(temporary_allocator(), 0, type->Struct.fields.count*2 + 2);
+ if (lb_struct_has_padding_prefix(type)) {
+ LLVMTypeRef padding_offset = lb_alignment_prefix_type_hack(m, type->Struct.custom_align);
+ array_add(&fields, padding_offset);
+ }
+
i64 padding_offset = 0;
for_array(i, type->Struct.fields) {
GB_ASSERT(type->Struct.offsets != nullptr);
@@ -1702,27 +1703,14 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
Entity *field = type->Struct.fields[i];
i64 padding = type->Struct.offsets[i] - padding_offset;
- LLVMTypeRef padding_type = nullptr;
- if (padding_offset == 0) {
- padding_type = lb_alignment_prefix_type_hack(m, type_align_of(type));
- } else {
- i64 alignment = type_align_of(field->type);
- // NOTE(bill): limit to `[N x u64]` to prevent ABI issues
- alignment = gb_min(alignment, 8);
- if (padding % alignment == 0) {
- isize len = padding/alignment;
- switch (alignment) {
- case 1: padding_type = LLVMArrayType(type_u8, cast(unsigned)len); break;
- case 2: padding_type = LLVMArrayType(type_u16, cast(unsigned)len); break;
- case 4: padding_type = LLVMArrayType(type_u32, cast(unsigned)len); break;
- case 8: padding_type = LLVMArrayType(type_u64, cast(unsigned)len); break;
- }
- } else {
- padding_type = LLVMArrayType(type_u8, cast(unsigned)padding);
- }
+ if (padding != 0) {
+ LLVMTypeRef padding_type = lb_type_padding_filler(m, padding, type_align_of(field->type));
+ array_add(&fields, padding_type);
}
- fields[offset + i*2 + 0] = padding_type;
- fields[offset + i*2 + 1] = lb_type(m, field->type);
+
+ field_remapping[i] = cast(i32)fields.count;
+ array_add(&fields, lb_type(m, field->type));
+
if (!type->Struct.is_packed) {
padding_offset = align_formula(padding_offset, type_align_of(field->type));
}
@@ -1730,17 +1718,18 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
}
i64 end_padding = type_size_of(type)-padding_offset;
- fields[field_count-1] = LLVMArrayType(type_u8, cast(unsigned)end_padding);
-
- if (offset != 0) {
- GB_ASSERT(offset == 1);
- fields[0] = lb_alignment_prefix_type_hack(m, type->Struct.custom_align);
+ if (end_padding > 0) {
+ array_add(&fields, lb_type_padding_filler(m, end_padding, 1));
}
- for (unsigned i = 0; i < field_count; i++) {
+
+ for_array(i, fields) {
GB_ASSERT(fields[i] != nullptr);
}
-
- return LLVMStructTypeInContext(ctx, fields, field_count, type->Struct.is_packed);
+
+ LLVMTypeRef struct_type = LLVMStructTypeInContext(ctx, fields.data, cast(unsigned)fields.count, type->Struct.is_packed);
+ map_set(&m->struct_field_remapping, hash_pointer(struct_type), field_remapping);
+ map_set(&m->struct_field_remapping, hash_pointer(type), field_remapping);
+ return struct_type;
}
break;
@@ -1755,29 +1744,25 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
gb_unused(size);
if (is_type_union_maybe_pointer_original_alignment(type)) {
- LLVMTypeRef fields[1] = {lb_type(m, type->Union.variants[0])};
- return LLVMStructTypeInContext(ctx, fields, 1, false);
+ LLVMTypeRef fields[] = {lb_type(m, type->Union.variants[0])};
+ return LLVMStructTypeInContext(ctx, fields, gb_count_of(fields), false);
}
unsigned block_size = cast(unsigned)type->Union.variant_block_size;
- LLVMTypeRef fields[3] = {};
- unsigned field_count = 1;
- fields[0] = lb_alignment_prefix_type_hack(m, align);
+ auto fields = array_make<LLVMTypeRef>(temporary_allocator(), 0, 2);
if (is_type_union_maybe_pointer(type)) {
- field_count += 1;
- fields[1] = lb_type(m, type->Union.variants[0]);
+ LLVMTypeRef variant = lb_type(m, type->Union.variants[0]);
+ array_add(&fields, variant);
} else {
- field_count += 2;
- if (block_size == align) {
- fields[1] = LLVMIntTypeInContext(m->ctx, 8*block_size);
- } else {
- fields[1] = LLVMArrayType(lb_type(m, t_u8), block_size);
- }
- fields[2] = lb_type(m, union_tag_type(type));
+ LLVMTypeRef block_type = lb_type_padding_filler(m, block_size, align);
+ LLVMTypeRef tag_type = lb_type(m, union_tag_type(type));
+ array_add(&fields, block_type);
+ array_add(&fields, tag_type);
+
}
-
- return LLVMStructTypeInContext(ctx, fields, field_count, false);
+
+ return LLVMStructTypeInContext(ctx, fields.data, cast(unsigned)fields.count, false);
}
break;
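
The union changes above are what drive the earlier assertion update in lb_emit_union_tag_ptr: with the alignment-prefix element gone, a tagged union lowers to just {variant block, tag}, so the tag now sits at element 1 instead of 2. A small sketch of that two-element layout (hypothetical names; the real code builds the types with lb_type_padding_filler and lb_type(m, union_tag_type(type))):

// union_layout_sketch.cpp -- the two-element tagged-union layout after this change.
#include <cstdint>
#include <cstdio>

struct UnionLayout {
    int64_t block_size;  // bytes reserved for the largest variant
    int64_t tag_size;    // bytes used by the discriminant
    int     tag_element; // LLVM struct element index of the tag, -1 if none
};

static UnionLayout lower_union(int64_t variant_block_size, int64_t tag_size,
                               bool maybe_pointer) {
    if (maybe_pointer) {
        // Pointer-like unions lower to a single element and carry no tag.
        return UnionLayout{variant_block_size, 0, -1};
    }
    // Otherwise element 0 is the variant block (a padding-filler array) and
    // element 1 is the tag -- hence the GEP index 1 in lb_emit_union_tag_ptr.
    return UnionLayout{variant_block_size, tag_size, 1};
}

int main() {
    UnionLayout u = lower_union(16, 8, false);
    std::printf("block=%lld bytes, tag at element %d\n",
                (long long)u.block_size, u.tag_element);
    return 0;
}
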
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index d1613a354..470347e82 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -807,25 +807,56 @@ lbValue lb_address_from_load(lbProcedure *p, lbValue value) {
return {};
}
+
bool lb_struct_has_padding_prefix(Type *t) {
Type *bt = base_type(t);
GB_ASSERT(bt->kind == Type_Struct);
return bt->Struct.custom_align != 0 && bt->Struct.fields.count == 0;
}
+lbStructFieldRemapping lb_get_struct_remapping(lbModule *m, Type *t) {
+ t = base_type(t);
+ LLVMTypeRef struct_type = lb_type(m, t);
+ auto *field_remapping = map_get(&m->struct_field_remapping, hash_pointer(struct_type));
+ if (field_remapping == nullptr) {
+ field_remapping = map_get(&m->struct_field_remapping, hash_pointer(t));
+ }
+ GB_ASSERT(field_remapping != nullptr);
+ return *field_remapping;
+}
i32 lb_convert_struct_index(lbModule *m, Type *t, i32 index) {
if (t->kind == Type_Struct) {
- index = index*2 + 1;
- if (lb_struct_has_padding_prefix(t)) {
- index += 1;
+ auto field_remapping = lb_get_struct_remapping(m, t);
+ index = field_remapping[index];
+ }
+ return index;
+}
+
+LLVMTypeRef lb_type_padding_filler(lbModule *m, i64 padding, i64 padding_align) {
+ // NOTE(bill): limit to `[N x u64]` to prevent ABI issues
+ padding_align = gb_clamp(padding_align, 1, 8);
+ if (padding % padding_align == 0) {
+ LLVMTypeRef elem = nullptr;
+ isize len = padding/padding_align;
+ switch (padding_align) {
+ case 1: elem = lb_type(m, t_u8); break;
+ case 2: elem = lb_type(m, t_u16); break;
+ case 4: elem = lb_type(m, t_u32); break;
+ case 8: elem = lb_type(m, t_u64); break;
}
- unsigned count = LLVMCountStructElementTypes(lb_type(m, t));
- GB_ASSERT(count >= cast(unsigned)index);
+ GB_ASSERT_MSG(elem != nullptr, "Invalid lb_type_padding_filler padding and padding_align: %lld", padding_align);
+ if (len != 1) {
+ return LLVMArrayType(elem, cast(unsigned)len);
+ } else {
+ return elem;
+ }
+ } else {
+ return LLVMArrayType(lb_type(m, t_u8), cast(unsigned)padding);
}
- return index;
}
+
char const *llvm_type_kinds[] = {
"LLVMVoidTypeKind",
"LLVMHalfTypeKind",
@@ -912,7 +943,6 @@ lbValue lb_emit_struct_ep(lbProcedure *p, lbValue s, i32 index) {
case 0: result_type = get_struct_field_type(gst, 0); break;
case 1: result_type = get_struct_field_type(gst, 1); break;
}
- index = index*2 + 1;
} else if (is_type_array(t)) {
return lb_emit_array_epi(p, s, index);
} else if (is_type_relative_slice(t)) {
@@ -926,6 +956,7 @@ lbValue lb_emit_struct_ep(lbProcedure *p, lbValue s, i32 index) {
GB_ASSERT_MSG(result_type != nullptr, "%s %d", type_to_string(t), index);
+ i32 original_index = index;
index = lb_convert_struct_index(p->module, t, index);
if (lb_is_const(s)) {
@@ -943,7 +974,7 @@ lbValue lb_emit_struct_ep(lbProcedure *p, lbValue s, i32 index) {
// gb_printf_err("%d\n", index);
GB_ASSERT_MSG(LLVMGetTypeKind(st) == LLVMStructTypeKind, "%s", llvm_type_kinds[LLVMGetTypeKind(st)]);
unsigned count = LLVMCountStructElementTypes(st);
- GB_ASSERT(count >= cast(unsigned)index);
+ GB_ASSERT_MSG(count >= cast(unsigned)index, "%u %d %d", count, index, original_index);
res.value = LLVMBuildStructGEP(p->builder, s.value, cast(unsigned)index, "");
res.type = alloc_type_pointer(result_type);