aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorLaytan Laats <laytanlaats@hotmail.com>2025-05-09 22:27:35 +0200
committerLaytan <laytanlaats@hotmail.com>2025-05-09 23:03:17 +0200
commitf9b9e9e7dcbb605bc64bc5af1331855375f58494 (patch)
tree551e8f0d33da0696c67e50e03c271df787e2ab8a /src
parent4d08b7637279c49cb56106a1247e7dfc09265fb2 (diff)
some ABI fixups and improvements
Started with trying to enable asan in the CI for macOS, and noticed it wasn't enabled on the `tests/internal` folder; enabling it surfaced a couple of issues with the abi/OdinLLVMBuildTransmute that this also solves. - Looking at clang output for arm64, we should be promoting `{ i64, i32 }` to `{ i64, i64 }` - after doing the previous point, I noticed this is not handled well in OdinLLVMBuildTransmute, which was emitting loads and stores into the space of a value that is only there for alignment; asan does not allow this. Looking at clang output again, a memcpy is the appropriate way of handling this. - Having done this, we don't need the hacky "return is packed" setting in the amd64 sysv ABI anymore either
Diffstat (limited to 'src')
-rw-r--r--src/llvm_abi.cpp42
-rw-r--r--src/llvm_backend_general.cpp58
2 files changed, 69 insertions, 31 deletions
diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp
index c8e1ca764..baad3f873 100644
--- a/src/llvm_abi.cpp
+++ b/src/llvm_abi.cpp
@@ -977,7 +977,7 @@ namespace lbAbiAmd64SysV {
return types[0];
}
- return LLVMStructTypeInContext(c, types.data, cast(unsigned)types.count, sz == 0);
+ return LLVMStructTypeInContext(c, types.data, cast(unsigned)types.count, false);
}
gb_internal void classify_with(LLVMTypeRef t, Array<RegClass> *cls, i64 ix, i64 off) {
@@ -1231,38 +1231,24 @@ namespace lbAbiArm64 {
}
} else {
i64 size = lb_sizeof(return_type);
- if (size <= 16) {
- LLVMTypeRef cast_type = nullptr;
-
- if (size == 0) {
- cast_type = LLVMStructTypeInContext(c, nullptr, 0, false);
- } else if (size <= 8) {
- cast_type = LLVMIntTypeInContext(c, cast(unsigned)(size*8));
- } else {
- unsigned count = cast(unsigned)((size+7)/8);
-
- LLVMTypeRef llvm_i64 = LLVMIntTypeInContext(c, 64);
- LLVMTypeRef *types = gb_alloc_array(temporary_allocator(), LLVMTypeRef, count);
-
- i64 size_copy = size;
- for (unsigned i = 0; i < count; i++) {
- if (size_copy >= 8) {
- types[i] = llvm_i64;
- } else {
- types[i] = LLVMIntTypeInContext(c, 8*cast(unsigned)size_copy);
- }
- size_copy -= 8;
- }
- GB_ASSERT(size_copy <= 0);
- cast_type = LLVMStructTypeInContext(c, types, count, true);
- }
- return lb_arg_type_direct(return_type, cast_type, nullptr, nullptr);
- } else {
+ if (size > 16) {
LB_ABI_MODIFY_RETURN_IF_TUPLE_MACRO();
LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
return lb_arg_type_indirect(return_type, attr);
}
+
+ GB_ASSERT(size <= 16);
+ LLVMTypeRef cast_type = nullptr;
+ if (size == 0) {
+ cast_type = LLVMStructTypeInContext(c, nullptr, 0, false);
+ } else if (size <= 8) {
+ cast_type = LLVMIntTypeInContext(c, cast(unsigned)(size*8));
+ } else {
+ LLVMTypeRef llvm_i64 = LLVMIntTypeInContext(c, 64);
+ cast_type = LLVMArrayType2(llvm_i64, 2);
+ }
+ return lb_arg_type_direct(return_type, cast_type, nullptr, nullptr);
}
}
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index c52551b36..504c8234e 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -2525,10 +2525,13 @@ general_end:;
}
}
- src_size = align_formula(src_size, src_align);
- dst_size = align_formula(dst_size, dst_align);
+ // NOTE(laytan): even though this logic seems sound, the Address Sanitizer does not
+ // want you to load/store the space of a value that is there for alignment.
+#if 0
+ i64 aligned_src_size = align_formula(src_size, src_align);
+ i64 aligned_dst_size = align_formula(dst_size, dst_align);
- if (LLVMIsALoadInst(val) && (src_size >= dst_size && src_align >= dst_align)) {
+ if (LLVMIsALoadInst(val) && (aligned_src_size >= aligned_dst_size && src_align >= dst_align)) {
LLVMValueRef val_ptr = LLVMGetOperand(val, 0);
val_ptr = LLVMBuildPointerCast(p->builder, val_ptr, LLVMPointerType(dst_type, 0), "");
LLVMValueRef loaded_val = OdinLLVMBuildLoad(p, dst_type, val_ptr);
@@ -2536,8 +2539,57 @@ general_end:;
// LLVMSetAlignment(loaded_val, gb_min(src_align, dst_align));
return loaded_val;
+ }
+#endif
+
+ if (src_size > dst_size) {
+ GB_ASSERT(p->decl_block != p->curr_block);
+ // NOTE(laytan): src is bigger than dst, need to memcpy the part of src we want.
+
+ LLVMValueRef val_ptr;
+ if (LLVMIsALoadInst(val)) {
+ val_ptr = LLVMGetOperand(val, 0);
+ } else if (LLVMIsAAllocaInst(val)) {
+ val_ptr = LLVMBuildPointerCast(p->builder, val, LLVMPointerType(src_type, 0), "");
+ } else {
+ // NOTE(laytan): we need a pointer to memcpy from.
+ LLVMValueRef val_copy = llvm_alloca(p, src_type, src_align);
+ val_ptr = LLVMBuildPointerCast(p->builder, val_copy, LLVMPointerType(src_type, 0), "");
+ LLVMBuildStore(p->builder, val, val_ptr);
+ }
+
+ i64 max_align = gb_max(lb_alignof(src_type), lb_alignof(dst_type));
+ max_align = gb_max(max_align, 16);
+
+ LLVMValueRef ptr = llvm_alloca(p, dst_type, max_align);
+ LLVMValueRef nptr = LLVMBuildPointerCast(p->builder, ptr, LLVMPointerType(dst_type, 0), "");
+
+ LLVMTypeRef types[3] = {
+ lb_type(p->module, t_rawptr),
+ lb_type(p->module, t_rawptr),
+ lb_type(p->module, t_int)
+ };
+
+ LLVMValueRef args[4] = {
+ nptr,
+ val_ptr,
+ LLVMConstInt(LLVMIntTypeInContext(p->module->ctx, 8*cast(unsigned)build_context.int_size), dst_size, 0),
+ LLVMConstInt(LLVMInt1TypeInContext(p->module->ctx), 0, 0),
+ };
+
+ lb_call_intrinsic(
+ p,
+ "llvm.memcpy.inline",
+ args,
+ gb_count_of(args),
+ types,
+ gb_count_of(types)
+ );
+
+ return OdinLLVMBuildLoad(p, dst_type, ptr);
} else {
GB_ASSERT(p->decl_block != p->curr_block);
+ GB_ASSERT(dst_size >= src_size);
i64 max_align = gb_max(lb_alignof(src_type), lb_alignof(dst_type));
max_align = gb_max(max_align, 16);