Diffstat (limited to 'src/llvm_backend.cpp')
| -rw-r--r-- | src/llvm_backend.cpp | 157 |
1 file changed, 82 insertions, 75 deletions
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 276abc2d4..003424e0a 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -21,6 +21,25 @@
 #include "llvm_backend_stmt.cpp"
 #include "llvm_backend_proc.cpp"
 
+String get_default_microarchitecture() {
+	String default_march = str_lit("generic");
+	if (build_context.metrics.arch == TargetArch_amd64) {
+		// NOTE(bill): x86-64-v2 is more than enough for everyone
+		//
+		// x86-64:    CMOV, CMPXCHG8B, FPU, FXSR, MMX, OSFXSR, SCE, SSE, SSE2
+		// x86-64-v2: (close to Nehalem) CMPXCHG16B, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3
+		// x86-64-v3: (close to Haswell) AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, XSAVE
+		// x86-64-v4: AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL
+		if (ODIN_LLVM_MINIMUM_VERSION_12) {
+			if (build_context.metrics.os == TargetOs_freestanding) {
+				default_march = str_lit("x86-64");
+			} else {
+				default_march = str_lit("x86-64-v2");
+			}
+		}
+	}
+	return default_march;
+}
 
 gb_internal void lb_add_foreign_library_path(lbModule *m, Entity *e) {
 	if (e == nullptr) {
@@ -1478,8 +1497,6 @@ gb_internal WORKER_TASK_PROC(lb_llvm_module_pass_worker_proc) {
 
 	auto passes = array_make<char const *>(heap_allocator(), 0, 64);
 	defer (array_free(&passes));
-
-
 	LLVMPassBuilderOptionsRef pb_options = LLVMCreatePassBuilderOptions();
 	defer (LLVMDisposePassBuilderOptions(pb_options));
 
@@ -2486,40 +2503,72 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
 	LLVMCodeModel code_mode = LLVMCodeModelDefault;
 	if (is_arch_wasm()) {
 		code_mode = LLVMCodeModelJITDefault;
-	} else if (build_context.metrics.os == TargetOs_freestanding) {
+	} else if (is_arch_x86() && build_context.metrics.os == TargetOs_freestanding) {
 		code_mode = LLVMCodeModelKernel;
 	}
 
-	char const *host_cpu_name = LLVMGetHostCPUName();
-	char const *llvm_cpu = "generic";
+	String host_cpu_name = copy_string(permanent_allocator(), make_string_c(LLVMGetHostCPUName()));
+	String llvm_cpu = get_default_microarchitecture();
 	char const *llvm_features = "";
 	if (build_context.microarch.len != 0) {
 		if (build_context.microarch == "native") {
 			llvm_cpu = host_cpu_name;
 		} else {
-			llvm_cpu = alloc_cstring(permanent_allocator(), build_context.microarch);
+			llvm_cpu = copy_string(permanent_allocator(), build_context.microarch);
 		}
-		if (gb_strcmp(llvm_cpu, host_cpu_name) == 0) {
+		if (llvm_cpu == host_cpu_name) {
 			llvm_features = LLVMGetHostCPUFeatures();
 		}
-	} else if (build_context.metrics.arch == TargetArch_amd64) {
-		// NOTE(bill): x86-64-v2 is more than enough for everyone
-		//
-		// x86-64: CMOV, CMPXCHG8B, FPU, FXSR, MMX, FXSR, SCE, SSE, SSE2
-		// x86-64-v2: (close to Nehalem) CMPXCHG16B, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3
-		// x86-64-v3: (close to Haswell) AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, XSAVE
-		// x86-64-v4: AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL
-		if (ODIN_LLVM_MINIMUM_VERSION_12) {
-			if (build_context.metrics.os == TargetOs_freestanding) {
-				llvm_cpu = "x86-64";
-			} else {
-				llvm_cpu = "x86-64-v2";
-			}
-		}
 	}
 
+	// NOTE(Jeroen): Uncomment to get the list of supported microarchitectures.
+	/*
+	if (build_context.microarch == "?") {
+		string_set_add(&build_context.target_features_set, str_lit("+cpuhelp"));
+	}
+	*/
+
 	if (build_context.target_features_set.entries.count != 0) {
-		llvm_features = target_features_set_to_cstring(permanent_allocator(), false);
+		// Prefix all of the features with a `+`, because we are
+		// enabling additional features.
+		char const *additional_features = target_features_set_to_cstring(permanent_allocator(), false, true);
+
+		String f_string = make_string_c(llvm_features);
+		String a_string = make_string_c(additional_features);
+		isize f_len = f_string.len;
+
+		if (f_len == 0) {
+			// The common case is that llvm_features is empty, so
+			// the target_features_set additions can be used as is.
+			llvm_features = additional_features;
+		} else {
+			// The user probably specified `-microarch:native`, so
+			// llvm_features is populated by LLVM's idea of what
+			// the host CPU supports.
+			//
+			// As far as I can tell (which is barely better than
+			// wild guessing), a bitset is formed by parsing the
+			// string left to right.
+			//
+			// So, llvm_features + ',' + additional_features will
+			// make the target_features_set override llvm_features.
+
+			char *tmp = gb_alloc_array(permanent_allocator(), char, f_len + 1 + a_string.len + 1);
+			isize len = 0;
+
+			// tmp = f_string
+			gb_memmove(tmp, f_string.text, f_string.len);
+			len += f_string.len;
+			// tmp += ','
+			tmp[len++] = ',';
+			// tmp += a_string
+			gb_memmove(tmp + len, a_string.text, a_string.len);
+			len += a_string.len;
+			// tmp += NUL
+			tmp[len++] = 0;
+
+			llvm_features = tmp;
+		}
 	}
 
 	// GB_ASSERT_MSG(LLVMTargetHasAsmBackend(target));
@@ -2566,7 +2615,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
 
 	for (auto const &entry : gen->modules) {
 		LLVMTargetMachineRef target_machine = LLVMCreateTargetMachine(
-			target, target_triple, llvm_cpu,
+			target, target_triple, (const char *)llvm_cpu.text,
 			llvm_features,
 			code_gen_level,
 			reloc_mode,
@@ -2685,64 +2734,22 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
 			}
 		}
 
-		{
-			char const *name = LB_TYPE_INFO_TYPES_NAME;
-			Type *t = alloc_type_array(t_type_info_ptr, count);
-			LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
-			LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
-			LLVMSetLinkage(g, LLVMInternalLinkage);
-			if (LB_USE_GIANT_PACKED_STRUCT) {
-				lb_make_global_private_const(g);
-			}
-			lb_global_type_info_member_types = lb_addr({g, alloc_type_pointer(t)});
-
-		}
-		{
-			char const *name = LB_TYPE_INFO_NAMES_NAME;
-			Type *t = alloc_type_array(t_string, count);
-			LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
-			LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
-			LLVMSetLinkage(g, LLVMInternalLinkage);
-			if (LB_USE_GIANT_PACKED_STRUCT) {
-				lb_make_global_private_const(g);
-			}
-			lb_global_type_info_member_names = lb_addr({g, alloc_type_pointer(t)});
-		}
-		{
-			char const *name = LB_TYPE_INFO_OFFSETS_NAME;
-			Type *t = alloc_type_array(t_uintptr, count);
-			LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
-			LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
-			LLVMSetLinkage(g, LLVMInternalLinkage);
-			if (LB_USE_GIANT_PACKED_STRUCT) {
-				lb_make_global_private_const(g);
-			}
-			lb_global_type_info_member_offsets = lb_addr({g, alloc_type_pointer(t)});
-		}
-
-		{
-			char const *name = LB_TYPE_INFO_USINGS_NAME;
-			Type *t = alloc_type_array(t_bool, count);
+		auto const global_type_info_make = [](lbModule *m, char const *name, Type *elem_type, i64 count) -> lbAddr {
+			Type *t = alloc_type_array(elem_type, count);
 			LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
 			LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
 			LLVMSetLinkage(g, LLVMInternalLinkage);
 			if (LB_USE_GIANT_PACKED_STRUCT) {
 				lb_make_global_private_const(g);
 			}
-			lb_global_type_info_member_usings = lb_addr({g, alloc_type_pointer(t)});
-		}
-
-		{
-			char const *name = LB_TYPE_INFO_TAGS_NAME;
-			Type *t = alloc_type_array(t_string, count);
-			LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
-			LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
-			LLVMSetLinkage(g, LLVMInternalLinkage);
-			if (LB_USE_GIANT_PACKED_STRUCT) {
-				lb_make_global_private_const(g);
-			}
-			lb_global_type_info_member_tags = lb_addr({g, alloc_type_pointer(t)});
-		}
+			return lb_addr({g, alloc_type_pointer(t)});
+		};
+
+		lb_global_type_info_member_types = global_type_info_make(m, LB_TYPE_INFO_TYPES_NAME, t_type_info_ptr, count);
+		lb_global_type_info_member_names = global_type_info_make(m, LB_TYPE_INFO_NAMES_NAME, t_string, count);
+		lb_global_type_info_member_offsets = global_type_info_make(m, LB_TYPE_INFO_OFFSETS_NAME, t_uintptr, count);
+		lb_global_type_info_member_usings = global_type_info_make(m, LB_TYPE_INFO_USINGS_NAME, t_bool, count);
+		lb_global_type_info_member_tags = global_type_info_make(m, LB_TYPE_INFO_TAGS_NAME, t_string, count);
 	}
}
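For context on where `llvm_cpu` and `llvm_features` end up, the following is a minimal sketch (not taken from the compiler) of the LLVM-C calls the patch feeds them into. It assumes an amd64 host with LLVM 12 or newer, where the "x86-64-v2" default the patch introduces is a valid CPU name; the `native` command-line toggle is only illustrative of the `-microarch:native` path.

// Sketch: create a target machine for the "x86-64-v2" baseline, or for the
// host CPU when "native" is requested (assumes amd64 host, LLVM >= 12).
#include <llvm-c/Target.h>
#include <llvm-c/TargetMachine.h>
#include <cstdio>
#include <cstring>

int main(int argc, char **argv) {
	LLVMInitializeNativeTarget();
	LLVMInitializeNativeAsmPrinter();

	char *triple = LLVMGetDefaultTargetTriple();
	LLVMTargetRef target = nullptr;
	char *err = nullptr;
	if (LLVMGetTargetFromTriple(triple, &target, &err)) {
		std::fprintf(stderr, "no target: %s\n", err);
		LLVMDisposeMessage(err);
		return 1;
	}

	// "-microarch:native" style behaviour vs. the patch's default microarch.
	bool native = (argc > 1 && std::strcmp(argv[1], "native") == 0);
	char *host_cpu = LLVMGetHostCPUName();
	char *host_features = LLVMGetHostCPUFeatures();
	const char *cpu = native ? host_cpu : "x86-64-v2";
	const char *features = native ? host_features : "";

	LLVMTargetMachineRef tm = LLVMCreateTargetMachine(
		target, triple, cpu, features,
		LLVMCodeGenLevelDefault, LLVMRelocDefault, LLVMCodeModelDefault);

	std::printf("cpu=%s features=%s\n", cpu, features);

	LLVMDisposeTargetMachine(tm);
	LLVMDisposeMessage(host_cpu);
	LLVMDisposeMessage(host_features);
	LLVMDisposeMessage(triple);
	return 0;
}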
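The feature-merging comment in the patch ("a bitset is formed by parsing the string left to right") can also be illustrated in isolation. The sketch below is not part of the patch; `join_features` is a hypothetical helper, and the "later entries win" behaviour is the same assumption the patch's own comment hedges on.

// Sketch: join a base "+feat"/"-feat" string and user additions so the
// additions come last and therefore override earlier entries (assumed
// left-to-right parsing, as in the patch's comment).
#include <cstdio>
#include <string>

// Hypothetical helper mirroring the llvm_features + ',' + additional_features
// concatenation done in lb_generate_code.
static std::string join_features(const std::string &host, const std::string &extra) {
	if (host.empty())  return extra;  // common case: no host feature string
	if (extra.empty()) return host;
	return host + "," + extra;        // later entries override earlier ones
}

int main() {
	// e.g. -microarch:native reported "-avx512f", but the user enabled "+avx512f"
	std::string host  = "+sse4.2,+avx2,-avx512f";
	std::string extra = "+avx512f";
	std::printf("%s\n", join_features(host, extra).c_str());
	// prints: +sse4.2,+avx2,-avx512f,+avx512f  (the trailing +avx512f wins)
	return 0;
}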