author    gingerBill <bill@gingerbill.org>    2022-11-08 00:38:31 +0000
committer gingerBill <bill@gingerbill.org>    2022-11-08 00:38:31 +0000
commit    da774e3fd22848bdedf5aadb9d897c0c4e9d9b7a
tree      1b8a4546272d81c8f6efdb223d71804a14213d98
parent    2c3febd6203b3b55f6e4e98eaf30a2821489f97f
General modifications
-rw-r--r--  core/fmt/fmt.odin                        |   3
-rw-r--r--  core/runtime/dynamic_map_internal.odin   |  48
-rw-r--r--  src/check_type.cpp                       |  19
-rw-r--r--  src/llvm_backend.cpp                     |  19
-rw-r--r--  src/llvm_backend.hpp                     |   2
-rw-r--r--  src/llvm_backend_stmt.cpp                | 107
-rw-r--r--  src/llvm_backend_utility.cpp             |  44
7 files changed, 165 insertions, 77 deletions
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index b4118a95b..384f43aa5 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -2075,9 +2075,10 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
map_cap := uintptr(runtime.map_cap(m^))
ks, vs, hs, _, _ := runtime.map_kvh_data_dynamic(m^, info.map_info)
j := 0
+ fmt_arg(fi, map_cap, 'v')
for bucket_index in 0..<map_cap {
if hs[bucket_index] == 0 {
- continue
+ // continue
}
if j > 0 {
diff --git a/core/runtime/dynamic_map_internal.odin b/core/runtime/dynamic_map_internal.odin
index 5e1c67e1c..f173f095a 100644
--- a/core/runtime/dynamic_map_internal.odin
+++ b/core/runtime/dynamic_map_internal.odin
@@ -88,7 +88,7 @@ MAP_CACHE_LINE_SIZE :: 1 << MAP_CACHE_LINE_LOG2
// In the optimal case, len(Map_Cell(T){}.data) = 1 so the cell array can be treated
// as a regular array of T, which is the case for hashes.
Map_Cell :: struct($T: typeid) #align MAP_CACHE_LINE_SIZE {
- data: [MAP_CACHE_LINE_SIZE / size_of(T) when size_of(T) < MAP_CACHE_LINE_SIZE else 1]T,
+ data: [MAP_CACHE_LINE_SIZE / size_of(T) when 0 < size_of(T) && size_of(T) < MAP_CACHE_LINE_SIZE else 1]T,
}
// So we can operate on a cell data structure at runtime without any type
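For reference, the array length rule in Map_Cell works out as follows (a minimal standalone sketch, not part of this patch, with made-up names): a cell packs as many elements as fit in one 64-byte cache line and falls back to a single element otherwise; the new 0 < size_of(T) term keeps zero-sized types from producing a division by zero in the length expression.

    #include <cstdio>

    // Illustrative only (not part of this patch): the Map_Cell data array
    // length, mirrored as a plain function. A cell holds as many elements as
    // fit in one 64-byte cache line, and at least one element otherwise;
    // the 0 < size check keeps zero-sized types from dividing by zero.
    constexpr long long CACHE_LINE_SIZE = 64;

    constexpr long long elements_per_cell(long long elem_size) {
        return (0 < elem_size && elem_size < CACHE_LINE_SIZE)
            ? CACHE_LINE_SIZE / elem_size
            : 1;
    }

    int main() {
        printf("%lld %lld %lld\n",
               elements_per_cell(8),    // 8 hashes per cell
               elements_per_cell(100),  // 1 oversized element per cell
               elements_per_cell(0));   // 1, instead of a divide-by-zero
        return 0;
    }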
@@ -107,36 +107,38 @@ Map_Cell_Info :: struct {
map_cell_index_dynamic :: #force_inline proc "contextless" (base: uintptr, info: ^Map_Cell_Info, index: uintptr) -> uintptr {
// Micro-optimize the case when the number of elements per cell is one or two
// to save on expensive integer division.
+
+ cell_index, data_index: uintptr
switch elements_per_cell := info.elements_per_cell; elements_per_cell {
case 1:
return base + (index * info.size_of_cell)
case 2:
- cell_index := index >> 1
- data_index := index & 1
+ cell_index = index >> 1
+ data_index = index & 1
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
case 4:
- cell_index := index >> 2
- data_index := index & 3
+ cell_index = index >> 2
+ data_index = index & 3
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
case 8:
- cell_index := index >> 3
- data_index := index & 7
+ cell_index = index >> 3
+ data_index = index & 7
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
case 16:
- cell_index := index >> 4
- data_index := index & 15
+ cell_index = index >> 4
+ data_index = index & 15
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
case 32:
- cell_index := index >> 5
- data_index := index & 31
+ cell_index = index >> 5
+ data_index = index & 31
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
case 64:
- cell_index := index >> 6
- data_index := index & 63
+ cell_index = index >> 6
+ data_index = index & 63
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
case:
- cell_index := index / elements_per_cell
- data_index := index % elements_per_cell
+ cell_index = index / elements_per_cell
+ data_index = index % elements_per_cell
return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
}
}
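The switch above is a strength reduction: element i lives in cell i/N at slot i%N, where N is elements_per_cell, and for power-of-two N the division and modulo collapse to a shift and a mask. A minimal standalone sketch of the equivalence, assuming made-up names and an 8-byte element type:

    #include <cassert>
    #include <cstdint>

    // Illustrative sketch (made-up names) of the addressing done by
    // map_cell_index_dynamic: element i lives in cell i/N at slot i%N,
    // and for power-of-two N the division/modulo become a shift and a mask.
    struct CellInfo {
        uintptr_t size_of_type;
        uintptr_t size_of_cell;
        uintptr_t elements_per_cell; // N
    };

    static uintptr_t cell_index_generic(uintptr_t base, const CellInfo &info, uintptr_t index) {
        uintptr_t cell = index / info.elements_per_cell;
        uintptr_t slot = index % info.elements_per_cell;
        return base + cell*info.size_of_cell + slot*info.size_of_type;
    }

    static uintptr_t cell_index_pow2(uintptr_t base, const CellInfo &info, uintptr_t index, unsigned log2_n) {
        uintptr_t cell = index >> log2_n;                        // index / N
        uintptr_t slot = index & ((uintptr_t(1) << log2_n) - 1); // index % N
        return base + cell*info.size_of_cell + slot*info.size_of_type;
    }

    int main() {
        // 8-byte elements, 8 per 64-byte cell: both paths agree for every index.
        CellInfo info = {8, 64, 8};
        for (uintptr_t i = 0; i < 1000; i++) {
            assert(cell_index_generic(0, info, i) == cell_index_pow2(0, info, i, 3));
        }
        return 0;
    }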
@@ -309,7 +311,7 @@ map_alloc_dynamic :: proc(info: ^Map_Info, log2_capacity: uintptr, allocator :=
return {}, .Out_Of_Memory
}
- capacity := uintptr(1) << log2_capacity
+ capacity := uintptr(1) << max(log2_capacity, MAP_MIN_LOG2_CAPACITY)
@static INFO_HS := Map_Cell_Info {
size_of(Map_Hash),
@@ -552,6 +554,9 @@ map_reserve_dynamic :: proc(#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, ne
allocator = context.allocator
}
+ new_capacity := new_capacity
+ new_capacity = max(new_capacity, uintptr(1)<<MAP_MIN_LOG2_CAPACITY)
+
log2_capacity := map_log2_cap(m^)
capacity := uintptr(1) << log2_capacity
@@ -735,14 +740,13 @@ map_clear_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #n
}
-__dynamic_map_get :: proc "contextless" (m: rawptr, #no_alias info: ^Map_Info, key: rawptr) -> rawptr {
+__dynamic_map_get :: proc "contextless" (m: rawptr, #no_alias info: ^Map_Info, key: rawptr) -> (ptr: rawptr) {
rm := (^Raw_Map)(m)^
- index, ok := map_lookup_dynamic(rm, info, uintptr(key))
- if !ok {
- return nil
+ if index, ok := map_lookup_dynamic(rm, info, uintptr(key)); ok {
+ vs := map_kvh_data_values_dynamic(rm, info)
+ ptr = rawptr(map_cell_index_dynamic(vs, &info.vs, index))
}
- vs := map_kvh_data_values_dynamic(rm, info)
- return rawptr(map_cell_index_dynamic(vs, &info.vs, index))
+ return
}
__dynamic_map_set :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key, value: rawptr, loc := #caller_location) -> rawptr {
diff --git a/src/check_type.cpp b/src/check_type.cpp
index ea1c90a66..39344fb2c 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -2176,6 +2176,25 @@ Type *make_optional_ok_type(Type *value, bool typed) {
return t;
}
+
+// IMPORTANT NOTE(bill): This must match the definition in dynamic_map_internal.odin
+enum : i64 {
+ MAP_CACHE_LINE_LOG2 = 6,
+ MAP_CACHE_LINE_SIZE = 1 << MAP_CACHE_LINE_LOG2
+};
+GB_STATIC_ASSERT(MAP_CACHE_LINE_SIZE >= 64);
+void map_cell_size_and_len(Type *type, i64 *size_, i64 *len_) {
+ i64 elem_sz = type_size_of(type);
+
+ i64 len = 1;
+ if (0 < elem_sz && elem_sz < MAP_CACHE_LINE_SIZE) {
+ len = MAP_CACHE_LINE_SIZE / elem_sz;
+ }
+ i64 size = align_formula(elem_sz * len, MAP_CACHE_LINE_SIZE);
+ if (size_) *size_ = size;
+ if (len_) *len_ = len;
+}
+
void init_map_internal_types(Type *type) {
GB_ASSERT(type->kind == Type_Map);
GB_ASSERT(t_allocator != nullptr);
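As a worked example of the sizing rule this function implements, re-derived standalone below with the element size passed in directly, since the real function takes a Type* and goes through type_size_of and align_formula; everything here is illustrative.

    #include <cstdio>

    // Standalone re-derivation of map_cell_size_and_len, for illustration only:
    // the element size is passed in directly instead of being read off a Type*.
    enum : long long { CACHE_LINE = 64 };

    static long long align_up(long long x, long long a) {
        return (x + a - 1) / a * a;
    }

    static void cell_size_and_len(long long elem_sz, long long *size_, long long *len_) {
        long long len = 1;
        if (0 < elem_sz && elem_sz < CACHE_LINE) {
            len = CACHE_LINE / elem_sz;
        }
        long long size = align_up(elem_sz * len, CACHE_LINE);
        if (size_) *size_ = size;
        if (len_)  *len_  = len;
    }

    int main() {
        // elem size -> (cell size, elements per cell):
        //   1 -> (64, 64), 8 -> (64, 8), 48 -> (64, 1), 100 -> (128, 1)
        long long sizes[] = {1, 8, 48, 100};
        for (long long sz : sizes) {
            long long cell_size, cell_len;
            cell_size_and_len(sz, &cell_size, &cell_len);
            printf("elem %3lld -> size %3lld, len %2lld\n", sz, cell_size, cell_len);
        }
        return 0;
    }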
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 7c1e53b7f..fa9727106 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -500,27 +500,10 @@ lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, A
return value;
}
-// IMPORTANT NOTE(bill): This must match the definition in dynamic_map_internal.odin
-enum : i64 {
- MAP_CACHE_LINE_LOG2 = 6,
- MAP_CACHE_LINE_SIZE = 1 << MAP_CACHE_LINE_LOG2
-};
-GB_STATIC_ASSERT(MAP_CACHE_LINE_SIZE >= 64);
-void lb_map_cell_size_and_len(Type *type, i64 *size_, i64 *len_) {
- i64 elem_sz = type_size_of(type);
-
- i64 len = 1;
- if (0 < elem_sz && elem_sz < MAP_CACHE_LINE_SIZE) {
- len = MAP_CACHE_LINE_SIZE / elem_sz;
- }
- i64 size = align_formula(elem_sz * len, MAP_CACHE_LINE_SIZE);
- if (size_) *size_ = size;
- if (len_) *len_ = len;
-}
LLVMValueRef lb_gen_map_cell_info(lbModule *m, Type *type) {
i64 size = 0, len = 0;
- lb_map_cell_size_and_len(type, &size, &len);
+ map_cell_size_and_len(type, &size, &len);
LLVMValueRef const_values[4] = {};
const_values[0] = lb_const_int(m, t_uintptr, type_size_of(type)).value;
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index f4d4cce21..2b9014d94 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -422,8 +422,6 @@ lbValue lb_dynamic_array_elem(lbProcedure *p, lbValue da);
lbValue lb_dynamic_array_len(lbProcedure *p, lbValue da);
lbValue lb_dynamic_array_cap(lbProcedure *p, lbValue da);
lbValue lb_dynamic_array_allocator(lbProcedure *p, lbValue da);
-lbValue lb_map_entries(lbProcedure *p, lbValue value);
-lbValue lb_map_entries_ptr(lbProcedure *p, lbValue value);
lbValue lb_map_len(lbProcedure *p, lbValue value);
lbValue lb_map_cap(lbProcedure *p, lbValue value);
lbValue lb_soa_struct_len(lbProcedure *p, lbValue value);
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index bd622d411..55b0c4d90 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -354,16 +354,6 @@ void lb_build_range_indexed(lbProcedure *p, lbValue expr, Type *val_type, lbValu
}
break;
}
- case Type_Map: {
- lbValue entries = lb_map_entries_ptr(p, expr);
- lbValue elem = lb_emit_struct_ep(p, entries, 0);
- elem = lb_emit_load(p, elem);
- lbValue entry = lb_emit_ptr_offset(p, elem, idx);
- idx = lb_emit_load(p, lb_emit_struct_ep(p, entry, 2));
- val = lb_emit_load(p, lb_emit_struct_ep(p, entry, 3));
-
- break;
- }
case Type_Struct: {
GB_ASSERT(is_type_soa_struct(expr_type));
break;
@@ -380,6 +370,99 @@ void lb_build_range_indexed(lbProcedure *p, lbValue expr, Type *val_type, lbValu
if (done_) *done_ = done;
}
+lbValue lb_map_cell_index_static(lbProcedure *p, Type *type, lbValue cells_ptr, lbValue index) {
+ i64 size, N;
+ i64 sz = type_size_of(type);
+ map_cell_size_and_len(type, &size, &N);
+
+ index = lb_emit_conv(p, index, t_uint);
+
+ if (size == N*sz) {
+ lbValue elems_ptr = lb_emit_conv(p, cells_ptr, alloc_type_pointer(type));
+ return lb_emit_ptr_offset(p, elems_ptr, index);
+ }
+
+ // TODO(bill): optimize for power-of-two N to use >> and &
+
+ lbValue N_const = lb_const_int(p->module, index.type, N);
+ lbValue cell_index = lb_emit_arith(p, Token_Quo, index, N_const, index.type);
+ lbValue data_index = lb_emit_arith(p, Token_Mod, index, N_const, index.type);
+ lbValue cell = lb_emit_ptr_offset(p, cells_ptr, cell_index);
+ lbValue elems_ptr = lb_emit_conv(p, cell, alloc_type_pointer(type));
+ return lb_emit_ptr_offset(p, elems_ptr, data_index);
+}
+
+void lb_map_kvh_data_static(lbProcedure *p, lbValue map_value, lbValue *ks_, lbValue *vs_, lbValue *hs_) {
+ lbValue capacity = lb_map_cap(p, map_value);
+ lbValue ks = lb_map_data_uintptr(p, map_value);
+ lbValue vs = {};
+ lbValue hs = {};
+ if (ks_) *ks_ = ks;
+ if (vs_) *vs_ = vs;
+ if (hs_) *hs_ = hs;
+}
+
+void lb_build_range_map(lbProcedure *p, lbValue expr, Type *val_type,
+ lbValue *val_, lbValue *key_, lbBlock **loop_, lbBlock **done_) {
+ lbModule *m = p->module;
+
+ Type *type = base_type(type_deref(expr.type));
+ GB_ASSERT(type->kind == Type_Map);
+
+ lbValue idx = {};
+ lbBlock *loop = nullptr;
+ lbBlock *done = nullptr;
+ lbBlock *body = nullptr;
+ lbBlock *hash_check = nullptr;
+
+
+ lbAddr index = lb_add_local_generated(p, t_int, false);
+ lb_addr_store(p, index, lb_const_int(m, t_int, cast(u64)-1));
+
+ loop = lb_create_block(p, "for.index.loop");
+ lb_emit_jump(p, loop);
+ lb_start_block(p, loop);
+
+ lbValue incr = lb_emit_arith(p, Token_Add, lb_addr_load(p, index), lb_const_int(m, t_int, 1), t_int);
+ lb_addr_store(p, index, incr);
+
+ hash_check = lb_create_block(p, "for.index.hash_check");
+ body = lb_create_block(p, "for.index.body");
+ done = lb_create_block(p, "for.index.done");
+
+ lbValue map_value = lb_emit_load(p, expr);
+ lbValue capacity = lb_map_cap(p, map_value);
+ lbValue cond = lb_emit_comp(p, Token_Lt, incr, capacity);
+ lb_emit_if(p, cond, hash_check, done);
+ lb_start_block(p, hash_check);
+
+ idx = lb_addr_load(p, index);
+
+ lbValue ks = lb_map_data_uintptr(p, map_value);
+ lbValue vs = lb_map_cell_index_static(p, type->Map.key, ks, capacity);
+ lbValue hs = lb_map_cell_index_static(p, type->Map.value, vs, capacity);
+
+ lbValue hash = lb_emit_load(p, lb_emit_ptr_offset(p, hs, idx));
+
+ lbValue hash_cond = lb_emit_comp(p, Token_CmpEq, hash, lb_const_int(m, t_uintptr, 0));
+ lb_emit_if(p, hash_cond, body, loop);
+ lb_start_block(p, body);
+
+ // lbValue entries = lb_map_entries_ptr(p, expr);
+ // lbValue elem = lb_emit_struct_ep(p, entries, 0);
+ // elem = lb_emit_load(p, elem);
+ // lbValue entry = lb_emit_ptr_offset(p, elem, idx);
+ lbValue key = lb_const_nil(m, type->Map.key);
+ lbValue val = lb_const_nil(m, type->Map.value);
+
+
+ if (val_) *val_ = val;
+ if (key_) *key_ = key;
+ if (loop_) *loop_ = loop;
+ if (done_) *done_ = done;
+}
+
+
void lb_build_range_string(lbProcedure *p, lbValue expr, Type *val_type,
lbValue *val_, lbValue *idx_, lbBlock **loop_, lbBlock **done_) {
@@ -749,9 +832,7 @@ void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *scope) {
if (is_type_pointer(type_deref(map.type))) {
map = lb_emit_load(p, map);
}
- lbValue entries_ptr = lb_map_entries_ptr(p, map);
- lbValue count_ptr = lb_emit_struct_ep(p, entries_ptr, 1);
- lb_build_range_indexed(p, map, val1_type, count_ptr, &val, &key, &loop, &done);
+ lb_build_range_map(p, map, val1_type, &val, &key, &loop, &done);
break;
}
case Type_Array: {
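The replacement lowering walks the new open-addressed layout directly: one allocation holds the key cells, then the value cells, then the hash array, and core/fmt's loop above treats a slot as occupied when its hash is non-zero; in this patch the key and value loads themselves are still stubbed out with lb_const_nil. A standalone sketch of the loop shape being targeted, with illustrative types and names rather than generated code:

    #include <cstdint>
    #include <cstdio>

    // Illustrative sketch (not generated code) of the iteration the new
    // lb_build_range_map lowering targets, mirroring the Odin-level loop in
    // core/fmt above: keys, values and hashes sit back to back in one
    // allocation, each rounded up to whole cache-line cells, and a slot is
    // occupied when its hash is non-zero.
    struct MapLayoutExample {
        uint64_t  *keys;    // ks: cell-padded key array
        uint64_t  *values;  // vs: cell-padded value array, placed after the key cells
        uintptr_t *hashes;  // hs: hash array, placed after the value cells
        uintptr_t capacity;
    };

    static void range_over_map(const MapLayoutExample &m) {
        for (uintptr_t idx = 0; idx < m.capacity; idx++) {
            if (m.hashes[idx] == 0) {
                continue; // empty bucket
            }
            printf("%llu -> %llu\n",
                   (unsigned long long)m.keys[idx],
                   (unsigned long long)m.values[idx]);
        }
    }

    int main() {
        uint64_t  ks[4] = {10, 0, 30, 0};
        uint64_t  vs[4] = {100, 0, 300, 0};
        uintptr_t hs[4] = {0xabc, 0, 0xdef, 0}; // non-zero hash marks an occupied slot
        MapLayoutExample m = {ks, vs, hs, 4};
        range_over_map(m); // prints "10 -> 100" and "30 -> 300"
        return 0;
    }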
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index a54171b51..dce74126e 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -998,6 +998,7 @@ lbValue lb_emit_struct_ep(lbProcedure *p, lbValue s, i32 index) {
switch (index) {
case 0: result_type = get_struct_field_type(gst, 0); break;
case 1: result_type = get_struct_field_type(gst, 1); break;
+ case 2: result_type = get_struct_field_type(gst, 2); break;
}
} else if (is_type_array(t)) {
return lb_emit_array_epi(p, s, index);
@@ -1134,6 +1135,7 @@ lbValue lb_emit_struct_ev(lbProcedure *p, lbValue s, i32 index) {
switch (index) {
case 0: result_type = get_struct_field_type(gst, 0); break;
case 1: result_type = get_struct_field_type(gst, 1); break;
+ case 2: result_type = get_struct_field_type(gst, 2); break;
}
}
break;
@@ -1439,34 +1441,34 @@ lbValue lb_dynamic_array_allocator(lbProcedure *p, lbValue da) {
return lb_emit_struct_ev(p, da, 3);
}
-lbValue lb_map_entries(lbProcedure *p, lbValue value) {
- Type *t = base_type(value.type);
- GB_ASSERT_MSG(t->kind == Type_Map, "%s", type_to_string(t));
- init_map_internal_types(t);
- i32 index = 1;
- lbValue entries = lb_emit_struct_ev(p, value, index);
- return entries;
+lbValue lb_map_len(lbProcedure *p, lbValue value) {
+ GB_ASSERT(is_type_map(value.type));
+ lbValue len = lb_emit_struct_ev(p, value, 1);
+ return lb_emit_conv(p, len, t_int);
}
-lbValue lb_map_entries_ptr(lbProcedure *p, lbValue value) {
- Type *t = base_type(type_deref(value.type));
- GB_ASSERT_MSG(t->kind == Type_Map, "%s", type_to_string(t));
- init_map_internal_types(t);
- i32 index = 1;
- lbValue entries = lb_emit_struct_ep(p, value, index);
- return entries;
-}
+lbValue lb_map_cap(lbProcedure *p, lbValue value) {
+ GB_ASSERT(is_type_map(value.type));
+ lbValue zero = lb_const_int(p->module, t_uintptr, 0);
+ lbValue one = lb_const_int(p->module, t_uintptr, 1);
-lbValue lb_map_len(lbProcedure *p, lbValue value) {
- lbValue entries = lb_map_entries(p, value);
- return lb_dynamic_array_len(p, entries);
+ lbValue mask = lb_const_int(p->module, t_uintptr, MAP_CACHE_LINE_SIZE-1);
+
+ lbValue data = lb_emit_struct_ev(p, value, 0);
+ lbValue log2_cap = lb_emit_arith(p, Token_And, data, mask, t_uintptr);
+ lbValue cap = lb_emit_arith(p, Token_Shl, one, log2_cap, t_uintptr);
+ lbValue cmp = lb_emit_comp(p, Token_CmpEq, data, zero);
+ return lb_emit_conv(p, lb_emit_select(p, cmp, zero, cap), t_int);
}
-lbValue lb_map_cap(lbProcedure *p, lbValue value) {
- lbValue entries = lb_map_entries(p, value);
- return lb_dynamic_array_cap(p, entries);
+lbValue lb_map_data_uintptr(lbProcedure *p, lbValue value) {
+ GB_ASSERT(is_type_map(value.type));
+ lbValue data = lb_emit_struct_ev(p, value, 0);
+ lbValue mask = lb_const_int(p->module, t_uintptr, MAP_CACHE_LINE_SIZE-1);
+ return lb_emit_arith(p, Token_And, data, mask, t_uintptr);
}
+
lbValue lb_soa_struct_len(lbProcedure *p, lbValue value) {
Type *t = base_type(value.type);
bool is_ptr = false;
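lb_map_cap above decodes the raw map's data word like a tagged pointer: the cell arrays are aligned to the cache line, so the low 6 bits of the data word are free to carry log2(capacity), and a zero word means the map was never allocated. A minimal standalone sketch of that decode, mirroring the select/shift/mask sequence emitted above:

    #include <cassert>
    #include <cstdint>

    // Illustrative decode of the packed Raw_Map data word as lb_map_cap reads
    // it: the cell arrays are aligned to the cache line, so the low 6 bits of
    // the data word are free and carry log2(capacity); a zero data word means
    // the map has never been allocated.
    enum : uintptr_t { CACHE_LINE_MASK = 64 - 1 };

    static uintptr_t map_cap_from_data_word(uintptr_t data) {
        if (data == 0) {
            return 0; // empty / unallocated map
        }
        uintptr_t log2_cap = data & CACHE_LINE_MASK;
        return uintptr_t(1) << log2_cap;
    }

    int main() {
        assert(map_cap_from_data_word(0) == 0);
        // a 64-byte-aligned allocation at 0x1000 with log2(cap) = 3 packed in:
        assert(map_cap_from_data_word(0x1000 | 3) == 8);
        return 0;
    }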