author    gingerBill <bill@gingerbill.org>  2023-01-14 13:23:17 +0000
committer gingerBill <bill@gingerbill.org>  2023-01-14 13:23:17 +0000
commit    518f30e52307e12fe184c34f0da8f197b976ced5 (patch)
tree      3f5db726b8ece6f07b7c1147c56f954615048765
parent    868aa4c14ab6c63b9b797f4a8178c73b69897711 (diff)
Bring `PtrMap` inline with `StringMap`
-rw-r--r--  src/check_type.cpp   |  12
-rw-r--r--  src/checker.cpp      |   8
-rw-r--r--  src/docs_writer.cpp  |   4
-rw-r--r--  src/llvm_backend.cpp |  10
-rw-r--r--  src/ptr_map.cpp      | 175
-rw-r--r--  src/queue.cpp        |  25
-rw-r--r--  src/string_map.cpp   |   9
7 files changed, 126 insertions(+), 117 deletions(-)
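In summary: PtrMap's Slice<MapIndex> hash table and Array<PtrMapEntry> entry list become raw pointers with explicit counts, the cache-line constants move from string_map.cpp into ptr_map.cpp so both maps share them, and the MPSC/MPMC queues stop carrying a gbAllocator field. A before/after sketch of the struct, condensed from the ptr_map.cpp hunks below:

    // Before: gb container wrappers owned the counts (and an allocator).
    template <typename K, typename V>
    struct PtrMap {
        Slice<MapIndex>          hashes;
        Array<PtrMapEntry<K, V>> entries;
    };

    // After: raw arrays plus explicit counts, matching StringMap.
    template <typename K, typename V>
    struct PtrMap {
        MapIndex *          hashes;
        usize               hashes_count;
        PtrMapEntry<K, V> * entries;
        u32                 count;
        u32                 entries_capacity;
    };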
diff --git a/src/check_type.cpp b/src/check_type.cpp
index 9ce6585f2..0bd9af15f 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -2201,18 +2201,18 @@ gb_internal Type *make_optional_ok_type(Type *value, bool typed) {
// IMPORTANT NOTE(bill): This must match the definition in dynamic_map_internal.odin
enum : i64 {
- MAP_CACHE_LINE_LOG2 = 6,
- MAP_CACHE_LINE_SIZE = 1 << MAP_CACHE_LINE_LOG2
+ MAP_CELL_CACHE_LINE_LOG2 = 6,
+ MAP_CELL_CACHE_LINE_SIZE = 1 << MAP_CELL_CACHE_LINE_LOG2,
};
-GB_STATIC_ASSERT(MAP_CACHE_LINE_SIZE >= 64);
+GB_STATIC_ASSERT(MAP_CELL_CACHE_LINE_SIZE >= 64);
gb_internal void map_cell_size_and_len(Type *type, i64 *size_, i64 *len_) {
i64 elem_sz = type_size_of(type);
i64 len = 1;
- if (0 < elem_sz && elem_sz < MAP_CACHE_LINE_SIZE) {
- len = MAP_CACHE_LINE_SIZE / elem_sz;
+ if (0 < elem_sz && elem_sz < MAP_CELL_CACHE_LINE_SIZE) {
+ len = MAP_CELL_CACHE_LINE_SIZE / elem_sz;
}
- i64 size = align_formula(elem_sz * len, MAP_CACHE_LINE_SIZE);
+ i64 size = align_formula(elem_sz * len, MAP_CELL_CACHE_LINE_SIZE);
if (size_) *size_ = size;
if (len_) *len_ = len;
}
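Worked numbers for map_cell_size_and_len, as a standalone sketch of the arithmetic. It assumes align_formula rounds its first argument up to the next multiple of the second; illustration only, not compiler code:

    #include <cassert>
    #include <cstdint>

    // Assumed behaviour of align_formula: round up to the next multiple.
    static int64_t align_formula(int64_t size, int64_t align) {
        int64_t result = size + align - 1;
        return result - result % align;
    }

    int main() {
        const int64_t LINE = 64; // MAP_CELL_CACHE_LINE_SIZE
        // 24-byte elements: 64/24 == 2 per cell, padded to one cache line.
        assert(align_formula(24 * 2, LINE) == 64);
        // 96-byte elements: len stays 1, the cell spans two cache lines.
        assert(align_formula(96 * 1, LINE) == 128);
        return 0;
    }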
diff --git a/src/checker.cpp b/src/checker.cpp
index 39a132060..01c232bff 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -2030,7 +2030,7 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) {
GB_ASSERT(ti_index >= 0);
// IMPORTANT NOTE(bill): this must be copied as `map_set` takes a const ref
// and effectively assigns the `+1` of the value
- isize const count = set->entries.count;
+ isize const count = set->count;
if (map_set_if_not_previously_exists(set, ti_index, count)) {
// Type already exists;
return;
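The snapshot matters because map_set_if_not_previously_exists takes the value by const reference: map__add_entry grows the entry array before the value is written, so a reference aliasing the count would observe the increment. A minimal, self-contained illustration of that hazard (Demo is hypothetical, not compiler code):

    #include <cstdio>

    // A container that, like map__add_entry, bumps its own count before
    // storing the value it was handed by const reference.
    struct Demo {
        long count = 5;
        void set(long const &v) {
            count += 1;                      // the grow happens first...
            std::printf("stored %ld\n", v);  // ...so an aliasing ref sees +1
        }
    };

    int main() {
        Demo d;
        d.set(d.count);           // prints "stored 6": the ref aliased count
        long snapshot = d.count;  // snapshot taken first, as in the diff
        d.set(snapshot);          // prints "stored 6": the copy is stable
        return 0;
    }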
@@ -2536,7 +2536,7 @@ gb_internal Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInf
// This means that the entity graph node set will have to be thread safe
TIME_SECTION("generate_entity_dependency_graph: Calculate edges for graph M - Part 2");
- auto G = array_make<EntityGraphNode *>(allocator, 0, M.entries.count);
+ auto G = array_make<EntityGraphNode *>(allocator, 0, M.count);
for (auto const &m_entry : M) {
auto *e = m_entry.key;
@@ -4227,7 +4227,7 @@ gb_internal Array<ImportGraphNode *> generate_import_dependency_graph(Checker *c
}
Array<ImportGraphNode *> G = {};
- array_init(&G, heap_allocator(), 0, M.entries.count);
+ array_init(&G, heap_allocator(), 0, M.count);
isize i = 0;
for (auto const &entry : M) {
@@ -4655,7 +4655,7 @@ gb_internal void check_create_file_scopes(Checker *c) {
total_pkg_decl_count += f->total_file_decl_count;
}
- mpmc_init(&pkg->exported_entity_queue, heap_allocator(), total_pkg_decl_count);
+ mpmc_init(&pkg->exported_entity_queue, total_pkg_decl_count);
}
}
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index ea0946153..cb7fa0e1e 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -913,7 +913,7 @@ gb_internal OdinDocEntityIndex odin_doc_add_entity(OdinDocWriter *w, Entity *e)
gb_internal void odin_doc_update_entities(OdinDocWriter *w) {
{
// NOTE(bill): Double pass, just in case entities are created on odin_doc_type
- auto entities = array_make<Entity *>(heap_allocator(), 0, w->entity_cache.entries.count);
+ auto entities = array_make<Entity *>(heap_allocator(), 0, w->entity_cache.count);
defer (array_free(&entities));
for (auto const &entry : w->entity_cache) {
@@ -973,7 +973,7 @@ gb_internal OdinDocArray<OdinDocScopeEntry> odin_doc_add_pkg_entries(OdinDocWrit
return {};
}
- auto entries = array_make<OdinDocScopeEntry>(heap_allocator(), 0, w->entity_cache.entries.count);
+ auto entries = array_make<OdinDocScopeEntry>(heap_allocator(), 0, w->entity_cache.count);
defer (array_free(&entries));
for (auto const &element : pkg->scope->elements) {
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 6cd224324..0df32329b 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -743,7 +743,7 @@ gb_internal lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &pr
// parent$count
isize name_len = prefix_name.len + 1 + 8 + 1;
char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
- i32 name_id = cast(i32)m->gen->anonymous_proc_lits.entries.count;
+ i32 name_id = cast(i32)m->gen->anonymous_proc_lits.count;
name_len = gb_snprintf(name_text, name_len, "%.*s$anon-%d", LIT(prefix_name), name_id);
String name = make_string((u8 *)name_text, name_len-1);
@@ -1625,7 +1625,7 @@ gb_internal bool lb_llvm_object_generation(lbGenerator *gen, bool do_threading)
String filepath_ll = lb_filepath_ll_for_module(m);
String filepath_obj = lb_filepath_obj_for_module(m);
- gb_printf_err("%.*s\n", LIT(filepath_obj));
+ // gb_printf_err("%.*s\n", LIT(filepath_obj));
array_add(&gen->output_object_paths, filepath_obj);
array_add(&gen->output_temp_paths, filepath_ll);
@@ -1977,7 +1977,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
// NOTE(bill): Target Machine Creation
// NOTE(bill, 2021-05-04): Target machines must be unique to each module because they are not thread safe
- auto target_machines = array_make<LLVMTargetMachineRef>(permanent_allocator(), 0, gen->modules.entries.count);
+ auto target_machines = array_make<LLVMTargetMachineRef>(permanent_allocator(), 0, gen->modules.count);
// NOTE(dweiler): Dynamic libraries require position-independent code.
LLVMRelocMode reloc_mode = LLVMRelocDefault;
@@ -2073,7 +2073,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
lbModule *m = default_module;
{ // Add type info data
- isize max_type_info_count = info->minimum_dependency_type_info_set.entries.count+1;
+ isize max_type_info_count = info->minimum_dependency_type_info_set.count+1;
// gb_printf_err("max_type_info_count: %td\n", max_type_info_count);
Type *t = alloc_type_array(t_type_info, max_type_info_count);
LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), LB_TYPE_INFO_DATA_NAME);
@@ -2330,7 +2330,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
}
}
- if (gen->modules.entries.count <= 1) {
+ if (gen->modules.count <= 1) {
do_threading = false;
}
diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp
index 598904906..0a5c1e492 100644
--- a/src/ptr_map.cpp
+++ b/src/ptr_map.cpp
@@ -2,6 +2,13 @@
typedef u32 MapIndex;
+enum {
+ MAP_CACHE_LINE_SIZE_POW = 6,
+ MAP_CACHE_LINE_SIZE = 1<<MAP_CACHE_LINE_SIZE_POW,
+ MAP_CACHE_LINE_MASK = MAP_CACHE_LINE_SIZE-1,
+};
+
+
struct MapFindResult {
MapIndex hash_index;
MapIndex entry_prev;
@@ -21,8 +28,11 @@ struct PtrMapEntry {
template <typename K, typename V>
struct PtrMap {
- Slice<MapIndex> hashes;
- Array<PtrMapEntry<K, V> > entries;
+ MapIndex * hashes;
+ usize hashes_count;
+ PtrMapEntry<K, V> *entries;
+ u32 count;
+ u32 entries_capacity;
};
@@ -78,42 +88,48 @@ gb_internal gbAllocator map_allocator(void) {
template <typename K, typename V>
gb_internal gb_inline void map_init(PtrMap<K, V> *h, isize capacity) {
capacity = next_pow2_isize(capacity);
- slice_init(&h->hashes, map_allocator(), capacity);
- array_init(&h->entries, map_allocator(), 0, capacity);
- for (isize i = 0; i < capacity; i++) {
- h->hashes.data[i] = MAP_SENTINEL;
- }
+ map_reserve(h, capacity);
}
template <typename K, typename V>
gb_internal gb_inline void map_destroy(PtrMap<K, V> *h) {
- if (h->entries.allocator.proc == nullptr) {
- h->entries.allocator = map_allocator();
- }
- slice_free(&h->hashes, h->entries.allocator);
- array_free(&h->entries);
+ gbAllocator a = map_allocator();
+ gb_free(a, h->hashes);
+ gb_free(a, h->entries);
}
template <typename K, typename V>
+gb_internal void map__resize_hashes(PtrMap<K, V> *h, usize count) {
+ h->hashes_count = cast(u32)resize_array_raw(&h->hashes, string_map_allocator(), h->hashes_count, count, MAP_CACHE_LINE_SIZE);
+}
+
+template <typename K, typename V>
+gb_internal void map__reserve_entries(PtrMap<K, V> *h, usize capacity) {
+ h->entries_capacity = cast(u32)resize_array_raw(&h->entries, string_map_allocator(), h->entries_capacity, capacity, MAP_CACHE_LINE_SIZE);
+}
+
+
+template <typename K, typename V>
gb_internal MapIndex map__add_entry(PtrMap<K, V> *h, K key) {
PtrMapEntry<K, V> e = {};
e.key = key;
e.next = MAP_SENTINEL;
- array_add(&h->entries, e);
- return cast(MapIndex)(h->entries.count-1);
+ map__reserve_entries(h, h->count+1);
+ h->entries[h->count++] = e;
+ return cast(MapIndex)(h->count-1);
}
template <typename K, typename V>
gb_internal MapFindResult map__find(PtrMap<K, V> *h, K key) {
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
- if (h->hashes.count == 0) {
+ if (h->hashes_count == 0) {
return fr;
}
u32 hash = ptr_map_hash_key(key);
- fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
- fr.entry_index = h->hashes.data[fr.hash_index];
+ fr.hash_index = cast(MapIndex)(hash & (h->hashes_count-1));
+ fr.entry_index = h->hashes[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
- auto *entry = &h->entries.data[fr.entry_index];
+ auto *entry = &h->entries[fr.entry_index];
if (entry->key == key) {
return fr;
}
@@ -126,41 +142,41 @@ gb_internal MapFindResult map__find(PtrMap<K, V> *h, K key) {
template <typename K, typename V>
gb_internal MapFindResult map__find_from_entry(PtrMap<K, V> *h, PtrMapEntry<K, V> *e) {
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
- if (h->hashes.count == 0) {
+ if (h->hashes_count == 0) {
return fr;
}
u32 hash = ptr_map_hash_key(e->key);
- fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
- fr.entry_index = h->hashes.data[fr.hash_index];
+ fr.hash_index = cast(MapIndex)(hash & (h->hashes_count-1));
+ fr.entry_index = h->hashes[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
- if (&h->entries.data[fr.entry_index] == e) {
+ if (&h->entries[fr.entry_index] == e) {
return fr;
}
fr.entry_prev = fr.entry_index;
- fr.entry_index = h->entries.data[fr.entry_index].next;
+ fr.entry_index = h->entries[fr.entry_index].next;
}
return fr;
}
template <typename K, typename V>
gb_internal b32 map__full(PtrMap<K, V> *h) {
- return 0.75f * h->hashes.count <= h->entries.count;
+ return 0.75f * h->hashes_count <= h->count;
}
template <typename K, typename V>
gb_internal gb_inline void map_grow(PtrMap<K, V> *h) {
- isize new_count = gb_max(h->hashes.count<<1, 16);
+ isize new_count = gb_max(h->hashes_count<<1, 16);
map_rehash(h, new_count);
}
template <typename K, typename V>
gb_internal void map_reset_entries(PtrMap<K, V> *h) {
- for (isize i = 0; i < h->hashes.count; i++) {
- h->hashes.data[i] = MAP_SENTINEL;
+ for (usize i = 0; i < h->hashes_count; i++) {
+ h->hashes[i] = MAP_SENTINEL;
}
- for (isize i = 0; i < h->entries.count; i++) {
+ for (usize i = 0; i < h->count; i++) {
MapFindResult fr;
- PtrMapEntry<K, V> *e = &h->entries.data[i];
+ PtrMapEntry<K, V> *e = &h->entries[i];
e->next = MAP_SENTINEL;
fr = map__find_from_entry(h, e);
if (fr.entry_prev == MAP_SENTINEL) {
@@ -173,14 +189,11 @@ gb_internal void map_reset_entries(PtrMap<K, V> *h) {
template <typename K, typename V>
gb_internal void map_reserve(PtrMap<K, V> *h, isize cap) {
- if (h->entries.allocator.proc == nullptr) {
- h->entries.allocator = map_allocator();
- }
- array_reserve(&h->entries, cap);
- if (h->entries.count*2 < h->hashes.count) {
+ if (h->count*2 < h->hashes_count) {
return;
}
- slice_resize(&h->hashes, h->entries.allocator, cap*2);
+ map__reserve_entries(h, cap);
+ map__resize_hashes(h, cap*2);
map_reset_entries(h);
}
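Worked numbers for the growth policy above: map__full fires once the entry count reaches three quarters of the hash-slot count, and map_reserve keeps roughly two hash slots per entry. A standalone check of the predicate, for illustration only:

    #include <cassert>

    // Same predicate as map__full, lifted out for illustration.
    static bool map_full(unsigned hashes_count, unsigned count) {
        return 0.75f * hashes_count <= count;
    }

    int main() {
        assert(!map_full(16, 11));
        assert(map_full(16, 12)); // a 16-slot table is "full" at 12 entries
        assert(map_full(32, 24)); // doubling the slots doubles the threshold
        return 0;
    }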
@@ -195,12 +208,12 @@ gb_internal V *map_get(PtrMap<K, V> *h, K key) {
MapIndex hash_index = MAP_SENTINEL;
MapIndex entry_prev = MAP_SENTINEL;
MapIndex entry_index = MAP_SENTINEL;
- if (h->hashes.count != 0) {
+ if (h->hashes_count != 0) {
u32 hash = ptr_map_hash_key(key);
- hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
- entry_index = h->hashes.data[hash_index];
+ hash_index = cast(MapIndex)(hash & (h->hashes_count-1));
+ entry_index = h->hashes[hash_index];
while (entry_index != MAP_SENTINEL) {
- auto *entry = &h->entries.data[entry_index];
+ auto *entry = &h->entries[entry_index];
if (entry->key == key) {
return &entry->value;
}
@@ -213,12 +226,12 @@ gb_internal V *map_get(PtrMap<K, V> *h, K key) {
template <typename K, typename V>
gb_internal V *map_try_get(PtrMap<K, V> *h, K key, MapFindResult *fr_) {
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
- if (h->hashes.count != 0) {
+ if (h->hashes_count != 0) {
u32 hash = ptr_map_hash_key(key);
- fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
- fr.entry_index = h->hashes.data[fr.hash_index];
+ fr.hash_index = cast(MapIndex)(hash & (h->hashes_count-1));
+ fr.entry_index = h->hashes[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
- auto *entry = &h->entries.data[fr.entry_index];
+ auto *entry = &h->entries[fr.entry_index];
if (entry->key == key) {
return &entry->value;
}
@@ -226,7 +239,7 @@ gb_internal V *map_try_get(PtrMap<K, V> *h, K key, MapFindResult *fr_) {
fr.entry_index = entry->next;
}
}
- if (h->hashes.count == 0 || map__full(h)) {
+ if (h->hashes_count == 0 || map__full(h)) {
map_grow(h);
}
if (fr_) *fr_ = fr;
@@ -238,11 +251,11 @@ template <typename K, typename V>
gb_internal void map_set_internal_from_try_get(PtrMap<K, V> *h, K key, V const &value, MapFindResult const &fr) {
MapIndex index = map__add_entry(h, key);
if (fr.entry_prev != MAP_SENTINEL) {
- h->entries.data[fr.entry_prev].next = index;
+ h->entries[fr.entry_prev].next = index;
} else {
- h->hashes.data[fr.hash_index] = index;
+ h->hashes[fr.hash_index] = index;
}
- h->entries.data[index].value = value;
+ h->entries[index].value = value;
}
template <typename K, typename V>
@@ -256,7 +269,7 @@ template <typename K, typename V>
gb_internal void map_set(PtrMap<K, V> *h, K key, V const &value) {
MapIndex index;
MapFindResult fr;
- if (h->hashes.count == 0) {
+ if (h->hashes_count == 0) {
map_grow(h);
}
fr = map__find(h, key);
@@ -265,12 +278,12 @@ gb_internal void map_set(PtrMap<K, V> *h, K key, V const &value) {
} else {
index = map__add_entry(h, key);
if (fr.entry_prev != MAP_SENTINEL) {
- h->entries.data[fr.entry_prev].next = index;
+ h->entries[fr.entry_prev].next = index;
} else {
- h->hashes.data[fr.hash_index] = index;
+ h->hashes[fr.hash_index] = index;
}
}
- h->entries.data[index].value = value;
+ h->entries[index].value = value;
if (map__full(h)) {
map_grow(h);
@@ -282,7 +295,7 @@ template <typename K, typename V>
gb_internal bool map_set_if_not_previously_exists(PtrMap<K, V> *h, K key, V const &value) {
MapIndex index;
MapFindResult fr;
- if (h->hashes.count == 0) {
+ if (h->hashes_count == 0) {
map_grow(h);
}
fr = map__find(h, key);
@@ -291,12 +304,12 @@ gb_internal bool map_set_if_not_previously_exists(PtrMap<K, V> *h, K key, V cons
} else {
index = map__add_entry(h, key);
if (fr.entry_prev != MAP_SENTINEL) {
- h->entries.data[fr.entry_prev].next = index;
+ h->entries[fr.entry_prev].next = index;
} else {
- h->hashes.data[fr.hash_index] = index;
+ h->hashes[fr.hash_index] = index;
}
}
- h->entries.data[index].value = value;
+ h->entries[index].value = value;
if (map__full(h)) {
map_grow(h);
@@ -309,22 +322,22 @@ template <typename K, typename V>
gb_internal void map__erase(PtrMap<K, V> *h, MapFindResult const &fr) {
MapFindResult last;
if (fr.entry_prev == MAP_SENTINEL) {
- h->hashes.data[fr.hash_index] = h->entries.data[fr.entry_index].next;
+ h->hashes[fr.hash_index] = h->entries[fr.entry_index].next;
} else {
- h->entries.data[fr.entry_prev].next = h->entries.data[fr.entry_index].next;
+ h->entries[fr.entry_prev].next = h->entries[fr.entry_index].next;
}
- if (fr.entry_index == h->entries.count-1) {
- array_pop(&h->entries);
+ if (fr.entry_index == h->count-1) {
+ h->count--;
return;
}
- h->entries.data[fr.entry_index] = h->entries.data[h->entries.count-1];
- array_pop(&h->entries);
+ h->entries[fr.entry_index] = h->entries[h->count-1];
+ h->count--;
- last = map__find(h, h->entries.data[fr.entry_index].key);
+ last = map__find(h, h->entries[fr.entry_index].key);
if (last.entry_prev != MAP_SENTINEL) {
- h->entries.data[last.entry_prev].next = fr.entry_index;
+ h->entries[last.entry_prev].next = fr.entry_index;
} else {
- h->hashes.data[last.hash_index] = fr.entry_index;
+ h->hashes[last.hash_index] = fr.entry_index;
}
}
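map__erase uses the swap-remove idiom: unlink the entry from its hash chain, move the last entry into the vacated slot, then re-find the moved entry and repoint whichever link still references its old index. The core move, as a hypothetical standalone helper (not compiler code):

    // Fill the erased slot with the last entry, then shrink. The caller must
    // still patch the chain link that points at the old last index, which
    // map__erase does above via map__find on the moved entry's key.
    template <typename T>
    static void swap_remove(T *entries, u32 &count, u32 i) {
        if (i != count-1) {
            entries[i] = entries[count-1];
        }
        count -= 1;
    }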
@@ -338,9 +351,9 @@ gb_internal void map_remove(PtrMap<K, V> *h, K key) {
template <typename K, typename V>
gb_internal gb_inline void map_clear(PtrMap<K, V> *h) {
- array_clear(&h->entries);
- for (isize i = 0; i < h->hashes.count; i++) {
- h->hashes.data[i] = MAP_SENTINEL;
+ h->count = 0;
+ for (usize i = 0; i < h->hashes_count; i++) {
+ h->hashes[i] = MAP_SENTINEL;
}
}
@@ -352,17 +365,17 @@ gb_internal PtrMapEntry<K, V> *multi_map_find_first(PtrMap<K, V> *h, K key) {
if (i == MAP_SENTINEL) {
return nullptr;
}
- return &h->entries.data[i];
+ return &h->entries[i];
}
template <typename K, typename V>
gb_internal PtrMapEntry<K, V> *multi_map_find_next(PtrMap<K, V> *h, PtrMapEntry<K, V> *e) {
MapIndex i = e->next;
while (i != MAP_SENTINEL) {
- if (h->entries.data[i].key == e->key) {
- return &h->entries.data[i];
+ if (h->entries[i].key == e->key) {
+ return &h->entries[i];
}
- i = h->entries.data[i].next;
+ i = h->entries[i].next;
}
return nullptr;
}
@@ -380,7 +393,7 @@ gb_internal isize multi_map_count(PtrMap<K, V> *h, K key) {
template <typename K, typename V>
gb_internal void multi_map_get_all(PtrMap<K, V> *h, K key, V *items) {
- isize i = 0;
+ usize i = 0;
PtrMapEntry<K, V> *e = multi_map_find_first(h, key);
while (e != nullptr) {
items[i++] = e->value;
@@ -392,19 +405,19 @@ template <typename K, typename V>
gb_internal void multi_map_insert(PtrMap<K, V> *h, K key, V const &value) {
MapFindResult fr;
MapIndex i;
- if (h->hashes.count == 0) {
+ if (h->hashes_count == 0) {
map_grow(h);
}
// Make
fr = map__find(h, key);
i = map__add_entry(h, key);
if (fr.entry_prev == MAP_SENTINEL) {
- h->hashes.data[fr.hash_index] = i;
+ h->hashes[fr.hash_index] = i;
} else {
- h->entries.data[fr.entry_prev].next = i;
+ h->entries[fr.entry_prev].next = i;
}
- h->entries.data[i].next = fr.entry_index;
- h->entries.data[i].value = value;
+ h->entries[i].next = fr.entry_index;
+ h->entries[i].value = value;
// Grow if needed
if (map__full(h)) {
map_grow(h);
@@ -430,20 +443,20 @@ gb_internal void multi_map_remove_all(PtrMap<K, V> *h, K key) {
template <typename K, typename V>
gb_internal PtrMapEntry<K, V> *begin(PtrMap<K, V> &m) {
- return m.entries.data;
+ return m.entries;
}
template <typename K, typename V>
gb_internal PtrMapEntry<K, V> const *begin(PtrMap<K, V> const &m) {
- return m.entries.data;
+ return m.entries;
}
template <typename K, typename V>
gb_internal PtrMapEntry<K, V> *end(PtrMap<K, V> &m) {
- return m.entries.data + m.entries.count;
+ return m.entries + m.count;
}
template <typename K, typename V>
gb_internal PtrMapEntry<K, V> const *end(PtrMap<K, V> const &m) {
- return m.entries.data + m.entries.count;
+ return m.entries + m.count;
}
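Taken together, the external PtrMap interface is unchanged; only the backing storage moved. A usage sketch against the declarations above — the Entity* key, e, and use() are illustrative, and the snippet assumes the surrounding codebase headers for cast, isize, and the map functions:

    // Illustrative only: exercises the API exactly as declared in this file.
    PtrMap<Entity *, isize> m = {};
    map_init(&m, 16);                  // capacity rounded to a power of two
    map_set(&m, e, cast(isize)1);      // insert or overwrite
    if (isize *v = map_get(&m, e)) {   // pointer into entries, or nullptr
        *v += 1;
    }
    for (auto const &entry : m) {      // begin()/end() walk entries in order
        use(entry.key, entry.value);   // hypothetical consumer
    }
    map_remove(&m, e);                 // swap-remove: entry order may change
    map_destroy(&m);                   // frees both arrays via map_allocator()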
diff --git a/src/queue.cpp b/src/queue.cpp
index 05076cfbd..2ad9cb29f 100644
--- a/src/queue.cpp
+++ b/src/queue.cpp
@@ -10,22 +10,19 @@ struct MPSCNode {
//
template <typename T>
struct MPSCQueue {
- gbAllocator allocator;
-
MPSCNode<T> sentinel;
std::atomic<MPSCNode<T> *> head;
std::atomic<MPSCNode<T> *> tail;
std::atomic<isize> count;
};
-template <typename T> gb_internal void mpsc_init (MPSCQueue<T> *q, gbAllocator const &allocator);
+template <typename T> gb_internal void mpsc_init (MPSCQueue<T> *q);
template <typename T> gb_internal void mpsc_destroy(MPSCQueue<T> *q);
template <typename T> gb_internal isize mpsc_enqueue(MPSCQueue<T> *q, T const &value);
template <typename T> gb_internal bool mpsc_dequeue(MPSCQueue<T> *q, T *value_);
template <typename T>
gb_internal void mpsc_init(MPSCQueue<T> *q, gbAllocator const &allocator) {
- q->allocator = allocator;
q->sentinel.next.store(nullptr, std::memory_order_relaxed);
q->head.store(&q->sentinel, std::memory_order_relaxed);
q->tail.store(&q->sentinel, std::memory_order_relaxed);
@@ -39,7 +36,7 @@ gb_internal void mpsc_destroy(MPSCQueue<T> *q) {
template <typename T>
gb_internal MPSCNode<T> *mpsc_alloc_node(MPSCQueue<T> *q, T const &value) {
- auto new_node = gb_alloc_item(q->allocator, MPSCNode<T>);
+ auto new_node = gb_alloc_item(heap_allocator(), MPSCNode<T>);
new_node->value = value;
return new_node;
}
@@ -95,7 +92,6 @@ struct MPMCQueue {
T * nodes;
MPMCQueueAtomicIdx *indices;
- gbAllocator allocator;
BlockingMutex mutex;
MPMCQueueAtomicIdx count;
i32 mask; // capacity-1, because capacity must be a power of 2
@@ -108,6 +104,9 @@ struct MPMCQueue {
};
+gb_internal gbAllocator mpmc_allocator(void) {
+ return heap_allocator();
+}
gb_internal void mpmc_internal_init_indices(MPMCQueueAtomicIdx *indices, i32 offset, i32 size) {
GB_ASSERT(offset % 8 == 0);
@@ -129,7 +128,7 @@ gb_internal void mpmc_internal_init_indices(MPMCQueueAtomicIdx *indices, i32 off
template <typename T>
-gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
+gb_internal void mpmc_init(MPMCQueue<T> *q, isize size_i) {
if (size_i < 8) {
size_i = 8;
}
@@ -139,7 +138,7 @@ gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
GB_ASSERT(gb_is_power_of_two(size));
q->mask = size-1;
- q->allocator = a;
+ gbAllocator a = mpmc_allocator();
q->nodes = gb_alloc_array(a, T, size);
q->indices = gb_alloc_array(a, MPMCQueueAtomicIdx, size);
@@ -150,23 +149,25 @@ gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
template <typename T>
gb_internal void mpmc_destroy(MPMCQueue<T> *q) {
- gb_free(q->allocator, q->nodes);
- gb_free(q->allocator, q->indices);
+ gbAllocator a = mpmc_allocator();
+ gb_free(a, q->nodes);
+ gb_free(a, q->indices);
}
template <typename T>
gb_internal bool mpmc_internal_grow(MPMCQueue<T> *q) {
+ gbAllocator a = mpmc_allocator();
mutex_lock(&q->mutex);
i32 old_size = q->mask+1;
i32 new_size = old_size*2;
- resize_array_raw(&q->nodes, q->allocator, old_size, new_size);
+ resize_array_raw(&q->nodes, a, old_size, new_size);
if (q->nodes == nullptr) {
GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
mutex_unlock(&q->mutex);
return false;
}
- resize_array_raw(&q->indices, q->allocator, old_size, new_size);
+ resize_array_raw(&q->indices, a, old_size, new_size);
if (q->indices == nullptr) {
GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
mutex_unlock(&q->mutex);
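With the stored gbAllocator gone, both queues allocate internally — heap_allocator() for MPSC nodes, mpmc_allocator() for the MPMC arrays — which is why call sites such as check_create_file_scopes drop the allocator argument. A minimal usage sketch under the new signatures (WorkItem, item, and the capacity are illustrative):

    // Illustrative only: neither init takes an allocator any more.
    MPMCQueue<WorkItem> q = {};
    mpmc_init(&q, 1024);     // size is clamped to at least 8, power of two
    // ... enqueue/dequeue from any thread ...
    mpmc_destroy(&q);        // frees nodes and indices via mpmc_allocator()

    MPSCQueue<WorkItem> s = {};
    mpsc_init(&s);           // per the new declaration above
    mpsc_enqueue(&s, item);  // nodes now come from heap_allocator()
    mpsc_destroy(&s);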
diff --git a/src/string_map.cpp b/src/string_map.cpp
index 067adef28..bf1bbf6ca 100644
--- a/src/string_map.cpp
+++ b/src/string_map.cpp
@@ -1,10 +1,5 @@
GB_STATIC_ASSERT(sizeof(MapIndex) == sizeof(u32));
-enum {
- STRING_MAP_CACHE_LINE_SIZE_POW = 6,
- STRING_MAP_CACHE_LINE_SIZE = 1<<STRING_MAP_CACHE_LINE_SIZE_POW,
- STRING_MAP_CACHE_LINE_MASK = STRING_MAP_CACHE_LINE_SIZE-1,
-};
struct StringHashKey {
u32 hash;
@@ -85,13 +80,13 @@ gb_internal gb_inline void string_map_destroy(StringMap<T> *h) {
template <typename T>
gb_internal void string_map__resize_hashes(StringMap<T> *h, usize count) {
- h->hashes_count = cast(u32)resize_array_raw(&h->hashes, string_map_allocator(), h->hashes_count, count, STRING_MAP_CACHE_LINE_SIZE);
+ h->hashes_count = cast(u32)resize_array_raw(&h->hashes, string_map_allocator(), h->hashes_count, count, MAP_CACHE_LINE_SIZE);
}
template <typename T>
gb_internal void string_map__reserve_entries(StringMap<T> *h, usize capacity) {
- h->entries_capacity = cast(u32)resize_array_raw(&h->entries, string_map_allocator(), h->entries_capacity, capacity, STRING_MAP_CACHE_LINE_SIZE);
+ h->entries_capacity = cast(u32)resize_array_raw(&h->entries, string_map_allocator(), h->entries_capacity, capacity, MAP_CACHE_LINE_SIZE);
}