author     gingerBill <bill@gingerbill.org>  2025-02-20 14:10:45 +0000
committer  gingerBill <bill@gingerbill.org>  2025-02-20 14:10:45 +0000
commit     5489a889832ac05e5edca7355b4601c1a82c2d27 (patch)
tree       d78b2b7852ff003d5f7c40c8a91c5332776f695c /src/checker.cpp
parent     c25ac939d4bd86d51c383e96232da1d241c6a504 (diff)
Change `typeid` definition to be based around the canonical type hash
`typeid` used to be a fancy index with extra metadata stored on it. Now it is a direct hash of the type. This is safe to do in practice since any possible collisions are checked at compile time, AND the chances of a collision are around 1 in 600K, i.e. well under 1% (see the Birthday Paradox). Therefore accessing a `^Type_Info` is now a hash table lookup with linear probing. The table is twice as large as necessary in order to prevent excessive probing due to an overly dense hash table.
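(Illustration, not part of the commit.) The following is a minimal, self-contained C++ sketch of the lookup scheme the message describes: resolving a `typeid` hash to its slot in a linear-probing table, assuming the same conventions as the new build loop in this diff (a hash of 0 marks an empty slot, slot 0 is reserved, and the table has roughly twice as many slots as entries). The names `TypeInfoEntry` and `lookup_type_info` are hypothetical stand-ins, not identifiers from the compiler or runtime.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct TypeInfoEntry {
        uint64_t hash;  // 0 marks an empty slot, matching the build loop below
        int      value; // stand-in for the data a real slot would carry
    };

    // Hypothetical helper: find the slot whose hash equals `id`, or nullptr if absent.
    static TypeInfoEntry const *lookup_type_info(std::vector<TypeInfoEntry> const &table, uint64_t id) {
        size_t len = table.size();
        size_t index = (size_t)(id % (uint64_t)len);
        for (size_t probes = 0; probes < len; probes++) {
            if (index != 0) {
                if (table[index].hash == id) return &table[index]; // hit
                if (table[index].hash == 0)  return nullptr;       // empty slot reached: not present
            }
            index = (index + 1) % len; // linear probe; slot 0 is skipped
        }
        return nullptr;
    }

    int main() {
        // Table sized count*2 + 1 (here 3 entries -> 7 slots), mirroring the sizing in the diff.
        std::vector<TypeInfoEntry> table(3*2 + 1);
        uint64_t hashes[3] = {0x9e3779b97f4a7c15ull, 0xc2b2ae3d27d4eb4full, 0x165667b19e3779f9ull};
        for (int i = 0; i < 3; i++) {
            size_t index = (size_t)(hashes[i] % (uint64_t)table.size());
            while (index == 0 || table[index].hash != 0) {
                index = (index + 1) % table.size(); // same probe-loop shape as the new code in this commit
            }
            table[index] = TypeInfoEntry{hashes[i], i};
        }
        TypeInfoEntry const *e = lookup_type_info(table, hashes[1]);
        std::printf("slot value: %d\n", e ? e->value : -1);
        return 0;
    }

Keeping the table at most about half full is what keeps the probe chains short: with load factor α ≤ 0.5, the standard linear-probing analysis gives roughly ½(1 + 1/(1 − α)) ≈ 1.5 expected probes per successful lookup.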
Diffstat (limited to 'src/checker.cpp')
-rw-r--r--  src/checker.cpp | 45
1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/src/checker.cpp b/src/checker.cpp
index 678126094..056eef3b2 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -6740,30 +6740,43 @@ gb_internal void check_parsed_files(Checker *c) {
 	}
 	array_sort(c->info.type_info_types, type_info_pair_cmp);
+	array_init(&c->info.type_info_types_hash_map, heap_allocator(), c->info.type_info_types.count*2 + 1);
 	map_reserve(&c->info.minimum_dependency_type_info_index_map, c->info.type_info_types.count);
-	for_array(i, c->info.type_info_types) {
-		auto const &tt = c->info.type_info_types[i];
-		bool exists = map_set_if_not_previously_exists(&c->info.minimum_dependency_type_info_index_map, tt.hash, i);
-		if (!exists) {
-			continue;
-		}
-		for (auto const &entry : c->info.minimum_dependency_type_info_index_map) {
-			if (entry.key != tt.hash) {
+	isize hash_map_len = c->info.type_info_types_hash_map.count;
+	for (auto const &tt : c->info.type_info_types) {
+		isize index = tt.hash % hash_map_len;
+		// NOTE(bill): no need for a sanity check since there
+		// will always be enough space for the entries
+		for (;;) {
+			if (index == 0 || c->info.type_info_types_hash_map[index].hash != 0) {
+				index = (index+1) % hash_map_len;
 				continue;
 			}
-			auto const &other = c->info.type_info_types[entry.value];
-			if (are_types_identical_unique_tuples(tt.type, other.type)) {
-				continue;
+			break;
+		}
+		c->info.type_info_types_hash_map[index] = tt;
+
+		bool exists = map_set_if_not_previously_exists(&c->info.minimum_dependency_type_info_index_map, tt.hash, index);
+		if (exists) {
+			for (auto const &entry : c->info.minimum_dependency_type_info_index_map) {
+				if (entry.key != tt.hash) {
+					continue;
+				}
+				auto const &other = c->info.type_info_types[entry.value];
+				if (are_types_identical_unique_tuples(tt.type, other.type)) {
+					continue;
+				}
+				gbString t = temp_canonical_string(tt.type);
+				gbString o = temp_canonical_string(other.type);
+				GB_PANIC("%s (%s) %llu vs %s (%s) %llu",
+					type_to_string(tt.type, false), t, cast(unsigned long long)tt.hash,
+					type_to_string(other.type, false), o, cast(unsigned long long)other.hash);
 			}
-			gbString t = temp_canonical_string(tt.type);
-			gbString o = temp_canonical_string(other.type);
-			GB_PANIC("%s (%s) %llu vs %s (%s) %llu",
-				type_to_string(tt.type, false), t, cast(unsigned long long)tt.hash,
-				type_to_string(other.type, false), o, cast(unsigned long long)other.hash);
 		}
 	}
+
 	GB_ASSERT(c->info.minimum_dependency_type_info_index_map.count <= c->info.type_info_types.count);
 }