Diffstat (limited to 'src')
-rw-r--r--  src/check_type.cpp          79
-rw-r--r--  src/llvm_backend_debug.cpp   4
-rw-r--r--  src/threading.cpp           26
-rw-r--r--  src/types.cpp               18
4 files changed, 107 insertions(+), 20 deletions(-)
diff --git a/src/check_type.cpp b/src/check_type.cpp
index d216ae3e6..8a140d95e 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -632,9 +632,6 @@ gb_internal void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *
scope_reserve(ctx->scope, min_field_count);
- rw_mutex_lock(&struct_type->Struct.fields_mutex);
- defer (rw_mutex_unlock(&struct_type->Struct.fields_mutex));
-
if (st->is_raw_union && min_field_count > 1) {
struct_type->Struct.is_raw_union = true;
context = str_lit("struct #raw_union");
@@ -662,6 +659,7 @@ gb_internal void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *
gb_unused(where_clause_ok);
}
check_struct_fields(ctx, node, &struct_type->Struct.fields, &struct_type->Struct.tags, st->fields, min_field_count, struct_type, context);
+ wait_signal_set(&struct_type->Struct.fields_wait_signal);
}
#define ST_ALIGN(_name) if (st->_name != nullptr) { \
@@ -2255,6 +2253,34 @@ gb_internal void map_cell_size_and_len(Type *type, i64 *size_, i64 *len_) {
if (len_) *len_ = len;
}
+gb_internal Type *get_map_cell_type(Type *type) {
+ i64 size, len;
+ i64 elem_size = type_size_of(type);
+ map_cell_size_and_len(type, &size, &len);
+
+ if (size == len*elem_size) {
+ return type;
+ }
+
+ if (is_power_of_two(len)) {
+ return type;
+ }
+
+ i64 padding = size - len*elem_size;
+ GB_ASSERT(padding > 0);
+
+ // Padding exists
+ Type *s = alloc_type_struct();
+ Scope *scope = create_scope(nullptr, nullptr);
+ s->Struct.fields = slice_make<Entity *>(permanent_allocator(), 2);
+ s->Struct.fields[0] = alloc_entity_field(scope, make_token_ident("v"), alloc_type_array(type, len), false, 0, EntityState_Resolved);
+ s->Struct.fields[1] = alloc_entity_field(scope, make_token_ident("_"), alloc_type_array(t_u8, padding), false, 1, EntityState_Resolved);
+ s->Struct.scope = scope;
+	gb_unused(type_size_of(s)); // force the padded cell struct's size/alignment to be computed now
+
+ return s;
+}
+
gb_internal void init_map_internal_types(Type *type) {
GB_ASSERT(type->kind == Type_Map);
GB_ASSERT(t_allocator != nullptr);
@@ -2265,6 +2291,43 @@ gb_internal void init_map_internal_types(Type *type) {
GB_ASSERT(key != nullptr);
GB_ASSERT(value != nullptr);
+
+
+ Type *key_cell = get_map_cell_type(key);
+ Type *value_cell = get_map_cell_type(value);
+
+ Type *metadata_type = alloc_type_struct();
+ Scope *metadata_scope = create_scope(nullptr, nullptr);
+ metadata_type->Struct.fields = slice_make<Entity *>(permanent_allocator(), 5);
+ metadata_type->Struct.fields[0] = alloc_entity_field(metadata_scope, make_token_ident("key"), key, false, 0, EntityState_Resolved);
+ metadata_type->Struct.fields[1] = alloc_entity_field(metadata_scope, make_token_ident("value"), value, false, 1, EntityState_Resolved);
+ metadata_type->Struct.fields[2] = alloc_entity_field(metadata_scope, make_token_ident("hash"), t_uintptr, false, 2, EntityState_Resolved);
+ metadata_type->Struct.fields[3] = alloc_entity_field(metadata_scope, make_token_ident("key_cell"), key_cell, false, 3, EntityState_Resolved);
+ metadata_type->Struct.fields[4] = alloc_entity_field(metadata_scope, make_token_ident("value_cell"), value_cell, false, 4, EntityState_Resolved);
+ metadata_type->Struct.scope = metadata_scope;
+ metadata_type->Struct.node = nullptr;
+
+ gb_unused(type_size_of(metadata_type));
+
+	// NOTE(bill): [0]^struct{key: Key, value: Value, hash: uintptr, key_cell: Key_Cell, value_cell: Value_Cell}
+	// This is a zero-length array of a pointer, to keep the alignment that of a pointer without affecting the size of the final struct
+	metadata_type = alloc_type_array(alloc_type_pointer(metadata_type), 0);
+
+
+ Scope *scope = create_scope(nullptr, nullptr);
+ Type *debug_type = alloc_type_struct();
+ debug_type->Struct.fields = slice_make<Entity *>(permanent_allocator(), 4);
+ debug_type->Struct.fields[0] = alloc_entity_field(scope, make_token_ident("data"), t_uintptr, false, 0, EntityState_Resolved);
+ debug_type->Struct.fields[1] = alloc_entity_field(scope, make_token_ident("len"), t_int, false, 1, EntityState_Resolved);
+ debug_type->Struct.fields[2] = alloc_entity_field(scope, make_token_ident("allocator"), t_allocator, false, 2, EntityState_Resolved);
+ debug_type->Struct.fields[3] = alloc_entity_field(scope, make_token_ident("__metadata"), metadata_type, false, 3, EntityState_Resolved);
+ debug_type->Struct.scope = scope;
+ debug_type->Struct.node = nullptr;
+
+ gb_unused(type_size_of(debug_type));
+
+ type->Map.debug_metadata_type = debug_type;
+
type->Map.lookup_result_type = make_optional_ok_type(value);
}
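The synthesized debug type mirrors the runtime map header (data, len, allocator) and appends a zero-sized __metadata tail whose pointee records the key/value/cell types for debuggers. A rough C++ analogue of the layout (field types here are stand-ins; the checker builds real Entity fields as above, and zero-length arrays are a GCC/Clang extension):

#include <cstdint>

struct Metadata {             // stand-ins for the checked Key/Value/cell types
	uint64_t  key;
	uint64_t  value;
	uintptr_t hash;
	uint64_t  key_cell;
	uint64_t  value_cell;
};

struct MapDebugType {
	uintptr_t data;           // the map's backing pointer, as a uintptr
	intptr_t  len;
	void     *allocator[2];   // stand-in for the runtime Allocator value
	Metadata *__metadata[0];  // [0]^Metadata: zero size, pointer alignment
};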
@@ -2488,6 +2551,8 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e
GB_ASSERT(is_type_struct(elem));
Type *old_struct = base_type(elem);
+
+ wait_signal_until_available(&old_struct->Struct.fields_wait_signal);
field_count = old_struct->Struct.fields.count;
soa_struct = alloc_type_struct();
@@ -2528,21 +2593,19 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e
}
if (soa_kind != StructSoa_Fixed) {
- Entity *len_field = alloc_entity_field(scope, empty_token, t_int, false, cast(i32)field_count+0);
+ Entity *len_field = alloc_entity_field(scope, make_token_ident("__$len"), t_int, false, cast(i32)field_count+0);
soa_struct->Struct.fields[field_count+0] = len_field;
add_entity(ctx, scope, nullptr, len_field);
add_entity_use(ctx, nullptr, len_field);
if (soa_kind == StructSoa_Dynamic) {
- Entity *cap_field = alloc_entity_field(scope, empty_token, t_int, false, cast(i32)field_count+1);
+ Entity *cap_field = alloc_entity_field(scope, make_token_ident("__$cap"), t_int, false, cast(i32)field_count+1);
soa_struct->Struct.fields[field_count+1] = cap_field;
add_entity(ctx, scope, nullptr, cap_field);
add_entity_use(ctx, nullptr, cap_field);
- Token token = {};
- token.string = str_lit("allocator");
init_mem_allocator(ctx->checker);
- Entity *allocator_field = alloc_entity_field(scope, token, t_allocator, false, cast(i32)field_count+2);
+ Entity *allocator_field = alloc_entity_field(scope, make_token_ident("allocator"), t_allocator, false, cast(i32)field_count+2);
soa_struct->Struct.fields[field_count+2] = allocator_field;
add_entity(ctx, scope, nullptr, allocator_field);
add_entity_use(ctx, nullptr, allocator_field);
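For orientation, a dynamic #soa struct with element struct{x, y: f32} lays out roughly as below (a hypothetical sketch; the real fields are Entities built above). The change is that the trailing length and capacity slots are now named __$len and __$cap instead of being anonymous, so they survive into lookups and debug info:

#include <cstdint>

struct SoaDynamicVec2 {
	float    *x;              // one pointer-like field per original struct field
	float    *y;
	intptr_t  len;            // previously nameless, now "__$len"
	intptr_t  cap;            // previously nameless, now "__$cap"
	void     *allocator[2];   // stand-in for the runtime Allocator field
};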
diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp
index e053c5b40..f45cf0cbc 100644
--- a/src/llvm_backend_debug.cpp
+++ b/src/llvm_backend_debug.cpp
@@ -710,7 +710,9 @@ gb_internal void lb_debug_complete_types(lbModule *m) {
case Type_Map:
GB_ASSERT(t_raw_map != nullptr);
- bt = base_type(t_raw_map);
+ bt = base_type(bt->Map.debug_metadata_type);
+ GB_ASSERT(bt->kind == Type_Struct);
/*fallthrough*/
case Type_Struct:
if (file == nullptr) {
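With the map's debug type prebuilt by the checker, the backend can treat Type_Map as a plain struct: swap in the synthesized type, assert its kind, and fall through. A reduced sketch of that dispatch (hypothetical types, not the backend's real ones):

#include <cassert>

enum Kind { Kind_Map, Kind_Struct };

struct Type {
	Kind  kind;
	Type *debug_metadata_type; // set by init_map_internal_types for maps
};

void complete_debug_type(Type *bt) {
	switch (bt->kind) {
	case Kind_Map:
		bt = bt->debug_metadata_type; // reuse the struct completion path
		assert(bt->kind == Kind_Struct);
		[[fallthrough]];
	case Kind_Struct:
		// ... emit debug members from bt's fields ...
		break;
	}
}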
diff --git a/src/threading.cpp b/src/threading.cpp
index c283da425..725b58c89 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -107,6 +107,22 @@ gb_internal void thread_set_name (Thread *t, char const *name);
gb_internal void yield_thread(void);
gb_internal void yield_process(void);
+struct Wait_Signal {
+ Futex futex;
+};
+
+gb_internal void wait_signal_until_available(Wait_Signal *ws) {
+	// futex_wait only blocks while the futex still holds the expected value,
+	// so wait on 0 and re-check in a loop to tolerate spurious wakeups
+	while (ws->futex.load() == 0) {
+		futex_wait(&ws->futex, 0);
+	}
+}
+
+gb_internal void wait_signal_set(Wait_Signal *ws) {
+ ws->futex.store(1);
+ futex_broadcast(&ws->futex);
+}
+
+
struct MutexGuard {
MutexGuard() = delete;
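Wait_Signal is a one-shot publication flag: wait_signal_set stores 1 and wakes all waiters, after which every wait_signal_until_available returns immediately. The same contract expressed with C++20's futex-backed atomic waits, as a standard-library analogy (not the compiler's Futex type):

#include <atomic>

std::atomic<int> ready{0};

void wait_until_available() {
	while (ready.load(std::memory_order_acquire) == 0) {
		ready.wait(0);               // blocks only while the value is still 0
	}
}

void set_signal() {
	ready.store(1, std::memory_order_release);
	ready.notify_all();              // analogous to futex_broadcast
}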
@@ -119,17 +135,25 @@ struct MutexGuard {
explicit MutexGuard(RecursiveMutex *rm) noexcept : rm{rm} {
mutex_lock(this->rm);
}
+ explicit MutexGuard(RwMutex *rwm) noexcept : rwm{rwm} {
+ rw_mutex_lock(this->rwm);
+ }
explicit MutexGuard(BlockingMutex &bm) noexcept : bm{&bm} {
mutex_lock(this->bm);
}
explicit MutexGuard(RecursiveMutex &rm) noexcept : rm{&rm} {
mutex_lock(this->rm);
}
+ explicit MutexGuard(RwMutex &rwm) noexcept : rwm{&rwm} {
+ rw_mutex_lock(this->rwm);
+ }
~MutexGuard() noexcept {
if (this->bm) {
mutex_unlock(this->bm);
} else if (this->rm) {
mutex_unlock(this->rm);
+ } else if (this->rwm) {
+ rw_mutex_unlock(this->rwm);
}
}
@@ -137,10 +161,12 @@ struct MutexGuard {
BlockingMutex *bm;
RecursiveMutex *rm;
+ RwMutex *rwm;
};
#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m})
#define MUTEX_GUARD(m) mutex_lock(m); defer (mutex_unlock(m))
+#define RW_MUTEX_GUARD(m) rw_mutex_lock(m); defer (rw_mutex_unlock(m))
struct RecursiveMutex {
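The new rwm member lets MutexGuard (and MUTEX_GUARD_BLOCK) take an RwMutex exclusively, while RW_MUTEX_GUARD is the defer-based equivalent. The RAII shape matches the standard library's, sketched here with std::shared_mutex standing in for the codebase's RwMutex:

#include <shared_mutex>
#include <vector>

std::shared_mutex table_mutex;
std::vector<int>  table;

void append_entry(int v) {
	std::unique_lock<std::shared_mutex> guard(table_mutex); // ~ MutexGuard{&rwm}
	table.push_back(v);
}	// exclusive lock released at scope exit, like RW_MUTEX_GUARD's defer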
diff --git a/src/types.cpp b/src/types.cpp
index 8275b87ba..2f1994574 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -144,7 +144,7 @@ struct TypeStruct {
Type * soa_elem;
i32 soa_count;
StructSoaKind soa_kind;
- RwMutex fields_mutex;
+ Wait_Signal fields_wait_signal;
	BlockingMutex offset_mutex; // for setting offsets
bool is_polymorphic;
@@ -231,6 +231,7 @@ struct TypeProc {
Type *key; \
Type *value; \
Type *lookup_result_type; \
+ Type *debug_metadata_type; \
}) \
TYPE_KIND(Struct, TypeStruct) \
TYPE_KIND(Union, TypeUnion) \
@@ -2968,9 +2969,8 @@ gb_internal Selection lookup_field_from_index(Type *type, i64 index) {
isize max_count = 0;
switch (type->kind) {
case Type_Struct:
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
max_count = type->Struct.fields.count;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
break;
case Type_Tuple: max_count = type->Tuple.variables.count; break;
}
@@ -2981,8 +2981,7 @@ gb_internal Selection lookup_field_from_index(Type *type, i64 index) {
switch (type->kind) {
case Type_Struct: {
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
- defer (rw_mutex_shared_unlock(&type->Struct.fields_mutex));
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
for (isize i = 0; i < max_count; i++) {
Entity *f = type->Struct.fields[i];
if (f->kind == Entity_Variable) {
@@ -3047,9 +3046,8 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
}
if (type->kind == Type_Struct) {
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
isize field_count = type->Struct.fields.count;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
if (field_count != 0) for_array(i, type->Struct.fields) {
Entity *f = type->Struct.fields[i];
if (f->flags&EntityFlag_Using) {
@@ -3078,9 +3076,8 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
if (type->kind == Type_Struct) {
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
Scope *s = type->Struct.scope;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
if (s != nullptr) {
Entity *found = scope_lookup_current(s, field_name);
if (found != nullptr && found->kind != Entity_Variable) {
@@ -3128,9 +3125,8 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
}
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
isize field_count = type->Struct.fields.count;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
if (field_count != 0) for_array(i, type->Struct.fields) {
Entity *f = type->Struct.fields[i];
if (f->kind != Entity_Variable || (f->flags & EntityFlag_Field) == 0) {
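Across types.cpp, every reader-side rw_mutex_shared_lock/unlock pair becomes a single wait on fields_wait_signal. This is sound because Struct.fields is written exactly once, by check_struct_fields, before wait_signal_set fires, and is immutable afterwards: readers need a publication barrier, not mutual exclusion. A condensed sketch of the invariant (hypothetical types, with C++20 atomics standing in for Wait_Signal):

#include <atomic>
#include <cstddef>
#include <vector>

struct StructInfo {
	std::vector<int> fields;          // stand-in for Slice<Entity *>
	std::atomic<int> fields_ready{0}; // stand-in for Wait_Signal
};

void check_fields(StructInfo *s) {    // the single writer
	s->fields = {1, 2, 3};            // fill the fields once...
	s->fields_ready.store(1, std::memory_order_release);
	s->fields_ready.notify_all();     // ...then publish (wait_signal_set)
}

std::size_t field_count(StructInfo *s) { // any number of readers
	while (s->fields_ready.load(std::memory_order_acquire) == 0) {
		s->fields_ready.wait(0);      // wait_signal_until_available
	}
	return s->fields.size();          // fields are immutable from here on
}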