| field | value | date |
|---|---|---|
| author | gingerBill <gingerBill@users.noreply.github.com> | 2025-08-05 12:27:28 +0100 |
| committer | gingerBill <gingerBill@users.noreply.github.com> | 2025-08-05 12:28:18 +0100 |
| commit | 7f194080e6fca33a8341f2c86d52a85be056cf8f (patch) | |
| tree | 9aae401ca128713b176d17d09a8a676d7f097819 /src/types.cpp | |
| parent | c910b5e583b8064f551179bfddc7c59ef5a32675 (diff) | |
Fix possible race condition with struct offsets
Diffstat (limited to 'src/types.cpp')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/types.cpp | 20 |

1 file changed, 12 insertions(+), 8 deletions(-)
```diff
diff --git a/src/types.cpp b/src/types.cpp
index 9ffd10ca8..861841b59 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -152,10 +152,11 @@ struct TypeStruct {
 	bool is_polymorphic;
 	bool are_offsets_set : 1;
-	bool are_offsets_being_processed : 1;
 	bool is_packed : 1;
 	bool is_raw_union : 1;
 	bool is_poly_specialized : 1;
+
+	std::atomic<bool> are_offsets_being_processed;
 };
 
 struct TypeUnion {
@@ -4099,18 +4100,18 @@ gb_internal bool type_set_offsets(Type *t) {
 	if (t->kind == Type_Struct) {
 		MUTEX_GUARD(&t->Struct.offset_mutex);
 		if (!t->Struct.are_offsets_set) {
-			t->Struct.are_offsets_being_processed = true;
+			t->Struct.are_offsets_being_processed.store(true);
 			t->Struct.offsets = type_set_offsets_of(t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union, t->Struct.custom_min_field_align, t->Struct.custom_max_field_align);
-			t->Struct.are_offsets_being_processed = false;
+			t->Struct.are_offsets_being_processed.store(false);
 			t->Struct.are_offsets_set = true;
 			return true;
 		}
 	} else if (is_type_tuple(t)) {
 		MUTEX_GUARD(&t->Tuple.mutex);
 		if (!t->Tuple.are_offsets_set) {
-			t->Tuple.are_offsets_being_processed = true;
+			t->Tuple.are_offsets_being_processed.store(true);
 			t->Tuple.offsets = type_set_offsets_of(t->Tuple.variables, t->Tuple.is_packed, false, 1, 0);
-			t->Tuple.are_offsets_being_processed = false;
+			t->Tuple.are_offsets_being_processed.store(false);
 			t->Tuple.are_offsets_set = true;
 			return true;
 		}
@@ -4293,9 +4294,12 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
 		if (path->failure) {
 			return FAILURE_SIZE;
 		}
-		if (t->Struct.are_offsets_being_processed && t->Struct.offsets == nullptr) {
-			type_path_print_illegal_cycle(path, path->path.count-1);
-			return FAILURE_SIZE;
+		{
+			MUTEX_GUARD(&t->Struct.offset_mutex);
+			if (t->Struct.are_offsets_being_processed.load() && t->Struct.offsets == nullptr) {
+				type_path_print_illegal_cycle(path, path->path.count-1);
+				return FAILURE_SIZE;
+			}
 		}
 		type_set_offsets(t);
 		GB_ASSERT(t->Struct.fields.count == 0 || t->Struct.offsets != nullptr);
```
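The change replaces the `are_offsets_being_processed : 1` bitfield with a `std::atomic<bool>` and takes the same `offset_mutex` in `type_size_of_internal` that `type_set_offsets` already holds, so the cycle-detection read can no longer race with the writer toggling the flag. The sketch below is a minimal, self-contained illustration of that pattern, not the compiler's actual code: `Layout`, `set_offsets`, and `size_of` are hypothetical stand-ins for `TypeStruct`, `type_set_offsets`, and `type_size_of_internal`.

```cpp
// Minimal sketch (assumed names, not the real compiler types) of the pattern
// this commit moves to: a "being processed" flag written by one thread and
// read by another to detect an illegal layout cycle.
#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct Layout {
	std::mutex        offset_mutex;
	bool              offsets_set = false;
	std::atomic<bool> being_processed{false}; // was a plain `bool : 1` bitfield
	std::vector<int>  offsets;                // empty until computed
};

// Writer side: compute offsets once, under the mutex, toggling the flag
// around the computation (mirrors type_set_offsets).
void set_offsets(Layout &l) {
	std::lock_guard<std::mutex> guard(l.offset_mutex);
	if (!l.offsets_set) {
		l.being_processed.store(true);
		l.offsets = {0, 8, 16};              // stand-in for type_set_offsets_of
		l.being_processed.store(false);
		l.offsets_set = true;
	}
}

// Reader side: check for a cycle before forcing the offsets. Reading a plain
// bitfield here without the mutex was a data race; the check now takes the
// same mutex and reads the atomic (mirrors type_size_of_internal).
bool size_of(Layout &l) {
	{
		std::lock_guard<std::mutex> guard(l.offset_mutex);
		if (l.being_processed.load() && l.offsets.empty()) {
			std::puts("illegal cycle detected");
			return false;
		}
	}
	set_offsets(l);
	return true;
}

int main() {
	Layout l;
	std::thread a([&] { set_offsets(l); });
	std::thread b([&] { size_of(l); });
	a.join();
	b.join();
	std::printf("offsets computed: %zu fields\n", l.offsets.size());
}
```

Moving the flag out of the bitfield is also necessary on its own: `std::atomic` cannot be applied to a bitfield member, and adjacent bitfield members share a memory location, so the flag could not simply be made atomic in place.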