Diffstat (limited to 'src/types.cpp')
-rw-r--r--  src/types.cpp  646
1 file changed, 480 insertions(+), 166 deletions(-)
diff --git a/src/types.cpp b/src/types.cpp
index 574e628c5..233f903a3 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -137,13 +137,17 @@ struct TypeStruct {
Scope * scope;
i64 custom_align;
+ i64 custom_min_field_align;
+ i64 custom_max_field_align;
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
+ Wait_Signal polymorphic_wait_signal;
Type * soa_elem;
i32 soa_count;
StructSoaKind soa_kind;
- RwMutex fields_mutex;
+ Wait_Signal fields_wait_signal;
+ BlockingMutex soa_mutex;
BlockingMutex offset_mutex; // for setting offsets
bool is_polymorphic;
@@ -165,6 +169,7 @@ struct TypeUnion {
i64 custom_align;
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
+ Wait_Signal polymorphic_wait_signal;
i16 tag_size;
bool is_polymorphic;
@@ -182,6 +187,8 @@ struct TypeProc {
isize specialization_count;
ProcCallingConvention calling_convention;
i32 variadic_index;
+ String require_target_feature;
+ String enable_target_feature;
// TODO(bill): Make this a flag set rather than bools
bool variadic;
bool require_results;
@@ -230,6 +237,7 @@ struct TypeProc {
Type *key; \
Type *value; \
Type *lookup_result_type; \
+ Type *debug_metadata_type; \
}) \
TYPE_KIND(Struct, TypeStruct) \
TYPE_KIND(Union, TypeUnion) \
@@ -264,14 +272,6 @@ struct TypeProc {
Type *elem; \
Type *generic_count; \
}) \
- TYPE_KIND(RelativePointer, struct { \
- Type *pointer_type; \
- Type *base_integer; \
- }) \
- TYPE_KIND(RelativeMultiPointer, struct { \
- Type *pointer_type; \
- Type *base_integer; \
- }) \
TYPE_KIND(Matrix, struct { \
Type *elem; \
i64 row_count; \
@@ -279,6 +279,16 @@ struct TypeProc {
Type *generic_row_count; \
Type *generic_column_count; \
i64 stride_in_bytes; \
+ bool is_row_major; \
+ }) \
+ TYPE_KIND(BitField, struct { \
+ Scope * scope; \
+ Type * backing_type; \
+ Slice<Entity *> fields; \
+ String * tags; /*count == fields.count*/ \
+ Slice<u8> bit_sizes; \
+ Slice<i64> bit_offsets; \
+ Ast * node; \
}) \
TYPE_KIND(SoaPointer, struct { Type *elem; })
@@ -349,10 +359,12 @@ enum Typeid_Kind : u8 {
Typeid_Map,
Typeid_Bit_Set,
Typeid_Simd_Vector,
- Typeid_Relative_Pointer,
- Typeid_Relative_Multi_Pointer,
Typeid_Matrix,
Typeid_SoaPointer,
+ Typeid_Bit_Field,
+
+ Typeid__COUNT
};
// IMPORTANT NOTE(bill): This must match the equivalent definition in core.odin
@@ -374,6 +386,9 @@ enum : int {
gb_internal bool is_type_comparable(Type *t);
gb_internal bool is_type_simple_compare(Type *t);
+gb_internal Type *type_deref(Type *t, bool allow_multi_pointer=false);
+gb_internal Type *base_type(Type *t);
+gb_internal Type *alloc_type_multi_pointer(Type *elem);
gb_internal u32 type_info_flags_of_type(Type *type) {
if (type == nullptr) {
@@ -398,6 +413,7 @@ struct Selection {
bool indirect; // Set if there was a pointer deref anywhere down the line
u8 swizzle_count; // maximum components = 4
u8 swizzle_indices; // 2 bits per component, representing which swizzle index
+ bool is_bit_field;
bool pseudo_field;
};
gb_global Selection const empty_selection = {0};
@@ -434,6 +450,15 @@ gb_internal Selection sub_selection(Selection const &sel, isize offset) {
return res;
}
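+// Returns a copy of `sel` with its final index dropped; the copy shares the
+// underlying index array with `sel` rather than allocating a new one.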
+gb_internal Selection trim_selection(Selection const &sel) {
+ Selection res = {};
+ res.index.data = sel.index.data;
+ res.index.count = gb_max(sel.index.count - 1, 0);
+ res.index.capacity = res.index.count;
+ return res;
+}
+
+
gb_global Type basic_types[] = {
{Type_Basic, {Basic_Invalid, 0, 0, STR_LIT("invalid type")}},
@@ -546,6 +571,14 @@ gb_global Type *t_f16 = &basic_types[Basic_f16];
gb_global Type *t_f32 = &basic_types[Basic_f32];
gb_global Type *t_f64 = &basic_types[Basic_f64];
+gb_global Type *t_f16be = &basic_types[Basic_f16be];
+gb_global Type *t_f32be = &basic_types[Basic_f32be];
+gb_global Type *t_f64be = &basic_types[Basic_f64be];
+
+gb_global Type *t_f16le = &basic_types[Basic_f16le];
+gb_global Type *t_f32le = &basic_types[Basic_f32le];
+gb_global Type *t_f64le = &basic_types[Basic_f64le];
+
gb_global Type *t_complex32 = &basic_types[Basic_complex32];
gb_global Type *t_complex64 = &basic_types[Basic_complex64];
gb_global Type *t_complex128 = &basic_types[Basic_complex128];
@@ -635,10 +668,9 @@ gb_global Type *t_type_info_enum = nullptr;
gb_global Type *t_type_info_map = nullptr;
gb_global Type *t_type_info_bit_set = nullptr;
gb_global Type *t_type_info_simd_vector = nullptr;
-gb_global Type *t_type_info_relative_pointer = nullptr;
-gb_global Type *t_type_info_relative_multi_pointer = nullptr;
gb_global Type *t_type_info_matrix = nullptr;
gb_global Type *t_type_info_soa_pointer = nullptr;
+gb_global Type *t_type_info_bit_field = nullptr;
gb_global Type *t_type_info_named_ptr = nullptr;
gb_global Type *t_type_info_integer_ptr = nullptr;
@@ -664,10 +696,9 @@ gb_global Type *t_type_info_enum_ptr = nullptr;
gb_global Type *t_type_info_map_ptr = nullptr;
gb_global Type *t_type_info_bit_set_ptr = nullptr;
gb_global Type *t_type_info_simd_vector_ptr = nullptr;
-gb_global Type *t_type_info_relative_pointer_ptr = nullptr;
-gb_global Type *t_type_info_relative_multi_pointer_ptr = nullptr;
gb_global Type *t_type_info_matrix_ptr = nullptr;
gb_global Type *t_type_info_soa_pointer_ptr = nullptr;
+gb_global Type *t_type_info_bit_field_ptr = nullptr;
gb_global Type *t_allocator = nullptr;
gb_global Type *t_allocator_ptr = nullptr;
@@ -678,6 +709,10 @@ gb_global Type *t_allocator_error = nullptr;
gb_global Type *t_source_code_location = nullptr;
gb_global Type *t_source_code_location_ptr = nullptr;
+gb_global Type *t_load_directory_file = nullptr;
+gb_global Type *t_load_directory_file_ptr = nullptr;
+gb_global Type *t_load_directory_file_slice = nullptr;
+
gb_global Type *t_map_info = nullptr;
gb_global Type *t_map_cell_info = nullptr;
gb_global Type *t_raw_map = nullptr;
@@ -733,7 +768,7 @@ gb_internal i64 type_offset_of (Type *t, i64 index, Type **field_type_=null
gb_internal gbString type_to_string (Type *type, bool shorthand=true);
gb_internal gbString type_to_string (Type *type, gbAllocator allocator, bool shorthand=true);
gb_internal i64 type_size_of_internal(Type *t, TypePath *path);
-gb_internal void init_map_internal_types(Type *type);
+gb_internal i64 type_align_of_internal(Type *t, TypePath *path);
gb_internal Type * bit_set_to_int(Type *t);
gb_internal bool are_types_identical(Type *x, Type *y);
@@ -744,10 +779,6 @@ gb_internal bool is_type_proc(Type *t);
gb_internal bool is_type_slice(Type *t);
gb_internal bool is_type_integer(Type *t);
gb_internal bool type_set_offsets(Type *t);
-gb_internal Type *base_type(Type *t);
-
-gb_internal i64 type_size_of_internal(Type *t, TypePath *path);
-gb_internal i64 type_align_of_internal(Type *t, TypePath *path);
// IMPORTANT TODO(bill): Should this TypePath code be removed since type cycle checking is handled much earlier on?
@@ -825,11 +856,13 @@ gb_internal void type_path_pop(TypePath *tp) {
#define FAILURE_SIZE 0
#define FAILURE_ALIGNMENT 0
+gb_internal bool type_ptr_set_exists(PtrSet<Type *> *s, Type *t);
+
gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
if (t == nullptr) {
return true;
}
- if (ptr_set_exists(s, t)) {
+ if (type_ptr_set_exists(s, t)) {
return true;
}
ptr_set_add(s, t);
@@ -898,6 +931,9 @@ gb_internal Type *core_type(Type *t) {
case Type_Enum:
t = t->Enum.base_type;
continue;
+ case Type_BitField:
+ t = t->BitField.backing_type;
+ continue;
}
break;
}
@@ -915,7 +951,7 @@ gb_internal Type *alloc_type(TypeKind kind) {
// gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
Type *t = gb_alloc_item(a, Type);
- zero_item(t);
+ gb_zero_item(t);
t->kind = kind;
t->cached_size = -1;
t->cached_align = -1;
@@ -950,6 +986,27 @@ gb_internal Type *alloc_type_soa_pointer(Type *elem) {
return t;
}
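+// Converts ^T into [^]T; an existing multi-pointer is returned unchanged and
+// any other type kind panics.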
+gb_internal Type *alloc_type_pointer_to_multi_pointer(Type *ptr) {
+ Type *original_type = ptr;
+ ptr = base_type(ptr);
+ if (ptr->kind == Type_Pointer) {
+ return alloc_type_multi_pointer(ptr->Pointer.elem);
+ } else if (ptr->kind != Type_MultiPointer) {
+ GB_PANIC("Invalid type: %s", type_to_string(original_type));
+ }
+ return original_type;
+}
+
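+// Converts [^]T into ^T; an existing single pointer is returned unchanged and
+// any other type kind panics.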
+gb_internal Type *alloc_type_multi_pointer_to_pointer(Type *ptr) {
+ Type *original_type = ptr;
+ ptr = base_type(ptr);
+ if (ptr->kind == Type_MultiPointer) {
+ return alloc_type_pointer(ptr->MultiPointer.elem);
+ } else if (ptr->kind != Type_Pointer) {
+ GB_PANIC("Invalid type: %s", type_to_string(original_type));
+ }
+ return original_type;
+}
gb_internal Type *alloc_type_array(Type *elem, i64 count, Type *generic_count = nullptr) {
if (generic_count != nullptr) {
@@ -965,7 +1022,7 @@ gb_internal Type *alloc_type_array(Type *elem, i64 count, Type *generic_count =
return t;
}
-gb_internal Type *alloc_type_matrix(Type *elem, i64 row_count, i64 column_count, Type *generic_row_count = nullptr, Type *generic_column_count = nullptr) {
+gb_internal Type *alloc_type_matrix(Type *elem, i64 row_count, i64 column_count, Type *generic_row_count, Type *generic_column_count, bool is_row_major) {
if (generic_row_count != nullptr || generic_column_count != nullptr) {
Type *t = alloc_type(Type_Matrix);
t->Matrix.elem = elem;
@@ -973,12 +1030,14 @@ gb_internal Type *alloc_type_matrix(Type *elem, i64 row_count, i64 column_count,
t->Matrix.column_count = column_count;
t->Matrix.generic_row_count = generic_row_count;
t->Matrix.generic_column_count = generic_column_count;
+ t->Matrix.is_row_major = is_row_major;
return t;
}
Type *t = alloc_type(Type_Matrix);
t->Matrix.elem = elem;
t->Matrix.row_count = row_count;
t->Matrix.column_count = column_count;
+ t->Matrix.is_row_major = is_row_major;
return t;
}
@@ -1020,6 +1079,14 @@ gb_internal Type *alloc_type_struct() {
return t;
}
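+// Variant of alloc_type_struct for structs that need no further completion:
+// both wait signals start out set, so readers of the type never block.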
+gb_internal Type *alloc_type_struct_complete() {
+ Type *t = alloc_type(Type_Struct);
+ wait_signal_set(&t->Struct.fields_wait_signal);
+ wait_signal_set(&t->Struct.polymorphic_wait_signal);
+ return t;
+}
+
+
gb_internal Type *alloc_type_union() {
Type *t = alloc_type(Type_Union);
return t;
@@ -1032,21 +1099,8 @@ gb_internal Type *alloc_type_enum() {
return t;
}
-gb_internal Type *alloc_type_relative_pointer(Type *pointer_type, Type *base_integer) {
- GB_ASSERT(is_type_pointer(pointer_type));
- GB_ASSERT(is_type_integer(base_integer));
- Type *t = alloc_type(Type_RelativePointer);
- t->RelativePointer.pointer_type = pointer_type;
- t->RelativePointer.base_integer = base_integer;
- return t;
-}
-
-gb_internal Type *alloc_type_relative_multi_pointer(Type *pointer_type, Type *base_integer) {
- GB_ASSERT(is_type_multi_pointer(pointer_type));
- GB_ASSERT(is_type_integer(base_integer));
- Type *t = alloc_type(Type_RelativeMultiPointer);
- t->RelativeMultiPointer.pointer_type = pointer_type;
- t->RelativeMultiPointer.base_integer = base_integer;
+gb_internal Type *alloc_type_bit_field() {
+ Type *t = alloc_type(Type_BitField);
return t;
}
@@ -1132,7 +1186,7 @@ gb_internal Type *alloc_type_simd_vector(i64 count, Type *elem, Type *generic_co
////////////////////////////////////////////////////////////////
-gb_internal Type *type_deref(Type *t, bool allow_multi_pointer=false) {
+gb_internal Type *type_deref(Type *t, bool allow_multi_pointer) {
if (t != nullptr) {
Type *bt = base_type(t);
if (bt == nullptr) {
@@ -1141,8 +1195,6 @@ gb_internal Type *type_deref(Type *t, bool allow_multi_pointer=false) {
switch (bt->kind) {
case Type_Pointer:
return bt->Pointer.elem;
- case Type_RelativePointer:
- return type_deref(bt->RelativePointer.pointer_type);
case Type_SoaPointer:
{
Type *elem = base_type(bt->SoaPointer.elem);
@@ -1389,6 +1441,7 @@ gb_internal i64 matrix_align_of(Type *t, struct TypePath *tp) {
Type *elem = t->Matrix.elem;
i64 row_count = gb_max(t->Matrix.row_count, 1);
+ i64 column_count = gb_max(t->Matrix.column_count, 1);
bool pop = type_path_push(tp, elem);
if (tp->failure) {
@@ -1406,13 +1459,13 @@ gb_internal i64 matrix_align_of(Type *t, struct TypePath *tp) {
// could be maximally aligned but as a compromise, having no padding will be
// beneficial to third-party libraries that assume no padding
- i64 total_expected_size = row_count*t->Matrix.column_count*elem_size;
+ i64 total_expected_size = row_count*column_count*elem_size;
// i64 min_alignment = prev_pow2(elem_align * row_count);
i64 min_alignment = prev_pow2(total_expected_size);
- while ((total_expected_size % min_alignment) != 0) {
+ while (total_expected_size != 0 && (total_expected_size % min_alignment) != 0) {
min_alignment >>= 1;
}
- GB_ASSERT(min_alignment >= elem_align);
+ min_alignment = gb_max(min_alignment, elem_align);
i64 align = gb_min(min_alignment, build_context.max_simd_align);
return align;
@@ -1438,12 +1491,15 @@ gb_internal i64 matrix_type_stride_in_bytes(Type *t, struct TypePath *tp) {
i64 stride_in_bytes = 0;
// NOTE(bill, 2021-10-25): The alignment strategy here is to have zero padding
- // It would be better for performance to pad each column so that each column
+ // It would be better for performance to pad each column/row so that each column/row
// could be maximally aligned but as a compromise, having no padding will be
// beneficial to third-party libraries that assume no padding
- i64 row_count = t->Matrix.row_count;
- stride_in_bytes = elem_size*row_count;
-
+
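+	// Row-major: rows are stored contiguously, so the stride spans column_count
+	// elements; column-major: columns are contiguous, so it spans row_count.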
+ if (t->Matrix.is_row_major) {
+ stride_in_bytes = elem_size*t->Matrix.column_count;
+ } else {
+ stride_in_bytes = elem_size*t->Matrix.row_count;
+ }
t->Matrix.stride_in_bytes = stride_in_bytes;
return stride_in_bytes;
}
@@ -1470,14 +1526,18 @@ gb_internal i64 matrix_indices_to_offset(Type *t, i64 row_index, i64 column_inde
GB_ASSERT(0 <= row_index && row_index < t->Matrix.row_count);
GB_ASSERT(0 <= column_index && column_index < t->Matrix.column_count);
i64 stride_elems = matrix_type_stride_in_elems(t);
- // NOTE(bill): Column-major layout internally
- return row_index + stride_elems*column_index;
+ if (t->Matrix.is_row_major) {
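+		// e.g. with the zero-padding stride above, a row-major matrix[2, 3]
+		// has stride_elems == 3, so element (row 1, col 2) sits at 2 + 3*1 == 5.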
+ return column_index + stride_elems*row_index;
+ } else {
+ // NOTE(bill): Column-major layout internally
+ return row_index + stride_elems*column_index;
+ }
}
gb_internal i64 matrix_row_major_index_to_offset(Type *t, i64 index) {
t = base_type(t);
GB_ASSERT(t->kind == Type_Matrix);
-
+
i64 row_index = index/t->Matrix.column_count;
i64 column_index = index%t->Matrix.column_count;
return matrix_indices_to_offset(t, row_index, column_index);
@@ -1548,18 +1608,29 @@ gb_internal Type *base_array_type(Type *t) {
return t;
}
-gb_internal bool is_type_generic(Type *t) {
- t = base_type(t);
- return t->kind == Type_Generic;
-}
-gb_internal bool is_type_relative_pointer(Type *t) {
- t = base_type(t);
- return t->kind == Type_RelativePointer;
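+// Element type of any array-like type (fixed array, slice, dynamic array,
+// enumerated array, #simd vector, or matrix); other types are returned as-is.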
+gb_internal Type *base_any_array_type(Type *t) {
+ Type *bt = base_type(t);
+ if (is_type_array(bt)) {
+ return bt->Array.elem;
+ } else if (is_type_slice(bt)) {
+ return bt->Slice.elem;
+ } else if (is_type_dynamic_array(bt)) {
+ return bt->DynamicArray.elem;
+ } else if (is_type_enumerated_array(bt)) {
+ return bt->EnumeratedArray.elem;
+ } else if (is_type_simd_vector(bt)) {
+ return bt->SimdVector.elem;
+ } else if (is_type_matrix(bt)) {
+ return bt->Matrix.elem;
+ }
+ return t;
}
-gb_internal bool is_type_relative_multi_pointer(Type *t) {
+
+
+gb_internal bool is_type_generic(Type *t) {
t = base_type(t);
- return t->kind == Type_RelativeMultiPointer;
+ return t->kind == Type_Generic;
}
gb_internal bool is_type_u8_slice(Type *t) {
@@ -1699,6 +1770,10 @@ gb_internal bool is_type_bit_set(Type *t) {
t = base_type(t);
return (t->kind == Type_BitSet);
}
+gb_internal bool is_type_bit_field(Type *t) {
+ t = base_type(t);
+ return (t->kind == Type_BitField);
+}
gb_internal bool is_type_map(Type *t) {
t = base_type(t);
return t->kind == Type_Map;
@@ -1918,6 +1993,24 @@ gb_internal bool is_type_valid_bit_set_elem(Type *t) {
return false;
}
+
+gb_internal bool is_valid_bit_field_backing_type(Type *type) {
+ if (type == nullptr) {
+ return false;
+ }
+ type = base_type(type);
+ if (is_type_untyped(type)) {
+ return false;
+ }
+ if (is_type_integer(type)) {
+ return true;
+ }
+ if (type->kind == Type_Array) {
+ return is_type_integer(type->Array.elem);
+ }
+ return false;
+}
+
gb_internal Type *bit_set_to_int(Type *t) {
GB_ASSERT(is_type_bit_set(t));
Type *bt = base_type(t);
@@ -1925,6 +2018,9 @@ gb_internal Type *bit_set_to_int(Type *t) {
if (underlying != nullptr && is_type_integer(underlying)) {
return underlying;
}
+ if (underlying != nullptr && is_valid_bit_field_backing_type(underlying)) {
+ return underlying;
+ }
i64 sz = type_size_of(t);
switch (sz) {
@@ -1957,6 +2053,9 @@ gb_internal bool is_type_valid_vector_elem(Type *t) {
if (is_type_boolean(t)) {
return true;
}
+ if (t->Basic.kind == Basic_rawptr) {
+ return true;
+ }
}
return false;
}
@@ -1976,8 +2075,6 @@ gb_internal bool is_type_indexable(Type *t) {
return true;
case Type_EnumeratedArray:
return true;
- case Type_RelativeMultiPointer:
- return true;
case Type_Matrix:
return true;
}
@@ -1995,8 +2092,6 @@ gb_internal bool is_type_sliceable(Type *t) {
return true;
case Type_EnumeratedArray:
return false;
- case Type_RelativeMultiPointer:
- return true;
case Type_Matrix:
return false;
}
@@ -2040,21 +2135,24 @@ gb_internal bool is_type_polymorphic_record_unspecialized(Type *t) {
t = base_type(t);
if (t->kind == Type_Struct) {
return t->Struct.is_polymorphic && !t->Struct.is_poly_specialized;
- } else if (t->kind == Type_Struct) {
- return t->Struct.is_polymorphic && !t->Struct.is_poly_specialized;
+ } else if (t->kind == Type_Union) {
+ return t->Union.is_polymorphic && !t->Union.is_poly_specialized;
}
return false;
}
+
gb_internal TypeTuple *get_record_polymorphic_params(Type *t) {
t = base_type(t);
switch (t->kind) {
case Type_Struct:
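+		// Block until the checker has resolved this record's polymorphic params.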
+ wait_signal_until_available(&t->Struct.polymorphic_wait_signal);
if (t->Struct.polymorphic_params) {
return &t->Struct.polymorphic_params->Tuple;
}
break;
case Type_Union:
+ wait_signal_until_available(&t->Union.polymorphic_wait_signal);
if (t->Union.polymorphic_params) {
return &t->Union.polymorphic_params->Tuple;
}
@@ -2200,27 +2298,7 @@ gb_internal bool is_type_polymorphic(Type *t, bool or_specialized=false) {
return true;
}
break;
-
- case Type_RelativeMultiPointer:
- if (is_type_polymorphic(t->RelativeMultiPointer.pointer_type, or_specialized)) {
- return true;
- }
- if (t->RelativeMultiPointer.base_integer != nullptr &&
- is_type_polymorphic(t->RelativeMultiPointer.base_integer, or_specialized)) {
- return true;
- }
- break;
- case Type_RelativePointer:
- if (is_type_polymorphic(t->RelativePointer.pointer_type, or_specialized)) {
- return true;
- }
- if (t->RelativePointer.base_integer != nullptr &&
- is_type_polymorphic(t->RelativePointer.base_integer, or_specialized)) {
- return true;
- }
- break;
}
-
return false;
}
@@ -2262,14 +2340,11 @@ gb_internal bool type_has_nil(Type *t) {
}
}
return false;
-
- case Type_RelativePointer:
- case Type_RelativeMultiPointer:
- return true;
}
return false;
}
+
gb_internal bool elem_type_can_be_constant(Type *t) {
t = base_type(t);
if (t == t_invalid) {
@@ -2357,6 +2432,9 @@ gb_internal bool is_type_comparable(Type *t) {
case Type_SimdVector:
return true;
+
+ case Type_BitField:
+ return is_type_comparable(t->BitField.backing_type);
}
return false;
}
@@ -2430,10 +2508,6 @@ gb_internal bool is_type_load_safe(Type *type) {
}
return true;
- case Type_RelativePointer:
- case Type_RelativeMultiPointer:
- return true;
-
case Type_Pointer:
case Type_MultiPointer:
case Type_Slice:
@@ -2641,6 +2715,7 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
case Type_Matrix:
return x->Matrix.row_count == y->Matrix.row_count &&
x->Matrix.column_count == y->Matrix.column_count &&
+ x->Matrix.is_row_major == y->Matrix.is_row_major &&
are_types_identical(x->Matrix.elem, y->Matrix.elem);
case Type_DynamicArray:
@@ -2665,8 +2740,14 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
case Type_Union:
if (x->Union.variants.count == y->Union.variants.count &&
- x->Union.custom_align == y->Union.custom_align &&
x->Union.kind == y->Union.kind) {
+
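+			// Custom alignments that differ only reject the match when the
+			// resulting final alignments actually differ.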
+ if (x->Union.custom_align != y->Union.custom_align) {
+ if (type_align_of(x) != type_align_of(y)) {
+ return false;
+ }
+ }
+
// NOTE(bill): zeroth variant is nullptr
for_array(i, x->Union.variants) {
if (!are_types_identical(x->Union.variants[i], y->Union.variants[i])) {
@@ -2682,10 +2763,16 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
x->Struct.is_no_copy == y->Struct.is_no_copy &&
x->Struct.fields.count == y->Struct.fields.count &&
x->Struct.is_packed == y->Struct.is_packed &&
- x->Struct.custom_align == y->Struct.custom_align &&
x->Struct.soa_kind == y->Struct.soa_kind &&
x->Struct.soa_count == y->Struct.soa_count &&
are_types_identical(x->Struct.soa_elem, y->Struct.soa_elem)) {
+
+ if (x->Struct.custom_align != y->Struct.custom_align) {
+ if (type_align_of(x) != type_align_of(y)) {
+ return false;
+ }
+ }
+
for_array(i, x->Struct.fields) {
Entity *xf = x->Struct.fields[i];
Entity *yf = y->Struct.fields[i];
@@ -2764,6 +2851,29 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
return are_types_identical(x->SimdVector.elem, y->SimdVector.elem);
}
break;
+
+ case Type_BitField:
+ if (are_types_identical(x->BitField.backing_type, y->BitField.backing_type) &&
+ x->BitField.fields.count == y->BitField.fields.count) {
+ for_array(i, x->BitField.fields) {
+ Entity *a = x->BitField.fields[i];
+ Entity *b = y->BitField.fields[i];
+ if (!are_types_identical(a->type, b->type)) {
+ return false;
+ }
+ if (a->token.string != b->token.string) {
+ return false;
+ }
+ if (x->BitField.bit_sizes[i] != y->BitField.bit_sizes[i]) {
+ return false;
+ }
+ if (x->BitField.bit_offsets[i] != y->BitField.bit_offsets[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ break;
}
return false;
@@ -2787,6 +2897,49 @@ gb_internal Type *default_type(Type *type) {
return type;
}
+// See https://en.cppreference.com/w/c/language/conversion#Default_argument_promotions
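+// e.g. f16/f32 (and untyped floats) promote to f64, bool and sub-i32 integer
+// types promote to i32, and endian-specific variants keep their endianness.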
+gb_internal Type *c_vararg_promote_type(Type *type) {
+ GB_ASSERT(type != nullptr);
+
+ Type *core = core_type(type);
+ GB_ASSERT(core->kind != Type_BitSet);
+
+ if (core->kind == Type_Basic) {
+ switch (core->Basic.kind) {
+ case Basic_f16:
+ case Basic_f32:
+ case Basic_UntypedFloat:
+ return t_f64;
+ case Basic_f16le:
+ case Basic_f32le:
+ return t_f64le;
+ case Basic_f16be:
+ case Basic_f32be:
+ return t_f64be;
+
+ case Basic_UntypedBool:
+ case Basic_bool:
+ case Basic_b8:
+ case Basic_b16:
+ case Basic_i8:
+ case Basic_i16:
+ case Basic_u8:
+ case Basic_u16:
+ return t_i32;
+
+ case Basic_i16le:
+ case Basic_u16le:
+ return t_i32le;
+
+ case Basic_i16be:
+ case Basic_u16be:
+ return t_i32be;
+ }
+ }
+
+ return type;
+}
+
gb_internal bool union_variant_index_types_equal(Type *v, Type *vt) {
if (are_types_identical(v, vt)) {
return true;
@@ -2863,7 +3016,22 @@ gb_internal Type *union_tag_type(Type *u) {
return t_uint;
}
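+// Counts how many of the comma-separated entries in require_target_feature
+// are valid features for the current target architecture.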
+gb_internal int matched_target_features(TypeProc *t) {
+ if (t->require_target_feature.len == 0) {
+ return 0;
+ }
+ int matches = 0;
+ String_Iterator it = {t->require_target_feature, 0};
+ for (;;) {
+ String str = string_split_iterator(&it, ',');
+ if (str == "") break;
+ if (check_target_feature_is_valid_for_target_arch(str, nullptr)) {
+ matches += 1;
+ }
+ }
+ return matches;
+}
enum ProcTypeOverloadKind {
ProcOverload_Identical, // The types are identical
@@ -2875,6 +3043,7 @@ enum ProcTypeOverloadKind {
ProcOverload_ResultCount,
ProcOverload_ResultTypes,
ProcOverload_Polymorphic,
+ ProcOverload_TargetFeatures,
ProcOverload_NotProcedure,
@@ -2932,6 +3101,10 @@ gb_internal ProcTypeOverloadKind are_proc_types_overload_safe(Type *x, Type *y)
}
}
+ if (matched_target_features(&px) != matched_target_features(&py)) {
+ return ProcOverload_TargetFeatures;
+ }
+
if (px.params != nullptr && py.params != nullptr) {
Entity *ex = px.params->Tuple.variables[0];
Entity *ey = py.params->Tuple.variables[0];
@@ -2961,9 +3134,8 @@ gb_internal Selection lookup_field_from_index(Type *type, i64 index) {
isize max_count = 0;
switch (type->kind) {
case Type_Struct:
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
max_count = type->Struct.fields.count;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
break;
case Type_Tuple: max_count = type->Tuple.variables.count; break;
}
@@ -2974,8 +3146,7 @@ gb_internal Selection lookup_field_from_index(Type *type, i64 index) {
switch (type->kind) {
case Type_Struct: {
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
- defer (rw_mutex_shared_unlock(&type->Struct.fields_mutex));
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
for (isize i = 0; i < max_count; i++) {
Entity *f = type->Struct.fields[i];
if (f->kind == Entity_Variable) {
@@ -3031,7 +3202,7 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
mutex_lock(md->mutex);
defer (mutex_unlock(md->mutex));
for (TypeNameObjCMetadataEntry const &entry : md->type_entries) {
- GB_ASSERT(entry.entity->kind == Entity_Procedure);
+ GB_ASSERT(entry.entity->kind == Entity_Procedure || entry.entity->kind == Entity_ProcGroup);
if (entry.name == field_name) {
sel.entity = entry.entity;
sel.pseudo_field = true;
@@ -3040,9 +3211,8 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
}
if (type->kind == Type_Struct) {
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ // wait_signal_until_available(&type->Struct.fields_wait_signal);
isize field_count = type->Struct.fields.count;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
if (field_count != 0) for_array(i, type->Struct.fields) {
Entity *f = type->Struct.fields[i];
if (f->flags&EntityFlag_Using) {
@@ -3071,9 +3241,8 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
if (type->kind == Type_Struct) {
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ // wait_signal_until_available(&type->Struct.fields_wait_signal);
Scope *s = type->Struct.scope;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
if (s != nullptr) {
Entity *found = scope_lookup_current(s, field_name);
if (found != nullptr && found->kind != Entity_Variable) {
@@ -3121,9 +3290,12 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
}
- rw_mutex_shared_lock(&type->Struct.fields_mutex);
+ if (is_type_polymorphic(type)) {
+ // NOTE(bill): A polymorphic struct has no fields, this only hits in the case of an error
+ return sel;
+ }
+ wait_signal_until_available(&type->Struct.fields_wait_signal);
isize field_count = type->Struct.fields.count;
- rw_mutex_shared_unlock(&type->Struct.fields_mutex);
if (field_count != 0) for_array(i, type->Struct.fields) {
Entity *f = type->Struct.fields[i];
if (f->kind != Entity_Variable || (f->flags & EntityFlag_Field) == 0) {
@@ -3165,6 +3337,21 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
else if (field_name == "a") mapped_field_name = str_lit("w");
return lookup_field_with_selection(type, mapped_field_name, is_type, sel, allow_blank_ident);
}
+ } else if (type->kind == Type_BitField) {
+ for_array(i, type->BitField.fields) {
+ Entity *f = type->BitField.fields[i];
+ if (f->kind != Entity_Variable || (f->flags & EntityFlag_Field) == 0) {
+ continue;
+ }
+ String str = f->token.string;
+ if (field_name == str) {
+ selection_add_index(&sel, i); // HACK(bill): Leaky memory
+ sel.entity = f;
+ sel.is_bit_field = true;
+ return sel;
+ }
+ }
+
} else if (type->kind == Type_Basic) {
switch (type->Basic.kind) {
case Basic_any: {
@@ -3305,31 +3492,6 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
}
return sel;
- } else if (type->kind == Type_Array) {
- if (type->Array.count <= 4) {
- // HACK(bill): Memory leak
- switch (type->Array.count) {
- #define _ARRAY_FIELD_CASE_IF(_length, _name) \
- if (field_name == (_name)) { \
- selection_add_index(&sel, (_length)-1); \
- sel.entity = alloc_entity_array_elem(nullptr, make_token_ident(str_lit(_name)), type->Array.elem, (_length)-1); \
- return sel; \
- }
- #define _ARRAY_FIELD_CASE(_length, _name0, _name1) \
- case (_length): \
- _ARRAY_FIELD_CASE_IF(_length, _name0); \
- _ARRAY_FIELD_CASE_IF(_length, _name1); \
- /*fallthrough*/
-
- _ARRAY_FIELD_CASE(4, "w", "a");
- _ARRAY_FIELD_CASE(3, "z", "b");
- _ARRAY_FIELD_CASE(2, "y", "g");
- _ARRAY_FIELD_CASE(1, "x", "r");
- default: break;
-
- #undef _ARRAY_FIELD_CASE
- }
- }
} else if (type->kind == Type_DynamicArray) {
GB_ASSERT(t_allocator != nullptr);
String allocator_str = str_lit("allocator");
@@ -3350,7 +3512,53 @@ gb_internal Selection lookup_field_with_selection(Type *type_, String field_name
sel.entity = entity__allocator;
return sel;
}
+
+
+#define _ARRAY_FIELD_CASE_IF(_length, _name) \
+ if (field_name == (_name)) { \
+ selection_add_index(&sel, (_length)-1); \
+ sel.entity = alloc_entity_array_elem(nullptr, make_token_ident(str_lit(_name)), elem, (_length)-1); \
+ return sel; \
}
+#define _ARRAY_FIELD_CASE(_length, _name0, _name1) \
+case (_length): \
+ _ARRAY_FIELD_CASE_IF(_length, _name0); \
+ _ARRAY_FIELD_CASE_IF(_length, _name1); \
+ /*fallthrough*/
+
+
+ } else if (type->kind == Type_Array) {
+
+ Type *elem = type->Array.elem;
+
+ if (type->Array.count <= 4) {
+ // HACK(bill): Memory leak
+ switch (type->Array.count) {
+
+ _ARRAY_FIELD_CASE(4, "w", "a");
+ _ARRAY_FIELD_CASE(3, "z", "b");
+ _ARRAY_FIELD_CASE(2, "y", "g");
+ _ARRAY_FIELD_CASE(1, "x", "r");
+ default: break;
+ }
+ }
+ } else if (type->kind == Type_SimdVector) {
+
+ Type *elem = type->SimdVector.elem;
+ if (type->SimdVector.count <= 4) {
+ // HACK(bill): Memory leak
+ switch (type->SimdVector.count) {
+ _ARRAY_FIELD_CASE(4, "w", "a");
+ _ARRAY_FIELD_CASE(3, "z", "b");
+ _ARRAY_FIELD_CASE(2, "y", "g");
+ _ARRAY_FIELD_CASE(1, "x", "r");
+ default: break;
+ }
+ }
+ }
+
+#undef _ARRAY_FIELD_CASE_IF
+#undef _ARRAY_FIELD_CASE
return sel;
}
@@ -3414,8 +3622,6 @@ gb_internal Slice<i32> struct_fields_index_by_increasing_offset(gbAllocator allo
-gb_internal i64 type_size_of_internal (Type *t, TypePath *path);
-gb_internal i64 type_align_of_internal(Type *t, TypePath *path);
gb_internal i64 type_size_of(Type *t);
gb_internal i64 type_align_of(Type *t);
@@ -3565,6 +3771,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
case Type_Slice:
return build_context.int_size;
+ case Type_BitField:
+ return type_align_of_internal(t->BitField.backing_type, path);
case Type_Tuple: {
i64 max = 1;
@@ -3615,6 +3823,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
return 1;
}
+ type_set_offsets(t);
+
i64 max = 1;
for_array(i, t->Struct.fields) {
Type *field_type = t->Struct.fields[i]->type;
@@ -3628,6 +3838,14 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
max = align;
}
}
+
+ if (t->Struct.custom_min_field_align > 0) {
+ max = gb_max(max, t->Struct.custom_min_field_align);
+ }
+ if (t->Struct.custom_max_field_align != 0 &&
+ t->Struct.custom_max_field_align > t->Struct.custom_min_field_align) {
+ max = gb_min(max, t->Struct.custom_max_field_align);
+ }
return max;
} break;
@@ -3652,11 +3870,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
case Type_Matrix:
return matrix_align_of(t, path);
- case Type_RelativePointer:
- return type_align_of_internal(t->RelativePointer.base_integer, path);
- case Type_RelativeMultiPointer:
- return type_align_of_internal(t->RelativeMultiPointer.base_integer, path);
-
case Type_SoaPointer:
return build_context.int_size;
}
@@ -3666,10 +3879,19 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
return gb_clamp(next_pow2(type_size_of_internal(t, path)), 1, build_context.max_align);
}
-gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_packed, bool is_raw_union) {
+gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_packed, bool is_raw_union, i64 min_field_align, i64 max_field_align) {
gbAllocator a = permanent_allocator();
auto offsets = gb_alloc_array(a, i64, fields.count);
i64 curr_offset = 0;
+
+ if (min_field_align == 0) {
+ min_field_align = 1;
+ }
+
+ TypePath path{};
+ type_path_init(&path);
+ defer (type_path_free(&path));
+
if (is_raw_union) {
for_array(i, fields) {
offsets[i] = 0;
@@ -3679,7 +3901,7 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
if (fields[i]->kind != Entity_Variable) {
offsets[i] = -1;
} else {
- i64 size = type_size_of(fields[i]->type);
+ i64 size = type_size_of_internal(fields[i]->type, &path);
offsets[i] = curr_offset;
curr_offset += size;
}
@@ -3690,8 +3912,11 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
offsets[i] = -1;
} else {
Type *t = fields[i]->type;
- i64 align = gb_max(type_align_of(t), 1);
- i64 size = gb_max(type_size_of( t), 0);
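+			// Raise each field's alignment to the custom minimum field
+			// alignment, then cap it at the custom maximum when one is set.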
+ i64 align = gb_max(type_align_of_internal(t, &path), min_field_align);
+ if (max_field_align > min_field_align) {
+ align = gb_min(align, max_field_align);
+ }
+ i64 size = gb_max(type_size_of_internal(t, &path), 0);
curr_offset = align_formula(curr_offset, align);
offsets[i] = curr_offset;
curr_offset += size;
@@ -3707,7 +3932,7 @@ gb_internal bool type_set_offsets(Type *t) {
MUTEX_GUARD(&t->Struct.offset_mutex);
if (!t->Struct.are_offsets_set) {
t->Struct.are_offsets_being_processed = true;
- t->Struct.offsets = type_set_offsets_of(t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union);
+ t->Struct.offsets = type_set_offsets_of(t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union, t->Struct.custom_min_field_align, t->Struct.custom_max_field_align);
t->Struct.are_offsets_being_processed = false;
t->Struct.are_offsets_set = true;
return true;
@@ -3716,7 +3941,7 @@ gb_internal bool type_set_offsets(Type *t) {
MUTEX_GUARD(&t->Tuple.mutex);
if (!t->Tuple.are_offsets_set) {
t->Tuple.are_offsets_being_processed = true;
- t->Tuple.offsets = type_set_offsets_of(t->Tuple.variables, t->Tuple.is_packed, false);
+ t->Tuple.offsets = type_set_offsets_of(t->Tuple.variables, t->Tuple.is_packed, false, 1, 0);
t->Tuple.are_offsets_being_processed = false;
t->Tuple.are_offsets_set = true;
return true;
@@ -3932,13 +4157,15 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
case Type_Matrix: {
i64 stride_in_bytes = matrix_type_stride_in_bytes(t, path);
- return stride_in_bytes * t->Matrix.column_count;
+ if (t->Matrix.is_row_major) {
+ return stride_in_bytes * t->Matrix.row_count;
+ } else {
+ return stride_in_bytes * t->Matrix.column_count;
+ }
}
- case Type_RelativePointer:
- return type_size_of_internal(t->RelativePointer.base_integer, path);
- case Type_RelativeMultiPointer:
- return type_size_of_internal(t->RelativeMultiPointer.base_integer, path);
+ case Type_BitField:
+ return type_size_of_internal(t->BitField.backing_type, path);
}
// Catch all
@@ -4085,7 +4312,7 @@ gb_internal i64 type_offset_of_from_selection(Type *type, Selection sel) {
return offset;
}
-gb_internal isize check_is_assignable_to_using_subtype(Type *src, Type *dst, isize level = 0, bool src_is_ptr = false) {
+gb_internal isize check_is_assignable_to_using_subtype(Type *src, Type *dst, isize level = 0, bool src_is_ptr = false, bool allow_polymorphic=false) {
Type *prev_src = src;
src = type_deref(src);
if (!src_is_ptr) {
@@ -4097,11 +4324,21 @@ gb_internal isize check_is_assignable_to_using_subtype(Type *src, Type *dst, isi
return 0;
}
+ bool dst_is_polymorphic = is_type_polymorphic(dst);
+
for_array(i, src->Struct.fields) {
Entity *f = src->Struct.fields[i];
if (f->kind != Entity_Variable || (f->flags&EntityFlags_IsSubtype) == 0) {
continue;
}
+ if (allow_polymorphic && dst_is_polymorphic) {
+ Type *fb = base_type(type_deref(f->type));
+ if (fb->kind == Type_Struct) {
+ if (fb->Struct.polymorphic_parent == dst) {
+ return level+1;
+ }
+ }
+ }
if (are_types_identical(f->type, dst)) {
return level+1;
@@ -4111,7 +4348,7 @@ gb_internal isize check_is_assignable_to_using_subtype(Type *src, Type *dst, isi
return level+1;
}
}
- isize nested_level = check_is_assignable_to_using_subtype(f->type, dst, level+1, src_is_ptr);
+ isize nested_level = check_is_assignable_to_using_subtype(f->type, dst, level+1, src_is_ptr, allow_polymorphic);
if (nested_level > 0) {
return nested_level;
}
@@ -4127,6 +4364,13 @@ gb_internal bool is_type_subtype_of(Type *src, Type *dst) {
return 0 < check_is_assignable_to_using_subtype(src, dst, 0, is_type_pointer(src));
}
+gb_internal bool is_type_subtype_of_and_allow_polymorphic(Type *src, Type *dst) {
+ if (are_types_identical(src, dst)) {
+ return true;
+ }
+
+ return 0 < check_is_assignable_to_using_subtype(src, dst, 0, is_type_pointer(src), true);
+}
gb_internal bool has_type_got_objc_class_attribute(Type *t) {
@@ -4194,7 +4438,70 @@ gb_internal Type *alloc_type_proc_from_types(Type **param_types, unsigned param_
return t;
}
-
+// gb_internal Type *type_from_selection(Type *type, Selection const &sel) {
+// for (i32 index : sel.index) {
+// Type *bt = base_type(type_deref(type));
+// switch (bt->kind) {
+// case Type_Struct:
+// type = bt->Struct.fields[index]->type;
+// break;
+// case Type_Tuple:
+// type = bt->Tuple.variables[index]->type;
+// break;
+// case Type_BitField:
+// type = bt->BitField.fields[index]->type;
+// break;
+// case Type_Array:
+// type = bt->Array.elem;
+// break;
+// case Type_EnumeratedArray:
+// type = bt->Array.elem;
+// break;
+// case Type_Slice:
+// switch (index) {
+// case 0: type = alloc_type_multi_pointer(bt->Slice.elem); break;
+// case 1: type = t_int; break;
+// }
+// break;
+// case Type_DynamicArray:
+// switch (index) {
+// case 0: type = alloc_type_multi_pointer(bt->DynamicArray.elem); break;
+// case 1: type = t_int; break;
+// case 2: type = t_int; break;
+// case 3: type = t_allocator; break;
+// }
+// break;
+// case Type_Map:
+// switch (index) {
+// case 0: type = t_uintptr; break;
+// case 1: type = t_int; break;
+// case 2: type = t_allocator; break;
+// }
+// break;
+// case Type_Basic:
+// if (is_type_complex_or_quaternion(bt)) {
+// type = base_complex_elem_type(bt);
+// } else {
+// switch (type->Basic.kind) {
+// case Basic_any:
+// switch (index) {
+// case 0: type = t_rawptr; break;
+// case 1: type = t_typeid; break;
+// }
+// break;
+// case Basic_string:
+// switch (index) {
+// case 0: type = t_u8_multi_ptr; break;
+// case 1: type = t_int; break;
+// }
+// break;
+// }
+// }
+// break;
+// }
+// }
+// return type;
+// }
gb_internal gbString write_type_to_string(gbString str, Type *type, bool shorthand=false) {
if (type == nullptr) {
@@ -4484,24 +4791,31 @@ gb_internal gbString write_type_to_string(gbString str, Type *type, bool shortha
str = gb_string_append_fmt(str, "#simd[%d]", cast(int)type->SimdVector.count);
str = write_type_to_string(str, type->SimdVector.elem);
break;
-
- case Type_RelativePointer:
- str = gb_string_append_fmt(str, "#relative(");
- str = write_type_to_string(str, type->RelativePointer.base_integer);
- str = gb_string_append_fmt(str, ") ");
- str = write_type_to_string(str, type->RelativePointer.pointer_type);
- break;
- case Type_RelativeMultiPointer:
- str = gb_string_append_fmt(str, "#relative(");
- str = write_type_to_string(str, type->RelativePointer.base_integer);
- str = gb_string_append_fmt(str, ") ");
- str = write_type_to_string(str, type->RelativePointer.pointer_type);
- break;
case Type_Matrix:
+ if (type->Matrix.is_row_major) {
+ str = gb_string_appendc(str, "#row_major ");
+ }
str = gb_string_appendc(str, gb_bprintf("matrix[%d, %d]", cast(int)type->Matrix.row_count, cast(int)type->Matrix.column_count));
str = write_type_to_string(str, type->Matrix.elem);
break;
+
+ case Type_BitField:
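+		// Prints e.g. `bit_field u16 { x: u8 | 3, flag: bool | 1 }`.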
+ str = gb_string_appendc(str, "bit_field ");
+ str = write_type_to_string(str, type->BitField.backing_type);
+ str = gb_string_appendc(str, " {");
+ for (isize i = 0; i < type->BitField.fields.count; i++) {
+ Entity *f = type->BitField.fields[i];
+ if (i > 0) {
+ str = gb_string_appendc(str, ", ");
+ }
+ str = gb_string_append_length(str, f->token.string.text, f->token.string.len);
+ str = gb_string_appendc(str, ": ");
+ str = write_type_to_string(str, f->type);
+ str = gb_string_append_fmt(str, " | %u", type->BitField.bit_sizes[i]);
+ }
+ str = gb_string_appendc(str, " }");
+ break;
}
return str;