author     gingerBill <bill@gingerbill.org>   2021-10-02 17:22:56 +0100
committer  gingerBill <bill@gingerbill.org>   2021-10-02 17:22:56 +0100
commit     00671a59a038c5fc9d4b2af02ca2194092b00778
tree       c5e0689afb44fc3444a1666686d7fef4d7c03aba /src
parent     ebca0398a7ec76374322001e697a0aaa531eb22f
Minor code cleanup for backend; add `struct_fields_index_by_increasing_offset` for future use
Diffstat (limited to 'src')
-rw-r--r--  src/array.cpp                 | 123
-rw-r--r--  src/check_expr.cpp            |   4
-rw-r--r--  src/check_type.cpp            |   3
-rw-r--r--  src/llvm_backend.hpp          |   2
-rw-r--r--  src/llvm_backend_const.cpp    |  10
-rw-r--r--  src/llvm_backend_general.cpp  |  18
-rw-r--r--  src/llvm_backend_type.cpp     |   2
-rw-r--r--  src/llvm_backend_utility.cpp  |   2
-rw-r--r--  src/types.cpp                 |  48
9 files changed, 99 insertions, 113 deletions
diff --git a/src/array.cpp b/src/array.cpp
index 521fa91e2..c41125c6d 100644
--- a/src/array.cpp
+++ b/src/array.cpp
@@ -1,7 +1,6 @@
#define ARRAY_GROW_FORMULA(x) (gb_max(((x)+1)*3 >> 1, 8))
GB_STATIC_ASSERT(ARRAY_GROW_FORMULA(0) > 0);
-#if 1
template <typename T>
struct Array {
gbAllocator allocator;
@@ -418,98 +417,36 @@ void array_unordered_remove(Array<T> *array, isize index) {
-
-#endif
-
-#if 0
-#define Array(Type_) struct { \
- gbAllocator const &allocator; \
- Type_ * e; \
- isize count; \
- isize capacity; \
+template <typename T>
+T *begin(Array<T> &array) {
+ return array.data;
}
-
-typedef Array(void) ArrayVoid;
-
-#define array_init_reserve(x_, allocator_, init_capacity_) do { \
- void **e = cast(void **)&((x_)->e); \
- GB_ASSERT((x_) != nullptr); \
- (x_)->allocator = (allocator_); \
- (x_)->count = 0; \
- (x_)->capacity = (init_capacity_); \
- *e = gb_alloc((allocator_), gb_size_of(*(x_)->e)*(init_capacity_)); \
-} while (0)
-
-#define array_init_count(x_, allocator_, init_count_) do { \
- void **e = cast(void **)&((x_)->e); \
- GB_ASSERT((x_) != nullptr); \
- (x_)->allocator = (allocator_); \
- (x_)->count = (init_count_); \
- (x_)->capacity = (init_count_); \
- *e = gb_alloc((allocator_), gb_size_of(*(x_)->e)*(init_count_)); \
-} while (0)
-
-#define array_init(x_, allocator_) do { array_init_reserve(x_, allocator_, ARRAY_GROW_FORMULA(0)); } while (0)
-#define array_free(x_) do { gb_free((x_)->allocator, (x_)->e); } while (0)
-#define array_set_capacity(x_, capacity_) do { array__set_capacity((x_), (capacity_), gb_size_of(*(x_)->e)); } while (0)
-
-#define array_grow(x_, min_capacity_) do { \
- isize new_capacity = ARRAY_GROW_FORMULA((x_)->capacity); \
- if (new_capacity < (min_capacity_)) { \
- new_capacity = (min_capacity_); \
- } \
- array_set_capacity(x_, new_capacity); \
-} while (0)
-
-#define array_add(x_, item_) do { \
- if ((x_)->capacity < (x_)->count+1) { \
- array_grow(x_, 0); \
- } \
- (x_)->e[(x_)->count++] = item_; \
-} while (0)
-
-#define array_pop(x_) do { GB_ASSERT((x_)->count > 0); (x_)->count--; } while (0)
-#define array_clear(x_) do { (x_)->count = 0; } while (0)
-
-#define array_resize(x_, new_count_) do { \
- if ((x_)->capacity < (new_count_)) { \
- array_grow((x_), (new_count_)); \
- } \
- (x_)->count = (new_count_); \
-} while (0)
-
-#define array_reserve(x_, new_capacity_) do { \
- if ((x_)->capacity < (new_capacity_)) { \
- array_set_capacity((x_), (new_capacity_)); \
- } \
-} while (0)
-
-
-
-
-void array__set_capacity(void *ptr, isize capacity, isize element_size) {
- ArrayVoid *x = cast(ArrayVoid *)ptr;
- GB_ASSERT(ptr != nullptr);
-
- GB_ASSERT(element_size > 0);
-
- if (capacity == x->capacity) {
- return;
- }
-
- if (capacity < x->count) {
- if (x->capacity < capacity) {
- isize new_capacity = ARRAY_GROW_FORMULA(x->capacity);
- if (new_capacity < capacity) {
- new_capacity = capacity;
- }
- array__set_capacity(ptr, new_capacity, element_size);
- }
- x->count = capacity;
- }
-
- x->e = gb_resize(x->allocator, x->e, element_size*x->capacity, element_size*capacity);
- x->capacity = capacity;
+template <typename T>
+T const *begin(Array<T> const &array) {
+ return array.data;
+}
+template <typename T>
+T *end(Array<T> &array) {
+ return array.data + array.count;
+}
+template <typename T>
+T const *end(Array<T> const &array) {
+ return array.data + array.count;
}
-#endif
+template <typename T>
+T *begin(Slice<T> &array) {
+ return array.data;
+}
+template <typename T>
+T const *begin(Slice<T> const &array) {
+ return array.data;
+}
+template <typename T>
+T *end(Slice<T> &array) {
+ return array.data + array.count;
+}
+template <typename T>
+T const *end(Slice<T> const &array) {
+ return array.data + array.count;
+}
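
The array.cpp change above drops the long-dead macro-based Array implementation and adds free begin/end overloads for Array<T> and Slice<T>, which is what lets the later hunks iterate with range-based for loops. A minimal standalone sketch of the same pattern, using a hypothetical MiniArray type rather than the compiler's real Array<T>:

    // Standalone illustration (simplified stand-in type, not the compiler's Array<T>):
    // free begin()/end() overloads returning raw pointers are all a range-based for
    // loop needs, which is what enables the `for (Entity *f : t->Struct.fields)`
    // style used in the check_expr.cpp and check_type.cpp hunks below.
    #include <cstdio>

    template <typename T>
    struct MiniArray {
        T    *data;
        long  count;
    };

    template <typename T> T       *begin(MiniArray<T> &a)       { return a.data; }
    template <typename T> T const *begin(MiniArray<T> const &a) { return a.data; }
    template <typename T> T       *end  (MiniArray<T> &a)       { return a.data + a.count; }
    template <typename T> T const *end  (MiniArray<T> const &a) { return a.data + a.count; }

    int main() {
        int backing[3] = {10, 20, 30};
        MiniArray<int> a = {backing, 3};
        for (int x : a) {            // begin/end found via argument-dependent lookup
            std::printf("%d\n", x);
        }
        return 0;
    }
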
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index b8c600326..fb7a98388 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -2017,8 +2017,8 @@ void add_comparison_procedures_for_fields(CheckerContext *c, Type *t) {
}
break;
case Type_Struct:
- for_array(i, t->Struct.fields) {
- add_comparison_procedures_for_fields(c, t->Struct.fields[i]->type);
+ for (Entity *field : t->Struct.fields) {
+ add_comparison_procedures_for_fields(c, field->type);
}
break;
}
diff --git a/src/check_type.cpp b/src/check_type.cpp
index b7bcfdc13..75fa503e5 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -39,8 +39,7 @@ void populate_using_entity_scope(CheckerContext *ctx, Ast *node, AstField *field
}
if (t->kind == Type_Struct) {
- for_array(i, t->Struct.fields) {
- Entity *f = t->Struct.fields[i];
+ for (Entity *f : t->Struct.fields) {
GB_ASSERT(f->kind == Entity_Variable);
String name = f->token.string;
Entity *e = scope_lookup_current(ctx->scope, name);
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index fbb577383..ffb81f0e4 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -436,7 +436,7 @@ lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, Ast *left, Ast
lbValue lb_build_cond(lbProcedure *p, Ast *cond, lbBlock *true_block, lbBlock *false_block);
LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values, isize value_count_);
-LLVMValueRef llvm_const_named_struct(LLVMTypeRef t, LLVMValueRef *values, isize value_count_);
+LLVMValueRef llvm_const_named_struct_internal(LLVMTypeRef t, LLVMValueRef *values, isize value_count_);
void lb_set_entity_from_other_modules_linkage_correctly(lbModule *other_module, Entity *e, String const &name);
lbValue lb_expr_untyped_const_to_typed(lbModule *m, Ast *expr, Type *t);
diff --git a/src/llvm_backend_const.cpp b/src/llvm_backend_const.cpp
index 2797f7317..0cdf28475 100644
--- a/src/llvm_backend_const.cpp
+++ b/src/llvm_backend_const.cpp
@@ -132,7 +132,7 @@ LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values,
unsigned value_count = cast(unsigned)value_count_;
unsigned elem_count = LLVMCountStructElementTypes(struct_type);
if (elem_count == value_count) {
- return llvm_const_named_struct(struct_type, values, value_count_);
+ return llvm_const_named_struct_internal(struct_type, values, value_count_);
}
Type *bt = base_type(t);
GB_ASSERT(bt->kind == Type_Struct);
@@ -152,10 +152,10 @@ LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValueRef *values,
}
}
- return llvm_const_named_struct(struct_type, values_with_padding, values_with_padding_count);
+ return llvm_const_named_struct_internal(struct_type, values_with_padding, values_with_padding_count);
}
-LLVMValueRef llvm_const_named_struct(LLVMTypeRef t, LLVMValueRef *values, isize value_count_) {
+LLVMValueRef llvm_const_named_struct_internal(LLVMTypeRef t, LLVMValueRef *values, isize value_count_) {
unsigned value_count = cast(unsigned)value_count_;
unsigned elem_count = LLVMCountStructElementTypes(t);
GB_ASSERT_MSG(value_count == elem_count, "%s %u %u", LLVMPrintTypeToString(t), value_count, elem_count);
@@ -895,7 +895,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
}
if (is_constant) {
- res.value = llvm_const_named_struct(struct_type, values, cast(unsigned)value_count);
+ res.value = llvm_const_named_struct_internal(struct_type, values, cast(unsigned)value_count);
return res;
} else {
// TODO(bill): THIS IS HACK BUT IT WORKS FOR WHAT I NEED
@@ -909,7 +909,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
new_values[i] = LLVMConstNull(LLVMTypeOf(old_value));
}
}
- LLVMValueRef constant_value = llvm_const_named_struct(struct_type, new_values, cast(unsigned)value_count);
+ LLVMValueRef constant_value = llvm_const_named_struct_internal(struct_type, new_values, cast(unsigned)value_count);
GB_ASSERT(is_local);
lbProcedure *p = m->curr_procedure;
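
The renames above split the const-struct helpers into a module-aware llvm_const_named_struct and a raw llvm_const_named_struct_internal that expects an exact element count. A standalone sketch of that split, with hypothetical stand-in types instead of lbModule/LLVMTypeRef: the wrapper scatters the logical field values into their physical slots and zero-fills padding slots before delegating to the low-level constructor.

    // Standalone sketch (hypothetical simplified types, not the compiler's real API)
    // of a module-aware wrapper padding values out to the full element count before
    // delegating to a low-level "_internal" constructor.
    #include <cstdio>
    #include <vector>

    struct RawStructType { int elem_count; };  // stand-in for an LLVMTypeRef struct type
    struct TypeInfo {                          // stand-in for the (lbModule, Type) pair
        RawStructType raw;
        std::vector<int> value_slot;           // logical field index -> physical element index
    };

    // Low-level constructor: expects exactly raw.elem_count values, padding included.
    static std::vector<int> const_named_struct_internal(RawStructType t, std::vector<int> values) {
        if ((int)values.size() != t.elem_count) {
            std::printf("element count mismatch\n");
        }
        return values;
    }

    // Module-aware constructor: scatters the logical values into their physical
    // slots, zero-fills the padding slots, then delegates to the low-level one.
    static std::vector<int> const_named_struct(const TypeInfo &ti, const std::vector<int> &values) {
        std::vector<int> padded(ti.raw.elem_count, 0);
        for (int i = 0; i < (int)values.size(); i++) {
            padded[ti.value_slot[i]] = values[i];
        }
        return const_named_struct_internal(ti.raw, padded);
    }

    int main() {
        TypeInfo ti = {{4}, {0, 2, 3}};        // 3 logical fields, physical slot 1 is padding
        std::vector<int> v = const_named_struct(ti, {10, 20, 30});
        for (int x : v) std::printf("%d ", x); // prints: 10 0 20 30
        std::printf("\n");
        return 0;
    }
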
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index fcdc1d979..61f256066 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -1700,18 +1700,18 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
auto fields = array_make<LLVMTypeRef>(temporary_allocator(), 0, type->Struct.fields.count*2 + 1);
i64 padding_offset = 0;
- for_array(i, type->Struct.fields) {
- GB_ASSERT(type->Struct.offsets != nullptr);
-
- Entity *field = type->Struct.fields[i];
- i64 padding = type->Struct.offsets[i] - padding_offset;
+ // auto field_indices = struct_fields_index_by_increasing_offset(type, temporary_allocator());
+ // for (i32 field_index : field_indices) {
+ for (isize field_index = 0; field_index < type->Struct.fields.count; field_index++) {
+ Entity *field = type->Struct.fields[field_index];
+ i64 padding = type->Struct.offsets[field_index] - padding_offset;
if (padding != 0) {
LLVMTypeRef padding_type = lb_type_padding_filler(m, padding, type_align_of(field->type));
array_add(&fields, padding_type);
}
- field_remapping[i] = cast(i32)fields.count;
+ field_remapping[field_index] = cast(i32)fields.count;
array_add(&fields, lb_type(m, field->type));
if (!type->Struct.is_packed) {
@@ -1720,7 +1720,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
padding_offset += type_size_of(field->type);
}
- i64 end_padding = type_size_of(type)-padding_offset;
+ i64 full_type_size = type_size_of(type);
+ i64 end_padding = full_type_size-padding_offset;
if (end_padding > 0) {
array_add(&fields, lb_type_padding_filler(m, end_padding, 1));
}
@@ -1731,7 +1732,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
LLVMTypeRef struct_type = LLVMStructTypeInContext(ctx, fields.data, cast(unsigned)fields.count, type->Struct.is_packed);
map_set(&m->struct_field_remapping, hash_pointer(struct_type), field_remapping);
- map_set(&m->struct_field_remapping, hash_pointer(type), field_remapping);
+ map_set(&m->struct_field_remapping, hash_pointer(type), field_remapping);
+ GB_ASSERT(lb_sizeof(struct_type) == full_type_size);
return struct_type;
}
break;
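
The struct-lowering hunk above emits an explicit padding filler in front of any field whose offset is ahead of the running cursor, pads the tail, and now asserts that the emitted LLVM struct matches type_size_of(type). A standalone sketch of that bookkeeping with made-up offsets and sizes (not taken from the compiler, and simplified by advancing the cursor to offset + size):

    // Walk fields in layout order, emitting padding wherever the next field's
    // offset is past the running cursor, then pad the tail so the explicit
    // layout adds up to the full type size.
    #include <cstdio>
    #include <vector>

    struct Field { long offset, size; };

    int main() {
        // e.g. { u8 a; i32 b; u16 c; } with natural alignment: total size 12
        std::vector<Field> fields = {{0, 1}, {4, 4}, {8, 2}};
        long full_type_size = 12;

        long padding_offset = 0;   // running cursor through the physical layout
        long emitted_size   = 0;   // what the explicit-layout struct adds up to
        for (const Field &f : fields) {
            long padding = f.offset - padding_offset;
            if (padding != 0) {
                std::printf("padding filler: %ld bytes\n", padding);
                emitted_size += padding;
            }
            std::printf("field:          %ld bytes at offset %ld\n", f.size, f.offset);
            emitted_size   += f.size;
            padding_offset  = f.offset + f.size;
        }
        long end_padding = full_type_size - padding_offset;
        if (end_padding > 0) {
            std::printf("tail padding:   %ld bytes\n", end_padding);
            emitted_size += end_padding;
        }
        // mirrors the new GB_ASSERT(lb_sizeof(struct_type) == full_type_size)
        std::printf("emitted %ld of %ld bytes\n", emitted_size, full_type_size);
        return 0;
    }
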
diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp
index d13d3b04d..e90bb6f16 100644
--- a/src/llvm_backend_type.cpp
+++ b/src/llvm_backend_type.cpp
@@ -171,7 +171,7 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
LLVMConstInBoundsGEP(lb_global_type_info_data_ptr(m).value, indices, gb_count_of(indices)),
LLVMConstInt(lb_type(m, t_int), type->Array.count, true),
};
- LLVMValueRef slice = llvm_const_named_struct(llvm_addr_type(global_type_table), values, gb_count_of(values));
+ LLVMValueRef slice = llvm_const_named_struct_internal(llvm_addr_type(global_type_table), values, gb_count_of(values));
LLVMSetInitializer(global_type_table.value, slice);
}
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 8e4e2008a..0531c62bb 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -139,7 +139,7 @@ lbValue lb_emit_string(lbProcedure *p, lbValue str_elem, lbValue str_len) {
};
lbValue res = {};
res.type = t_string;
- res.value = llvm_const_named_struct(lb_type(p->module, t_string), values, gb_count_of(values));
+ res.value = llvm_const_named_struct(p->module, t_string, values, gb_count_of(values));
return res;
} else {
lbAddr res = lb_add_local_generated(p, t_string, false);
diff --git a/src/types.cpp b/src/types.cpp
index 97552240f..6d800dea7 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -679,6 +679,7 @@ bool are_types_identical(Type *x, Type *y);
bool is_type_pointer(Type *t);
bool is_type_slice(Type *t);
bool is_type_integer(Type *t);
+bool type_set_offsets(Type *t);
void init_type_mutex(void) {
mutex_init(&g_type_mutex);
@@ -2758,6 +2759,53 @@ Selection lookup_field_with_selection(Type *type_, String field_name, bool is_ty
return sel;
}
+GB_COMPARE_PROC(struct_field_cmp_by_offset) {
+ i64 x = *(i64 const *)(a);
+ i64 y = *(i64 const *)(b);
+ if (x < y) {
+ return -1;
+ } else if (x > y) {
+ return +1;
+ }
+ return 0;
+}
+
+
+Slice<i32> struct_fields_index_by_increasing_offset(Type *type, gbAllocator allocator) {
+ type = base_type(type);
+ GB_ASSERT(type->kind == Type_Struct);
+ type_set_offsets(type);
+ GB_ASSERT(type->Struct.offsets != nullptr);
+ auto indices = slice_make<i32>(allocator, type->Struct.fields.count);
+
+ i64 prev_offset = 0;
+ bool is_ordered = true;
+ for_array(i, indices) {
+ indices.data[i] = cast(i32)i;
+ i64 offset = type->Struct.offsets[i];
+ if (is_ordered && prev_offset > offset) {
+ is_ordered = false;
+ }
+ prev_offset = offset;
+ }
+ if (!is_ordered) {
+ isize n = indices.count;
+ for (isize i = 0; i < n-1; i++) {
+ for (isize j = 0; j < n-i-1; j++) {
+ isize a = j;
+ isize b = j+1;
+ if (type->Struct.offsets[a] > type->Struct.offsets[b]) {
+ gb_swap(i32, indices[a], indices[b]);
+ }
+ }
+ }
+ }
+
+ return indices;
+}
+
+
+
// IMPORTANT TODO(bill): SHould this TypePath code be removed since type cycle checking is handled much earlier on?
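
The new struct_fields_index_by_increasing_offset returns a permutation of field indices ordered by byte offset; the commented-out lines in the llvm_backend_general.cpp hunk (auto field_indices = struct_fields_index_by_increasing_offset(type, temporary_allocator()); for (i32 field_index : field_indices) { ... }) show the intended call pattern. A standalone sketch of the same idea with hypothetical offsets, using std::stable_sort in place of the hand-rolled sort in the hunk:

    // Build a permutation of field indices ordered by each field's byte offset,
    // so callers can walk fields in memory-layout order even when that differs
    // from source order (hypothetical offsets, not the compiler's Type/Slice types).
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        // source index i has byte offset offsets[i]; deliberately not increasing
        std::vector<int64_t> offsets = {8, 0, 12, 4};

        std::vector<int32_t> indices(offsets.size());
        for (int32_t i = 0; i < (int32_t)indices.size(); i++) {
            indices[i] = i;
        }
        // stable sort keeps source order for fields that share an offset
        std::stable_sort(indices.begin(), indices.end(),
                         [&](int32_t a, int32_t b) { return offsets[a] < offsets[b]; });

        for (int32_t idx : indices) {
            std::printf("field %d at offset %lld\n", idx, (long long)offsets[idx]);
        }
        return 0;
    }
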