author    Ginger Bill <bill@gingerbill.org>    2017-07-05 13:51:25 +0100
committer Ginger Bill <bill@gingerbill.org>    2017-07-05 13:51:25 +0100
commit    eed873c6ec9ac1631fbf1285d4047596b353e9bf (patch)
tree      b24943bc111d45b84a4dd01a384fc917b87e6a57 /src
parent    3d2d46186751c67c4239479bcbe4908dff61ecd4 (diff)
Add `free` for maps (a previous oversight)
Diffstat (limited to 'src')
-rw-r--r--  src/checker.cpp   15
-rw-r--r--  src/common.cpp   166
-rw-r--r--  src/ir.cpp        19
3 files changed, 133 insertions, 67 deletions
diff --git a/src/checker.cpp b/src/checker.cpp
index 149435ed2..c66f9a63e 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -305,9 +305,10 @@ struct Checker {
Array<DelayedDecl> delayed_foreign_libraries;
Array<CheckerFileNode> file_nodes;
+ Pool pool;
+ gbAllocator allocator;
gbArena arena;
gbArena tmp_arena;
- gbAllocator allocator;
gbAllocator tmp_allocator;
CheckerContext context;
@@ -773,11 +774,13 @@ void init_checker(Checker *c, Parser *parser) {
total_token_count += f->tokens.count;
}
isize arena_size = 2 * item_size * total_token_count;
- gb_arena_init_from_allocator(&c->arena, a, arena_size);
gb_arena_init_from_allocator(&c->tmp_arena, a, arena_size);
+ gb_arena_init_from_allocator(&c->arena, a, arena_size);
-
- c->allocator = gb_arena_allocator(&c->arena);
+ pool_init(&c->pool, gb_megabytes(4), gb_kilobytes(384));
+ // c->allocator = pool_allocator(&c->pool);
+ c->allocator = heap_allocator();
+ // c->allocator = gb_arena_allocator(&c->arena);
c->tmp_allocator = gb_arena_allocator(&c->tmp_arena);
c->global_scope = make_scope(universal_scope, c->allocator);
@@ -793,7 +796,9 @@ void destroy_checker(Checker *c) {
array_free(&c->delayed_foreign_libraries);
array_free(&c->file_nodes);
- gb_arena_free(&c->arena);
+ pool_destroy(&c->pool);
+ gb_arena_free(&c->tmp_arena);
+ // gb_arena_free(&c->arena);
}
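
For context, the checker now owns a Pool (set up in init_checker, torn down in destroy_checker), while c->allocator itself is routed through heap_allocator() for now, with the pool-backed variant left commented out. A minimal usage sketch of that lifecycle, assuming the pool_init/pool_allocator/pool_destroy helpers introduced in common.cpp below (illustrative only, not code from the patch):

    Pool pool;
    pool_init(&pool, gb_megabytes(4), gb_kilobytes(384)); // block size, out-of-band threshold
    gbAllocator a = pool_allocator(&pool);

    void *mem = gb_alloc(a, 128); // carved out of the current 4 MiB block
    gb_free(a, mem);              // individual frees are no-ops for this allocator

    pool_destroy(&pool);          // releases every block and out-of-band allocation at once
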
diff --git a/src/common.cpp b/src/common.cpp
index 40386375d..54d80f1e7 100644
--- a/src/common.cpp
+++ b/src/common.cpp
@@ -23,6 +23,9 @@ gbAllocator heap_allocator(void) {
#include "integer128.cpp"
#include "murmurhash3.cpp"
+#define for_array(index_, array_) for (isize index_ = 0; index_ < (array_).count; index_++)
+
+
u128 fnv128a(void const *data, isize len) {
u128 o = u128_lo_hi(0x13bull, 0x1000000ull);
u128 h = u128_lo_hi(0x62b821756295c58dull, 0x6c62272e07bb0142ull);
@@ -52,87 +55,145 @@ gbAllocator scratch_allocator(void) {
return gb_scratch_allocator(&scratch_memory);
}
+struct Pool {
+ isize memblock_size;
+ isize out_of_band_size;
+ isize alignment;
-struct DynamicArenaBlock {
- DynamicArenaBlock *prev;
- DynamicArenaBlock *next;
- u8 * start;
- isize count;
- isize capacity;
+ Array<u8 *> unused_memblock;
+ Array<u8 *> used_memblock;
+ Array<u8 *> out_of_band_allocations;
- gbVirtualMemory vm;
-};
+ u8 * current_memblock;
+ u8 * current_pos;
+ isize bytes_left;
-struct DynamicArena {
- DynamicArenaBlock *start_block;
- DynamicArenaBlock *current_block;
- isize block_size;
+ gbAllocator block_allocator;
};
-DynamicArenaBlock *add_dynamic_arena_block(DynamicArena *a) {
- GB_ASSERT(a != NULL);
- GB_ASSERT(a->block_size > 0);
+enum {
+ POOL_BUCKET_SIZE_DEFAULT = 65536,
+ POOL_OUT_OF_BAND_SIZE_DEFAULT = 6554,
+};
- gbVirtualMemory vm = gb_vm_alloc(NULL, a->block_size);
- DynamicArenaBlock *block = cast(DynamicArenaBlock *)vm.data;
+void pool_init(Pool *pool,
+ isize memblock_size = POOL_BUCKET_SIZE_DEFAULT,
+ isize out_of_band_size = POOL_OUT_OF_BAND_SIZE_DEFAULT,
+ isize alignment = 8,
+ gbAllocator block_allocator = heap_allocator(),
+ gbAllocator array_allocator = heap_allocator()) {
+ pool->memblock_size = memblock_size;
+ pool->out_of_band_size = out_of_band_size;
+ pool->alignment = alignment;
+ pool->block_allocator = block_allocator;
+
+ array_init(&pool->unused_memblock, array_allocator);
+ array_init(&pool->used_memblock, array_allocator);
+ array_init(&pool->out_of_band_allocations, array_allocator);
+}
- u8 *start = cast(u8 *)gb_align_forward(cast(u8 *)(block + 1), GB_DEFAULT_MEMORY_ALIGNMENT);
- u8 *end = cast(u8 *)vm.data + vm.size;
+void pool_free_all(Pool *p) {
+ if (p->current_memblock != NULL) {
+ array_add(&p->unused_memblock, p->current_memblock);
+ p->current_memblock = NULL;
+ }
- block->vm = vm;
- block->start = start;
- block->count = 0;
- block->capacity = end-start;
+ for_array(i, p->used_memblock) {
+ array_add(&p->unused_memblock, p->used_memblock[i]);
+ }
+ array_clear(&p->used_memblock);
- if (a->current_block != NULL) {
- a->current_block->next = block;
- block->prev = a->current_block;
+ for_array(i, p->out_of_band_allocations) {
+ gb_free(p->block_allocator, p->out_of_band_allocations[i]);
}
- a->current_block = block;
- return block;
+ array_clear(&p->out_of_band_allocations);
}
-void init_dynamic_arena(DynamicArena *a, isize block_size) {
- isize size = gb_size_of(DynamicArenaBlock) + block_size;
- size = cast(isize)gb_align_forward(cast(void *)cast(uintptr)size, GB_DEFAULT_MEMORY_ALIGNMENT);
- a->block_size = size;
- a->start_block = add_dynamic_arena_block(a);
+void pool_destroy(Pool *p) {
+ pool_free_all(p);
+
+ for_array(i, p->unused_memblock) {
+ gb_free(p->block_allocator, p->unused_memblock[i]);
+ }
}
-void destroy_dynamic_arena(DynamicArena *a) {
- DynamicArenaBlock *b = a->current_block;
- while (b != NULL) {
- gbVirtualMemory vm = b->vm;
- b = b->prev;
- gb_vm_free(b->vm);
+void pool_cycle_new_block(Pool *p) {
+ GB_ASSERT_MSG(p->block_allocator.proc != NULL,
+ "You must call pool_init on a Pool before using it!");
+
+ if (p->current_memblock != NULL) {
+ array_add(&p->used_memblock, p->current_memblock);
+ }
+
+ u8 *new_block = NULL;
+
+ if (p->unused_memblock.count > 0) {
+ new_block = array_pop(&p->unused_memblock);
+ } else {
+ GB_ASSERT(p->block_allocator.proc != NULL);
+ new_block = cast(u8 *)gb_alloc_align(p->block_allocator, p->memblock_size, p->alignment);
}
+
+ p->bytes_left = p->memblock_size;
+ p->current_memblock = new_block;
+ p->current_pos = new_block;
}
-GB_ALLOCATOR_PROC(dynamic_arena_allocator_proc) {
- DynamicArena *a = cast(DynamicArena *)allocator_data;
- void *ptr = NULL;
+void *pool_get(Pool *p,
+ isize size, isize alignment = 0) {
+ if (alignment <= 0) alignment = p->alignment;
+
+ isize extra = alignment - (size & (alignment - 1));
+ size += extra;
+ if (size >= p->out_of_band_size) {
+ GB_ASSERT(p->block_allocator.proc != NULL);
+ u8 *memory = cast(u8 *)gb_alloc_align(p->block_allocator, size, alignment);
+ if (memory != NULL) {
+ array_add(&p->out_of_band_allocations, memory);
+ }
+ return memory;
+ }
- switch (type) {
- case gbAllocation_Alloc: {
+ if (p->bytes_left < size) {
+ pool_cycle_new_block(p);
+ if (p->current_memblock == NULL) {
+ return NULL;
+ }
+ }
+
+ u8 *res = p->current_pos;
+ p->current_pos += size;
+ p->bytes_left -= size;
+ return res;
+}
- } break;
- case gbAllocation_Free: {
- } break;
+gbAllocator pool_allocator(Pool *pool);
- case gbAllocation_Resize: {
- } break;
+GB_ALLOCATOR_PROC(pool_allocator_procedure) {
+ Pool *p = cast(Pool *)allocator_data;
+ void *ptr = NULL;
+ switch (type) {
+ case gbAllocation_Alloc:
+ return pool_get(p, size, alignment);
+ case gbAllocation_Free:
+ // Does nothing
+ break;
case gbAllocation_FreeAll:
- GB_PANIC("free_all is not supported by this allocator");
+ pool_free_all(p);
break;
+ case gbAllocation_Resize:
+ return gb_default_resize_align(pool_allocator(p), old_memory, old_size, size, alignment);
}
return ptr;
}
-gbAllocator dynamic_arena_allocator(DynamicArena *a) {
- gbAllocator allocator = {dynamic_arena_allocator_proc, a};
+gbAllocator pool_allocator(Pool *pool) {
+ gbAllocator allocator;
+ allocator.proc = pool_allocator_procedure;
+ allocator.data = pool;
return allocator;
}
@@ -224,7 +285,6 @@ f64 gb_sqrt(f64 x) {
-#define for_array(index_, array_) for (isize index_ = 0; index_ < (array_).count; index_++)
// Doubly Linked Lists
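
The two paths through pool_get are easy to misread, so here is a hedged walk-through under the values used in init_checker above (4 MiB blocks, 384 KiB out-of-band threshold, 8-byte alignment); the numbers and the pool variable are illustrative, not taken from the patch:

    // Small request: rounded up to the pool alignment, then carved from the current block.
    //   size = 13, alignment = 8  ->  extra = 8 - (13 & 7) = 3  ->  16 bytes consumed
    void *small = pool_get(&pool, 13);

    // Large request (>= out_of_band_size): handed straight to block_allocator and
    // remembered in out_of_band_allocations so pool_free_all can release it later.
    void *large = pool_get(&pool, gb_kilobytes(512));

    // Freeing through pool_allocator(&pool) is a no-op; memory only comes back via
    // pool_free_all (recycles blocks for reuse) or pool_destroy (returns them to the heap).
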
diff --git a/src/ir.cpp b/src/ir.cpp
index 930132cce..fc4c97c9a 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -7123,20 +7123,21 @@ void ir_init_module(irModule *m, Checker *c) {
// TODO(bill): Determine a decent size for the arena
isize token_count = c->parser->total_token_count;
isize arena_size = 4 * token_count * gb_size_of(irValue);
- gb_arena_init_from_allocator(&m->arena, heap_allocator(), arena_size);
+ gb_arena_init_from_allocator(&m->arena, heap_allocator(), arena_size);
gb_arena_init_from_allocator(&m->tmp_arena, heap_allocator(), arena_size);
- m->allocator = gb_arena_allocator(&m->arena);
+ // m->allocator = gb_arena_allocator(&m->arena);
+ m->allocator = heap_allocator();
m->tmp_allocator = gb_arena_allocator(&m->tmp_arena);
m->info = &c->info;
- map_init(&m->values, heap_allocator());
- map_init(&m->members, heap_allocator());
- map_init(&m->debug_info, heap_allocator());
- map_init(&m->entity_names, heap_allocator());
- array_init(&m->procs, heap_allocator());
- array_init(&m->procs_to_generate, heap_allocator());
+ map_init(&m->values, heap_allocator());
+ map_init(&m->members, heap_allocator());
+ map_init(&m->debug_info, heap_allocator());
+ map_init(&m->entity_names, heap_allocator());
+ array_init(&m->procs, heap_allocator());
+ array_init(&m->procs_to_generate, heap_allocator());
array_init(&m->foreign_library_paths, heap_allocator());
- map_init(&m->const_strings, heap_allocator());
+ map_init(&m->const_strings, heap_allocator());
// Default states
m->stmt_state_flags = 0;
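
With m->allocator now backed by heap_allocator() instead of the IR arena, individual IR allocations can be released on their own rather than only when the whole arena goes away. A rough sketch of the difference, using the gb allocator helpers (hypothetical usage, not part of this patch):

    irValue *v = gb_alloc_item(m->allocator, irValue);
    // ... use v ...
    gb_free(m->allocator, v); // meaningful with heap_allocator(); effectively a no-op under the old arena
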