aboutsummaryrefslogtreecommitdiff
path: root/src/common.cpp
diff options
context:
space:
mode:
authorgingerBill <bill@gingerbill.org>2020-05-27 18:23:37 +0100
committergingerBill <bill@gingerbill.org>2020-05-27 18:23:37 +0100
commit1a0614b0d7f4b6010d79ac0a402d3c4c1f389529 (patch)
treeaeee6f81470fe8f9ed0d918008272ed081a099bd /src/common.cpp
parent876820789e9dedaa6198c4cd145702485e3bd21c (diff)
Improve performance of tokenization and parsing
Diffstat (limited to 'src/common.cpp')
-rw-r--r--src/common.cpp30
1 file changed, 18 insertions, 12 deletions
diff --git a/src/common.cpp b/src/common.cpp
index 7068eb333..0f058d8f3 100644
--- a/src/common.cpp
+++ b/src/common.cpp
@@ -74,7 +74,7 @@ GB_ALLOCATOR_PROC(heap_allocator_proc) {
case gbAllocation_Alloc:
ptr = _aligned_malloc(size, alignment);
if (flags & gbAllocatorFlag_ClearToZero) {
- gb_zero_size(ptr, size);
+ zero_size(ptr, size);
}
break;
case gbAllocation_Free:
@@ -105,7 +105,7 @@ GB_ALLOCATOR_PROC(heap_allocator_proc) {
// ptr = malloc(size+alignment);
if (flags & gbAllocatorFlag_ClearToZero) {
- gb_zero_size(ptr, size);
+ zero_size(ptr, size);
}
break;
}
@@ -126,7 +126,7 @@ GB_ALLOCATOR_PROC(heap_allocator_proc) {
posix_memalign(&ptr, alignment, size);
if (flags & gbAllocatorFlag_ClearToZero) {
- gb_zero_size(ptr, size);
+ zero_size(ptr, size);
}
break;
}
@@ -347,6 +347,12 @@ void mul_overflow_u64(u64 x, u64 y, u64 *lo, u64 *hi) {
#endif
}
+gb_inline void zero_size(void *ptr, isize len) {
+ memset(ptr, 0, len);
+}
+
+#define zero_item(ptr) zero_size((ptr), gb_size_of(ptr))
+
gb_global String global_module_path = {0};
@@ -376,27 +382,27 @@ typedef struct Arena {
void arena_init(Arena *arena, gbAllocator backing, isize block_size=ARENA_DEFAULT_BLOCK_SIZE) {
arena->backing = backing;
arena->block_size = block_size;
- array_init(&arena->blocks, backing);
+ array_init(&arena->blocks, backing, 0, 2);
gb_mutex_init(&arena->mutex);
}
void arena_grow(Arena *arena, isize min_size) {
- gb_mutex_lock(&arena->mutex);
- defer (gb_mutex_unlock(&arena->mutex));
+ // gb_mutex_lock(&arena->mutex);
+ // defer (gb_mutex_unlock(&arena->mutex));
isize size = gb_max(arena->block_size, min_size);
size = ALIGN_UP(size, ARENA_MIN_ALIGNMENT);
void *new_ptr = gb_alloc(arena->backing, size);
arena->ptr = cast(u8 *)new_ptr;
- // gb_zero_size(arena->ptr, size); // NOTE(bill): This should already be zeroed
+ // zero_size(arena->ptr, size); // NOTE(bill): This should already be zeroed
GB_ASSERT(arena->ptr == ALIGN_DOWN_PTR(arena->ptr, ARENA_MIN_ALIGNMENT));
arena->end = arena->ptr + size;
array_add(&arena->blocks, arena->ptr);
}
void *arena_alloc(Arena *arena, isize size, isize alignment) {
- gb_mutex_lock(&arena->mutex);
- defer (gb_mutex_unlock(&arena->mutex));
+ // gb_mutex_lock(&arena->mutex);
+ // defer (gb_mutex_unlock(&arena->mutex));
arena->total_used += size;
@@ -411,13 +417,13 @@ void *arena_alloc(Arena *arena, isize size, isize alignment) {
arena->ptr = cast(u8 *)ALIGN_UP_PTR(arena->ptr + size, align);
GB_ASSERT(arena->ptr <= arena->end);
GB_ASSERT(ptr == ALIGN_DOWN_PTR(ptr, align));
- gb_zero_size(ptr, size);
+ // zero_size(ptr, size);
return ptr;
}
void arena_free_all(Arena *arena) {
- gb_mutex_lock(&arena->mutex);
- defer (gb_mutex_unlock(&arena->mutex));
+ // gb_mutex_lock(&arena->mutex);
+ // defer (gb_mutex_unlock(&arena->mutex));
for_array(i, arena->blocks) {
gb_free(arena->backing, arena->blocks[i]);