author     Jeroen van Rijn <Kelimion@users.noreply.github.com>  2022-04-27 14:37:15 +0200
committer  Jeroen van Rijn <Kelimion@users.noreply.github.com>  2022-04-27 14:37:15 +0200
commit     c4e0d1efa1ec655bae9134b95a0fcd060cc7bbea (patch)
tree       c29bd0b78138e8d67aebe34ac689d13e32d9d15f /core/container
parent     6e61abc7d06f22129f93110a9f652c3eec21f0c6 (diff)
parent     9349dfba8fec53f52f77a0c8928e115ec93ff447 (diff)
Merge branch 'master' into xml
Diffstat (limited to 'core/container')
-rw-r--r--  core/container/array.odin | 216
-rw-r--r--  core/container/bit_array/bit_array.odin | 239
-rw-r--r--  core/container/bit_array/doc.odin | 53
-rw-r--r--  core/container/bloom_filter.odin | 80
-rw-r--r--  core/container/lru/lru_cache.odin | 201
-rw-r--r--  core/container/map.odin | 377
-rw-r--r--  core/container/priority_queue.odin | 121
-rw-r--r--  core/container/priority_queue/priority_queue.odin | 143
-rw-r--r--  core/container/queue.odin | 175
-rw-r--r--  core/container/queue/queue.odin | 209
-rw-r--r--  core/container/ring.odin | 74
-rw-r--r--  core/container/set.odin | 240
-rw-r--r--  core/container/small_array.odin | 95
-rw-r--r--  core/container/small_array/small_array.odin | 117
-rw-r--r--  core/container/topological_sort/topological_sort.odin | 98
15 files changed, 1060 insertions, 1378 deletions
diff --git a/core/container/array.odin b/core/container/array.odin
deleted file mode 100644
index 2d5a64ec3..000000000
--- a/core/container/array.odin
+++ /dev/null
@@ -1,216 +0,0 @@
-package container
-
-import "core:mem"
-import "core:runtime"
-
-Array :: struct($T: typeid) {
- data: ^T,
- len: int,
- cap: int,
- allocator: mem.Allocator,
-}
-
-ARRAY_DEFAULT_CAPACITY :: 16
-
-/*
-array_init :: proc {
- array_init_none,
- array_init_len,
- array_init_len_cap,
-}
-array_init
-array_delete
-array_len
-array_cap
-array_space
-array_slice
-array_get
-array_get_ptr
-array_set
-array_reserve
-array_resize
-array_push = array_append :: proc{
- array_push_back,
- array_push_back_elems,
-}
-array_push_front
-array_pop_back
-array_pop_front
-array_consume
-array_trim
-array_clear
-array_clone
-array_set_capacity
-array_grow
-*/
-
-
-array_init_none :: proc(a: ^$A/Array, allocator := context.allocator) {
- array_init_len_cap(a, 0, ARRAY_DEFAULT_CAPACITY, allocator)
-}
-array_init_len :: proc(a: ^$A/Array, len: int, allocator := context.allocator) {
- array_init_len_cap(a, len, len, allocator)
-}
-array_init_len_cap :: proc(a: ^$A/Array($T), len: int, cap: int, allocator := context.allocator) {
- a.allocator = allocator
- a.data = (^T)(mem.alloc(size_of(T)*cap, align_of(T), a.allocator))
- a.len = len
- a.cap = cap
-}
-
-array_init :: proc{array_init_none, array_init_len, array_init_len_cap}
-
-array_delete :: proc(a: $A/Array) {
- mem.free(a.data, a.allocator)
-}
-
-array_len :: proc(a: $A/Array) -> int {
- return a.len
-}
-
-array_cap :: proc(a: $A/Array) -> int {
- return a.cap
-}
-
-array_space :: proc(a: $A/Array) -> int {
- return a.cap - a.len
-}
-
-array_slice :: proc(a: $A/Array($T)) -> []T {
- s := mem.Raw_Slice{a.data, a.len}
- return transmute([]T)s
-}
-
-array_cap_slice :: proc(a: $A/Array($T)) -> []T {
- s := mem.Raw_Slice{a.data, a.cap}
- return transmute([]T)s
-}
-
-array_get :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> T {
- runtime.bounds_check_error_loc(loc, index, array_len(a))
- return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))^
-}
-array_get_ptr :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> ^T {
- runtime.bounds_check_error_loc(loc, index, array_len(a))
- return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))
-}
-
-array_set :: proc(a: ^$A/Array($T), index: int, item: T, loc := #caller_location) {
- runtime.bounds_check_error_loc(loc, index, array_len(a^))
- (^T)(uintptr(a.data) + size_of(T)*uintptr(index))^ = item
-}
-
-
-array_reserve :: proc(a: ^$A/Array, capacity: int) {
- if capacity > a.len {
- array_set_capacity(a, capacity)
- }
-}
-
-array_resize :: proc(a: ^$A/Array, length: int) {
- if length > a.len {
- array_set_capacity(a, length)
- }
- a.len = length
-}
-
-
-
-array_push_back :: proc(a: ^$A/Array($T), item: T) {
- if array_space(a^) == 0 {
- array_grow(a)
- }
-
- a.len += 1
- array_set(a, a.len-1, item)
-}
-
-array_push_front :: proc(a: ^$A/Array($T), item: T) {
- if array_space(a^) == 0 {
- array_grow(a)
- }
-
- a.len += 1
- data := array_slice(a^)
- copy(data[1:], data[:])
- data[0] = item
-}
-
-array_pop_back :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := array_get(a^, a.len-1)
- a.len -= 1
- return item
-}
-
-array_pop_front :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := array_get(a^, 0)
- s := array_slice(a^)
- copy(s[:], s[1:])
- a.len -= 1
- return item
-}
-
-
-array_consume :: proc(a: ^$A/Array($T), count: int, loc := #caller_location) {
- assert(condition=a.len >= count, loc=loc)
- a.len -= count
-}
-
-
-array_trim :: proc(a: ^$A/Array($T)) {
- array_set_capacity(a, a.len)
-}
-
-array_clear :: proc(a: ^$A/Array($T)) {
- array_resize(a, 0)
-}
-
-array_clone :: proc(a: $A/Array($T), allocator := context.allocator) -> A {
- res: A
- array_init(&res, array_len(a), array_len(a), allocator)
- copy(array_slice(res), array_slice(a))
- return res
-}
-
-array_push_back_elems :: proc(a: ^$A/Array($T), items: ..T) {
- if array_space(a^) < len(items) {
- array_grow(a, a.len + len(items))
- }
- offset := a.len
- data := array_cap_slice(a^)
- n := copy(data[a.len:], items)
- a.len += n
-}
-
-array_push :: proc{array_push_back, array_push_back_elems}
-array_append :: proc{array_push_back, array_push_back_elems}
-
-array_set_capacity :: proc(a: ^$A/Array($T), new_capacity: int) {
- if new_capacity == a.cap {
- return
- }
-
- if new_capacity < a.len {
- array_resize(a, new_capacity)
- }
-
- new_data: ^T
- if new_capacity > 0 {
- if a.allocator.procedure == nil {
- a.allocator = context.allocator
- }
- new_data = (^T)(mem.alloc(size_of(T)*new_capacity, align_of(T), a.allocator))
- if new_data != nil {
- mem.copy(new_data, a.data, size_of(T)*a.len)
- }
- }
- mem.free(a.data, a.allocator)
- a.data = new_data
- a.cap = new_capacity
-}
-array_grow :: proc(a: ^$A/Array, min_capacity: int = 0) {
- new_capacity := max(array_len(a^)*2 + 8, min_capacity)
- array_set_capacity(a, new_capacity)
-}
diff --git a/core/container/bit_array/bit_array.odin b/core/container/bit_array/bit_array.odin
new file mode 100644
index 000000000..98ef4b542
--- /dev/null
+++ b/core/container/bit_array/bit_array.odin
@@ -0,0 +1,239 @@
+package dynamic_bit_array
+
+import "core:intrinsics"
+import "core:mem"
+
+/*
+ Note that these constants are dependent on the backing being a u64.
+*/
+@(private="file")
+INDEX_SHIFT :: 6
+
+@(private="file")
+INDEX_MASK :: 63
+
+@(private="file")
+NUM_BITS :: 64
+
+Bit_Array :: struct {
+ bits: [dynamic]u64,
+ bias: int,
+ max_index: int,
+ free_pointer: bool,
+}
+
+Bit_Array_Iterator :: struct {
+ array: ^Bit_Array,
+ word_idx: int,
+ bit_idx: uint,
+}
+
+/*
+	In:
+	- ba: ^Bit_Array - the array to iterate over
+
+	Out:
+	- it: Bit_Array_Iterator - the iterator that holds iteration state
+*/
+make_iterator :: proc (ba: ^Bit_Array) -> (it: Bit_Array_Iterator) {
+ return Bit_Array_Iterator { array = ba }
+}
+
+/*
+ In:
+ - it: ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+ Out:
+ - set: bool - the state of the bit at `index`
+ - index: int - the next bit of the Bit_Array referenced by `it`.
+ - ok: bool - `true` if the iterator returned a valid index,
+ `false` if there were no more bits
+*/
+iterate_by_all :: proc (it: ^Bit_Array_Iterator) -> (set: bool, index: int, ok: bool) {
+ index = it.word_idx * NUM_BITS + int(it.bit_idx) + it.array.bias
+ if index > it.array.max_index { return false, 0, false }
+
+ word := it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+ set = (word >> it.bit_idx & 1) == 1
+
+ it.bit_idx += 1
+ if it.bit_idx >= NUM_BITS {
+ it.bit_idx = 0
+ it.word_idx += 1
+ }
+
+ return set, index, true
+}
+
+/*
+ In:
+ - it: ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+ Out:
+ - index: int - the next set bit of the Bit_Array referenced by `it`.
+ - ok: bool - `true` if the iterator returned a valid index,
+ `false` if there were no more bits set
+*/
+iterate_by_set :: proc (it: ^Bit_Array_Iterator) -> (index: int, ok: bool) {
+ return iterate_internal_(it, true)
+}
+
+/*
+ In:
+ - it: ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+ Out:
+ - index: int - the next unset bit of the Bit_Array referenced by `it`.
+ - ok: bool - `true` if the iterator returned a valid index,
+ `false` if there were no more unset bits
+*/
+iterate_by_unset :: proc (it: ^Bit_Array_Iterator) -> (index: int, ok: bool) {
+ return iterate_internal_(it, false)
+}
+
+@(private="file")
+iterate_internal_ :: proc (it: ^Bit_Array_Iterator, $ITERATE_SET_BITS: bool) -> (index: int, ok: bool) {
+	word := it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+	when ! ITERATE_SET_BITS { word = ~word }
+
+	// If the word is empty or we have already gone over all the bits in it,
+	// it.bit_idx is greater than the index of any set bit in the word,
+	// meaning that word >> it.bit_idx == 0.
+ for it.word_idx < len(it.array.bits) && word >> it.bit_idx == 0 {
+ it.word_idx += 1
+ it.bit_idx = 0
+ word = it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+ when ! ITERATE_SET_BITS { word = ~word }
+ }
+
+ // if we are iterating the set bits, reaching the end of the array means we have no more bits to check
+ when ITERATE_SET_BITS {
+ if it.word_idx >= len(it.array.bits) {
+ return 0, false
+ }
+ }
+
+ // reaching here means that the word has some set bits
+ it.bit_idx += uint(intrinsics.count_trailing_zeros(word >> it.bit_idx))
+ index = it.word_idx * NUM_BITS + int(it.bit_idx) + it.array.bias
+
+ it.bit_idx += 1
+ if it.bit_idx >= NUM_BITS {
+ it.bit_idx = 0
+ it.word_idx += 1
+ }
+ return index, index <= it.array.max_index
+}
+
+
+/*
+ In:
+ - ba: ^Bit_Array - a pointer to the Bit Array
+ - index: The bit index. Can be an enum member.
+
+ Out:
+ - res: The bit you're interested in.
+ - ok: Whether the index was valid. Returns `false` if the index is smaller than the bias.
+
+ The `ok` return value may be ignored.
+*/
+get :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (res: bool, ok: bool) {
+	if ba == nil || int(index) < ba.bias { return false, false }
+	context.allocator = allocator
+
+	idx := int(index) - ba.bias
+
+ leg_index := idx >> INDEX_SHIFT
+ bit_index := idx & INDEX_MASK
+
+ /*
+ If we `get` a bit that doesn't fit in the Bit Array, it's naturally `false`.
+ This early-out prevents unnecessary resizing.
+ */
+ if leg_index + 1 > len(ba.bits) { return false, true }
+
+ val := u64(1 << uint(bit_index))
+ res = ba.bits[leg_index] & val == val
+
+ return res, true
+}
+
+/*
+ In:
+ - ba: ^Bit_Array - a pointer to the Bit Array
+ - index: The bit index. Can be an enum member.
+
+ Out:
+ - ok: Whether or not we managed to set requested bit.
+
+ `set` automatically resizes the Bit Array to accommodate the requested index if needed.
+*/
+set :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (ok: bool) {
+
+	if ba == nil || int(index) < ba.bias { return false }
+	context.allocator = allocator
+
+	idx := int(index) - ba.bias
+
+ leg_index := idx >> INDEX_SHIFT
+ bit_index := idx & INDEX_MASK
+
+ resize_if_needed(ba, leg_index) or_return
+
+ ba.max_index = max(idx, ba.max_index)
+ ba.bits[leg_index] |= 1 << uint(bit_index)
+ return true
+}
+
+/*
+ A helper function to create a Bit Array with optional bias, in case your smallest index is non-zero (including negative).
+*/
+create :: proc(max_index: int, min_index := 0, allocator := context.allocator) -> (res: ^Bit_Array, ok: bool) #optional_ok {
+ context.allocator = allocator
+ size_in_bits := max_index - min_index
+
+ if size_in_bits < 1 { return {}, false }
+
+ legs := size_in_bits >> INDEX_SHIFT
+
+ res = new(Bit_Array)
+ res.bias = min_index
+ res.max_index = max_index
+ res.free_pointer = true
+ return res, resize_if_needed(res, legs)
+}
+
+/*
+ Sets all bits to `false`.
+*/
+clear :: proc(ba: ^Bit_Array) {
+ if ba == nil { return }
+ mem.zero_slice(ba.bits[:])
+}
+
+/*
+ Releases the memory used by the Bit Array.
+*/
+destroy :: proc(ba: ^Bit_Array) {
+ if ba == nil { return }
+ delete(ba.bits)
+ if ba.free_pointer { // Only free if this Bit_Array was created using `create`, not when on the stack.
+ free(ba)
+ }
+}
+
+/*
+ Resizes the Bit Array. For internal use.
+ If you want to reserve the memory for a given-sized Bit Array up front, you can use `create`.
+*/
+@(private="file")
+resize_if_needed :: proc(ba: ^Bit_Array, legs: int, allocator := context.allocator) -> (ok: bool) {
+ if ba == nil { return false }
+
+ context.allocator = allocator
+
+ if legs + 1 > len(ba.bits) {
+ resize(&ba.bits, legs + 1)
+ }
+ return len(ba.bits) > legs
+}
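
For reference, a minimal sketch (not part of this commit) showing how the iterator procedures above are meant to be driven; the example package name and bit indices are illustrative, and the package is assumed to be imported from its new "core:container/bit_array" path:

	package bit_array_iter_example

	import "core:fmt"
	import bit_array "core:container/bit_array"

	main :: proc() {
		ba: bit_array.Bit_Array
		defer bit_array.destroy(&ba)

		// Set two bits; index 64 lands in the second backing u64.
		bit_array.set(&ba, 3)
		bit_array.set(&ba, 64)

		it := bit_array.make_iterator(&ba)
		for {
			index, ok := bit_array.iterate_by_set(&it)
			if !ok { break }
			fmt.println(index) // prints 3, then 64
		}
	}
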
diff --git a/core/container/bit_array/doc.odin b/core/container/bit_array/doc.odin
new file mode 100644
index 000000000..52e252d8a
--- /dev/null
+++ b/core/container/bit_array/doc.odin
@@ -0,0 +1,53 @@
+package dynamic_bit_array
+
+/*
+ The Bit Array can be used in several ways:
+
+ -- By default you don't need to instantiate a Bit Array:
+
+ package test
+
+ import "core:fmt"
+ import "core:container/bit_array"
+
+ main :: proc() {
+ using bit_array
+
+ bits: Bit_Array
+
+ // returns `true`
+ fmt.println(set(&bits, 42))
+
+ // returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
+ was_set, was_retrieved := get(&bits, -1)
+ fmt.println(was_set, was_retrieved)
+ destroy(&bits)
+ }
+
+	-- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
+
+ package test
+
+ import "core:fmt"
+ import "core:container/bit_array"
+
+ main :: proc() {
+ Foo :: enum int {
+ Negative_Test = -42,
+ Bar = 420,
+ Leaves = 69105,
+ }
+
+ using bit_array
+
+ bits := create(int(max(Foo)), int(min(Foo)))
+ defer destroy(bits)
+
+ fmt.printf("Set(Bar): %v\n", set(bits, Foo.Bar))
+ fmt.printf("Get(Bar): %v, %v\n", get(bits, Foo.Bar))
+ fmt.printf("Set(Negative_Test): %v\n", set(bits, Foo.Negative_Test))
+ fmt.printf("Get(Leaves): %v, %v\n", get(bits, Foo.Leaves))
+ fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
+ fmt.printf("Freed.\n")
+ }
+*/
\ No newline at end of file
diff --git a/core/container/bloom_filter.odin b/core/container/bloom_filter.odin
deleted file mode 100644
index 8af7aeb85..000000000
--- a/core/container/bloom_filter.odin
+++ /dev/null
@@ -1,80 +0,0 @@
-package container
-
-import "core:mem"
-
-Bloom_Hash_Proc :: #type proc(data: []byte) -> u32
-
-Bloom_Hash :: struct {
- hash_proc: Bloom_Hash_Proc,
- next: ^Bloom_Hash,
-}
-
-Bloom_Filter :: struct {
- allocator: mem.Allocator,
- hash: ^Bloom_Hash,
- bits: []byte,
-}
-
-bloom_filter_init :: proc(b: ^Bloom_Filter, size: int, allocator := context.allocator) {
- b.allocator = allocator
- b.bits = make([]byte, size, allocator)
-}
-
-bloom_filter_destroy :: proc(b: ^Bloom_Filter) {
- context.allocator = b.allocator
- delete(b.bits)
- for b.hash != nil {
- hash := b.hash
- b.hash = b.hash.next
- free(hash)
- }
-}
-
-bloom_filter_add_hash_proc :: proc(b: ^Bloom_Filter, hash_proc: Bloom_Hash_Proc) {
- context.allocator = b.allocator
- h := new(Bloom_Hash)
- h.hash_proc = hash_proc
-
- head := &b.hash
- for head^ != nil {
- head = &(head^.next)
- }
- head^ = h
-}
-
-bloom_filter_add :: proc(b: ^Bloom_Filter, item: []byte) {
- #no_bounds_check for h := b.hash; h != nil; h = h.next {
- hash := h.hash_proc(item)
- hash %= u32(len(b.bits) * 8)
- b.bits[hash >> 3] |= 1 << (hash & 3)
- }
-}
-
-bloom_filter_add_string :: proc(b: ^Bloom_Filter, item: string) {
- bloom_filter_add(b, transmute([]byte)item)
-}
-
-bloom_filter_add_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) {
- item := mem.slice_ptr((^byte)(data), size)
- bloom_filter_add(b, item)
-}
-
-bloom_filter_test :: proc(b: ^Bloom_Filter, item: []byte) -> bool {
- #no_bounds_check for h := b.hash; h != nil; h = h.next {
- hash := h.hash_proc(item)
- hash %= u32(len(b.bits) * 8)
- if (b.bits[hash >> 3] & (1 << (hash & 3)) == 0) {
- return false
- }
- }
- return true
-}
-
-bloom_filter_test_string :: proc(b: ^Bloom_Filter, item: string) -> bool {
- return bloom_filter_test(b, transmute([]byte)item)
-}
-
-bloom_filter_test_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) -> bool {
- item := mem.slice_ptr((^byte)(data), size)
- return bloom_filter_test(b, item)
-}
diff --git a/core/container/lru/lru_cache.odin b/core/container/lru/lru_cache.odin
new file mode 100644
index 000000000..b59f29f0c
--- /dev/null
+++ b/core/container/lru/lru_cache.odin
@@ -0,0 +1,201 @@
+package container_lru
+
+import "core:runtime"
+import "core:intrinsics"
+_ :: runtime
+_ :: intrinsics
+
+Node :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
+ prev, next: ^Node(Key, Value),
+ key: Key,
+ value: Value,
+}
+
+// Cache is an LRU cache. It automatically removes entries as new entries are
+// added once the capacity is reached. Entries are evicted based on how recently
+// they were used, with the least recently used entry removed first.
+Cache :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
+ head: ^Node(Key, Value),
+ tail: ^Node(Key, Value),
+
+ entries: map[Key]^Node(Key, Value),
+
+ count: int,
+ capacity: int,
+
+ node_allocator: runtime.Allocator,
+
+ on_remove: proc(key: Key, value: Value, user_data: rawptr),
+ on_remove_user_data: rawptr,
+}
+
+// init initializes a Cache
+init :: proc(c: ^$C/Cache($Key, $Value), capacity: int, entries_allocator := context.allocator, node_allocator := context.allocator) {
+ c.entries.allocator = entries_allocator
+ c.node_allocator = node_allocator
+ c.capacity = capacity
+}
+
+// destroy deinitializes a Cache
+destroy :: proc(c: ^$C/Cache($Key, $Value), call_on_remove: bool) {
+ clear(c, call_on_remove)
+ delete(c.entries)
+}
+
+// clear the contents of a Cache
+clear :: proc(c: ^$C/Cache($Key, $Value), call_on_remove: bool) {
+ for _, node in c.entries {
+ if call_on_remove {
+ _call_on_remove(c, node)
+ }
+ free(node, c.node_allocator)
+ }
+ runtime.clear(&c.entries)
+ c.head = nil
+ c.tail = nil
+ c.count = 0
+}
+
+// set inserts or updates the given key-value pair. This operation updates the recent usage of the item.
+set :: proc(c: ^$C/Cache($Key, $Value), key: Key, value: Value) -> runtime.Allocator_Error {
+ if e, ok := c.entries[key]; ok {
+ e.value = value
+ _pop_node(c, e)
+ _push_front_node(c, e)
+ return nil
+ }
+
+ e : ^Node(Key, Value) = nil
+ assert(c.count <= c.capacity)
+ if c.count == c.capacity {
+ e = c.tail
+ _remove_node(c, e)
+ }
+ else {
+ c.count += 1
+ e = new(Node(Key, Value), c.node_allocator) or_return
+ }
+
+ e.key = key
+ e.value = value
+ _push_front_node(c, e)
+ c.entries[key] = e
+
+ return nil
+}
+
+// get a value from the cache for a given key. This operation updates the usage of the item.
+get :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: Value, ok: bool) #optional_ok {
+ e: ^Node(Key, Value)
+ e, ok = c.entries[key]
+ if !ok {
+ return
+ }
+ _pop_node(c, e)
+ _push_front_node(c, e)
+ return e.value, true
+}
+
+// get_ptr gets a pointer to a value in the cache for a given key. This operation updates the usage of the item.
+get_ptr :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: ^Value, ok: bool) #optional_ok {
+ e: ^Node(Key, Value)
+ e, ok = c.entries[key]
+ if !ok {
+ return
+ }
+ _pop_node(c, e)
+ _push_front_node(c, e)
+ return &e.value, true
+}
+
+// peek gets the value from the cache for a given key without updating the recent usage.
+peek :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: Value, ok: bool) #optional_ok {
+ e: ^Node(Key, Value)
+ e, ok = c.entries[key]
+ if !ok {
+ return
+ }
+ return e.value, true
+}
+
+// exists checks for the existence of a value from a given key without updating the recent usage.
+exists :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> bool {
+ return key in c.entries
+}
+
+// remove removes an item from the cache.
+remove :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> bool {
+	e, ok := c.entries[key]
+	if !ok {
+		return false
+	}
+	_remove_node(c, e)
+	free(e, c.node_allocator)
+ c.count -= 1
+ return true
+}
+
+
+@(private)
+_remove_node :: proc(c: ^$C/Cache($Key, $Value), node: ^Node(Key, Value)) {
+ if c.head == node {
+ c.head = node.next
+ }
+ if c.tail == node {
+ c.tail = node.prev
+ }
+ if node.prev != nil {
+ node.prev.next = node.next
+ }
+ if node.next != nil {
+ node.next.prev = node.prev
+ }
+ node.prev = nil
+ node.next = nil
+
+ delete_key(&c.entries, node.key)
+
+ _call_on_remove(c, node)
+}
+
+@(private)
+_call_on_remove :: proc(c: ^$C/Cache($Key, $Value), node: ^Node(Key, Value)) {
+ if c.on_remove != nil {
+ c.on_remove(node.key, node.value, c.on_remove_user_data)
+ }
+}
+
+@(private)
+_push_front_node :: proc(c: ^$C/Cache($Key, $Value), e: ^Node(Key, Value)) {
+ if c.head != nil {
+ e.next = c.head
+ e.next.prev = e
+ }
+ c.head = e
+ if c.tail == nil {
+ c.tail = e
+ }
+ e.prev = nil
+}
+
+@(private)
+_pop_node :: proc(c: ^$C/Cache($Key, $Value), e: ^Node(Key, Value)) {
+ if e == nil {
+ return
+ }
+ if c.head == e {
+ c.head = e.next
+ }
+ if c.tail == e {
+ c.tail = e.prev
+ }
+ if e.prev != nil {
+ e.prev.next = e.next
+ }
+
+ if e.next != nil {
+ e.next.prev = e.prev
+ }
+ e.prev = nil
+ e.next = nil
+}
\ No newline at end of file
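
For reference, a minimal usage sketch (not part of this commit) of the Cache added above; the key/value types, capacity, and import alias are illustrative:

	package lru_example

	import "core:fmt"
	import lru "core:container/lru"

	main :: proc() {
		cache: lru.Cache(int, string)
		lru.init(&cache, 2)              // hold at most two entries
		defer lru.destroy(&cache, false)

		lru.set(&cache, 1, "one")
		lru.set(&cache, 2, "two")
		lru.set(&cache, 3, "three")      // evicts key 1, the least recently used entry

		_, found := lru.get(&cache, 1)
		fmt.println(found)               // false
		fmt.println(lru.get(&cache, 3))  // three true
	}
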
diff --git a/core/container/map.odin b/core/container/map.odin
deleted file mode 100644
index 54cbc22fc..000000000
--- a/core/container/map.odin
+++ /dev/null
@@ -1,377 +0,0 @@
-package container
-
-import "core:intrinsics"
-_ :: intrinsics
-
-
-Map :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
- hash: Array(int),
- entries: Array(Map_Entry(Key, Value)),
-}
-
-Map_Entry :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
- hash: uintptr,
- next: int,
- key: Key,
- value: Value,
-}
-
-
-/*
-map_init :: proc{
- map_init_none,
- map_init_cap,
-}
-map_delete
-
-map_has
-map_get
-map_get_default
-map_get_ptr
-map_set
-map_remove
-map_reserve
-map_clear
-
-// Multi Map
-
-multi_map_find_first
-multi_map_find_next
-multi_map_count
-multi_map_get :: proc{
- multi_map_get_array,
- multi_map_get_slice,
-};
-multi_map_get_as_slice
-multi_map_insert
-multi_map_remove
-multi_map_remove_all
-
-*/
-
-map_init :: proc{map_init_none, map_init_cap}
-
-map_init_none :: proc(m: ^$M/Map($Key, $Value), allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
-}
-
-map_init_cap :: proc(m: ^$M/Map($Key, $Value), cap: int, allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
- map_reserve(m, cap)
-}
-
-map_delete :: proc(m: $M/Map($Key, $Value)) {
- array_delete(m.hash)
- array_delete(m.entries)
-}
-
-
-map_has :: proc(m: $M/Map($Key, $Value), key: Key) -> bool {
- return _map_find_or_fail(m, key) >= 0
-}
-
-map_get :: proc(m: $M/Map($Key, $Value), key: Key) -> (res: Value, ok: bool) #optional_ok {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return {}, false
- }
- return array_get(m.entries, i).value, true
-}
-
-map_get_default :: proc(m: $M/Map($Key, $Value), key: Key, default: Value) -> (res: Value, ok: bool) #optional_ok {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return default, false
- }
- return array_get(m.entries, i).value, true
-}
-
-map_get_ptr :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Value {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return nil
- }
- return array_get_ptr(m.entries, i).value
-}
-
-map_set :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
- if array_len(m.hash) == 0 {
- _map_grow(m)
- }
-
- i := _map_find_or_make(m, key)
- array_get_ptr(m.entries, i).value = value
- if _map_full(m^) {
- _map_grow(m)
- }
-}
-
-map_remove :: proc(m: ^$M/Map($Key, $Value), key: Key) {
- fr := _map_find_key(m^, key)
- if fr.entry_index >= 0 {
- _map_erase(m, fr)
- }
-}
-
-
-map_reserve :: proc(m: ^$M/Map($Key, $Value), new_size: int) {
- nm: M
- map_init(&nm, m.hash.allocator)
- array_resize(&nm.hash, new_size)
- array_reserve(&nm.entries, array_len(m.entries))
-
- for i in 0..<new_size {
- array_set(&nm.hash, i, -1)
- }
- for i in 0..<array_len(m.entries) {
- e := array_get(m.entries, i)
- multi_map_insert(&nm, e.key, e.value)
- }
-
- map_delete(m^)
- m^ = nm
-}
-
-map_clear :: proc(m: ^$M/Map($Key, $Value)) {
- array_clear(&m.hash)
- array_clear(&m.entries)
-}
-
-
-
-multi_map_find_first :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Map_Entry(Key, Value) {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return nil
- }
- return array_get_ptr(m.entries, i)
-}
-
-multi_map_find_next :: proc(m: $M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> ^Map_Entry(Key, Value) {
- i := e.next
- for i >= 0 {
- it := array_get_ptr(m.entries, i)
- if it.hash == e.hash && it.key == e.key {
- return it
- }
- i = it.next
- }
- return nil
-}
-
-multi_map_count :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
- n := 0
- e := multi_map_find_first(m, key)
- for e != nil {
- n += 1
- e = multi_map_find_next(m, e)
- }
- return n
-}
-
-multi_map_get :: proc{multi_map_get_array, multi_map_get_slice}
-
-multi_map_get_array :: proc(m: $M/Map($Key, $Value), key: Key, items: ^Array(Value)) {
- if items == nil {
- return
- }
- e := multi_map_find_first(m, key)
- for e != nil {
- array_append(items, e.value)
- e = multi_map_find_next(m, e)
- }
-}
-
-multi_map_get_slice :: proc(m: $M/Map($Key, $Value), key: Key, items: []Value) {
- e := multi_map_find_first(m, key)
- i := 0
- for e != nil && i < len(items) {
- items[i] = e.value
- i += 1
- e = multi_map_find_next(m, e)
- }
-}
-
-multi_map_get_as_slice :: proc(m: $M/Map($Key, $Value), key: Key) -> []Value {
- items: Array(Value)
- array_init(&items, 0)
-
- e := multi_map_find_first(m, key)
- for e != nil {
- array_append(&items, e.value)
- e = multi_map_find_next(m, e)
- }
-
- return array_slice(items)
-}
-
-
-multi_map_insert :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
- if array_len(m.hash) == 0 {
- _map_grow(m)
- }
-
- i := _map_make(m, key)
- array_get_ptr(m.entries, i).value = value
- if _map_full(m^) {
- _map_grow(m)
- }
-}
-
-multi_map_remove :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) {
- fr := _map_find_entry(m, e)
- if fr.entry_index >= 0 {
- _map_erase(m, fr)
- }
-}
-
-multi_map_remove_all :: proc(m: ^$M/Map($Key, $Value), key: Key) {
- for map_exist(m^, key) {
- map_remove(m, key)
- }
-}
-
-
-/// Internal
-
-
-Map_Find_Result :: struct {
- hash_index: int,
- entry_prev: int,
- entry_index: int,
-}
-
-_map_add_entry :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int where intrinsics.type_is_valid_map_key(Key) {
- hasher := intrinsics.type_hasher_proc(Key)
-
- e: Map_Entry(Key, Value)
- e.key = key
- e.hash = hasher(&e.key, 0)
- e.next = -1
- idx := array_len(m.entries)
- array_push(&m.entries, e)
- return idx
-}
-
-_map_erase :: proc(m: ^$M/Map, fr: Map_Find_Result) {
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
- }
-
- if fr.entry_index == array_len(m.entries)-1 {
- array_pop_back(&m.entries)
- return
- }
-
- array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
- last := _map_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
- if last.entry_prev < 0 {
- array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
- } else {
- array_set(&m.hash, last.hash_index, fr.entry_index)
- }
-}
-
-
-_map_find_key :: proc(m: $M/Map($Key, $Value), key: Key) -> Map_Find_Result where intrinsics.type_is_valid_map_key(Key) {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- hasher := intrinsics.type_hasher_proc(Key)
-
- key := key
- hash := hasher(&key, 0)
-
- fr.hash_index = int(hash % uintptr(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it.hash == hash && it.key == key {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_map_find_entry :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> Map_Find_Result {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- fr.hash_index = int(e.hash % uintptr(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it == e {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_map_find_or_fail :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
- return _map_find_key(m, key).entry_index
-}
-_map_find_or_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
- fr := _map_find_key(m^, key)
- if fr.entry_index >= 0 {
- return fr.entry_index
- }
-
- i := _map_add_entry(m, key)
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
- return i
-}
-
-
-_map_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
- fr := _map_find_key(m^, key)
- i := _map_add_entry(m, key)
-
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
-
- array_get_ptr(m.entries, i).next = fr.entry_index
-
- return i
-}
-
-
-_map_full :: proc(m: $M/Map($Key, $Value)) -> bool {
- // TODO(bill): Determine good max load factor
- return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_map_grow :: proc(m: ^$M/Map($Key, $Value)) {
- new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
- map_reserve(m, new_size)
-}
-
-
diff --git a/core/container/priority_queue.odin b/core/container/priority_queue.odin
deleted file mode 100644
index c54e964a6..000000000
--- a/core/container/priority_queue.odin
+++ /dev/null
@@ -1,121 +0,0 @@
-package container
-
-Priority_Queue :: struct($T: typeid) {
- data: Array(T),
- len: int,
- priority: proc(item: T) -> int,
-}
-
-priority_queue_init_none :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, allocator := context.allocator) {
- queue_init_len(q, f, 0, allocator)
-}
-priority_queue_init_len :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, allocator := context.allocator) {
- queue_init_len_cap(q, f, 0, 16, allocator)
-}
-priority_queue_init_len_cap :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, cap: int, allocator := context.allocator) {
- array_init(&q.data, len, cap, allocator)
- q.len = len
- q.priority = f
-}
-
-priority_queue_init :: proc{priority_queue_init_none, priority_queue_init_len, priority_queue_init_len_cap}
-
-
-priority_queue_delete :: proc(q: $Q/Priority_Queue($T)) {
- array_delete(q.data)
-}
-
-priority_queue_clear :: proc(q: ^$Q/Priority_Queue($T)) {
- q.len = 0
-}
-
-priority_queue_len :: proc(q: $Q/Priority_Queue($T)) -> int {
- return q.len
-}
-
-priority_queue_cap :: proc(q: $Q/Priority_Queue($T)) -> int {
- return array_cap(q.data)
-}
-
-priority_queue_space :: proc(q: $Q/Priority_Queue($T)) -> int {
- return array_len(q.data) - q.len
-}
-
-priority_queue_reserve :: proc(q: ^$Q/Priority_Queue($T), capacity: int) {
- if capacity > q.len {
- array_resize(&q.data, new_capacity)
- }
-}
-
-priority_queue_resize :: proc(q: ^$Q/Priority_Queue($T), length: int) {
- if length > q.len {
- array_resize(&q.data, new_capacity)
- }
- q.len = length
-}
-
-_priority_queue_grow :: proc(q: ^$Q/Priority_Queue($T), min_capacity: int = 0) {
- new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
- array_resize(&q.data, new_capacity)
-}
-
-
-priority_queue_push :: proc(q: ^$Q/Priority_Queue($T), item: T) {
- if array_len(q.data) - q.len == 0 {
- _priority_queue_grow(q)
- }
-
- s := array_slice(q.data)
- s[q.len] = item
-
- i := q.len
- for i > 0 {
- p := (i - 1) / 2
- if q.priority(s[p]) <= q.priority(item) {
- break
- }
- s[i] = s[p]
- i = p
- }
-
- q.len += 1
- if q.len > 0 {
- s[i] = item
- }
-}
-
-
-
-priority_queue_pop :: proc(q: ^$Q/Priority_Queue($T)) -> T {
- assert(q.len > 0)
-
- s := array_slice(q.data)
- min := s[0]
- root := s[q.len-1]
- q.len -= 1
-
- i := 0
- for i * 2 + 1 < q.len {
- a := i * 2 + 1
- b := i * 2 + 2
- c := b < q.len && q.priority(s[b]) < q.priority(s[a]) ? b : a
-
- if q.priority(s[c]) >= q.priority(root) {
- break
- }
- s[i] = s[c]
- i = c
- }
-
- if q.len > 0 {
- s[i] = root
- }
- return min
-}
-
-priority_queue_peek :: proc(q: ^$Q/Priority_Queue($T)) -> T {
- assert(q.len > 0)
-
- s := array_slice(q.data)
- return s[0]
-}
diff --git a/core/container/priority_queue/priority_queue.odin b/core/container/priority_queue/priority_queue.odin
new file mode 100644
index 000000000..e324287f3
--- /dev/null
+++ b/core/container/priority_queue/priority_queue.odin
@@ -0,0 +1,143 @@
+package container_priority_queue
+
+import "core:builtin"
+
+Priority_Queue :: struct($T: typeid) {
+ queue: [dynamic]T,
+
+ less: proc(a, b: T) -> bool,
+ swap: proc(q: []T, i, j: int),
+}
+
+DEFAULT_CAPACITY :: 16
+
+default_swap_proc :: proc($T: typeid) -> proc(q: []T, i, j: int) {
+ return proc(q: []T, i, j: int) {
+ q[i], q[j] = q[j], q[i]
+ }
+}
+
+init :: proc(pq: ^$Q/Priority_Queue($T), less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int), capacity := DEFAULT_CAPACITY, allocator := context.allocator) {
+ if pq.queue.allocator.procedure == nil {
+ pq.queue.allocator = allocator
+ }
+ reserve(pq, capacity)
+ pq.less = less
+ pq.swap = swap
+}
+
+init_from_dynamic_array :: proc(pq: ^$Q/Priority_Queue($T), queue: [dynamic]T, less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int)) {
+ pq.queue = queue
+ pq.less = less
+ pq.swap = swap
+ n := builtin.len(pq.queue)
+ for i := n/2 - 1; i >= 0; i -= 1 {
+ _shift_down(pq, i, n)
+ }
+}
+
+destroy :: proc(pq: ^$Q/Priority_Queue($T)) {
+ clear(pq)
+ delete(pq.queue)
+}
+
+reserve :: proc(pq: ^$Q/Priority_Queue($T), capacity: int) {
+ builtin.reserve(&pq.queue, capacity)
+}
+clear :: proc(pq: ^$Q/Priority_Queue($T)) {
+ builtin.clear(&pq.queue)
+}
+len :: proc(pq: $Q/Priority_Queue($T)) -> int {
+ return builtin.len(pq.queue)
+}
+cap :: proc(pq: $Q/Priority_Queue($T)) -> int {
+ return builtin.cap(pq.queue)
+}
+
+_shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
+	// O(log n)
+ if 0 > i0 || i0 > n {
+ return false
+ }
+
+ i := i0
+ queue := pq.queue[:]
+
+ for {
+ j1 := 2*i + 1
+ if j1 < 0 || j1 >= n {
+ break
+ }
+ j := j1
+ if j2 := j1+1; j2 < n && pq.less(queue[j2], queue[j1]) {
+ j = j2
+ }
+ if !pq.less(queue[j], queue[i]) {
+ break
+ }
+
+ pq.swap(queue, i, j)
+ i = j
+ }
+ return i > i0
+}
+
+_shift_up :: proc(pq: ^$Q/Priority_Queue($T), j: int) {
+ j := j
+ queue := pq.queue[:]
+ n := builtin.len(queue)
+ for 0 <= j {
+ i := (j-1)/2
+ if i == j || !pq.less(queue[j], queue[i]) {
+ break
+ }
+ pq.swap(queue, i, j)
+ j = i
+ }
+}
+
+// NOTE(bill): When an element at index 'i' has changed its value, this will fix
+// the heap ordering. It uses a basic "heapsort" with shift-up and shift-down parts.
+fix :: proc(pq: ^$Q/Priority_Queue($T), i: int) {
+ if !_shift_down(pq, i, builtin.len(pq.queue)) {
+ _shift_up(pq, i)
+ }
+}
+
+push :: proc(pq: ^$Q/Priority_Queue($T), value: T) {
+ append(&pq.queue, value)
+ _shift_up(pq, builtin.len(pq.queue)-1)
+}
+
+pop :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T) {
+ assert(condition=builtin.len(pq.queue)>0, loc=loc)
+
+ n := builtin.len(pq.queue)-1
+ pq.swap(pq.queue[:], 0, n)
+ _shift_down(pq, 0, n)
+ return builtin.pop(&pq.queue)
+}
+
+pop_safe :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T, ok: bool) {
+ if builtin.len(pq.queue) > 0 {
+ n := builtin.len(pq.queue)-1
+ pq.swap(pq.queue[:], 0, n)
+ _shift_down(pq, 0, n)
+ return builtin.pop_safe(&pq.queue)
+ }
+ return
+}
+
+remove :: proc(pq: ^$Q/Priority_Queue($T), i: int) -> (value: T, ok: bool) {
+ n := builtin.len(pq.queue)
+ if 0 <= i && i < n {
+ if n != i {
+ pq.swap(pq.queue[:], i, n)
+ _shift_down(pq, i, n)
+ _shift_up(pq, i)
+ }
+ value, ok = builtin.pop_safe(&pq.queue)
+ }
+ return
+}
+
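
For reference, a minimal sketch (not part of this commit) of the new priority queue API; the element type, comparison procedure, and import alias are illustrative:

	package priority_queue_example

	import "core:fmt"
	import pq "core:container/priority_queue"

	main :: proc() {
		q: pq.Priority_Queue(int)
		// `less` decides the heap order; `a < b` gives a min-heap.
		pq.init(&q, proc(a, b: int) -> bool { return a < b }, pq.default_swap_proc(int))
		defer pq.destroy(&q)

		pq.push(&q, 3)
		pq.push(&q, 1)
		pq.push(&q, 2)

		for pq.len(q) > 0 {
			fmt.println(pq.pop(&q)) // prints 1, 2, 3
		}
	}
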
diff --git a/core/container/queue.odin b/core/container/queue.odin
deleted file mode 100644
index bab4a18e6..000000000
--- a/core/container/queue.odin
+++ /dev/null
@@ -1,175 +0,0 @@
-package container
-
-Queue :: struct($T: typeid) {
- data: Array(T),
- len: int,
- offset: int,
-}
-
-/*
-queue_init :: proc{
- queue_init_none,
- queue_init_len,
- queue_init_len_cap,
-}
-queue_delete
-queue_clear
-queue_len
-queue_cap
-queue_space
-queue_get
-queue_set
-queue_reserve
-queue_resize
-queue_push :: proc{
- queue_push_back,
- queue_push_elems,
-};
-queue_push_front
-queue_pop_front
-queue_pop_back
-queue_consume
-*/
-
-queue_init_none :: proc(q: ^$Q/Queue($T), allocator := context.allocator) {
- queue_init_len(q, 0, allocator)
-}
-queue_init_len :: proc(q: ^$Q/Queue($T), len: int, allocator := context.allocator) {
- queue_init_len_cap(q, 0, 16, allocator)
-}
-queue_init_len_cap :: proc(q: ^$Q/Queue($T), len: int, cap: int, allocator := context.allocator) {
- array_init(&q.data, len, cap, allocator)
- q.len = len
- q.offset = 0
-}
-
-queue_init :: proc{queue_init_none, queue_init_len, queue_init_len_cap}
-
-queue_delete :: proc(q: $Q/Queue($T)) {
- array_delete(q.data)
-}
-
-queue_clear :: proc(q: ^$Q/Queue($T)) {
- q.len = 0
-}
-
-queue_len :: proc(q: $Q/Queue($T)) -> int {
- return q.len
-}
-
-queue_cap :: proc(q: $Q/Queue($T)) -> int {
- return array_cap(q.data)
-}
-
-queue_space :: proc(q: $Q/Queue($T)) -> int {
- return array_len(q.data) - q.len
-}
-
-queue_get :: proc(q: $Q/Queue($T), index: int) -> T {
- i := (index + q.offset) % array_len(q.data)
- data := array_slice(q.data)
- return data[i]
-}
-
-queue_set :: proc(q: ^$Q/Queue($T), index: int, item: T) {
- i := (index + q.offset) % array_len(q.data)
- data := array_slice(q.data)
- data[i] = item
-}
-
-
-queue_reserve :: proc(q: ^$Q/Queue($T), capacity: int) {
- if capacity > q.len {
- _queue_increase_capacity(q, capacity)
- }
-}
-
-queue_resize :: proc(q: ^$Q/Queue($T), length: int) {
- if length > q.len {
- _queue_increase_capacity(q, length)
- }
- q.len = length
-}
-
-queue_push_back :: proc(q: ^$Q/Queue($T), item: T) {
- if queue_space(q^) == 0 {
- _queue_grow(q)
- }
-
- queue_set(q, q.len, item)
- q.len += 1
-}
-
-queue_push_front :: proc(q: ^$Q/Queue($T), item: T) {
- if queue_space(q^) == 0 {
- _queue_grow(q)
- }
-
- q.offset = (q.offset - 1 + array_len(q.data)) % array_len(q.data)
- q.len += 1
- queue_set(q, 0, item)
-}
-
-queue_pop_front :: proc(q: ^$Q/Queue($T)) -> T {
- assert(q.len > 0)
- item := queue_get(q^, 0)
- q.offset = (q.offset + 1) % array_len(q.data)
- q.len -= 1
- if q.len == 0 {
- q.offset = 0
- }
- return item
-}
-
-queue_pop_back :: proc(q: ^$Q/Queue($T)) -> T {
- assert(q.len > 0)
- item := queue_get(q^, q.len-1)
- q.len -= 1
- return item
-}
-
-queue_consume :: proc(q: ^$Q/Queue($T), count: int) {
- q.offset = (q.offset + count) & array_len(q.data)
- q.len -= count
-}
-
-
-queue_push_elems :: proc(q: ^$Q/Queue($T), items: ..T) {
- if queue_space(q^) < len(items) {
- _queue_grow(q, q.len + len(items))
- }
- size := array_len(q.data)
- insert := (q.offset + q.len) % size
-
- to_insert := len(items)
- if insert + to_insert > size {
- to_insert = size - insert
- }
-
- the_items := items[:]
-
- data := array_slice(q.data)
-
- q.len += copy(data[insert:][:to_insert], the_items)
- the_items = the_items[to_insert:]
- q.len += copy(data[:], the_items)
-}
-
-queue_push :: proc{queue_push_back, queue_push_elems}
-
-
-
-_queue_increase_capacity :: proc(q: ^$Q/Queue($T), new_capacity: int) {
- end := array_len(q.data)
- array_resize(&q.data, new_capacity)
- if q.offset + q.len > end {
- end_items := q.len + end
- data := array_slice(q.data)
- copy(data[new_capacity-end_items:][:end_items], data[q.offset:][:end_items])
- q.offset += new_capacity - end
- }
-}
-_queue_grow :: proc(q: ^$Q/Queue($T), min_capacity: int = 0) {
- new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
- _queue_increase_capacity(q, new_capacity)
-}
diff --git a/core/container/queue/queue.odin b/core/container/queue/queue.odin
new file mode 100644
index 000000000..8ca3a85ac
--- /dev/null
+++ b/core/container/queue/queue.odin
@@ -0,0 +1,209 @@
+package container_queue
+
+import "core:builtin"
+import "core:runtime"
+_ :: runtime
+
+// Dynamically resizable double-ended queue/ring-buffer
+Queue :: struct($T: typeid) {
+ data: [dynamic]T,
+ len: uint,
+ offset: uint,
+}
+
+DEFAULT_CAPACITY :: 16
+
+// Procedure to initialize a queue
+init :: proc(q: ^$Q/Queue($T), capacity := DEFAULT_CAPACITY, allocator := context.allocator) -> bool {
+ if q.data.allocator.procedure == nil {
+ q.data.allocator = allocator
+ }
+ clear(q)
+ return reserve(q, capacity)
+}
+
+// Procedure to initialize a queue from a fixed backing slice
+init_from_slice :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
+ clear(q)
+ q.data = transmute([dynamic]T)runtime.Raw_Dynamic_Array{
+ data = raw_data(backing),
+ len = builtin.len(backing),
+ cap = builtin.len(backing),
+ allocator = {procedure=runtime.nil_allocator_proc, data=nil},
+ }
+ return true
+}
+
+// Procedure to destroy a queue
+destroy :: proc(q: ^$Q/Queue($T)) {
+ delete(q.data)
+}
+
+// The length of the queue
+len :: proc(q: $Q/Queue($T)) -> int {
+ return int(q.len)
+}
+
+// The current capacity of the queue
+cap :: proc(q: $Q/Queue($T)) -> int {
+ return builtin.len(q.data)
+}
+
+// Remaining space in the queue (cap-len)
+space :: proc(q: $Q/Queue($T)) -> int {
+ return builtin.len(q.data) - int(q.len)
+}
+
+// Reserve enough space for at least the specified capacity
+reserve :: proc(q: ^$Q/Queue($T), capacity: int) -> bool {
+ if uint(capacity) > q.len {
+ return _grow(q, uint(capacity))
+ }
+ return true
+}
+
+
+get :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> T {
+ runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+ idx := (uint(i)+q.offset)%builtin.len(q.data)
+ return q.data[idx]
+}
+set :: proc(q: ^$Q/Queue($T), #any_int i: int, val: T, loc := #caller_location) {
+ runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+ idx := (uint(i)+q.offset)%builtin.len(q.data)
+ q.data[idx] = val
+}
+get_ptr :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> ^T {
+ runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+ idx := (uint(i)+q.offset)%builtin.len(q.data)
+ return &q.data[idx]
+}
+
+// Push an element to the back of the queue
+push_back :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+ if space(q^) == 0 {
+ _grow(q) or_return
+ }
+ idx := (q.offset+uint(q.len))%builtin.len(q.data)
+ q.data[idx] = elem
+ q.len += 1
+ return true
+}
+
+// Push an element to the front of the queue
+push_front :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+ if space(q^) == 0 {
+ _grow(q) or_return
+ }
+ q.offset = uint(q.offset - 1 + builtin.len(q.data)) % builtin.len(q.data)
+ q.len += 1
+ q.data[q.offset] = elem
+ return true
+}
+
+
+// Pop an element from the back of the queue
+pop_back :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+ assert(condition=q.len > 0, loc=loc)
+ q.len -= 1
+ idx := (q.offset+uint(q.len))%builtin.len(q.data)
+ elem = q.data[idx]
+ return
+}
+// Safely pop an element from the back of the queue
+pop_back_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+ if q.len > 0 {
+ q.len -= 1
+ idx := (q.offset+uint(q.len))%builtin.len(q.data)
+ elem = q.data[idx]
+ ok = true
+ }
+ return
+}
+
+// Pop an element from the front of the queue
+pop_front :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+ assert(condition=q.len > 0, loc=loc)
+ elem = q.data[q.offset]
+ q.offset = (q.offset+1)%builtin.len(q.data)
+ q.len -= 1
+ return
+}
+// Safely pop an element from the front of the queue
+pop_front_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+ if q.len > 0 {
+ elem = q.data[q.offset]
+ q.offset = (q.offset+1)%builtin.len(q.data)
+ q.len -= 1
+ ok = true
+ }
+ return
+}
+
+// Push multiple elements to the back of the queue
+push_back_elems :: proc(q: ^$Q/Queue($T), elems: ..T) -> bool {
+ n := uint(builtin.len(elems))
+ if space(q^) < int(n) {
+ _grow(q, q.len + n) or_return
+ }
+
+ sz := uint(builtin.len(q.data))
+ insert_from := (q.offset + q.len) % sz
+ insert_to := n
+ if insert_from + insert_to > sz {
+ insert_to = sz - insert_from
+ }
+ copy(q.data[insert_from:], elems[:insert_to])
+ copy(q.data[:insert_from], elems[insert_to:])
+ q.len += n
+ return true
+}
+
+// Consume `n` elements from the front of the queue
+consume_front :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+ assert(condition=int(q.len) >= n, loc=loc)
+ if n > 0 {
+ nu := uint(n)
+ q.offset = (q.offset + nu) % builtin.len(q.data)
+ q.len -= nu
+ }
+}
+
+// Consume `n` elements from the back of the queue
+consume_back :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+ assert(condition=int(q.len) >= n, loc=loc)
+ if n > 0 {
+ q.len -= uint(n)
+ }
+}
+
+
+
+append_elem :: push_back
+append_elems :: push_back_elems
+push :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
+
+
+// Clear the contents of the queue
+clear :: proc(q: ^$Q/Queue($T)) {
+ q.len = 0
+ q.offset = 0
+}
+
+
+// Internal growing procedure
+_grow :: proc(q: ^$Q/Queue($T), min_capacity: uint = 0) -> bool {
+ new_capacity := max(min_capacity, uint(8), uint(builtin.len(q.data))*2)
+ n := uint(builtin.len(q.data))
+ builtin.resize(&q.data, int(new_capacity)) or_return
+ if q.offset + q.len > n {
+ diff := n - q.offset
+ copy(q.data[new_capacity-diff:], q.data[q.offset:][:diff])
+ q.offset += new_capacity - n
+ }
+ return true
+}
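
For reference, a minimal sketch (not part of this commit) of the new double-ended queue; the values and import alias are illustrative:

	package queue_example

	import "core:fmt"
	import queue "core:container/queue"

	main :: proc() {
		q: queue.Queue(int)
		queue.init(&q)
		defer queue.destroy(&q)

		queue.push_back(&q, 1)
		queue.push_back(&q, 2)
		queue.push_front(&q, 0)

		for queue.len(q) > 0 {
			fmt.println(queue.pop_front(&q)) // prints 0, 1, 2
		}
	}
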
diff --git a/core/container/ring.odin b/core/container/ring.odin
deleted file mode 100644
index 61492ec84..000000000
--- a/core/container/ring.odin
+++ /dev/null
@@ -1,74 +0,0 @@
-package container
-
-
-Ring :: struct($T: typeid) {
- next, prev: ^Ring(T),
- value: T,
-}
-
-ring_init :: proc(r: ^$R/Ring) -> ^R {
- r.prev, r.next = r, r
- return r
-}
-
-ring_next :: proc(r: ^$R/Ring) -> ^R {
- if r.next == nil {
- return ring_init(r)
- }
- return r.next
-}
-ring_prev :: proc(r: ^$R/Ring) -> ^R {
- if r.prev == nil {
- return ring_init(r)
- }
- return r.prev
-}
-
-
-ring_move :: proc(r: ^$R/Ring, n: int) -> ^R {
- r := r
- if r.next == nil {
- return ring_init(r)
- }
-
- switch {
- case n < 0:
- for _ in n..<0 {
- r = r.prev
- }
- case n > 0:
- for _ in 0..<n {
- r = r.next
- }
- }
- return r
-}
-
-ring_link :: proc(r, s: ^$R/Ring) -> ^R {
- n := ring_next(r)
- if s != nil {
- p := ring_prev(s)
- r.next = s
- s.prev = r
- n.prev = p
- p.next = n
- }
- return n
-}
-ring_unlink :: proc(r: ^$R/Ring, n: int) -> ^R {
- if n <= 0 {
- return nil
- }
- return ring_link(r, ring_move(r, n+1))
-}
-ring_len :: proc(r: ^$R/Ring) -> int {
- n := 0
- if r != nil {
- n = 1
- for p := ring_next(r); p != r; p = p.next {
- n += 1
- }
- }
- return n
-}
-
diff --git a/core/container/set.odin b/core/container/set.odin
deleted file mode 100644
index 562ac5409..000000000
--- a/core/container/set.odin
+++ /dev/null
@@ -1,240 +0,0 @@
-package container
-
-Set :: struct {
- hash: Array(int),
- entries: Array(Set_Entry),
-}
-
-Set_Entry :: struct {
- key: u64,
- next: int,
-}
-
-
-/*
-set_init :: proc{
- set_init_none,
- set_init_cap,
-}
-set_delete
-
-set_in
-set_not_in
-set_add
-set_remove
-set_reserve
-set_clear
-*/
-
-set_init :: proc{set_init_none, set_init_cap}
-
-set_init_none :: proc(m: ^Set, allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
-}
-
-set_init_cap :: proc(m: ^Set, cap: int, allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
- set_reserve(m, cap)
-}
-
-set_delete :: proc(m: Set) {
- array_delete(m.hash)
- array_delete(m.entries)
-}
-
-
-set_in :: proc(m: Set, key: u64) -> bool {
- return _set_find_or_fail(m, key) >= 0
-}
-set_not_in :: proc(m: Set, key: u64) -> bool {
- return _set_find_or_fail(m, key) < 0
-}
-
-set_add :: proc(m: ^Set, key: u64) {
- if array_len(m.hash) == 0 {
- _set_grow(m)
- }
-
- _ = _set_find_or_make(m, key)
- if _set_full(m^) {
- _set_grow(m)
- }
-}
-
-set_remove :: proc(m: ^Set, key: u64) {
- fr := _set_find_key(m^, key)
- if fr.entry_index >= 0 {
- _set_erase(m, fr)
- }
-}
-
-
-set_reserve :: proc(m: ^Set, new_size: int) {
- nm: Set
- set_init(&nm, m.hash.allocator)
- array_resize(&nm.hash, new_size)
- array_reserve(&nm.entries, array_len(m.entries))
-
- for i in 0..<new_size {
- array_set(&nm.hash, i, -1)
- }
- for i in 0..<array_len(m.entries) {
- e := array_get(m.entries, i)
- set_add(&nm, e.key)
- }
-
- set_delete(m^)
- m^ = nm
-}
-
-set_clear :: proc(m: ^Set) {
- array_clear(&m.hash)
- array_clear(&m.entries)
-}
-
-
-set_equal :: proc(a, b: Set) -> bool {
- a_entries := array_slice(a.entries)
- b_entries := array_slice(b.entries)
- if len(a_entries) != len(b_entries) {
- return false
- }
- for e in a_entries {
- if set_not_in(b, e.key) {
- return false
- }
- }
-
- return true
-}
-
-
-
-/// Internal
-
-_set_add_entry :: proc(m: ^Set, key: u64) -> int {
- e: Set_Entry
- e.key = key
- e.next = -1
- idx := array_len(m.entries)
- array_push(&m.entries, e)
- return idx
-}
-
-_set_erase :: proc(m: ^Set, fr: Map_Find_Result) {
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
- }
-
- if fr.entry_index == array_len(m.entries)-1 {
- array_pop_back(&m.entries)
- return
- }
-
- array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
- last := _set_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
- if last.entry_prev < 0 {
- array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
- } else {
- array_set(&m.hash, last.hash_index, fr.entry_index)
- }
-}
-
-
-_set_find_key :: proc(m: Set, key: u64) -> Map_Find_Result {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- fr.hash_index = int(key % u64(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it.key == key {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_set_find_entry :: proc(m: ^Set, e: ^Set_Entry) -> Map_Find_Result {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- fr.hash_index = int(e.key % u64(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it == e {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_set_find_or_fail :: proc(m: Set, key: u64) -> int {
- return _set_find_key(m, key).entry_index
-}
-_set_find_or_make :: proc(m: ^Set, key: u64) -> int {
- fr := _set_find_key(m^, key)
- if fr.entry_index >= 0 {
- return fr.entry_index
- }
-
- i := _set_add_entry(m, key)
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
- return i
-}
-
-
-_set_make :: proc(m: ^Set, key: u64) -> int {
- fr := _set_find_key(m^, key)
- i := _set_add_entry(m, key)
-
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
-
- array_get_ptr(m.entries, i).next = fr.entry_index
-
- return i
-}
-
-
-_set_full :: proc(m: Set) -> bool {
- // TODO(bill): Determine good max load factor
- return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_set_grow :: proc(m: ^Set) {
- new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
- set_reserve(m, new_size)
-}
-
-
diff --git a/core/container/small_array.odin b/core/container/small_array.odin
deleted file mode 100644
index 43b879d2d..000000000
--- a/core/container/small_array.odin
+++ /dev/null
@@ -1,95 +0,0 @@
-package container
-
-Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
- data: [N]T,
- len: int,
-}
-
-
-small_array_len :: proc(a: $A/Small_Array) -> int {
- return a.len
-}
-
-small_array_cap :: proc(a: $A/Small_Array) -> int {
- return len(a.data)
-}
-
-small_array_space :: proc(a: $A/Small_Array) -> int {
- return len(a.data) - a.len
-}
-
-small_array_slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
- return a.data[:a.len]
-}
-
-
-small_array_get :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> T {
- return a.data[index]
-}
-small_array_get_ptr :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> ^T {
- return &a.data[index]
-}
-
-small_array_set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T, loc := #caller_location) {
- a.data[index] = item
-}
-
-small_array_resize :: proc(a: ^$A/Small_Array, length: int) {
- a.len = min(length, len(a.data))
-}
-
-
-small_array_push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
- if a.len < len(a.data) {
- a.len += 1
- a.data[a.len-1] = item
- return true
- }
- return false
-}
-
-small_array_push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
- if a.len < len(a.data) {
- a.len += 1
- data := small_array_slice(a)
- copy(data[1:], data[:])
- data[0] = item
- return true
- }
- return false
-}
-
-small_array_pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := a.data[a.len-1]
- a.len -= 1
- return item
-}
-
-small_array_pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := a.data[0]
- s := small_array_slice(a)
- copy(s[:], s[1:])
- a.len -= 1
- return item
-}
-
-
-small_array_consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
- assert(condition=a.len >= count, loc=loc)
- a.len -= count
-}
-
-small_array_clear :: proc(a: ^$A/Small_Array($N, $T)) {
- small_array_resize(a, 0)
-}
-
-small_array_push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
- n := copy(a.data[a.len:], items[:])
- a.len += n
-}
-
-small_array_push :: proc{small_array_push_back, small_array_push_back_elems}
-small_array_append :: proc{small_array_push_back, small_array_push_back_elems}
-
diff --git a/core/container/small_array/small_array.odin b/core/container/small_array/small_array.odin
new file mode 100644
index 000000000..5cd421c84
--- /dev/null
+++ b/core/container/small_array/small_array.odin
@@ -0,0 +1,117 @@
+package container_small_array
+
+import "core:builtin"
+
+Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
+ data: [N]T,
+ len: int,
+}
+
+
+len :: proc(a: $A/Small_Array) -> int {
+ return a.len
+}
+
+cap :: proc(a: $A/Small_Array) -> int {
+ return builtin.len(a.data)
+}
+
+space :: proc(a: $A/Small_Array) -> int {
+ return builtin.len(a.data) - a.len
+}
+
+slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
+ return a.data[:a.len]
+}
+
+
+get :: proc(a: $A/Small_Array($N, $T), index: int) -> T {
+ return a.data[index]
+}
+get_ptr :: proc(a: ^$A/Small_Array($N, $T), index: int) -> ^T {
+ return &a.data[index]
+}
+
+set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T) {
+ a.data[index] = item
+}
+
+resize :: proc(a: ^$A/Small_Array, length: int) {
+ a.len = min(length, builtin.len(a.data))
+}
+
+
+push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+ if a.len < cap(a^) {
+ a.data[a.len] = item
+ a.len += 1
+ return true
+ }
+ return false
+}
+
+push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+ if a.len < cap(a^) {
+ a.len += 1
+ data := slice(a)
+ copy(data[1:], data[:])
+ data[0] = item
+ return true
+ }
+ return false
+}
+
+pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+ assert(condition=(N > 0 && a.len > 0), loc=loc)
+ item := a.data[a.len-1]
+ a.len -= 1
+ return item
+}
+
+pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+ assert(condition=(N > 0 && a.len > 0), loc=loc)
+ item := a.data[0]
+ s := slice(a)
+ copy(s[:], s[1:])
+ a.len -= 1
+ return item
+}
+
+pop_back_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+ if N > 0 && a.len > 0 {
+ item = a.data[a.len-1]
+ a.len -= 1
+ ok = true
+ }
+ return
+}
+
+pop_front_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+ if N > 0 && a.len > 0 {
+ item = a.data[0]
+ s := slice(a)
+ copy(s[:], s[1:])
+ a.len -= 1
+ ok = true
+ }
+ return
+}
+
+consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
+ assert(condition=a.len >= count, loc=loc)
+ a.len -= count
+}
+
+clear :: proc(a: ^$A/Small_Array($N, $T)) {
+ resize(a, 0)
+}
+
+push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
+ n := copy(a.data[a.len:], items[:])
+ a.len += n
+}
+
+append_elem :: push_back
+append_elems :: push_back_elems
+push :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
\ No newline at end of file
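
For reference, a minimal sketch (not part of this commit) of the relocated small_array package; the capacity, values, and import alias are illustrative:

	package small_array_example

	import "core:fmt"
	import sa "core:container/small_array"

	main :: proc() {
		buf: sa.Small_Array(4, int) // fixed capacity of 4, stored inline, no heap allocation
		sa.push_back(&buf, 10)
		sa.push_back(&buf, 20)
		sa.push_front(&buf, 5)

		fmt.println(sa.slice(&buf))           // [5, 10, 20]
		fmt.println(sa.len(buf), sa.cap(buf)) // 3 4
	}
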
diff --git a/core/container/topological_sort/topological_sort.odin b/core/container/topological_sort/topological_sort.odin
new file mode 100644
index 000000000..4b69930d5
--- /dev/null
+++ b/core/container/topological_sort/topological_sort.odin
@@ -0,0 +1,98 @@
+// The following is a generic O(V+E) topological sorter implementation.
+// O(V+E) is asymptotically optimal for topological sorting, and Odin's
+// map type is used to accelerate lookups.
+package container_topological_sort
+
+import "core:intrinsics"
+import "core:runtime"
+_ :: intrinsics
+_ :: runtime
+
+
+Relations :: struct($K: typeid) where intrinsics.type_is_valid_map_key(K) {
+ dependents: map[K]bool,
+ dependencies: int,
+}
+
+Sorter :: struct(K: typeid) where intrinsics.type_is_valid_map_key(K) {
+ relations: map[K]Relations(K),
+ dependents_allocator: runtime.Allocator,
+}
+
+@(private="file")
+make_relations :: proc(sorter: ^$S/Sorter($K)) -> (r: Relations(K)) {
+ r.dependents.allocator = sorter.dependents_allocator
+ return
+}
+
+
+init :: proc(sorter: ^$S/Sorter($K)) {
+ sorter.relations = make(map[K]Relations(K))
+ sorter.dependents_allocator = context.allocator
+}
+
+destroy :: proc(sorter: ^$S/Sorter($K)) {
+ for _, v in &sorter.relations {
+ delete(v.dependents)
+ }
+ delete(sorter.relations)
+}
+
+add_key :: proc(sorter: ^$S/Sorter($K), key: K) -> bool {
+ if key in sorter.relations {
+ return false
+ }
+ sorter.relations[key] = make_relations(sorter)
+ return true
+}
+
+add_dependency :: proc(sorter: ^$S/Sorter($K), key, dependency: K) -> bool {
+ if key == dependency {
+ return false
+ }
+
+ find := &sorter.relations[dependency]
+ if find == nil {
+ find = map_insert(&sorter.relations, dependency, make_relations(sorter))
+ }
+
+ if find.dependents[key] {
+ return true
+ }
+ find.dependents[key] = true
+
+ find = &sorter.relations[key]
+ if find == nil {
+ find = map_insert(&sorter.relations, key, make_relations(sorter))
+ }
+
+ find.dependencies += 1
+
+ return true
+}
+
+sort :: proc(sorter: ^$S/Sorter($K)) -> (sorted, cycled: [dynamic]K) {
+ relations := &sorter.relations
+
+ for k, v in relations {
+ if v.dependencies == 0 {
+ append(&sorted, k)
+ }
+ }
+
+ for root in &sorted do for k, _ in relations[root].dependents {
+ relation := &relations[k]
+ relation.dependencies -= 1
+ if relation.dependencies == 0 {
+ append(&sorted, k)
+ }
+ }
+
+ for k, v in relations {
+ if v.dependencies != 0 {
+ append(&cycled, k)
+ }
+ }
+
+ return
+}
\ No newline at end of file
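
For reference, a minimal sketch (not part of this commit) of the topological sorter; the string keys and import alias are illustrative:

	package topological_sort_example

	import "core:fmt"
	import ts "core:container/topological_sort"

	main :: proc() {
		sorter: ts.Sorter(string)
		ts.init(&sorter)
		defer ts.destroy(&sorter)

		// "linker" depends on "compiler", which depends on "parser".
		ts.add_dependency(&sorter, "linker", "compiler")
		ts.add_dependency(&sorter, "compiler", "parser")

		sorted, cycled := ts.sort(&sorter)
		defer delete(sorted)
		defer delete(cycled)

		fmt.println(sorted)      // prints the keys in dependency order: parser, compiler, linker
		fmt.println(len(cycled)) // 0: no cycles were detected
	}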