author    Laytan Laats <laytanlaats@hotmail.com>  2024-07-08 21:07:53 +0200
committer Laytan Laats <laytanlaats@hotmail.com>  2024-07-08 21:07:53 +0200
commit    2d8d0dd8515a4598d6e027f28818614c117ae0c4 (patch)
tree      cf038003add5ddda2ef71395fe78a801c8a2abb0 /core/hash
parent    1a20b78633038614635da99b5e634015d4ce7d6e (diff)
fix `@(optimization_mode)` usage in builtin collections
Diffstat (limited to 'core/hash')
 -rw-r--r--  core/hash/crc.odin              |  8
 -rw-r--r--  core/hash/crc32.odin            |  2
 -rw-r--r--  core/hash/hash.odin             | 22
 -rw-r--r--  core/hash/xxhash/common.odin    | 10
 -rw-r--r--  core/hash/xxhash/xxhash_3.odin  | 90
 -rw-r--r--  core/hash/xxhash/xxhash_32.odin | 10
 -rw-r--r--  core/hash/xxhash/xxhash_64.odin | 16
7 files changed, 79 insertions(+), 79 deletions(-)
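
Every hunk below makes the same substitution: the `@(optimization_mode="speed")` attribute becomes `@(optimization_mode="favor_size")`. As a minimal sketch of the resulting pattern (not part of this commit; the procedure name and body are hypothetical, written in the style of the hashes in core/hash):

package example_hash

// After this commit, procedures in core/hash carry the "favor_size" mode
// instead of the previous "speed" value.
@(optimization_mode="favor_size")
example_checksum :: proc "contextless" (data: []byte, seed := u32(0)) -> (result: u32) {
	result = seed
	for b in data {
		// djb2-style mix, purely for illustration.
		result = (result << 5) + result + u32(b)
	}
	return result
}
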
diff --git a/core/hash/crc.odin b/core/hash/crc.odin
index cb3e36881..68b8f5369 100644
--- a/core/hash/crc.odin
+++ b/core/hash/crc.odin
@@ -1,6 +1,6 @@
package hash
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
crc64_ecma_182 :: proc "contextless" (data: []byte, seed := u64(0)) -> (result: u64) #no_bounds_check {
result = seed
#no_bounds_check for b in data {
@@ -14,7 +14,7 @@ crc64_ecma_182 :: proc "contextless" (data: []byte, seed := u64(0)) -> (result:
bit-reversed, with one's complement pre and post processing.
Based on Mark Adler's v1.4 implementation in C under the ZLIB license.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
crc64_xz :: proc "contextless" (data: []byte, seed := u64(0)) -> u64 #no_bounds_check {
data := data
result := ~u64le(seed)
@@ -52,7 +52,7 @@ crc64_xz :: proc "contextless" (data: []byte, seed := u64(0)) -> u64 #no_bounds_
/*
Generator polynomial: x^64 + x^4 + x^3 + x + 1
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
crc64_iso_3306 :: proc "contextless" (data: []byte, seed := u64(0)) -> u64 #no_bounds_check {
result := seed
@@ -738,4 +738,4 @@ crc64_iso_3306_inverse :: proc "contextless" (data: []byte, seed := u64(0)) -> u
0x9fc0, 0x9e70, 0x9ca0, 0x9d10,
0x9480, 0x9530, 0x97e0, 0x9650,
0x9240, 0x93f0, 0x9120, 0x9090,
-}
\ No newline at end of file
+}
diff --git a/core/hash/crc32.odin b/core/hash/crc32.odin
index 5dde467a7..a7f68207e 100644
--- a/core/hash/crc32.odin
+++ b/core/hash/crc32.odin
@@ -2,7 +2,7 @@ package hash
import "base:intrinsics"
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
crc32 :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 #no_bounds_check {
crc := ~seed
buffer := raw_data(data)
diff --git a/core/hash/hash.odin b/core/hash/hash.odin
index fb170bfe4..45f524d8a 100644
--- a/core/hash/hash.odin
+++ b/core/hash/hash.odin
@@ -3,7 +3,7 @@ package hash
import "core:mem"
import "base:intrinsics"
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
adler32 :: proc "contextless" (data: []byte, seed := u32(1)) -> u32 #no_bounds_check {
ADLER_CONST :: 65521
@@ -46,7 +46,7 @@ adler32 :: proc "contextless" (data: []byte, seed := u32(1)) -> u32 #no_bounds_c
return (u32(b) << 16) | u32(a)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
djb2 :: proc "contextless" (data: []byte, seed := u32(5381)) -> u32 {
hash: u32 = seed
for b in data {
@@ -73,7 +73,7 @@ djbx33a :: proc "contextless" (data: []byte, seed := u32(5381)) -> (result: [16]
}
// If you have a choice, prefer fnv32a
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
fnv32_no_a :: proc "contextless" (data: []byte, seed := u32(0x811c9dc5)) -> u32 {
h: u32 = seed
for b in data {
@@ -86,7 +86,7 @@ fnv32 :: fnv32_no_a // NOTE(bill): Not a fan of these aliases but seems necessar
fnv64 :: fnv64_no_a // NOTE(bill): Not a fan of these aliases but seems necessary
// If you have a choice, prefer fnv64a
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
fnv64_no_a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325)) -> u64 {
h: u64 = seed
for b in data {
@@ -94,7 +94,7 @@ fnv64_no_a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325))
}
return h
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
fnv32a :: proc "contextless" (data: []byte, seed := u32(0x811c9dc5)) -> u32 {
h: u32 = seed
for b in data {
@@ -103,7 +103,7 @@ fnv32a :: proc "contextless" (data: []byte, seed := u32(0x811c9dc5)) -> u32 {
return h
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
fnv64a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325)) -> u64 {
h: u64 = seed
for b in data {
@@ -112,7 +112,7 @@ fnv64a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325)) ->
return h
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
jenkins :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
hash: u32 = seed
for b in data {
@@ -126,7 +126,7 @@ jenkins :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
return hash
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
murmur32 :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
c1_32: u32 : 0xcc9e2d51
c2_32: u32 : 0x1b873593
@@ -177,7 +177,7 @@ murmur32 :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
}
// See https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp#L96
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
murmur64a :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
m :: 0xc6a4a7935bd1e995
r :: 47
@@ -218,7 +218,7 @@ murmur64a :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
}
// See https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp#L140
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
murmur64b :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
m :: 0x5bd1e995
r :: 24
@@ -286,7 +286,7 @@ murmur64b :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
return u64(h1)<<32 | u64(h2)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
sdbm :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
hash: u32 = seed
for b in data {
diff --git a/core/hash/xxhash/common.odin b/core/hash/xxhash/common.odin
index faf88e0d4..bbeb60db3 100644
--- a/core/hash/xxhash/common.odin
+++ b/core/hash/xxhash/common.odin
@@ -67,17 +67,17 @@ when !XXH_DISABLE_PREFETCH {
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH_rotl32 :: #force_inline proc(x, r: u32) -> (res: u32) {
return ((x << r) | (x >> (32 - r)))
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH_rotl64 :: #force_inline proc(x, r: u64) -> (res: u64) {
return ((x << r) | (x >> (64 - r)))
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH32_read32 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u32) {
if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
#no_bounds_check b := (^u32le)(&buf[0])^
@@ -89,7 +89,7 @@ XXH32_read32 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned)
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u64) {
if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
#no_bounds_check b := (^u64le)(&buf[0])^
@@ -99,4 +99,4 @@ XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned)
mem_copy(&b, raw_data(buf[:]), 8)
return u64(b)
}
-}
\ No newline at end of file
+}
diff --git a/core/hash/xxhash/xxhash_3.odin b/core/hash/xxhash/xxhash_3.odin
index be2531b6e..9e159260b 100644
--- a/core/hash/xxhash/xxhash_3.odin
+++ b/core/hash/xxhash/xxhash_3.odin
@@ -111,13 +111,13 @@ XXH128_canonical :: struct {
@param lhs, rhs The 64-bit integers to multiply
@return The low 64 bits of the product XOR'd by the high 64 bits.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH_mul_64_to_128_fold_64 :: #force_inline proc(lhs, rhs: xxh_u64) -> (res: xxh_u64) {
t := u128(lhs) * u128(rhs)
return u64(t & 0xFFFFFFFFFFFFFFFF) ~ u64(t >> 64)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH_xorshift_64 :: #force_inline proc(v: xxh_u64, #any_int shift: uint) -> (res: xxh_u64) {
return v ~ (v >> shift)
}
@@ -125,7 +125,7 @@ XXH_xorshift_64 :: #force_inline proc(v: xxh_u64, #any_int shift: uint) -> (res:
/*
This is a fast avalanche stage, suitable when input bits are already partially mixed
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_avalanche :: #force_inline proc(h64: xxh_u64) -> (res: xxh_u64) {
res = XXH_xorshift_64(h64, 37)
res *= 0x165667919E3779F9
@@ -137,7 +137,7 @@ XXH3_avalanche :: #force_inline proc(h64: xxh_u64) -> (res: xxh_u64) {
This is a stronger avalanche, inspired by Pelle Evensen's rrmxmx
preferable when input has not been previously mixed
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_rrmxmx :: #force_inline proc(h64, length: xxh_u64) -> (res: xxh_u64) {
/* this mix is inspired by Pelle Evensen's rrmxmx */
res = h64
@@ -166,7 +166,7 @@ XXH3_rrmxmx :: #force_inline proc(h64, length: xxh_u64) -> (res: xxh_u64) {
fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_1to3_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
/* A doubled version of 1to3_64b with different constants. */
length := len(input)
@@ -190,7 +190,7 @@ XXH3_len_1to3_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_4to8_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
length := len(input)
seed := seed
@@ -219,7 +219,7 @@ XXH3_len_4to8_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_9to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
length := len(input)
@@ -261,7 +261,7 @@ XXH3_len_9to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u
/*
Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_0to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
length := len(input)
@@ -279,7 +279,7 @@ XXH3_len_0to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u
/*
A bit slower than XXH3_mix16B, but handles multiply by zero better.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH128_mix32B :: #force_inline proc(acc: xxh_u128, input_1: []u8, input_2: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
acc128 := XXH128_hash_t{
h = acc,
@@ -293,7 +293,7 @@ XXH128_mix32B :: #force_inline proc(acc: xxh_u128, input_1: []u8, input_2: []u8,
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_17to128_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
length := len(input)
@@ -323,7 +323,7 @@ XXH3_len_17to128_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh
unreachable()
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_129to240_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
length := len(input)
@@ -379,7 +379,7 @@ XXH3_INIT_ACC :: [XXH_ACC_NB]xxh_u64{
XXH_SECRET_MERGEACCS_START :: 11
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_128b_internal :: #force_inline proc(
input: []u8,
secret: []u8,
@@ -407,7 +407,7 @@ XXH3_hashLong_128b_internal :: #force_inline proc(
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_128b_default :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash) {
return XXH3_hashLong_128b_internal(input, XXH3_kSecret[:], XXH3_accumulate_512, XXH3_scramble_accumulator)
}
@@ -415,12 +415,12 @@ XXH3_hashLong_128b_default :: #force_no_inline proc(input: []u8, seed: xxh_u64,
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_128b_withSecret :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash) {
return XXH3_hashLong_128b_internal(input, secret, XXH3_accumulate_512, XXH3_scramble_accumulator)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_128b_withSeed_internal :: #force_inline proc(
input: []u8, seed: xxh_u64, secret: []u8,
f_acc512: XXH3_accumulate_512_f,
@@ -441,14 +441,14 @@ XXH3_hashLong_128b_withSeed_internal :: #force_inline proc(
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
- @(optimization_mode="speed")
+ @(optimization_mode="favor_size")
XXH3_hashLong_128b_withSeed :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash) {
return XXH3_hashLong_128b_withSeed_internal(input, seed, secret, XXH3_accumulate_512, XXH3_scramble_accumulator , XXH3_init_custom_secret)
}
XXH3_hashLong128_f :: #type proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash)
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_128bits_internal :: #force_inline proc(
input: []u8, seed: xxh_u64, secret: []u8, f_hl128: XXH3_hashLong128_f) -> (res: XXH3_128_hash) {
@@ -474,17 +474,17 @@ XXH3_128bits_internal :: #force_inline proc(
}
/* === Public XXH128 API === */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_128_default :: proc(input: []u8) -> (hash: XXH3_128_hash) {
return XXH3_128bits_internal(input, 0, XXH3_kSecret[:], XXH3_hashLong_128b_withSeed)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_128_with_seed :: proc(input: []u8, seed: xxh_u64) -> (hash: XXH3_128_hash) {
return XXH3_128bits_internal(input, seed, XXH3_kSecret[:], XXH3_hashLong_128b_withSeed)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_128_with_secret :: proc(input: []u8, secret: []u8) -> (hash: XXH3_128_hash) {
return XXH3_128bits_internal(input, 0, secret, XXH3_hashLong_128b_withSecret)
}
@@ -519,7 +519,7 @@ XXH3_128 :: proc { XXH3_128_default, XXH3_128_with_seed, XXH3_128_with_secret }
The XOR mixing hides individual parts of the secret and increases entropy.
This adds an extra layer of strength for custom secrets.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_1to3_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
length := u32(len(input))
assert(input != nil)
@@ -542,7 +542,7 @@ XXH3_len_1to3_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_4to8_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
length := u32(len(input))
assert(input != nil)
@@ -562,7 +562,7 @@ XXH3_len_4to8_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_9to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
length := u64(len(input))
assert(input != nil)
@@ -579,7 +579,7 @@ XXH3_len_9to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_0to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
length := u64(len(input))
assert(input != nil)
@@ -621,7 +621,7 @@ XXH3_len_0to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
by this, although it is always a good idea to use a proper seed if you care
about strength.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_mix16B :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
input_lo := XXH64_read64(input[0:])
input_hi := XXH64_read64(input[8:])
@@ -632,7 +632,7 @@ XXH3_mix16B :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (
}
/* For mid range keys, XXH3 uses a Mum-hash variant. */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_17to128_64b :: proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
assert(len(secret) >= XXH3_SECRET_SIZE_MIN)
length := len(input)
@@ -665,7 +665,7 @@ XXH3_MIDSIZE_MAX :: 240
XXH3_MIDSIZE_STARTOFFSET :: 3
XXH3_MIDSIZE_LASTOFFSET :: 17
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_len_129to240_64b :: proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
assert(len(secret) >= XXH3_SECRET_SIZE_MIN)
length := len(input)
@@ -699,7 +699,7 @@ XXH_SECRET_CONSUME_RATE :: 8 /* nb of secret bytes consumed at each accumulatio
XXH_ACC_NB :: (XXH_STRIPE_LEN / size_of(xxh_u64))
XXH_SECRET_LASTACC_START :: 7 /* not aligned on 8, last secret is different from acc & scrambler */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH_writeLE64 :: #force_inline proc(dst: []u8, v64: u64le) {
v := v64
mem_copy(raw_data(dst), &v, size_of(v64))
@@ -737,7 +737,7 @@ XXH3_scramble_accumulator : XXH3_scramble_accumulator_f = XXH3_scramble_accumula
XXH3_init_custom_secret : XXH3_init_custom_secret_f = XXH3_init_custom_secret_scalar
/* scalar variants - universal */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_accumulate_512_scalar :: #force_inline proc(acc: []xxh_u64, input: []u8, secret: []u8) {
xacc := acc /* presumed aligned */
xinput := input /* no alignment restriction */
@@ -754,7 +754,7 @@ XXH3_accumulate_512_scalar :: #force_inline proc(acc: []xxh_u64, input: []u8, se
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_scramble_accumulator_scalar :: #force_inline proc(acc: []xxh_u64, secret: []u8) {
xacc := acc /* presumed aligned */
xsecret := secret /* no alignment restriction */
@@ -771,7 +771,7 @@ XXH3_scramble_accumulator_scalar :: #force_inline proc(acc: []xxh_u64, secret: [
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_init_custom_secret_scalar :: #force_inline proc(custom_secret: []u8, seed64: xxh_u64) {
#assert((XXH_SECRET_DEFAULT_SIZE & 15) == 0)
@@ -791,7 +791,7 @@ XXH_PREFETCH_DIST :: 320
* Loops over XXH3_accumulate_512().
* Assumption: nbStripes will not overflow the secret size
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_accumulate :: #force_inline proc(
acc: []xxh_u64, input: []u8, secret: []u8, nbStripes: uint, f_acc512: XXH3_accumulate_512_f) {
@@ -804,7 +804,7 @@ XXH3_accumulate :: #force_inline proc(
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_internal_loop :: #force_inline proc(acc: []xxh_u64, input: []u8, secret: []u8,
f_acc512: XXH3_accumulate_512_f, f_scramble: XXH3_scramble_accumulator_f) {
@@ -833,14 +833,14 @@ XXH3_hashLong_internal_loop :: #force_inline proc(acc: []xxh_u64, input: []u8, s
}
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_mix2Accs :: #force_inline proc(acc: []xxh_u64, secret: []u8) -> (res: xxh_u64) {
return XXH_mul_64_to_128_fold_64(
acc[0] ~ XXH64_read64(secret),
acc[1] ~ XXH64_read64(secret[8:]))
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_mergeAccs :: #force_inline proc(acc: []xxh_u64, secret: []u8, start: xxh_u64) -> (res: xxh_u64) {
result64 := start
#no_bounds_check for i := 0; i < 4; i += 1 {
@@ -849,7 +849,7 @@ XXH3_mergeAccs :: #force_inline proc(acc: []xxh_u64, secret: []u8, start: xxh_u6
return XXH3_avalanche(result64)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_64b_internal :: #force_inline proc(input: []u8, secret: []u8,
f_acc512: XXH3_accumulate_512_f, f_scramble: XXH3_scramble_accumulator_f) -> (hash: xxh_u64) {
@@ -868,7 +868,7 @@ XXH3_hashLong_64b_internal :: #force_inline proc(input: []u8, secret: []u8,
/*
It's important for performance that XXH3_hashLong is not inlined.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_64b_withSecret :: #force_no_inline proc(input: []u8, seed64: xxh_u64, secret: []u8) -> (hash: xxh_u64) {
return XXH3_hashLong_64b_internal(input, secret, XXH3_accumulate_512, XXH3_scramble_accumulator)
}
@@ -880,7 +880,7 @@ XXH3_hashLong_64b_withSecret :: #force_no_inline proc(input: []u8, seed64: xxh_u
This variant enforces that the compiler can detect that,
and uses this opportunity to streamline the generated code for better performance.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_64b_default :: #force_no_inline proc(input: []u8, seed64: xxh_u64, secret: []u8) -> (hash: xxh_u64) {
return XXH3_hashLong_64b_internal(input, XXH3_kSecret[:], XXH3_accumulate_512, XXH3_scramble_accumulator)
}
@@ -896,7 +896,7 @@ XXH3_hashLong_64b_default :: #force_no_inline proc(input: []u8, seed64: xxh_u64,
It's important for performance that XXH3_hashLong is not inlined. Not sure
why (uop cache maybe?), but the difference is large and easily measurable.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_64b_withSeed_internal :: #force_no_inline proc(
input: []u8,
seed: xxh_u64,
@@ -916,7 +916,7 @@ XXH3_hashLong_64b_withSeed_internal :: #force_no_inline proc(
/*
It's important for performance that XXH3_hashLong is not inlined.
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_hashLong_64b_withSeed :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (hash: xxh_u64) {
return XXH3_hashLong_64b_withSeed_internal(input, seed, XXH3_accumulate_512, XXH3_scramble_accumulator, XXH3_init_custom_secret)
}
@@ -924,7 +924,7 @@ XXH3_hashLong_64b_withSeed :: #force_no_inline proc(input: []u8, seed: xxh_u64,
XXH3_hashLong64_f :: #type proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: xxh_u64)
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_64bits_internal :: proc(input: []u8, seed: xxh_u64, secret: []u8, f_hashLong: XXH3_hashLong64_f) -> (hash: xxh_u64) {
assert(len(secret) >= XXH3_SECRET_SIZE_MIN)
/*
@@ -944,19 +944,19 @@ XXH3_64bits_internal :: proc(input: []u8, seed: xxh_u64, secret: []u8, f_hashLon
}
/* === Public entry point === */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_64_default :: proc(input: []u8) -> (hash: xxh_u64) {
return XXH3_64bits_internal(input, 0, XXH3_kSecret[:], XXH3_hashLong_64b_default)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_64_with_seed :: proc(input: []u8, seed: xxh_u64) -> (hash: xxh_u64) {
return XXH3_64bits_internal(input, seed, XXH3_kSecret[:], XXH3_hashLong_64b_withSeed)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH3_64_with_secret :: proc(input, secret: []u8) -> (hash: xxh_u64) {
return XXH3_64bits_internal(input, 0, secret, XXH3_hashLong_64b_withSecret)
}
-XXH3_64 :: proc { XXH3_64_default, XXH3_64_with_seed, XXH3_64_with_secret }
\ No newline at end of file
+XXH3_64 :: proc { XXH3_64_default, XXH3_64_with_seed, XXH3_64_with_secret }
diff --git a/core/hash/xxhash/xxhash_32.odin b/core/hash/xxhash/xxhash_32.odin
index b0dea305e..3ea1c3cf2 100644
--- a/core/hash/xxhash/xxhash_32.odin
+++ b/core/hash/xxhash/xxhash_32.odin
@@ -40,7 +40,7 @@ XXH_PRIME32_3 :: 0xC2B2AE3D /*!< 0b11000010101100101010111000111101 */
XXH_PRIME32_4 :: 0x27D4EB2F /*!< 0b00100111110101001110101100101111 */
XXH_PRIME32_5 :: 0x165667B1 /*!< 0b00010110010101100110011110110001 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH32_round :: #force_inline proc(seed, input: XXH32_hash) -> (res: XXH32_hash) {
seed := seed
@@ -53,7 +53,7 @@ XXH32_round :: #force_inline proc(seed, input: XXH32_hash) -> (res: XXH32_hash)
/*
Mix all bits
*/
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH32_avalanche :: #force_inline proc(h32: u32) -> (res: u32) {
h32 := h32
@@ -65,7 +65,7 @@ XXH32_avalanche :: #force_inline proc(h32: u32) -> (res: u32) {
return h32
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH32_finalize :: #force_inline proc(h32: u32, buf: []u8, alignment: Alignment) -> (res: u32) {
process_1 :: #force_inline proc(h32: u32, buf: []u8) -> (h32_res: u32, buf_res: []u8) {
#no_bounds_check b := u32(buf[0])
@@ -143,7 +143,7 @@ XXH32_finalize :: #force_inline proc(h32: u32, buf: []u8, alignment: Alignment)
unreachable()
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH32_endian_align :: #force_inline proc(input: []u8, seed := XXH32_DEFAULT_SEED, alignment: Alignment) -> (res: XXH32_hash) {
buf := input
length := len(input)
@@ -318,4 +318,4 @@ XXH32_canonical_from_hash :: proc(hash: XXH32_hash) -> (canonical: XXH32_canonic
XXH32_hash_from_canonical :: proc(canonical: ^XXH32_canonical) -> (hash: XXH32_hash) {
h := (^u32be)(&canonical.digest)^
return XXH32_hash(h)
-}
\ No newline at end of file
+}
diff --git a/core/hash/xxhash/xxhash_64.odin b/core/hash/xxhash/xxhash_64.odin
index b274da374..3b24f20a1 100644
--- a/core/hash/xxhash/xxhash_64.odin
+++ b/core/hash/xxhash/xxhash_64.odin
@@ -40,7 +40,7 @@ XXH_PRIME64_3 :: 0x165667B19E3779F9 /*!< 0b0001011001010110011001111011000110011
XXH_PRIME64_4 :: 0x85EBCA77C2B2AE63 /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
XXH_PRIME64_5 :: 0x27D4EB2F165667C5 /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_round :: proc(acc, input: xxh_u64) -> (res: xxh_u64) {
acc := acc
@@ -50,14 +50,14 @@ XXH64_round :: proc(acc, input: xxh_u64) -> (res: xxh_u64) {
return acc
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_mergeRound :: proc(acc, val: xxh_u64) -> (res: xxh_u64) {
res = acc ~ XXH64_round(0, val)
res = res * XXH_PRIME64_1 + XXH_PRIME64_4
return res
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_avalanche :: proc(h64: xxh_u64) -> (res: xxh_u64) {
res = h64
res ~= res >> 33
@@ -68,7 +68,7 @@ XXH64_avalanche :: proc(h64: xxh_u64) -> (res: xxh_u64) {
return res
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_finalize :: proc(h64: xxh_u64, buf: []u8, alignment: Alignment) -> (res: xxh_u64) {
buf := buf
length := len(buf) & 31
@@ -100,7 +100,7 @@ XXH64_finalize :: proc(h64: xxh_u64, buf: []u8, alignment: Alignment) -> (res: x
return XXH64_avalanche(res)
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_endian_align :: proc(input: []u8, seed := XXH64_DEFAULT_SEED, alignment := Alignment.Unaligned) -> (res: xxh_u64) {
buf := input
length := len(buf)
@@ -191,7 +191,7 @@ XXH64_reset_state :: proc(state_ptr: ^XXH64_state, seed := XXH64_DEFAULT_SEED) -
return .None
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_update :: proc(state: ^XXH64_state, input: []u8) -> (err: Error) {
buf := input
length := len(buf)
@@ -245,7 +245,7 @@ XXH64_update :: proc(state: ^XXH64_state, input: []u8) -> (err: Error) {
return .None
}
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
XXH64_digest :: proc(state: ^XXH64_state) -> (res: XXH64_hash) {
if state.total_len >= 32 {
v1 := state.v1
@@ -292,4 +292,4 @@ XXH64_canonical_from_hash :: proc(hash: XXH64_hash) -> (canonical: XXH64_canonic
XXH64_hash_from_canonical :: proc(canonical: ^XXH64_canonical) -> (hash: XXH64_hash) {
h := (^u64be)(&canonical.digest)^
return XXH64_hash(h)
-}
\ No newline at end of file
+}