path: root/core/hash/xxhash/common.odin
author    Barinzaya <barinzaya@gmail.com>    2025-07-30 12:37:12 -0400
committer Barinzaya <barinzaya@gmail.com>    2025-07-31 13:05:08 -0400
commit    9d40f371bebaca6c74b615fa03dd5574eb51e327 (patch)
tree      ae72123bc6c73555866ffaa71ee39047f12e1149 /core/hash/xxhash/common.odin
parent    393e00bec3e855475659de0c6c38d3898a36cb36 (diff)
Add static SIMD support to XXH3 in core:hash/xxhash.
This uses compile-time feature checks to decide how large a SIMD vector to use. It currently checks amd64/i386 targets and sizes its vectors for SSE2, AVX2, or AVX-512 as needed. The generalized SIMD procedures could also be useful for multiversioning the hash procs, to allow run-time dispatch based on available CPU features.
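A minimal sketch of the width-selection idea described above, assuming intrinsics.has_target_feature can be queried in a when clause; the constant name XXH3_SIMD_LANES and the exact feature strings are illustrative and not necessarily what the commit itself uses:

package xxhash_width_sketch

import "base:intrinsics"

// Pick the number of u64 lanes per SIMD vector at compile time (sketch only).
when ODIN_ARCH == .amd64 || ODIN_ARCH == .i386 {
	when intrinsics.has_target_feature("avx512f") {
		XXH3_SIMD_LANES :: 8 // 512-bit vectors: 8 x u64
	} else when intrinsics.has_target_feature("avx2") {
		XXH3_SIMD_LANES :: 4 // 256-bit vectors: 4 x u64
	} else {
		XXH3_SIMD_LANES :: 2 // SSE2 baseline: 2 x u64
	}
} else {
	XXH3_SIMD_LANES :: 2 // conservative default on other architectures
}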
Diffstat (limited to 'core/hash/xxhash/common.odin')
-rw-r--r--  core/hash/xxhash/common.odin  32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/core/hash/xxhash/common.odin b/core/hash/xxhash/common.odin
index adfc1bac2..636393b52 100644
--- a/core/hash/xxhash/common.odin
+++ b/core/hash/xxhash/common.odin
@@ -101,3 +101,35 @@ XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned)
return u64(b)
}
}
+
+XXH64_read64_simd :: #force_inline proc(buf: []$E, $W: uint, alignment := Alignment.Unaligned) -> (res: #simd[W]u64) {
+ if alignment == .Aligned {
+ res = (^#simd[W]u64)(raw_data(buf))^
+ } else {
+ res = intrinsics.unaligned_load((^#simd[W]u64)(raw_data(buf)))
+ }
+
+ when ODIN_ENDIAN == .Big {
+ bytes := transmute(#simd[W*8]u8)res
+ bytes = intrinsics.simd_lanes_reverse(bytes)
+ res = transmute(#simd[W]u64)bytes
+ res = intrinsics.simd_lanes_reverse(res)
+ }
+ return
+}
+
+XXH64_write64_simd :: #force_inline proc(buf: []$E, value: $V/#simd[$W]u64, alignment := Alignment.Unaligned) {
+ value := value
+ when ODIN_ENDIAN == .Big {
+ bytes := transmute(#simd[W*8]u8)value
+ bytes = intrinsics.simd_lanes_reverse(bytes)
+ value = transmute(#simd[W]u64)bytes
+ value = intrinsics.simd_lanes_reverse(value)
+ }
+
+ if alignment == .Aligned {
+ (^V)(raw_data(buf))^ = value
+ } else {
+ intrinsics.unaligned_store((^V)(raw_data(buf)), value)
+ }
+}
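For context, a minimal usage sketch (not part of the commit) showing how the two helpers combine; the proc name and the trivial mix step are assumptions, and the real XXH3 round logic differs:

// Illustrative only: read W input lanes and W accumulator lanes, mix, store back.
XXH3_accumulate_sketch :: #force_inline proc(acc: []u64, input: []u8, $W: uint) {
	data := XXH64_read64_simd(input, W)         // input may be unaligned
	accv := XXH64_read64_simd(acc, W, .Aligned) // accumulators are kept aligned
	accv = accv + data                          // placeholder mix; not the real round
	XXH64_write64_simd(acc, accv, .Aligned)
}

On big-endian targets both helpers byte-swap each u64 lane (via the lane-reverse transmute above), so callers always see little-endian values regardless of host endianness.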