// An implementation of Yann Collet's `xxhash`.
//
// [[ xxhash Fast Hash Algorithm; https://cyan4973.github.io/xxHash/ ]]
package xxhash

/*
	Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
	Made available under Odin's BSD-3 license, based on the original C code.

	List of contributors:
		Jeroen van Rijn: Initial implementation.
*/

import "base:intrinsics"
import "base:runtime"
mem_copy :: runtime.mem_copy
byte_swap :: intrinsics.byte_swap
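
// A minimal usage sketch (assumption: the one-shot procedures `XXH32` and
// `XXH64`, defined elsewhere in this package, take a byte slice plus an
// optional seed). `_example_one_shot` is a hypothetical helper for
// illustration, not part of the upstream file.
@(private)
_example_one_shot :: proc(data: []u8) -> (h32: u32, h64: u64) {
	h32 = u32(XXH32(data)) // One-shot 32-bit hash with the default seed.
	h64 = u64(XXH64(data)) // One-shot 64-bit hash with the default seed.
	return
}
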
/*
	Version definition
*/
XXH_VERSION_MAJOR   :: 0
XXH_VERSION_MINOR   :: 8
XXH_VERSION_RELEASE :: 1
XXH_VERSION_NUMBER  :: XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + XXH_VERSION_RELEASE
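// For the constants above this works out to 0 * 100 * 100 + 8 * 100 + 1 = 801,
// i.e. version 0.8.1.
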
/*
	0 - Use memcopy, for platforms where unaligned reads are a problem
	2 - Direct cast, for platforms where unaligned reads are allowed (default)
*/
XXH_FORCE_MEMORY_ACCESS :: #config(XXH_FORCE_MEMORY_ACCESS, 2)

/*
	`false` - Use this on platforms where unaligned reads are fast
	`true`  - Use this on platforms where unaligned reads are slow
*/
XXH_FORCE_ALIGN_CHECK :: #config(XXH_FORCE_ALIGN_CHECK, false)
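// Both settings are ordinary `#config` values, so they can be overridden at
// build time with Odin's define flag, e.g.:
//
//	odin build . -define:XXH_FORCE_MEMORY_ACCESS=0 -define:XXH_FORCE_ALIGN_CHECK=true
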
Alignment :: enum {
	Aligned,
	Unaligned,
}

Error :: enum {
	None = 0,
	Error,
}

XXH_DISABLE_PREFETCH :: #config(XXH_DISABLE_PREFETCH, true)
/*
	llvm.prefetch fails code generation on Linux.
*/
when !XXH_DISABLE_PREFETCH {
	prefetch_address :: #force_inline proc(address: rawptr) {
		intrinsics.prefetch_read_data(address, /*high*/3)
	}
	prefetch_offset :: #force_inline proc(address: rawptr, #any_int offset: uintptr) {
		ptr := rawptr(uintptr(address) + offset)
		prefetch_address(ptr)
	}
	prefetch :: proc { prefetch_address, prefetch_offset, }
} else {
	// No-op stubs so the rest of the package can call `prefetch` unconditionally.
	prefetch_address :: #force_inline proc(address: rawptr) {
	}
	prefetch_offset :: #force_inline proc(address: rawptr, #any_int offset: uintptr) {
	}
	prefetch :: proc { prefetch_address, prefetch_offset, }
}

@(optimization_mode="favor_size")
XXH_rotl32 :: #force_inline proc(x, r: u32) -> (res: u32) {
	return ((x << r) | (x >> (32 - r)))
}

@(optimization_mode="favor_size")
XXH_rotl64 :: #force_inline proc(x, r: u64) -> (res: u64) {
	return ((x << r) | (x >> (64 - r)))
}

// Read a 32-bit value stored little-endian, either via a direct (possibly
// unaligned) load or via `mem_copy`, depending on configuration and alignment.
@(optimization_mode="favor_size")
XXH32_read32 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u32) {
	if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
		#no_bounds_check b := (^u32le)(&buf[0])^
		return u32(b)
	} else {
		b: u32le
		mem_copy(&b, raw_data(buf[:]), 4)
		return u32(b)
	}
}

// Read a 64-bit value stored little-endian, either via a direct (possibly
// unaligned) load or via `mem_copy`, depending on configuration and alignment.
@(optimization_mode="favor_size")
XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u64) {
	if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
		#no_bounds_check b := (^u64le)(&buf[0])^
		return u64(b)
	} else {
		b: u64le
		mem_copy(&b, raw_data(buf[:]), 8)
		return u64(b)
	}
}

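// A small sanity-check sketch (hypothetical helper, not part of the upstream
// file): both read paths return the little-endian interpretation of the input,
// regardless of host endianness.
@(private)
_read_example :: proc() {
	bytes := [8]u8{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	assert(XXH32_read32(bytes[:4]) == 0x04030201)
	assert(XXH64_read64(bytes[:])  == 0x0807060504030201)
}
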
// Load `W` u64 lanes stored little-endian; on big-endian hosts the lanes are
// byte-swapped after loading so the result matches the little-endian layout.
XXH64_read64_simd :: #force_inline proc(buf: []$E, $W: uint, alignment := Alignment.Unaligned) -> (res: #simd[W]u64) {
	if alignment == .Aligned {
		res = (^#simd[W]u64)(raw_data(buf))^
	} else {
		res = intrinsics.unaligned_load((^#simd[W]u64)(raw_data(buf)))
	}
	when ODIN_ENDIAN == .Big {
		// Reversing all bytes and then reversing the lanes byte-swaps each lane in place.
		bytes := transmute(#simd[W*8]u8)res
		bytes = intrinsics.simd_lanes_reverse(bytes)
		res = transmute(#simd[W]u64)bytes
		res = intrinsics.simd_lanes_reverse(res)
	}
	return
}

// Store `W` u64 lanes in little-endian order, byte-swapping first on big-endian hosts.
XXH64_write64_simd :: #force_inline proc(buf: []$E, value: $V/#simd[$W]u64, alignment := Alignment.Unaligned) {
	value := value
	when ODIN_ENDIAN == .Big {
		bytes := transmute(#simd[W*8]u8)value
		bytes = intrinsics.simd_lanes_reverse(bytes)
		value = transmute(#simd[W]u64)bytes
		value = intrinsics.simd_lanes_reverse(value)
	}
	if alignment == .Aligned {
		(^V)(raw_data(buf))^ = value
	} else {
		intrinsics.unaligned_store((^V)(raw_data(buf)), value)
	}
}
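
// A minimal round-trip sketch for the SIMD helpers (hypothetical helper, not
// part of the upstream file): reading four lanes and writing them back should
// preserve the underlying bytes on both little- and big-endian hosts.
@(private)
_simd_round_trip_example :: proc() {
	src: [32]u8
	for i in 0..<32 {
		src[i] = u8(i)
	}
	lanes := XXH64_read64_simd(src[:], 4) // Four u64 lanes, unaligned load.
	dst: [32]u8
	XXH64_write64_simd(dst[:], lanes)     // Write them back, unaligned store.
	assert(src == dst)
}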