/*
	An implementation of Yann Collet's [xxhash Fast Hash Algorithm](https://cyan4973.github.io/xxHash/).
	Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
	Made available under Odin's BSD-3 license, based on the original C code.

	List of contributors:
		Jeroen van Rijn: Initial implementation.
*/
package xxhash

import "base:intrinsics"
import "base:runtime"

mem_copy  :: runtime.mem_copy
byte_swap :: intrinsics.byte_swap

/*
	Version definition.
*/
XXH_VERSION_MAJOR   :: 0
XXH_VERSION_MINOR   :: 8
XXH_VERSION_RELEASE :: 1
XXH_VERSION_NUMBER  :: XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + XXH_VERSION_RELEASE
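
/*
	For example, version 0.8.1 packs to 0 * 10_000 + 8 * 100 + 1 = 801.
*/
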
/*
	0 - Use `mem_copy`, for platforms where unaligned reads are a problem.
	2 - Direct cast, for platforms where unaligned reads are allowed (default).
*/
XXH_FORCE_MEMORY_ACCESS :: #config(XXH_FORCE_MEMORY_ACCESS, 2)
/*
	`false` - Use this on platforms where unaligned reads are fast.
	`true`  - Use this on platforms where unaligned reads are slow; the input's
	          alignment is then checked so the aligned path can be taken when possible.
*/
XXH_FORCE_ALIGN_CHECK :: #config(XXH_FORCE_ALIGN_CHECK, false)
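
/*
	Both of the above are `#config` values, so they can be overridden at build
	time without editing this file, e.g.:

		odin build . -define:XXH_FORCE_MEMORY_ACCESS=0 -define:XXH_FORCE_ALIGN_CHECK=true
*/
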
Alignment :: enum {
	Aligned,
	Unaligned,
}

Error :: enum {
	None = 0,
	Error,
}

/*
	llvm.prefetch currently fails code generation on Linux, so prefetching is
	disabled by default. On platforms where it works, it can be re-enabled with
	`-define:XXH_DISABLE_PREFETCH=false`.
*/
XXH_DISABLE_PREFETCH :: #config(XXH_DISABLE_PREFETCH, true)

when !XXH_DISABLE_PREFETCH {
	prefetch_address :: #force_inline proc(address: rawptr) {
		intrinsics.prefetch_read_data(address, /*high locality*/3)
	}
	prefetch_offset :: #force_inline proc(address: rawptr, #any_int offset: uintptr) {
		ptr := rawptr(uintptr(address) + offset)
		prefetch_address(ptr)
	}
	prefetch :: proc { prefetch_address, prefetch_offset, }
} else {
	/*
		No-op stubs, so call sites compile unchanged when prefetching is disabled.
	*/
	prefetch_address :: #force_inline proc(address: rawptr) {
	}
	prefetch_offset :: #force_inline proc(address: rawptr, #any_int offset: uintptr) {
	}
	prefetch :: proc { prefetch_address, prefetch_offset, }
}
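
/*
	Either form can be called through the `prefetch` overload group, e.g.
	`prefetch(raw_data(buf))` or `prefetch(raw_data(buf), 64)`; with
	XXH_DISABLE_PREFETCH set, both inline to nothing.
*/
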
@(optimization_mode="speed")
XXH_rotl32 :: #force_inline proc(x, r: u32) -> (res: u32) {
	return ((x << r) | (x >> (32 - r)))
}

@(optimization_mode="speed")
XXH_rotl64 :: #force_inline proc(x, r: u64) -> (res: u64) {
	return ((x << r) | (x >> (64 - r)))
}
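
/*
	Example: XXH_rotl32(0x8000_0001, 1) == 0x0000_0003; the top bit wraps
	around into bit 0 instead of being shifted out.
*/
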
@(optimization_mode="speed")
XXH32_read32 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u32) {
	if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
		#no_bounds_check b := (^u32le)(&buf[0])^
		return u32(b)
	} else {
		b: u32le
		mem_copy(&b, raw_data(buf), 4)
		return u32(b)
	}
}

@(optimization_mode="speed")
XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u64) {
	if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
		#no_bounds_check b := (^u64le)(&buf[0])^
		return u64(b)
	} else {
		b: u64le
		mem_copy(&b, raw_data(buf), 8)
		return u64(b)
	}
}
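
/*
	Both readers interpret the buffer as little-endian regardless of host byte
	order, e.g. XXH32_read32([]u8{0x01, 0x02, 0x03, 0x04}) == 0x0403_0201 on
	any platform.
*/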