author    Yawning Angel <yawning@schwanenlied.me>    2023-11-17 19:18:45 +0900
committer Yawning Angel <yawning@schwanenlied.me>    2023-11-17 19:32:11 +0900
commit    59950bcad6829d656fa58b1e1c10330535d2fef3 (patch)
tree      1243169fa38f63f19aa4f37c7d84cfc4af15bb2a /core/crypto/legacy
parent    4587a55486a1c0367778c67fec50d895bf0dbd13 (diff)
core/crypto: Exile keccak, md5 and sha1 to legacy
In a perfect world these would just be removed, but the world is imperfect, and people are forced to interact with things that are broken.
Diffstat (limited to 'core/crypto/legacy')
-rw-r--r--  core/crypto/legacy/README.md            10
-rw-r--r--  core/crypto/legacy/keccak/keccak.odin  377
-rw-r--r--  core/crypto/legacy/md5/md5.odin        295
-rw-r--r--  core/crypto/legacy/sha1/sha1.odin      252
4 files changed, 934 insertions, 0 deletions
diff --git a/core/crypto/legacy/README.md b/core/crypto/legacy/README.md
new file mode 100644
index 000000000..e1ba6f54b
--- /dev/null
+++ b/core/crypto/legacy/README.md
@@ -0,0 +1,10 @@
+# crypto/legacy
+
+These algorithms are shipped solely for the purpose of
+interoperability with legacy systems. Using these packages in
+any other capacity is discouraged, especially the ones that are
+known to be broken.
+
+- Keccak - The draft version of the algorithm that became SHA-3
+- MD5 - Broken (https://eprint.iacr.org/2005/075)
+- SHA-1 - Broken (https://eprint.iacr.org/2017/190)
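For downstream code, the practical effect of this move is presumably just an import path change. The old locations are not shown in this diff (it is limited to core/crypto/legacy), so the pre-move path in the sketch below is an assumption:

```odin
package example

// Hypothetical migration sketch: the commented-out import is the assumed old
// location; the active one follows the directory layout added by this commit.
// import "core:crypto/md5"        // before (assumed)
import "core:crypto/legacy/md5"    // after this commit

// Tiny wrapper just to exercise the package after the path change.
digest_of :: proc(msg: string) -> [md5.DIGEST_SIZE]byte {
	return md5.hash_string(msg)
}
```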
diff --git a/core/crypto/legacy/keccak/keccak.odin b/core/crypto/legacy/keccak/keccak.odin
new file mode 100644
index 000000000..09db853a6
--- /dev/null
+++ b/core/crypto/legacy/keccak/keccak.odin
@@ -0,0 +1,377 @@
+package keccak
+
+/*
+ Copyright 2021 zhibog
+ Made available under the BSD-3 license.
+
+ List of contributors:
+ zhibog, dotbmp: Initial implementation.
+
+ Interface for the Keccak hashing algorithm.
+ This is a separate package because NIST changed the padding when finalizing the SHA-3 standard, resulting in a different output.
+*/
+
+import "core:io"
+import "core:os"
+
+import "../../_sha3"
+
+/*
+ High level API
+*/
+
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
+// hash_string_224 will hash the given input and return the
+// computed hash
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
+ return hash_bytes_224(transmute([]byte)(data))
+}
+
+// hash_bytes_224 will hash the given input and return the
+// computed hash
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_224
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+ return hash
+}
+
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_224
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+// hash_stream_224 will read the stream in chunks and compute a
+// hash from its contents
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_224
+ ctx.is_keccak = true
+ init(&ctx)
+
+ buf := make([]byte, 512)
+ defer delete(buf)
+
+ read := 1
+ for read > 0 {
+ read, _ = io.read(s, buf)
+ if read > 0 {
+ update(&ctx, buf[:read])
+ }
+ }
+ final(&ctx, hash[:])
+ return hash, true
+}
+
+// hash_file_224 will read the file provided by the given handle
+// and compute a hash
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
+ if !load_at_once {
+ return hash_stream_224(os.stream_from_handle(hd))
+ } else {
+ if buf, ok := os.read_entire_file(hd); ok {
+ return hash_bytes_224(buf[:]), ok
+ }
+ }
+ return [DIGEST_SIZE_224]byte{}, false
+}
+
+hash_224 :: proc {
+ hash_stream_224,
+ hash_file_224,
+ hash_bytes_224,
+ hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
+}
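The `hash_224` group above (and its 256/384/512 siblings below) lets a caller pass a string, a byte slice, a stream, or a file handle and have the matching variant selected. A hedged one-shot sketch, assuming the `core:crypto/legacy/keccak` import path introduced by this commit:

```odin
package main

import "core:fmt"
import "core:crypto/legacy/keccak"

main :: proc() {
	// Dispatches to hash_string_224 and returns a [DIGEST_SIZE_224]byte.
	digest := keccak.hash_224("abc")
	for b in digest {
		fmt.printf("%02x", b) // print the digest as lowercase hex
	}
	fmt.println()
}
```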
+
+// hash_string_256 will hash the given input and return the
+// computed hash
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
+ return hash_bytes_256(transmute([]byte)(data))
+}
+
+// hash_bytes_256 will hash the given input and return the
+// computed hash
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_256
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+ return hash
+}
+
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_256
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+// hash_stream_256 will read the stream in chunks and compute a
+// hash from its contents
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_256
+ ctx.is_keccak = true
+ init(&ctx)
+
+ buf := make([]byte, 512)
+ defer delete(buf)
+
+ read := 1
+ for read > 0 {
+ read, _ = io.read(s, buf)
+ if read > 0 {
+ update(&ctx, buf[:read])
+ }
+ }
+ final(&ctx, hash[:])
+ return hash, true
+}
+
+// hash_file_256 will read the file provided by the given handle
+// and compute a hash
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
+ if !load_at_once {
+ return hash_stream_256(os.stream_from_handle(hd))
+ } else {
+ if buf, ok := os.read_entire_file(hd); ok {
+ return hash_bytes_256(buf[:]), ok
+ }
+ }
+ return [DIGEST_SIZE_256]byte{}, false
+}
+
+hash_256 :: proc {
+ hash_stream_256,
+ hash_file_256,
+ hash_bytes_256,
+ hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
+}
+
+// hash_string_384 will hash the given input and return the
+// computed hash
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
+ return hash_bytes_384(transmute([]byte)(data))
+}
+
+// hash_bytes_384 will hash the given input and return the
+// computed hash
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_384
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+ return hash
+}
+
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_384
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+// hash_stream_384 will read the stream in chunks and compute a
+// hash from its contents
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_384
+ ctx.is_keccak = true
+ init(&ctx)
+
+ buf := make([]byte, 512)
+ defer delete(buf)
+
+ read := 1
+ for read > 0 {
+ read, _ = io.read(s, buf)
+ if read > 0 {
+ update(&ctx, buf[:read])
+ }
+ }
+ final(&ctx, hash[:])
+ return hash, true
+}
+
+// hash_file_384 will read the file provided by the given handle
+// and compute a hash
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
+ if !load_at_once {
+ return hash_stream_384(os.stream_from_handle(hd))
+ } else {
+ if buf, ok := os.read_entire_file(hd); ok {
+ return hash_bytes_384(buf[:]), ok
+ }
+ }
+ return [DIGEST_SIZE_384]byte{}, false
+}
+
+hash_384 :: proc {
+ hash_stream_384,
+ hash_file_384,
+ hash_bytes_384,
+ hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
+}
+
+// hash_string_512 will hash the given input and return the
+// computed hash
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
+ return hash_bytes_512(transmute([]byte)(data))
+}
+
+// hash_bytes_512 will hash the given input and return the
+// computed hash
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_512
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+ return hash
+}
+
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_512
+ ctx.is_keccak = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+// hash_stream_512 will read the stream in chunks and compute a
+// hash from its contents
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
+ ctx: Context
+ ctx.mdlen = DIGEST_SIZE_512
+ ctx.is_keccak = true
+ init(&ctx)
+
+ buf := make([]byte, 512)
+ defer delete(buf)
+
+ read := 1
+ for read > 0 {
+ read, _ = io.read(s, buf)
+ if read > 0 {
+ update(&ctx, buf[:read])
+ }
+ }
+ final(&ctx, hash[:])
+ return hash, true
+}
+
+// hash_file_512 will read the file provided by the given handle
+// and compute a hash
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
+ if !load_at_once {
+ return hash_stream_512(os.stream_from_handle(hd))
+ } else {
+ if buf, ok := os.read_entire_file(hd); ok {
+ return hash_bytes_512(buf[:]), ok
+ }
+ }
+ return [DIGEST_SIZE_512]byte{}, false
+}
+
+hash_512 :: proc {
+ hash_stream_512,
+ hash_file_512,
+ hash_bytes_512,
+ hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
+}
+
+/*
+ Low level API
+*/
+
+Context :: _sha3.Sha3_Context
+
+init :: proc(ctx: ^Context) {
+ ctx.is_keccak = true
+ _sha3.init(ctx)
+}
+
+update :: proc(ctx: ^Context, data: []byte) {
+ _sha3.update(ctx, data)
+}
+
+final :: proc(ctx: ^Context, hash: []byte) {
+ _sha3.final(ctx, hash)
+}
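For incremental hashing, the low-level API requires selecting the digest length via `mdlen` before `init`, mirroring what the high-level helpers above do internally. A minimal sketch, again assuming the new import path:

```odin
package main

import "core:fmt"
import "core:crypto/legacy/keccak"

main :: proc() {
	ctx: keccak.Context
	ctx.mdlen = keccak.DIGEST_SIZE_256 // select Keccak-256; init sets is_keccak
	keccak.init(&ctx)

	part1 := "hello "
	part2 := "world"
	keccak.update(&ctx, transmute([]byte)part1)
	keccak.update(&ctx, transmute([]byte)part2)

	digest: [keccak.DIGEST_SIZE_256]byte
	keccak.final(&ctx, digest[:]) // destination must be at least mdlen bytes
	for b in digest {
		fmt.printf("%02x", b)
	}
	fmt.println()
}
```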
diff --git a/core/crypto/legacy/md5/md5.odin b/core/crypto/legacy/md5/md5.odin
new file mode 100644
index 000000000..69ae087e4
--- /dev/null
+++ b/core/crypto/legacy/md5/md5.odin
@@ -0,0 +1,295 @@
+package md5
+
+/*
+ Copyright 2021 zhibog
+ Made available under the BSD-3 license.
+
+ List of contributors:
+ zhibog, dotbmp: Initial implementation.
+
+ Implementation of the MD5 hashing algorithm, as defined in RFC 1321 <https://datatracker.ietf.org/doc/html/rfc1321>
+*/
+
+import "core:encoding/endian"
+import "core:io"
+import "core:math/bits"
+import "core:mem"
+import "core:os"
+
+/*
+ High level API
+*/
+
+DIGEST_SIZE :: 16
+
+// hash_string will hash the given input and return the
+// computed hash
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
+ return hash_bytes(transmute([]byte)(data))
+}
+
+// hash_bytes will hash the given input and return the
+// computed hash
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
+ ctx: Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+ return hash
+}
+
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ ctx: Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+// hash_stream will read the stream in chunks and compute a
+// hash from its contents
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
+ ctx: Context
+ init(&ctx)
+
+ buf := make([]byte, 512)
+ defer delete(buf)
+
+ read := 1
+ for read > 0 {
+ read, _ = io.read(s, buf)
+ if read > 0 {
+ update(&ctx, buf[:read])
+ }
+ }
+ final(&ctx, hash[:])
+ return hash, true
+}
+
+// hash_file will read the file provided by the given handle
+// and compute a hash
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
+ if !load_at_once {
+ return hash_stream(os.stream_from_handle(hd))
+ } else {
+ if buf, ok := os.read_entire_file(hd); ok {
+ return hash_bytes(buf[:]), ok
+ }
+ }
+ return [DIGEST_SIZE]byte{}, false
+}
+
+hash :: proc {
+ hash_stream,
+ hash_file,
+ hash_bytes,
+ hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
+}
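A hedged one-shot sketch of the helpers above; the digest in the trailing comment is the "abc" vector from the RFC 1321 test suite, and the import path assumes the new legacy location:

```odin
package main

import "core:fmt"
import "core:crypto/legacy/md5"

main :: proc() {
	digest := md5.hash("abc") // dispatches to hash_string
	for b in digest {
		fmt.printf("%02x", b)
	}
	fmt.println() // expected: 900150983cd24fb0d6963f7d28e17f72 (RFC 1321 test suite)
}
```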
+
+/*
+ Low level API
+*/
+
+init :: proc(ctx: ^Context) {
+ ctx.state[0] = 0x67452301
+ ctx.state[1] = 0xefcdab89
+ ctx.state[2] = 0x98badcfe
+ ctx.state[3] = 0x10325476
+
+ ctx.bitlen = 0
+ ctx.datalen = 0
+
+ ctx.is_initialized = true
+}
+
+update :: proc(ctx: ^Context, data: []byte) {
+ assert(ctx.is_initialized)
+
+ for i := 0; i < len(data); i += 1 {
+ ctx.data[ctx.datalen] = data[i]
+ ctx.datalen += 1
+ if (ctx.datalen == BLOCK_SIZE) {
+ transform(ctx, ctx.data[:])
+ ctx.bitlen += 512
+ ctx.datalen = 0
+ }
+ }
+}
+
+final :: proc(ctx: ^Context, hash: []byte) {
+ assert(ctx.is_initialized)
+
+ if len(hash) < DIGEST_SIZE {
+ panic("crypto/md5: invalid destination digest size")
+ }
+
+ i := ctx.datalen
+
+ if ctx.datalen < 56 {
+ ctx.data[i] = 0x80
+ i += 1
+ for i < 56 {
+ ctx.data[i] = 0x00
+ i += 1
+ }
+ } else {
+ ctx.data[i] = 0x80
+ i += 1
+ for i < BLOCK_SIZE {
+ ctx.data[i] = 0x00
+ i += 1
+ }
+ transform(ctx, ctx.data[:])
+ mem.set(&ctx.data, 0, 56)
+ }
+
+ ctx.bitlen += u64(ctx.datalen * 8)
+ endian.unchecked_put_u64le(ctx.data[56:], ctx.bitlen)
+ transform(ctx, ctx.data[:])
+
+ for i = 0; i < DIGEST_SIZE / 4; i += 1 {
+ endian.unchecked_put_u32le(hash[i * 4:], ctx.state[i])
+ }
+
+ ctx.is_initialized = false
+}
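The low-level `init`/`update`/`final` triple above supports feeding data as it arrives; `final` panics if the destination slice is shorter than `DIGEST_SIZE` and leaves the context uninitialized, so reusing it requires another `init`. A minimal sketch (import path assumed as above):

```odin
package main

import "core:fmt"
import "core:crypto/legacy/md5"

main :: proc() {
	ctx: md5.Context
	digest: [md5.DIGEST_SIZE]byte

	md5.init(&ctx)
	msg := "message digest"
	md5.update(&ctx, transmute([]byte)msg)
	md5.final(&ctx, digest[:]) // context is no longer initialized after this

	// Hashing a second message with the same context requires init again.
	md5.init(&ctx)
	msg2 := "abc"
	md5.update(&ctx, transmute([]byte)msg2)
	md5.final(&ctx, digest[:])

	for b in digest {
		fmt.printf("%02x", b)
	}
	fmt.println()
}
```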
+
+/*
+ MD5 implementation
+*/
+
+BLOCK_SIZE :: 64
+
+Context :: struct {
+ data: [BLOCK_SIZE]byte,
+ state: [4]u32,
+ bitlen: u64,
+ datalen: u32,
+
+ is_initialized: bool,
+}
+
+/*
+ @note(zh): F, G, H and I, as mentioned in the RFC, have been inlined into FF, GG, HH
+ and II respectively, instead of declaring them separately.
+*/
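For readers cross-checking against RFC 1321: the auxiliary function F that was folded into FF is the bitwise select shown below (a standalone illustration, not part of this diff); G, H and I are inlined into GG, HH and II in the same way.

```odin
// RFC 1321 defines F(X, Y, Z) = (X AND Y) OR ((NOT X) AND Z); FF above embeds
// this expression directly as ((b & c) | (~b & d)).
F :: #force_inline proc "contextless" (x, y, z: u32) -> u32 {
	return (x & y) | (~x & z)
}
```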
+
+@(private)
+FF :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
+ return b + bits.rotate_left32(a + ((b & c) | (~b & d)) + m + t, s)
+}
+
+@(private)
+GG :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
+ return b + bits.rotate_left32(a + ((b & d) | (c & ~d)) + m + t, s)
+}
+
+@(private)
+HH :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
+ return b + bits.rotate_left32(a + (b ~ c ~ d) + m + t, s)
+}
+
+@(private)
+II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 {
+ return b + bits.rotate_left32(a + (c ~ (b | ~d)) + m + t, s)
+}
+
+@(private)
+transform :: proc "contextless" (ctx: ^Context, data: []byte) {
+ m: [DIGEST_SIZE]u32
+
+ for i := 0; i < DIGEST_SIZE; i += 1 {
+ m[i] = endian.unchecked_get_u32le(data[i * 4:])
+ }
+
+ a := ctx.state[0]
+ b := ctx.state[1]
+ c := ctx.state[2]
+ d := ctx.state[3]
+
+ a = FF(a, b, c, d, m[0], 7, 0xd76aa478)
+ d = FF(d, a, b, c, m[1], 12, 0xe8c7b756)
+ c = FF(c, d, a, b, m[2], 17, 0x242070db)
+ b = FF(b, c, d, a, m[3], 22, 0xc1bdceee)
+ a = FF(a, b, c, d, m[4], 7, 0xf57c0faf)
+ d = FF(d, a, b, c, m[5], 12, 0x4787c62a)
+ c = FF(c, d, a, b, m[6], 17, 0xa8304613)
+ b = FF(b, c, d, a, m[7], 22, 0xfd469501)
+ a = FF(a, b, c, d, m[8], 7, 0x698098d8)
+ d = FF(d, a, b, c, m[9], 12, 0x8b44f7af)
+ c = FF(c, d, a, b, m[10], 17, 0xffff5bb1)
+ b = FF(b, c, d, a, m[11], 22, 0x895cd7be)
+ a = FF(a, b, c, d, m[12], 7, 0x6b901122)
+ d = FF(d, a, b, c, m[13], 12, 0xfd987193)
+ c = FF(c, d, a, b, m[14], 17, 0xa679438e)
+ b = FF(b, c, d, a, m[15], 22, 0x49b40821)
+
+ a = GG(a, b, c, d, m[1], 5, 0xf61e2562)
+ d = GG(d, a, b, c, m[6], 9, 0xc040b340)
+ c = GG(c, d, a, b, m[11], 14, 0x265e5a51)
+ b = GG(b, c, d, a, m[0], 20, 0xe9b6c7aa)
+ a = GG(a, b, c, d, m[5], 5, 0xd62f105d)
+ d = GG(d, a, b, c, m[10], 9, 0x02441453)
+ c = GG(c, d, a, b, m[15], 14, 0xd8a1e681)
+ b = GG(b, c, d, a, m[4], 20, 0xe7d3fbc8)
+ a = GG(a, b, c, d, m[9], 5, 0x21e1cde6)
+ d = GG(d, a, b, c, m[14], 9, 0xc33707d6)
+ c = GG(c, d, a, b, m[3], 14, 0xf4d50d87)
+ b = GG(b, c, d, a, m[8], 20, 0x455a14ed)
+ a = GG(a, b, c, d, m[13], 5, 0xa9e3e905)
+ d = GG(d, a, b, c, m[2], 9, 0xfcefa3f8)
+ c = GG(c, d, a, b, m[7], 14, 0x676f02d9)
+ b = GG(b, c, d, a, m[12], 20, 0x8d2a4c8a)
+
+ a = HH(a, b, c, d, m[5], 4, 0xfffa3942)
+ d = HH(d, a, b, c, m[8], 11, 0x8771f681)
+ c = HH(c, d, a, b, m[11], 16, 0x6d9d6122)
+ b = HH(b, c, d, a, m[14], 23, 0xfde5380c)
+ a = HH(a, b, c, d, m[1], 4, 0xa4beea44)
+ d = HH(d, a, b, c, m[4], 11, 0x4bdecfa9)
+ c = HH(c, d, a, b, m[7], 16, 0xf6bb4b60)
+ b = HH(b, c, d, a, m[10], 23, 0xbebfbc70)
+ a = HH(a, b, c, d, m[13], 4, 0x289b7ec6)
+ d = HH(d, a, b, c, m[0], 11, 0xeaa127fa)
+ c = HH(c, d, a, b, m[3], 16, 0xd4ef3085)
+ b = HH(b, c, d, a, m[6], 23, 0x04881d05)
+ a = HH(a, b, c, d, m[9], 4, 0xd9d4d039)
+ d = HH(d, a, b, c, m[12], 11, 0xe6db99e5)
+ c = HH(c, d, a, b, m[15], 16, 0x1fa27cf8)
+ b = HH(b, c, d, a, m[2], 23, 0xc4ac5665)
+
+ a = II(a, b, c, d, m[0], 6, 0xf4292244)
+ d = II(d, a, b, c, m[7], 10, 0x432aff97)
+ c = II(c, d, a, b, m[14], 15, 0xab9423a7)
+ b = II(b, c, d, a, m[5], 21, 0xfc93a039)
+ a = II(a, b, c, d, m[12], 6, 0x655b59c3)
+ d = II(d, a, b, c, m[3], 10, 0x8f0ccc92)
+ c = II(c, d, a, b, m[10], 15, 0xffeff47d)
+ b = II(b, c, d, a, m[1], 21, 0x85845dd1)
+ a = II(a, b, c, d, m[8], 6, 0x6fa87e4f)
+ d = II(d, a, b, c, m[15], 10, 0xfe2ce6e0)
+ c = II(c, d, a, b, m[6], 15, 0xa3014314)
+ b = II(b, c, d, a, m[13], 21, 0x4e0811a1)
+ a = II(a, b, c, d, m[4], 6, 0xf7537e82)
+ d = II(d, a, b, c, m[11], 10, 0xbd3af235)
+ c = II(c, d, a, b, m[2], 15, 0x2ad7d2bb)
+ b = II(b, c, d, a, m[9], 21, 0xeb86d391)
+
+ ctx.state[0] += a
+ ctx.state[1] += b
+ ctx.state[2] += c
+ ctx.state[3] += d
+}
diff --git a/core/crypto/legacy/sha1/sha1.odin b/core/crypto/legacy/sha1/sha1.odin
new file mode 100644
index 000000000..6c4407067
--- /dev/null
+++ b/core/crypto/legacy/sha1/sha1.odin
@@ -0,0 +1,252 @@
+package sha1
+
+/*
+ Copyright 2021 zhibog
+ Made available under the BSD-3 license.
+
+ List of contributors:
+ zhibog, dotbmp: Initial implementation.
+
+ Implementation of the SHA1 hashing algorithm, as defined in RFC 3174 <https://datatracker.ietf.org/doc/html/rfc3174>
+*/
+
+import "core:encoding/endian"
+import "core:io"
+import "core:math/bits"
+import "core:mem"
+import "core:os"
+
+/*
+ High level API
+*/
+
+DIGEST_SIZE :: 20
+
+// hash_string will hash the given input and return the
+// computed hash
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
+ return hash_bytes(transmute([]byte)(data))
+}
+
+// hash_bytes will hash the given input and return the
+// computed hash
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
+ ctx: Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+ return hash
+}
+
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ ctx: Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+// hash_stream will read the stream in chunks and compute a
+// hash from its contents
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
+ ctx: Context
+ init(&ctx)
+
+ buf := make([]byte, 512)
+ defer delete(buf)
+
+ read := 1
+ for read > 0 {
+ read, _ = io.read(s, buf)
+ if read > 0 {
+ update(&ctx, buf[:read])
+ }
+ }
+ final(&ctx, hash[:])
+ return hash, true
+}
+
+// hash_file will read the file provided by the given handle
+// and compute a hash
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
+ if !load_at_once {
+ return hash_stream(os.stream_from_handle(hd))
+ } else {
+ if buf, ok := os.read_entire_file(hd); ok {
+ return hash_bytes(buf[:]), ok
+ }
+ }
+ return [DIGEST_SIZE]byte{}, false
+}
+
+hash :: proc {
+ hash_stream,
+ hash_file,
+ hash_bytes,
+ hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
+}
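A hedged sketch of the buffer-based helper above, which requires the caller to supply a destination of at least `DIGEST_SIZE` (20) bytes; the expected digest in the comment is the standard "abc" test vector (RFC 3174 / FIPS 180):

```odin
package main

import "core:fmt"
import "core:crypto/legacy/sha1"

main :: proc() {
	digest: [sha1.DIGEST_SIZE]byte
	sha1.hash_string_to_buffer("abc", digest[:]) // destination must be >= 20 bytes
	for b in digest {
		fmt.printf("%02x", b)
	}
	fmt.println() // expected: a9993e364706816aba3e25717850c26c9cd0d89d
}
```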
+
+/*
+ Low level API
+*/
+
+init :: proc(ctx: ^Context) {
+ ctx.state[0] = 0x67452301
+ ctx.state[1] = 0xefcdab89
+ ctx.state[2] = 0x98badcfe
+ ctx.state[3] = 0x10325476
+ ctx.state[4] = 0xc3d2e1f0
+ ctx.k[0] = 0x5a827999
+ ctx.k[1] = 0x6ed9eba1
+ ctx.k[2] = 0x8f1bbcdc
+ ctx.k[3] = 0xca62c1d6
+
+ ctx.datalen = 0
+ ctx.bitlen = 0
+
+ ctx.is_initialized = true
+}
+
+update :: proc(ctx: ^Context, data: []byte) {
+ assert(ctx.is_initialized)
+
+ for i := 0; i < len(data); i += 1 {
+ ctx.data[ctx.datalen] = data[i]
+ ctx.datalen += 1
+ if (ctx.datalen == BLOCK_SIZE) {
+ transform(ctx, ctx.data[:])
+ ctx.bitlen += 512
+ ctx.datalen = 0
+ }
+ }
+}
+
+final :: proc(ctx: ^Context, hash: []byte) {
+ assert(ctx.is_initialized)
+
+ if len(hash) < DIGEST_SIZE {
+ panic("crypto/sha1: invalid destination digest size")
+ }
+
+ i := ctx.datalen
+
+ if ctx.datalen < 56 {
+ ctx.data[i] = 0x80
+ i += 1
+ for i < 56 {
+ ctx.data[i] = 0x00
+ i += 1
+ }
+ } else {
+ ctx.data[i] = 0x80
+ i += 1
+ for i < BLOCK_SIZE {
+ ctx.data[i] = 0x00
+ i += 1
+ }
+ transform(ctx, ctx.data[:])
+ mem.set(&ctx.data, 0, 56)
+ }
+
+ ctx.bitlen += u64(ctx.datalen * 8)
+ endian.unchecked_put_u64be(ctx.data[56:], ctx.bitlen)
+ transform(ctx, ctx.data[:])
+
+ for i = 0; i < DIGEST_SIZE / 4; i += 1 {
+ endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i])
+ }
+
+ ctx.is_initialized = false
+}
+
+/*
+ SHA1 implementation
+*/
+
+BLOCK_SIZE :: 64
+
+Context :: struct {
+ data: [BLOCK_SIZE]byte,
+ datalen: u32,
+ bitlen: u64,
+ state: [5]u32,
+ k: [4]u32,
+
+ is_initialized: bool,
+}
+
+@(private)
+transform :: proc "contextless" (ctx: ^Context, data: []byte) {
+ a, b, c, d, e, i, t: u32
+ m: [80]u32
+
+ for i = 0; i < 16; i += 1 {
+ m[i] = endian.unchecked_get_u32be(data[i * 4:])
+ }
+ for i < 80 {
+ m[i] = (m[i - 3] ~ m[i - 8] ~ m[i - 14] ~ m[i - 16])
+ m[i] = (m[i] << 1) | (m[i] >> 31)
+ i += 1
+ }
+
+ a = ctx.state[0]
+ b = ctx.state[1]
+ c = ctx.state[2]
+ d = ctx.state[3]
+ e = ctx.state[4]
+
+ for i = 0; i < 20; i += 1 {
+ t = bits.rotate_left32(a, 5) + ((b & c) ~ (~b & d)) + e + ctx.k[0] + m[i]
+ e = d
+ d = c
+ c = bits.rotate_left32(b, 30)
+ b = a
+ a = t
+ }
+ for i < 40 {
+ t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[1] + m[i]
+ e = d
+ d = c
+ c = bits.rotate_left32(b, 30)
+ b = a
+ a = t
+ i += 1
+ }
+ for i < 60 {
+ t = bits.rotate_left32(a, 5) + ((b & c) ~ (b & d) ~ (c & d)) + e + ctx.k[2] + m[i]
+ e = d
+ d = c
+ c = bits.rotate_left32(b, 30)
+ b = a
+ a = t
+ i += 1
+ }
+ for i < 80 {
+ t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[3] + m[i]
+ e = d
+ d = c
+ c = bits.rotate_left32(b, 30)
+ b = a
+ a = t
+ i += 1
+ }
+
+ ctx.state[0] += a
+ ctx.state[1] += b
+ ctx.state[2] += c
+ ctx.state[3] += d
+ ctx.state[4] += e
+}
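As a closing usage sketch, the stream helpers above make it straightforward to hash data of unknown length, e.g. standard input; `hash_stream` reads 512-byte chunks until the stream is exhausted. The import path again assumes the new legacy location:

```odin
package main

import "core:fmt"
import "core:os"
import "core:crypto/legacy/sha1"

main :: proc() {
	// Wrap the stdin handle in an io.Stream and hash whatever arrives on it.
	digest, ok := sha1.hash_stream(os.stream_from_handle(os.stdin))
	if !ok {
		fmt.eprintln("hashing failed")
		return
	}
	for b in digest {
		fmt.printf("%02x", b)
	}
	fmt.println()
}
```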