From 59950bcad6829d656fa58b1e1c10330535d2fef3 Mon Sep 17 00:00:00 2001
From: Yawning Angel
Date: Fri, 17 Nov 2023 19:18:45 +0900
Subject: core/crypto: Exile keccak, md5 and sha1 to legacy

In a perfect world these would just be removed, but the world is
imperfect, and people are forced to interact/interface with things
that are broken.
---
 core/crypto/README.md                     |   6 +-
 core/crypto/keccak/keccak.odin            | 377 ------------------------------
 core/crypto/legacy/README.md              |  10 +
 core/crypto/legacy/keccak/keccak.odin     | 377 ++++++++++++++++++++++++++++++
 core/crypto/legacy/md5/md5.odin           | 295 +++++++++++++++++++++++
 core/crypto/legacy/sha1/sha1.odin         | 252 ++++++++++++++++++++
 core/crypto/md5/md5.odin                  | 295 -----------------------
 core/crypto/sha1/sha1.odin                | 252 --------------------
 examples/all/all_main.odin                |   6 +-
 examples/all/all_vendor.odin              |   6 +-
 tests/core/crypto/test_core_crypto.odin   |   6 +-
 tests/vendor/botan/test_vendor_botan.odin |   6 +-
 vendor/botan/README.md                    |   6 +-
 vendor/botan/keccak/keccak.odin           | 118 ----------
 vendor/botan/legacy/README.md             |  10 +
 vendor/botan/legacy/keccak/keccak.odin    | 118 ++++++++++
 vendor/botan/legacy/md5/md5.odin          | 118 ++++++++++
 vendor/botan/legacy/sha1/sha1.odin        | 118 ++++++++++
 vendor/botan/md5/md5.odin                 | 118 ----------
 vendor/botan/sha1/sha1.odin               | 118 ----------
 20 files changed, 1316 insertions(+), 1296 deletions(-)
 delete mode 100644 core/crypto/keccak/keccak.odin
 create mode 100644 core/crypto/legacy/README.md
 create mode 100644 core/crypto/legacy/keccak/keccak.odin
 create mode 100644 core/crypto/legacy/md5/md5.odin
 create mode 100644 core/crypto/legacy/sha1/sha1.odin
 delete mode 100644 core/crypto/md5/md5.odin
 delete mode 100644 core/crypto/sha1/sha1.odin
 delete mode 100644 vendor/botan/keccak/keccak.odin
 create mode 100644 vendor/botan/legacy/README.md
 create mode 100644 vendor/botan/legacy/keccak/keccak.odin
 create mode 100644 vendor/botan/legacy/md5/md5.odin
 create mode 100644 vendor/botan/legacy/sha1/sha1.odin
 delete mode 100644 vendor/botan/md5/md5.odin
 delete mode 100644 vendor/botan/sha1/sha1.odin

diff --git a/core/crypto/README.md b/core/crypto/README.md
index 69f76b719..35ed9ca70 100644
--- a/core/crypto/README.md
+++ b/core/crypto/README.md
@@ -10,13 +10,13 @@ Please see the chart below for the options.
 |:-------------------------------------------------------------------------------------------------------------|:-----------------|
 | [BLAKE2B](https://datatracker.ietf.org/doc/html/rfc7693) | ✔️ |
 | [BLAKE2S](https://datatracker.ietf.org/doc/html/rfc7693) | ✔️ |
-| [Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
-| [MD5](https://datatracker.ietf.org/doc/html/rfc1321) | ✔️ |
-| [SHA-1](https://datatracker.ietf.org/doc/html/rfc3174) | ✔️ |
 | [SHA-2](https://csrc.nist.gov/csrc/media/publications/fips/180/2/archive/2002-08-01/documents/fips180-2.pdf) | ✔️ |
 | [SHA-3](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
 | [SHAKE](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
 | [SM3](https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02) | ✔️ |
+| legacy/[Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ |
+| legacy/[MD5](https://datatracker.ietf.org/doc/html/rfc1321) | ✔️ |
+| legacy/[SHA-1](https://datatracker.ietf.org/doc/html/rfc3174) | ✔️ |
 
 #### High level API
 Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_`\*.
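[Editorial note, not part of the patch: a minimal sketch of what this move means for callers — only the import paths change, the procedure groups are exactly as declared in the files added below. The package name, the `"hello"` inputs, and the hex-printing loops are illustrative, not taken from the diff.]

```odin
package legacy_hash_example

import "core:crypto/legacy/keccak"
import "core:crypto/legacy/md5"
import "core:fmt"

main :: proc() {
	// MD5 has a single digest size, so its procedure group is `hash`;
	// `hash_string` is one of the concrete procedures in that group.
	md5_digest := md5.hash_string("hello")

	// Keccak provides several digest sizes, so its groups are
	// `hash_224`, `hash_256`, `hash_384` and `hash_512`.
	keccak_digest := keccak.hash_string_256("hello")

	// Print both digests as lowercase hex.
	for b in md5_digest { fmt.printf("%02x", b) }
	fmt.println()
	for b in keccak_digest { fmt.printf("%02x", b) }
	fmt.println()
}
```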
diff --git a/core/crypto/keccak/keccak.odin b/core/crypto/keccak/keccak.odin deleted file mode 100644 index a41befd6d..000000000 --- a/core/crypto/keccak/keccak.odin +++ /dev/null @@ -1,377 +0,0 @@ -package keccak - -/* - Copyright 2021 zhibog - Made available under the BSD-3 license. - - List of contributors: - zhibog, dotbmp: Initial implementation. - - Interface for the Keccak hashing algorithm. - This is done because the padding in the SHA3 standard was changed by the NIST, resulting in a different output. -*/ - -import "core:io" -import "core:os" - -import "../_sha3" - -/* - High level API -*/ - -DIGEST_SIZE_224 :: 28 -DIGEST_SIZE_256 :: 32 -DIGEST_SIZE_384 :: 48 -DIGEST_SIZE_512 :: 64 - -// hash_string_224 will hash the given input and return the -// computed hash -hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte { - return hash_bytes_224(transmute([]byte)(data)) -} - -// hash_bytes_224 will hash the given input and return the -// computed hash -hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte { - hash: [DIGEST_SIZE_224]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_224 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer_224 will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer_224 :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer_224(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer_224 will hash the given input and write the -// computed hash into the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer_224 :: proc(data, hash: []byte) { - ctx: Context - ctx.mdlen = DIGEST_SIZE_224 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash) -} - -// hash_stream_224 will read the stream in chunks and compute a -// hash from its contents -hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) { - hash: [DIGEST_SIZE_224]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_224 - ctx.is_keccak = true - init(&ctx) - - buf := make([]byte, 512) - defer delete(buf) - - read := 1 - for read > 0 { - read, _ = io.read(s, buf) - if read > 0 { - update(&ctx, buf[:read]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file_224 will read the file provided by the given handle -// and compute a hash -hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) { - if !load_at_once { - return hash_stream_224(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes_224(buf[:]), ok - } - } - return [DIGEST_SIZE_224]byte{}, false -} - -hash_224 :: proc { - hash_stream_224, - hash_file_224, - hash_bytes_224, - hash_string_224, - hash_bytes_to_buffer_224, - hash_string_to_buffer_224, -} - -// hash_string_256 will hash the given input and return the -// computed hash -hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte { - return hash_bytes_256(transmute([]byte)(data)) -} - -// hash_bytes_256 will hash the given input and return the -// computed hash -hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte { - hash: [DIGEST_SIZE_256]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_256 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer_256 will hash the given input and assign the -// 
computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer_256 :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer_256(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer_256 will hash the given input and write the -// computed hash into the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer_256 :: proc(data, hash: []byte) { - ctx: Context - ctx.mdlen = DIGEST_SIZE_256 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash) -} - -// hash_stream_256 will read the stream in chunks and compute a -// hash from its contents -hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) { - hash: [DIGEST_SIZE_256]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_256 - ctx.is_keccak = true - init(&ctx) - - buf := make([]byte, 512) - defer delete(buf) - - read := 1 - for read > 0 { - read, _ = io.read(s, buf) - if read > 0 { - update(&ctx, buf[:read]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file_256 will read the file provided by the given handle -// and compute a hash -hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) { - if !load_at_once { - return hash_stream_256(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes_256(buf[:]), ok - } - } - return [DIGEST_SIZE_256]byte{}, false -} - -hash_256 :: proc { - hash_stream_256, - hash_file_256, - hash_bytes_256, - hash_string_256, - hash_bytes_to_buffer_256, - hash_string_to_buffer_256, -} - -// hash_string_384 will hash the given input and return the -// computed hash -hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte { - return hash_bytes_384(transmute([]byte)(data)) -} - -// hash_bytes_384 will hash the given input and return the -// computed hash -hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte { - hash: [DIGEST_SIZE_384]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_384 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer_384 will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer_384 :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer_384(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer_384 will hash the given input and write the -// computed hash into the second parameter. 
-// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer_384 :: proc(data, hash: []byte) { - ctx: Context - ctx.mdlen = DIGEST_SIZE_384 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash) -} - -// hash_stream_384 will read the stream in chunks and compute a -// hash from its contents -hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) { - hash: [DIGEST_SIZE_384]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_384 - ctx.is_keccak = true - init(&ctx) - - buf := make([]byte, 512) - defer delete(buf) - - read := 1 - for read > 0 { - read, _ = io.read(s, buf) - if read > 0 { - update(&ctx, buf[:read]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file_384 will read the file provided by the given handle -// and compute a hash -hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) { - if !load_at_once { - return hash_stream_384(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes_384(buf[:]), ok - } - } - return [DIGEST_SIZE_384]byte{}, false -} - -hash_384 :: proc { - hash_stream_384, - hash_file_384, - hash_bytes_384, - hash_string_384, - hash_bytes_to_buffer_384, - hash_string_to_buffer_384, -} - -// hash_string_512 will hash the given input and return the -// computed hash -hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte { - return hash_bytes_512(transmute([]byte)(data)) -} - -// hash_bytes_512 will hash the given input and return the -// computed hash -hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte { - hash: [DIGEST_SIZE_512]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_512 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer_512 will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer_512 :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer_512(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer_512 will hash the given input and write the -// computed hash into the second parameter. 
-// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer_512 :: proc(data, hash: []byte) { - ctx: Context - ctx.mdlen = DIGEST_SIZE_512 - ctx.is_keccak = true - init(&ctx) - update(&ctx, data) - final(&ctx, hash) -} - -// hash_stream_512 will read the stream in chunks and compute a -// hash from its contents -hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) { - hash: [DIGEST_SIZE_512]byte - ctx: Context - ctx.mdlen = DIGEST_SIZE_512 - ctx.is_keccak = true - init(&ctx) - - buf := make([]byte, 512) - defer delete(buf) - - read := 1 - for read > 0 { - read, _ = io.read(s, buf) - if read > 0 { - update(&ctx, buf[:read]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file_512 will read the file provided by the given handle -// and compute a hash -hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) { - if !load_at_once { - return hash_stream_512(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes_512(buf[:]), ok - } - } - return [DIGEST_SIZE_512]byte{}, false -} - -hash_512 :: proc { - hash_stream_512, - hash_file_512, - hash_bytes_512, - hash_string_512, - hash_bytes_to_buffer_512, - hash_string_to_buffer_512, -} - -/* - Low level API -*/ - -Context :: _sha3.Sha3_Context - -init :: proc(ctx: ^Context) { - ctx.is_keccak = true - _sha3.init(ctx) -} - -update :: proc(ctx: ^Context, data: []byte) { - _sha3.update(ctx, data) -} - -final :: proc(ctx: ^Context, hash: []byte) { - _sha3.final(ctx, hash) -} diff --git a/core/crypto/legacy/README.md b/core/crypto/legacy/README.md new file mode 100644 index 000000000..e1ba6f54b --- /dev/null +++ b/core/crypto/legacy/README.md @@ -0,0 +1,10 @@ +# crypto/legacy + +These are algorithms that are shipped solely for the purpose of +interoperability with legacy systems. The use of these packages in +any other capacity is discouraged, especially those that are known +to be broken. + +- keccak - The draft version of the algorithm that became SHA-3 +- MD5 - Broken (https://eprint.iacr.org/2005/075) +- SHA-1 - Broken (https://eprint.iacr.org/2017/190) diff --git a/core/crypto/legacy/keccak/keccak.odin b/core/crypto/legacy/keccak/keccak.odin new file mode 100644 index 000000000..09db853a6 --- /dev/null +++ b/core/crypto/legacy/keccak/keccak.odin @@ -0,0 +1,377 @@ +package keccak + +/* + Copyright 2021 zhibog + Made available under the BSD-3 license. + + List of contributors: + zhibog, dotbmp: Initial implementation. + + Interface for the Keccak hashing algorithm. + This is done because the padding in the SHA3 standard was changed by the NIST, resulting in a different output. +*/ + +import "core:io" +import "core:os" + +import "../../_sha3" + +/* + High level API +*/ + +DIGEST_SIZE_224 :: 28 +DIGEST_SIZE_256 :: 32 +DIGEST_SIZE_384 :: 48 +DIGEST_SIZE_512 :: 64 + +// hash_string_224 will hash the given input and return the +// computed hash +hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte { + return hash_bytes_224(transmute([]byte)(data)) +} + +// hash_bytes_224 will hash the given input and return the +// computed hash +hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte { + hash: [DIGEST_SIZE_224]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_224 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer_224 will hash the given input and assign the +// computed hash to the second parameter. 
+// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer_224 :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer_224(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer_224 will hash the given input and write the +// computed hash into the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer_224 :: proc(data, hash: []byte) { + ctx: Context + ctx.mdlen = DIGEST_SIZE_224 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash) +} + +// hash_stream_224 will read the stream in chunks and compute a +// hash from its contents +hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) { + hash: [DIGEST_SIZE_224]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_224 + ctx.is_keccak = true + init(&ctx) + + buf := make([]byte, 512) + defer delete(buf) + + read := 1 + for read > 0 { + read, _ = io.read(s, buf) + if read > 0 { + update(&ctx, buf[:read]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file_224 will read the file provided by the given handle +// and compute a hash +hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) { + if !load_at_once { + return hash_stream_224(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes_224(buf[:]), ok + } + } + return [DIGEST_SIZE_224]byte{}, false +} + +hash_224 :: proc { + hash_stream_224, + hash_file_224, + hash_bytes_224, + hash_string_224, + hash_bytes_to_buffer_224, + hash_string_to_buffer_224, +} + +// hash_string_256 will hash the given input and return the +// computed hash +hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte { + return hash_bytes_256(transmute([]byte)(data)) +} + +// hash_bytes_256 will hash the given input and return the +// computed hash +hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte { + hash: [DIGEST_SIZE_256]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_256 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer_256 will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer_256 :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer_256(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer_256 will hash the given input and write the +// computed hash into the second parameter. 
+// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer_256 :: proc(data, hash: []byte) { + ctx: Context + ctx.mdlen = DIGEST_SIZE_256 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash) +} + +// hash_stream_256 will read the stream in chunks and compute a +// hash from its contents +hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) { + hash: [DIGEST_SIZE_256]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_256 + ctx.is_keccak = true + init(&ctx) + + buf := make([]byte, 512) + defer delete(buf) + + read := 1 + for read > 0 { + read, _ = io.read(s, buf) + if read > 0 { + update(&ctx, buf[:read]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file_256 will read the file provided by the given handle +// and compute a hash +hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) { + if !load_at_once { + return hash_stream_256(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes_256(buf[:]), ok + } + } + return [DIGEST_SIZE_256]byte{}, false +} + +hash_256 :: proc { + hash_stream_256, + hash_file_256, + hash_bytes_256, + hash_string_256, + hash_bytes_to_buffer_256, + hash_string_to_buffer_256, +} + +// hash_string_384 will hash the given input and return the +// computed hash +hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte { + return hash_bytes_384(transmute([]byte)(data)) +} + +// hash_bytes_384 will hash the given input and return the +// computed hash +hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte { + hash: [DIGEST_SIZE_384]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_384 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer_384 will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer_384 :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer_384(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer_384 will hash the given input and write the +// computed hash into the second parameter. 
+// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer_384 :: proc(data, hash: []byte) { + ctx: Context + ctx.mdlen = DIGEST_SIZE_384 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash) +} + +// hash_stream_384 will read the stream in chunks and compute a +// hash from its contents +hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) { + hash: [DIGEST_SIZE_384]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_384 + ctx.is_keccak = true + init(&ctx) + + buf := make([]byte, 512) + defer delete(buf) + + read := 1 + for read > 0 { + read, _ = io.read(s, buf) + if read > 0 { + update(&ctx, buf[:read]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file_384 will read the file provided by the given handle +// and compute a hash +hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) { + if !load_at_once { + return hash_stream_384(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes_384(buf[:]), ok + } + } + return [DIGEST_SIZE_384]byte{}, false +} + +hash_384 :: proc { + hash_stream_384, + hash_file_384, + hash_bytes_384, + hash_string_384, + hash_bytes_to_buffer_384, + hash_string_to_buffer_384, +} + +// hash_string_512 will hash the given input and return the +// computed hash +hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte { + return hash_bytes_512(transmute([]byte)(data)) +} + +// hash_bytes_512 will hash the given input and return the +// computed hash +hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte { + hash: [DIGEST_SIZE_512]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_512 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer_512 will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer_512 :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer_512(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer_512 will hash the given input and write the +// computed hash into the second parameter. 
+// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer_512 :: proc(data, hash: []byte) { + ctx: Context + ctx.mdlen = DIGEST_SIZE_512 + ctx.is_keccak = true + init(&ctx) + update(&ctx, data) + final(&ctx, hash) +} + +// hash_stream_512 will read the stream in chunks and compute a +// hash from its contents +hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) { + hash: [DIGEST_SIZE_512]byte + ctx: Context + ctx.mdlen = DIGEST_SIZE_512 + ctx.is_keccak = true + init(&ctx) + + buf := make([]byte, 512) + defer delete(buf) + + read := 1 + for read > 0 { + read, _ = io.read(s, buf) + if read > 0 { + update(&ctx, buf[:read]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file_512 will read the file provided by the given handle +// and compute a hash +hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) { + if !load_at_once { + return hash_stream_512(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes_512(buf[:]), ok + } + } + return [DIGEST_SIZE_512]byte{}, false +} + +hash_512 :: proc { + hash_stream_512, + hash_file_512, + hash_bytes_512, + hash_string_512, + hash_bytes_to_buffer_512, + hash_string_to_buffer_512, +} + +/* + Low level API +*/ + +Context :: _sha3.Sha3_Context + +init :: proc(ctx: ^Context) { + ctx.is_keccak = true + _sha3.init(ctx) +} + +update :: proc(ctx: ^Context, data: []byte) { + _sha3.update(ctx, data) +} + +final :: proc(ctx: ^Context, hash: []byte) { + _sha3.final(ctx, hash) +} diff --git a/core/crypto/legacy/md5/md5.odin b/core/crypto/legacy/md5/md5.odin new file mode 100644 index 000000000..69ae087e4 --- /dev/null +++ b/core/crypto/legacy/md5/md5.odin @@ -0,0 +1,295 @@ +package md5 + +/* + Copyright 2021 zhibog + Made available under the BSD-3 license. + + List of contributors: + zhibog, dotbmp: Initial implementation. + + Implementation of the MD5 hashing algorithm, as defined in RFC 1321 +*/ + +import "core:encoding/endian" +import "core:io" +import "core:math/bits" +import "core:mem" +import "core:os" + +/* + High level API +*/ + +DIGEST_SIZE :: 16 + +// hash_string will hash the given input and return the +// computed hash +hash_string :: proc(data: string) -> [DIGEST_SIZE]byte { + return hash_bytes(transmute([]byte)(data)) +} + +// hash_bytes will hash the given input and return the +// computed hash +hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer will hash the given input and write the +// computed hash into the second parameter. 
+// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer :: proc(data, hash: []byte) { + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash) +} + +// hash_stream will read the stream in chunks and compute a +// hash from its contents +hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + + buf := make([]byte, 512) + defer delete(buf) + + read := 1 + for read > 0 { + read, _ = io.read(s, buf) + if read > 0 { + update(&ctx, buf[:read]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file will read the file provided by the given handle +// and compute a hash +hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { + if !load_at_once { + return hash_stream(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes(buf[:]), ok + } + } + return [DIGEST_SIZE]byte{}, false +} + +hash :: proc { + hash_stream, + hash_file, + hash_bytes, + hash_string, + hash_bytes_to_buffer, + hash_string_to_buffer, +} + +/* + Low level API +*/ + +init :: proc(ctx: ^Context) { + ctx.state[0] = 0x67452301 + ctx.state[1] = 0xefcdab89 + ctx.state[2] = 0x98badcfe + ctx.state[3] = 0x10325476 + + ctx.bitlen = 0 + ctx.datalen = 0 + + ctx.is_initialized = true +} + +update :: proc(ctx: ^Context, data: []byte) { + assert(ctx.is_initialized) + + for i := 0; i < len(data); i += 1 { + ctx.data[ctx.datalen] = data[i] + ctx.datalen += 1 + if (ctx.datalen == BLOCK_SIZE) { + transform(ctx, ctx.data[:]) + ctx.bitlen += 512 + ctx.datalen = 0 + } + } +} + +final :: proc(ctx: ^Context, hash: []byte) { + assert(ctx.is_initialized) + + if len(hash) < DIGEST_SIZE { + panic("crypto/md5: invalid destination digest size") + } + + i := ctx.datalen + + if ctx.datalen < 56 { + ctx.data[i] = 0x80 + i += 1 + for i < 56 { + ctx.data[i] = 0x00 + i += 1 + } + } else if ctx.datalen >= 56 { + ctx.data[i] = 0x80 + i += 1 + for i < BLOCK_SIZE { + ctx.data[i] = 0x00 + i += 1 + } + transform(ctx, ctx.data[:]) + mem.set(&ctx.data, 0, 56) + } + + ctx.bitlen += u64(ctx.datalen * 8) + endian.unchecked_put_u64le(ctx.data[56:], ctx.bitlen) + transform(ctx, ctx.data[:]) + + for i = 0; i < DIGEST_SIZE / 4; i += 1 { + endian.unchecked_put_u32le(hash[i * 4:], ctx.state[i]) + } + + ctx.is_initialized = false +} + +/* + MD5 implementation +*/ + +BLOCK_SIZE :: 64 + +Context :: struct { + data: [BLOCK_SIZE]byte, + state: [4]u32, + bitlen: u64, + datalen: u32, + + is_initialized: bool, +} + +/* + @note(zh): F, G, H and I, as mentioned in the RFC, have been inlined into FF, GG, HH + and II respectively, instead of declaring them separately. 
+*/ + +@(private) +FF :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { + return b + bits.rotate_left32(a + ((b & c) | (~b & d)) + m + t, s) +} + +@(private) +GG :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { + return b + bits.rotate_left32(a + ((b & d) | (c & ~d)) + m + t, s) +} + +@(private) +HH :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { + return b + bits.rotate_left32(a + (b ~ c ~ d) + m + t, s) +} + +@(private) +II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { + return b + bits.rotate_left32(a + (c ~ (b | ~d)) + m + t, s) +} + +@(private) +transform :: proc "contextless" (ctx: ^Context, data: []byte) { + m: [DIGEST_SIZE]u32 + + for i := 0; i < DIGEST_SIZE; i += 1 { + m[i] = endian.unchecked_get_u32le(data[i * 4:]) + } + + a := ctx.state[0] + b := ctx.state[1] + c := ctx.state[2] + d := ctx.state[3] + + a = FF(a, b, c, d, m[0], 7, 0xd76aa478) + d = FF(d, a, b, c, m[1], 12, 0xe8c7b756) + c = FF(c, d, a, b, m[2], 17, 0x242070db) + b = FF(b, c, d, a, m[3], 22, 0xc1bdceee) + a = FF(a, b, c, d, m[4], 7, 0xf57c0faf) + d = FF(d, a, b, c, m[5], 12, 0x4787c62a) + c = FF(c, d, a, b, m[6], 17, 0xa8304613) + b = FF(b, c, d, a, m[7], 22, 0xfd469501) + a = FF(a, b, c, d, m[8], 7, 0x698098d8) + d = FF(d, a, b, c, m[9], 12, 0x8b44f7af) + c = FF(c, d, a, b, m[10], 17, 0xffff5bb1) + b = FF(b, c, d, a, m[11], 22, 0x895cd7be) + a = FF(a, b, c, d, m[12], 7, 0x6b901122) + d = FF(d, a, b, c, m[13], 12, 0xfd987193) + c = FF(c, d, a, b, m[14], 17, 0xa679438e) + b = FF(b, c, d, a, m[15], 22, 0x49b40821) + + a = GG(a, b, c, d, m[1], 5, 0xf61e2562) + d = GG(d, a, b, c, m[6], 9, 0xc040b340) + c = GG(c, d, a, b, m[11], 14, 0x265e5a51) + b = GG(b, c, d, a, m[0], 20, 0xe9b6c7aa) + a = GG(a, b, c, d, m[5], 5, 0xd62f105d) + d = GG(d, a, b, c, m[10], 9, 0x02441453) + c = GG(c, d, a, b, m[15], 14, 0xd8a1e681) + b = GG(b, c, d, a, m[4], 20, 0xe7d3fbc8) + a = GG(a, b, c, d, m[9], 5, 0x21e1cde6) + d = GG(d, a, b, c, m[14], 9, 0xc33707d6) + c = GG(c, d, a, b, m[3], 14, 0xf4d50d87) + b = GG(b, c, d, a, m[8], 20, 0x455a14ed) + a = GG(a, b, c, d, m[13], 5, 0xa9e3e905) + d = GG(d, a, b, c, m[2], 9, 0xfcefa3f8) + c = GG(c, d, a, b, m[7], 14, 0x676f02d9) + b = GG(b, c, d, a, m[12], 20, 0x8d2a4c8a) + + a = HH(a, b, c, d, m[5], 4, 0xfffa3942) + d = HH(d, a, b, c, m[8], 11, 0x8771f681) + c = HH(c, d, a, b, m[11], 16, 0x6d9d6122) + b = HH(b, c, d, a, m[14], 23, 0xfde5380c) + a = HH(a, b, c, d, m[1], 4, 0xa4beea44) + d = HH(d, a, b, c, m[4], 11, 0x4bdecfa9) + c = HH(c, d, a, b, m[7], 16, 0xf6bb4b60) + b = HH(b, c, d, a, m[10], 23, 0xbebfbc70) + a = HH(a, b, c, d, m[13], 4, 0x289b7ec6) + d = HH(d, a, b, c, m[0], 11, 0xeaa127fa) + c = HH(c, d, a, b, m[3], 16, 0xd4ef3085) + b = HH(b, c, d, a, m[6], 23, 0x04881d05) + a = HH(a, b, c, d, m[9], 4, 0xd9d4d039) + d = HH(d, a, b, c, m[12], 11, 0xe6db99e5) + c = HH(c, d, a, b, m[15], 16, 0x1fa27cf8) + b = HH(b, c, d, a, m[2], 23, 0xc4ac5665) + + a = II(a, b, c, d, m[0], 6, 0xf4292244) + d = II(d, a, b, c, m[7], 10, 0x432aff97) + c = II(c, d, a, b, m[14], 15, 0xab9423a7) + b = II(b, c, d, a, m[5], 21, 0xfc93a039) + a = II(a, b, c, d, m[12], 6, 0x655b59c3) + d = II(d, a, b, c, m[3], 10, 0x8f0ccc92) + c = II(c, d, a, b, m[10], 15, 0xffeff47d) + b = II(b, c, d, a, m[1], 21, 0x85845dd1) + a = II(a, b, c, d, m[8], 6, 0x6fa87e4f) + d = II(d, a, b, c, m[15], 10, 0xfe2ce6e0) + c = II(c, d, a, b, m[6], 15, 0xa3014314) + b = II(b, c, d, a, m[13], 21, 0x4e0811a1) + a = 
II(a, b, c, d, m[4], 6, 0xf7537e82) + d = II(d, a, b, c, m[11], 10, 0xbd3af235) + c = II(c, d, a, b, m[2], 15, 0x2ad7d2bb) + b = II(b, c, d, a, m[9], 21, 0xeb86d391) + + ctx.state[0] += a + ctx.state[1] += b + ctx.state[2] += c + ctx.state[3] += d +} diff --git a/core/crypto/legacy/sha1/sha1.odin b/core/crypto/legacy/sha1/sha1.odin new file mode 100644 index 000000000..6c4407067 --- /dev/null +++ b/core/crypto/legacy/sha1/sha1.odin @@ -0,0 +1,252 @@ +package sha1 + +/* + Copyright 2021 zhibog + Made available under the BSD-3 license. + + List of contributors: + zhibog, dotbmp: Initial implementation. + + Implementation of the SHA1 hashing algorithm, as defined in RFC 3174 +*/ + +import "core:encoding/endian" +import "core:io" +import "core:math/bits" +import "core:mem" +import "core:os" + +/* + High level API +*/ + +DIGEST_SIZE :: 20 + +// hash_string will hash the given input and return the +// computed hash +hash_string :: proc(data: string) -> [DIGEST_SIZE]byte { + return hash_bytes(transmute([]byte)(data)) +} + +// hash_bytes will hash the given input and return the +// computed hash +hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer will hash the given input and write the +// computed hash into the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer :: proc(data, hash: []byte) { + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash) +} + +// hash_stream will read the stream in chunks and compute a +// hash from its contents +hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + + buf := make([]byte, 512) + defer delete(buf) + + read := 1 + for read > 0 { + read, _ = io.read(s, buf) + if read > 0 { + update(&ctx, buf[:read]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file will read the file provided by the given handle +// and compute a hash +hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { + if !load_at_once { + return hash_stream(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes(buf[:]), ok + } + } + return [DIGEST_SIZE]byte{}, false +} + +hash :: proc { + hash_stream, + hash_file, + hash_bytes, + hash_string, + hash_bytes_to_buffer, + hash_string_to_buffer, +} + +/* + Low level API +*/ + +init :: proc(ctx: ^Context) { + ctx.state[0] = 0x67452301 + ctx.state[1] = 0xefcdab89 + ctx.state[2] = 0x98badcfe + ctx.state[3] = 0x10325476 + ctx.state[4] = 0xc3d2e1f0 + ctx.k[0] = 0x5a827999 + ctx.k[1] = 0x6ed9eba1 + ctx.k[2] = 0x8f1bbcdc + ctx.k[3] = 0xca62c1d6 + + ctx.datalen = 0 + ctx.bitlen = 0 + + ctx.is_initialized = true +} + +update :: proc(ctx: ^Context, data: []byte) { + assert(ctx.is_initialized) + + for i := 0; i < len(data); i += 1 { + ctx.data[ctx.datalen] = data[i] + ctx.datalen += 1 + if (ctx.datalen == BLOCK_SIZE) { + transform(ctx, ctx.data[:]) + ctx.bitlen += 512 + ctx.datalen = 0 + } + } +} + +final :: proc(ctx: ^Context, hash: []byte) { + 
assert(ctx.is_initialized) + + if len(hash) < DIGEST_SIZE { + panic("crypto/sha1: invalid destination digest size") + } + + i := ctx.datalen + + if ctx.datalen < 56 { + ctx.data[i] = 0x80 + i += 1 + for i < 56 { + ctx.data[i] = 0x00 + i += 1 + } + } else { + ctx.data[i] = 0x80 + i += 1 + for i < BLOCK_SIZE { + ctx.data[i] = 0x00 + i += 1 + } + transform(ctx, ctx.data[:]) + mem.set(&ctx.data, 0, 56) + } + + ctx.bitlen += u64(ctx.datalen * 8) + endian.unchecked_put_u64be(ctx.data[56:], ctx.bitlen) + transform(ctx, ctx.data[:]) + + for i = 0; i < DIGEST_SIZE / 4; i += 1 { + endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i]) + } + + ctx.is_initialized = false +} + +/* + SHA1 implementation +*/ + +BLOCK_SIZE :: 64 + +Context :: struct { + data: [BLOCK_SIZE]byte, + datalen: u32, + bitlen: u64, + state: [5]u32, + k: [4]u32, + + is_initialized: bool, +} + +@(private) +transform :: proc "contextless" (ctx: ^Context, data: []byte) { + a, b, c, d, e, i, t: u32 + m: [80]u32 + + for i = 0; i < 16; i += 1 { + m[i] = endian.unchecked_get_u32be(data[i * 4:]) + } + for i < 80 { + m[i] = (m[i - 3] ~ m[i - 8] ~ m[i - 14] ~ m[i - 16]) + m[i] = (m[i] << 1) | (m[i] >> 31) + i += 1 + } + + a = ctx.state[0] + b = ctx.state[1] + c = ctx.state[2] + d = ctx.state[3] + e = ctx.state[4] + + for i = 0; i < 20; i += 1 { + t = bits.rotate_left32(a, 5) + ((b & c) ~ (~b & d)) + e + ctx.k[0] + m[i] + e = d + d = c + c = bits.rotate_left32(b, 30) + b = a + a = t + } + for i < 40 { + t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[1] + m[i] + e = d + d = c + c = bits.rotate_left32(b, 30) + b = a + a = t + i += 1 + } + for i < 60 { + t = bits.rotate_left32(a, 5) + ((b & c) ~ (b & d) ~ (c & d)) + e + ctx.k[2] + m[i] + e = d + d = c + c = bits.rotate_left32(b, 30) + b = a + a = t + i += 1 + } + for i < 80 { + t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[3] + m[i] + e = d + d = c + c = bits.rotate_left32(b, 30) + b = a + a = t + i += 1 + } + + ctx.state[0] += a + ctx.state[1] += b + ctx.state[2] += c + ctx.state[3] += d + ctx.state[4] += e +} diff --git a/core/crypto/md5/md5.odin b/core/crypto/md5/md5.odin deleted file mode 100644 index 69ae087e4..000000000 --- a/core/crypto/md5/md5.odin +++ /dev/null @@ -1,295 +0,0 @@ -package md5 - -/* - Copyright 2021 zhibog - Made available under the BSD-3 license. - - List of contributors: - zhibog, dotbmp: Initial implementation. - - Implementation of the MD5 hashing algorithm, as defined in RFC 1321 -*/ - -import "core:encoding/endian" -import "core:io" -import "core:math/bits" -import "core:mem" -import "core:os" - -/* - High level API -*/ - -DIGEST_SIZE :: 16 - -// hash_string will hash the given input and return the -// computed hash -hash_string :: proc(data: string) -> [DIGEST_SIZE]byte { - return hash_bytes(transmute([]byte)(data)) -} - -// hash_bytes will hash the given input and return the -// computed hash -hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer will hash the given input and write the -// computed hash into the second parameter. 
-// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer :: proc(data, hash: []byte) { - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash) -} - -// hash_stream will read the stream in chunks and compute a -// hash from its contents -hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - - buf := make([]byte, 512) - defer delete(buf) - - read := 1 - for read > 0 { - read, _ = io.read(s, buf) - if read > 0 { - update(&ctx, buf[:read]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file will read the file provided by the given handle -// and compute a hash -hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { - if !load_at_once { - return hash_stream(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes(buf[:]), ok - } - } - return [DIGEST_SIZE]byte{}, false -} - -hash :: proc { - hash_stream, - hash_file, - hash_bytes, - hash_string, - hash_bytes_to_buffer, - hash_string_to_buffer, -} - -/* - Low level API -*/ - -init :: proc(ctx: ^Context) { - ctx.state[0] = 0x67452301 - ctx.state[1] = 0xefcdab89 - ctx.state[2] = 0x98badcfe - ctx.state[3] = 0x10325476 - - ctx.bitlen = 0 - ctx.datalen = 0 - - ctx.is_initialized = true -} - -update :: proc(ctx: ^Context, data: []byte) { - assert(ctx.is_initialized) - - for i := 0; i < len(data); i += 1 { - ctx.data[ctx.datalen] = data[i] - ctx.datalen += 1 - if (ctx.datalen == BLOCK_SIZE) { - transform(ctx, ctx.data[:]) - ctx.bitlen += 512 - ctx.datalen = 0 - } - } -} - -final :: proc(ctx: ^Context, hash: []byte) { - assert(ctx.is_initialized) - - if len(hash) < DIGEST_SIZE { - panic("crypto/md5: invalid destination digest size") - } - - i := ctx.datalen - - if ctx.datalen < 56 { - ctx.data[i] = 0x80 - i += 1 - for i < 56 { - ctx.data[i] = 0x00 - i += 1 - } - } else if ctx.datalen >= 56 { - ctx.data[i] = 0x80 - i += 1 - for i < BLOCK_SIZE { - ctx.data[i] = 0x00 - i += 1 - } - transform(ctx, ctx.data[:]) - mem.set(&ctx.data, 0, 56) - } - - ctx.bitlen += u64(ctx.datalen * 8) - endian.unchecked_put_u64le(ctx.data[56:], ctx.bitlen) - transform(ctx, ctx.data[:]) - - for i = 0; i < DIGEST_SIZE / 4; i += 1 { - endian.unchecked_put_u32le(hash[i * 4:], ctx.state[i]) - } - - ctx.is_initialized = false -} - -/* - MD5 implementation -*/ - -BLOCK_SIZE :: 64 - -Context :: struct { - data: [BLOCK_SIZE]byte, - state: [4]u32, - bitlen: u64, - datalen: u32, - - is_initialized: bool, -} - -/* - @note(zh): F, G, H and I, as mentioned in the RFC, have been inlined into FF, GG, HH - and II respectively, instead of declaring them separately. 
-*/ - -@(private) -FF :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { - return b + bits.rotate_left32(a + ((b & c) | (~b & d)) + m + t, s) -} - -@(private) -GG :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { - return b + bits.rotate_left32(a + ((b & d) | (c & ~d)) + m + t, s) -} - -@(private) -HH :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { - return b + bits.rotate_left32(a + (b ~ c ~ d) + m + t, s) -} - -@(private) -II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u32 { - return b + bits.rotate_left32(a + (c ~ (b | ~d)) + m + t, s) -} - -@(private) -transform :: proc "contextless" (ctx: ^Context, data: []byte) { - m: [DIGEST_SIZE]u32 - - for i := 0; i < DIGEST_SIZE; i += 1 { - m[i] = endian.unchecked_get_u32le(data[i * 4:]) - } - - a := ctx.state[0] - b := ctx.state[1] - c := ctx.state[2] - d := ctx.state[3] - - a = FF(a, b, c, d, m[0], 7, 0xd76aa478) - d = FF(d, a, b, c, m[1], 12, 0xe8c7b756) - c = FF(c, d, a, b, m[2], 17, 0x242070db) - b = FF(b, c, d, a, m[3], 22, 0xc1bdceee) - a = FF(a, b, c, d, m[4], 7, 0xf57c0faf) - d = FF(d, a, b, c, m[5], 12, 0x4787c62a) - c = FF(c, d, a, b, m[6], 17, 0xa8304613) - b = FF(b, c, d, a, m[7], 22, 0xfd469501) - a = FF(a, b, c, d, m[8], 7, 0x698098d8) - d = FF(d, a, b, c, m[9], 12, 0x8b44f7af) - c = FF(c, d, a, b, m[10], 17, 0xffff5bb1) - b = FF(b, c, d, a, m[11], 22, 0x895cd7be) - a = FF(a, b, c, d, m[12], 7, 0x6b901122) - d = FF(d, a, b, c, m[13], 12, 0xfd987193) - c = FF(c, d, a, b, m[14], 17, 0xa679438e) - b = FF(b, c, d, a, m[15], 22, 0x49b40821) - - a = GG(a, b, c, d, m[1], 5, 0xf61e2562) - d = GG(d, a, b, c, m[6], 9, 0xc040b340) - c = GG(c, d, a, b, m[11], 14, 0x265e5a51) - b = GG(b, c, d, a, m[0], 20, 0xe9b6c7aa) - a = GG(a, b, c, d, m[5], 5, 0xd62f105d) - d = GG(d, a, b, c, m[10], 9, 0x02441453) - c = GG(c, d, a, b, m[15], 14, 0xd8a1e681) - b = GG(b, c, d, a, m[4], 20, 0xe7d3fbc8) - a = GG(a, b, c, d, m[9], 5, 0x21e1cde6) - d = GG(d, a, b, c, m[14], 9, 0xc33707d6) - c = GG(c, d, a, b, m[3], 14, 0xf4d50d87) - b = GG(b, c, d, a, m[8], 20, 0x455a14ed) - a = GG(a, b, c, d, m[13], 5, 0xa9e3e905) - d = GG(d, a, b, c, m[2], 9, 0xfcefa3f8) - c = GG(c, d, a, b, m[7], 14, 0x676f02d9) - b = GG(b, c, d, a, m[12], 20, 0x8d2a4c8a) - - a = HH(a, b, c, d, m[5], 4, 0xfffa3942) - d = HH(d, a, b, c, m[8], 11, 0x8771f681) - c = HH(c, d, a, b, m[11], 16, 0x6d9d6122) - b = HH(b, c, d, a, m[14], 23, 0xfde5380c) - a = HH(a, b, c, d, m[1], 4, 0xa4beea44) - d = HH(d, a, b, c, m[4], 11, 0x4bdecfa9) - c = HH(c, d, a, b, m[7], 16, 0xf6bb4b60) - b = HH(b, c, d, a, m[10], 23, 0xbebfbc70) - a = HH(a, b, c, d, m[13], 4, 0x289b7ec6) - d = HH(d, a, b, c, m[0], 11, 0xeaa127fa) - c = HH(c, d, a, b, m[3], 16, 0xd4ef3085) - b = HH(b, c, d, a, m[6], 23, 0x04881d05) - a = HH(a, b, c, d, m[9], 4, 0xd9d4d039) - d = HH(d, a, b, c, m[12], 11, 0xe6db99e5) - c = HH(c, d, a, b, m[15], 16, 0x1fa27cf8) - b = HH(b, c, d, a, m[2], 23, 0xc4ac5665) - - a = II(a, b, c, d, m[0], 6, 0xf4292244) - d = II(d, a, b, c, m[7], 10, 0x432aff97) - c = II(c, d, a, b, m[14], 15, 0xab9423a7) - b = II(b, c, d, a, m[5], 21, 0xfc93a039) - a = II(a, b, c, d, m[12], 6, 0x655b59c3) - d = II(d, a, b, c, m[3], 10, 0x8f0ccc92) - c = II(c, d, a, b, m[10], 15, 0xffeff47d) - b = II(b, c, d, a, m[1], 21, 0x85845dd1) - a = II(a, b, c, d, m[8], 6, 0x6fa87e4f) - d = II(d, a, b, c, m[15], 10, 0xfe2ce6e0) - c = II(c, d, a, b, m[6], 15, 0xa3014314) - b = II(b, c, d, a, m[13], 21, 0x4e0811a1) - a = 
II(a, b, c, d, m[4], 6, 0xf7537e82) - d = II(d, a, b, c, m[11], 10, 0xbd3af235) - c = II(c, d, a, b, m[2], 15, 0x2ad7d2bb) - b = II(b, c, d, a, m[9], 21, 0xeb86d391) - - ctx.state[0] += a - ctx.state[1] += b - ctx.state[2] += c - ctx.state[3] += d -} diff --git a/core/crypto/sha1/sha1.odin b/core/crypto/sha1/sha1.odin deleted file mode 100644 index 6c4407067..000000000 --- a/core/crypto/sha1/sha1.odin +++ /dev/null @@ -1,252 +0,0 @@ -package sha1 - -/* - Copyright 2021 zhibog - Made available under the BSD-3 license. - - List of contributors: - zhibog, dotbmp: Initial implementation. - - Implementation of the SHA1 hashing algorithm, as defined in RFC 3174 -*/ - -import "core:encoding/endian" -import "core:io" -import "core:math/bits" -import "core:mem" -import "core:os" - -/* - High level API -*/ - -DIGEST_SIZE :: 20 - -// hash_string will hash the given input and return the -// computed hash -hash_string :: proc(data: string) -> [DIGEST_SIZE]byte { - return hash_bytes(transmute([]byte)(data)) -} - -// hash_bytes will hash the given input and return the -// computed hash -hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer will hash the given input and write the -// computed hash into the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer :: proc(data, hash: []byte) { - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash) -} - -// hash_stream will read the stream in chunks and compute a -// hash from its contents -hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - - buf := make([]byte, 512) - defer delete(buf) - - read := 1 - for read > 0 { - read, _ = io.read(s, buf) - if read > 0 { - update(&ctx, buf[:read]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file will read the file provided by the given handle -// and compute a hash -hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { - if !load_at_once { - return hash_stream(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes(buf[:]), ok - } - } - return [DIGEST_SIZE]byte{}, false -} - -hash :: proc { - hash_stream, - hash_file, - hash_bytes, - hash_string, - hash_bytes_to_buffer, - hash_string_to_buffer, -} - -/* - Low level API -*/ - -init :: proc(ctx: ^Context) { - ctx.state[0] = 0x67452301 - ctx.state[1] = 0xefcdab89 - ctx.state[2] = 0x98badcfe - ctx.state[3] = 0x10325476 - ctx.state[4] = 0xc3d2e1f0 - ctx.k[0] = 0x5a827999 - ctx.k[1] = 0x6ed9eba1 - ctx.k[2] = 0x8f1bbcdc - ctx.k[3] = 0xca62c1d6 - - ctx.datalen = 0 - ctx.bitlen = 0 - - ctx.is_initialized = true -} - -update :: proc(ctx: ^Context, data: []byte) { - assert(ctx.is_initialized) - - for i := 0; i < len(data); i += 1 { - ctx.data[ctx.datalen] = data[i] - ctx.datalen += 1 - if (ctx.datalen == BLOCK_SIZE) { - transform(ctx, ctx.data[:]) - ctx.bitlen += 512 - ctx.datalen = 0 - } - } -} - -final :: proc(ctx: ^Context, hash: []byte) { - assert(ctx.is_initialized) - - if 
len(hash) < DIGEST_SIZE { - panic("crypto/sha1: invalid destination digest size") - } - - i := ctx.datalen - - if ctx.datalen < 56 { - ctx.data[i] = 0x80 - i += 1 - for i < 56 { - ctx.data[i] = 0x00 - i += 1 - } - } else { - ctx.data[i] = 0x80 - i += 1 - for i < BLOCK_SIZE { - ctx.data[i] = 0x00 - i += 1 - } - transform(ctx, ctx.data[:]) - mem.set(&ctx.data, 0, 56) - } - - ctx.bitlen += u64(ctx.datalen * 8) - endian.unchecked_put_u64be(ctx.data[56:], ctx.bitlen) - transform(ctx, ctx.data[:]) - - for i = 0; i < DIGEST_SIZE / 4; i += 1 { - endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i]) - } - - ctx.is_initialized = false -} - -/* - SHA1 implementation -*/ - -BLOCK_SIZE :: 64 - -Context :: struct { - data: [BLOCK_SIZE]byte, - datalen: u32, - bitlen: u64, - state: [5]u32, - k: [4]u32, - - is_initialized: bool, -} - -@(private) -transform :: proc "contextless" (ctx: ^Context, data: []byte) { - a, b, c, d, e, i, t: u32 - m: [80]u32 - - for i = 0; i < 16; i += 1 { - m[i] = endian.unchecked_get_u32be(data[i * 4:]) - } - for i < 80 { - m[i] = (m[i - 3] ~ m[i - 8] ~ m[i - 14] ~ m[i - 16]) - m[i] = (m[i] << 1) | (m[i] >> 31) - i += 1 - } - - a = ctx.state[0] - b = ctx.state[1] - c = ctx.state[2] - d = ctx.state[3] - e = ctx.state[4] - - for i = 0; i < 20; i += 1 { - t = bits.rotate_left32(a, 5) + ((b & c) ~ (~b & d)) + e + ctx.k[0] + m[i] - e = d - d = c - c = bits.rotate_left32(b, 30) - b = a - a = t - } - for i < 40 { - t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[1] + m[i] - e = d - d = c - c = bits.rotate_left32(b, 30) - b = a - a = t - i += 1 - } - for i < 60 { - t = bits.rotate_left32(a, 5) + ((b & c) ~ (b & d) ~ (c & d)) + e + ctx.k[2] + m[i] - e = d - d = c - c = bits.rotate_left32(b, 30) - b = a - a = t - i += 1 - } - for i < 80 { - t = bits.rotate_left32(a, 5) + (b ~ c ~ d) + e + ctx.k[3] + m[i] - e = d - d = c - c = bits.rotate_left32(b, 30) - b = a - a = t - i += 1 - } - - ctx.state[0] += a - ctx.state[1] += b - ctx.state[2] += c - ctx.state[3] += d - ctx.state[4] += e -} diff --git a/examples/all/all_main.odin b/examples/all/all_main.odin index bdbaf0e6c..0872e0550 100644 --- a/examples/all/all_main.odin +++ b/examples/all/all_main.odin @@ -27,10 +27,10 @@ import blake2b "core:crypto/blake2b" import blake2s "core:crypto/blake2s" import chacha20 "core:crypto/chacha20" import chacha20poly1305 "core:crypto/chacha20poly1305" -import keccak "core:crypto/keccak" -import md5 "core:crypto/md5" +import keccak "core:crypto/legacy/keccak" +import md5 "core:crypto/legacy/md5" +import sha1 "core:crypto/legacy/sha1" import poly1305 "core:crypto/poly1305" -import sha1 "core:crypto/sha1" import sha2 "core:crypto/sha2" import sha3 "core:crypto/sha3" import shake "core:crypto/shake" diff --git a/examples/all/all_vendor.odin b/examples/all/all_vendor.odin index 075276c11..0e92c94bb 100644 --- a/examples/all/all_vendor.odin +++ b/examples/all/all_vendor.odin @@ -2,9 +2,9 @@ package all import botan_bindings "vendor:botan/bindings" import botan_blake2b "vendor:botan/blake2b" -import keccak "vendor:botan/keccak" -import md5 "vendor:botan/md5" -import sha1 "vendor:botan/sha1" +import keccak "vendor:botan/legacy/keccak" +import md5 "vendor:botan/legacy/md5" +import sha1 "vendor:botan/legacy/sha1" import sha2 "vendor:botan/sha2" import sha3 "vendor:botan/sha3" import shake "vendor:botan/shake" diff --git a/tests/core/crypto/test_core_crypto.odin b/tests/core/crypto/test_core_crypto.odin index 508af63ad..0e347a702 100644 --- a/tests/core/crypto/test_core_crypto.odin +++ 
b/tests/core/crypto/test_core_crypto.odin @@ -16,16 +16,16 @@ import "core:testing" import "core:fmt" import "core:strings" -import "core:crypto/md5" -import "core:crypto/sha1" import "core:crypto/sha2" import "core:crypto/sha3" -import "core:crypto/keccak" import "core:crypto/shake" import "core:crypto/blake2b" import "core:crypto/blake2s" import "core:crypto/sm3" import "core:crypto/siphash" +import "core:crypto/legacy/keccak" +import "core:crypto/legacy/md5" +import "core:crypto/legacy/sha1" import "core:os" TEST_count := 0 diff --git a/tests/vendor/botan/test_vendor_botan.odin b/tests/vendor/botan/test_vendor_botan.odin index 661f79790..465589407 100644 --- a/tests/vendor/botan/test_vendor_botan.odin +++ b/tests/vendor/botan/test_vendor_botan.odin @@ -17,11 +17,11 @@ import "core:fmt" import "core:os" import "core:strings" -import "vendor:botan/md5" -import "vendor:botan/sha1" +import "vendor:botan/legacy/md5" +import "vendor:botan/legacy/sha1" import "vendor:botan/sha2" import "vendor:botan/sha3" -import "vendor:botan/keccak" +import "vendor:botan/legacy/keccak" import "vendor:botan/shake" import "vendor:botan/blake2b" import "vendor:botan/sm3" diff --git a/vendor/botan/README.md b/vendor/botan/README.md index 890949158..8dc4e0575 100644 --- a/vendor/botan/README.md +++ b/vendor/botan/README.md @@ -9,13 +9,13 @@ Wrappers for hashing algorithms have been added to match the API within the Odin | Algorithm | | |:-------------------------------------------------------------------------------------------------------------|:-----------------| | [BLAKE2B](https://datatracker.ietf.org/doc/html/rfc7693) | ✔️ | -| [Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ | -| [MD5](https://datatracker.ietf.org/doc/html/rfc1321) | ✔️ | -| [SHA-1](https://datatracker.ietf.org/doc/html/rfc3174) | ✔️ | | [SHA-2](https://csrc.nist.gov/csrc/media/publications/fips/180/2/archive/2002-08-01/documents/fips180-2.pdf) | ✔️ | | [SHA-3](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ | | [SHAKE](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ | | [SM3](https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02) | ✔️ | +| legacy/[Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf) | ✔️ | +| legacy/[MD5](https://datatracker.ietf.org/doc/html/rfc1321) | ✔️ | +| legacy/[SHA-1](https://datatracker.ietf.org/doc/html/rfc3174) | ✔️ | #### High level API Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_`. diff --git a/vendor/botan/keccak/keccak.odin b/vendor/botan/keccak/keccak.odin deleted file mode 100644 index 1d08f427d..000000000 --- a/vendor/botan/keccak/keccak.odin +++ /dev/null @@ -1,118 +0,0 @@ -package vendor_keccak - -/* - Copyright 2021 zhibog - Made available under the BSD-3 license. - - List of contributors: - zhibog, dotbmp: Initial implementation. - - Interface for the Keccak hashing algorithm. 
- The hash will be computed via bindings to the Botan crypto library -*/ - -import "core:os" -import "core:io" - -import botan "../bindings" - -/* - High level API -*/ - -DIGEST_SIZE_512 :: 64 - -// hash_string_512 will hash the given input and return the -// computed hash -hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte { - return hash_bytes_512(transmute([]byte)(data)) -} - -// hash_bytes_512 will hash the given input and return the -// computed hash -hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte { - hash: [DIGEST_SIZE_512]byte - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer_512 will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer_512 :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer_512(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer_512 will hash the given input and write the -// computed hash into the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer_512 :: proc(data, hash: []byte) { - assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size") - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) -} - -// hash_stream_512 will read the stream in chunks and compute a -// hash from its contents -hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) { - hash: [DIGEST_SIZE_512]byte - ctx: Context - init(&ctx) - buf := make([]byte, 512) - defer delete(buf) - i := 1 - for i > 0 { - i, _ = io.read(s, buf) - if i > 0 { - update(&ctx, buf[:i]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file_512 will read the file provided by the given handle -// and compute a hash -hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) { - if !load_at_once { - return hash_stream_512(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes_512(buf[:]), ok - } - } - return [DIGEST_SIZE_512]byte{}, false -} - -hash_512 :: proc { - hash_stream_512, - hash_file_512, - hash_bytes_512, - hash_string_512, - hash_bytes_to_buffer_512, - hash_string_to_buffer_512, -} - -/* - Low level API -*/ - -Context :: botan.hash_t - -init :: proc "contextless" (ctx: ^Context) { - botan.hash_init(ctx, botan.HASH_KECCAK_512, 0) -} - -update :: proc "contextless" (ctx: ^Context, data: []byte) { - botan.hash_update(ctx^, len(data) == 0 ? nil : &data[0], uint(len(data))) -} - -final :: proc "contextless" (ctx: ^Context, hash: []byte) { - botan.hash_final(ctx^, &hash[0]) - botan.hash_destroy(ctx^) -} diff --git a/vendor/botan/legacy/README.md b/vendor/botan/legacy/README.md new file mode 100644 index 000000000..e1ba6f54b --- /dev/null +++ b/vendor/botan/legacy/README.md @@ -0,0 +1,10 @@ +# crypto/legacy + +These are algorithms that are shipped solely for the purpose of +interoperability with legacy systems. The use of these packages in +any other capacity is discouraged, especially those that are known +to be broken. 
+ +- keccak - The draft version of the algorithm that became SHA-3 +- MD5 - Broken (https://eprint.iacr.org/2005/075) +- SHA-1 - Broken (https://eprint.iacr.org/2017/190) diff --git a/vendor/botan/legacy/keccak/keccak.odin b/vendor/botan/legacy/keccak/keccak.odin new file mode 100644 index 000000000..02f05378c --- /dev/null +++ b/vendor/botan/legacy/keccak/keccak.odin @@ -0,0 +1,118 @@ +package vendor_keccak + +/* + Copyright 2021 zhibog + Made available under the BSD-3 license. + + List of contributors: + zhibog, dotbmp: Initial implementation. + + Interface for the Keccak hashing algorithm. + The hash will be computed via bindings to the Botan crypto library +*/ + +import "core:os" +import "core:io" + +import botan "../../bindings" + +/* + High level API +*/ + +DIGEST_SIZE_512 :: 64 + +// hash_string_512 will hash the given input and return the +// computed hash +hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte { + return hash_bytes_512(transmute([]byte)(data)) +} + +// hash_bytes_512 will hash the given input and return the +// computed hash +hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte { + hash: [DIGEST_SIZE_512]byte + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer_512 will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer_512 :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer_512(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer_512 will hash the given input and write the +// computed hash into the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer_512 :: proc(data, hash: []byte) { + assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size") + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) +} + +// hash_stream_512 will read the stream in chunks and compute a +// hash from its contents +hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) { + hash: [DIGEST_SIZE_512]byte + ctx: Context + init(&ctx) + buf := make([]byte, 512) + defer delete(buf) + i := 1 + for i > 0 { + i, _ = io.read(s, buf) + if i > 0 { + update(&ctx, buf[:i]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file_512 will read the file provided by the given handle +// and compute a hash +hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) { + if !load_at_once { + return hash_stream_512(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes_512(buf[:]), ok + } + } + return [DIGEST_SIZE_512]byte{}, false +} + +hash_512 :: proc { + hash_stream_512, + hash_file_512, + hash_bytes_512, + hash_string_512, + hash_bytes_to_buffer_512, + hash_string_to_buffer_512, +} + +/* + Low level API +*/ + +Context :: botan.hash_t + +init :: proc "contextless" (ctx: ^Context) { + botan.hash_init(ctx, botan.HASH_KECCAK_512, 0) +} + +update :: proc "contextless" (ctx: ^Context, data: []byte) { + botan.hash_update(ctx^, len(data) == 0 ? 
nil : &data[0], uint(len(data))) +} + +final :: proc "contextless" (ctx: ^Context, hash: []byte) { + botan.hash_final(ctx^, &hash[0]) + botan.hash_destroy(ctx^) +} diff --git a/vendor/botan/legacy/md5/md5.odin b/vendor/botan/legacy/md5/md5.odin new file mode 100644 index 000000000..7071a9234 --- /dev/null +++ b/vendor/botan/legacy/md5/md5.odin @@ -0,0 +1,118 @@ +package vendor_md5 + +/* + Copyright 2021 zhibog + Made available under the BSD-3 license. + + List of contributors: + zhibog: Initial implementation. + + Interface for the MD5 hashing algorithm. + The hash will be computed via bindings to the Botan crypto library +*/ + +import "core:os" +import "core:io" + +import botan "../../bindings" + +/* + High level API +*/ + +DIGEST_SIZE :: 16 + +// hash_string will hash the given input and return the +// computed hash +hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte { + return hash_bytes(transmute([]byte)(data)) +} + +// hash_bytes will hash the given input and return the +// computed hash +hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer will hash the given input and write the +// computed hash into the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer :: proc(data, hash: []byte) { + assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size") + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) +} + +// hash_stream will read the stream in chunks and compute a +// hash from its contents +hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + buf := make([]byte, 512) + defer delete(buf) + i := 1 + for i > 0 { + i, _ = io.read(s, buf) + if i > 0 { + update(&ctx, buf[:i]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file will read the file provided by the given handle +// and compute a hash +hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { + if !load_at_once { + return hash_stream(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes(buf[:]), ok + } + } + return [DIGEST_SIZE]byte{}, false +} + +hash :: proc { + hash_stream, + hash_file, + hash_bytes, + hash_string, + hash_bytes_to_buffer, + hash_string_to_buffer, +} + +/* + Low level API +*/ + +Context :: botan.hash_t + +init :: proc "contextless" (ctx: ^Context) { + botan.hash_init(ctx, botan.HASH_MD5, 0) +} + +update :: proc "contextless" (ctx: ^Context, data: []byte) { + botan.hash_update(ctx^, len(data) == 0 ? 
nil : &data[0], uint(len(data))) +} + +final :: proc "contextless" (ctx: ^Context, hash: []byte) { + botan.hash_final(ctx^, &hash[0]) + botan.hash_destroy(ctx^) +} diff --git a/vendor/botan/legacy/sha1/sha1.odin b/vendor/botan/legacy/sha1/sha1.odin new file mode 100644 index 000000000..0fc79d6cc --- /dev/null +++ b/vendor/botan/legacy/sha1/sha1.odin @@ -0,0 +1,118 @@ +package vendor_sha1 + +/* + Copyright 2021 zhibog + Made available under the BSD-3 license. + + List of contributors: + zhibog: Initial implementation. + + Interface for the SHA-1 hashing algorithm. + The hash will be computed via bindings to the Botan crypto library +*/ + +import "core:os" +import "core:io" + +import botan "../../bindings" + +/* + High level API +*/ + +DIGEST_SIZE :: 20 + +// hash_string will hash the given input and return the +// computed hash +hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte { + return hash_bytes(transmute([]byte)(data)) +} + +// hash_bytes will hash the given input and return the +// computed hash +hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) + return hash +} + +// hash_string_to_buffer will hash the given input and assign the +// computed hash to the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_string_to_buffer :: proc(data: string, hash: []byte) { + hash_bytes_to_buffer(transmute([]byte)(data), hash) +} + +// hash_bytes_to_buffer will hash the given input and write the +// computed hash into the second parameter. +// It requires that the destination buffer is at least as big as the digest size +hash_bytes_to_buffer :: proc(data, hash: []byte) { + assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size") + ctx: Context + init(&ctx) + update(&ctx, data) + final(&ctx, hash[:]) +} + +// hash_stream will read the stream in chunks and compute a +// hash from its contents +hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { + hash: [DIGEST_SIZE]byte + ctx: Context + init(&ctx) + buf := make([]byte, 512) + defer delete(buf) + i := 1 + for i > 0 { + i, _ = io.read(s, buf) + if i > 0 { + update(&ctx, buf[:i]) + } + } + final(&ctx, hash[:]) + return hash, true +} + +// hash_file will read the file provided by the given handle +// and compute a hash +hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { + if !load_at_once { + return hash_stream(os.stream_from_handle(hd)) + } else { + if buf, ok := os.read_entire_file(hd); ok { + return hash_bytes(buf[:]), ok + } + } + return [DIGEST_SIZE]byte{}, false +} + +hash :: proc { + hash_stream, + hash_file, + hash_bytes, + hash_string, + hash_bytes_to_buffer, + hash_string_to_buffer, +} + +/* + Low level API +*/ + +Context :: botan.hash_t + +init :: proc "contextless" (ctx: ^Context) { + botan.hash_init(ctx, botan.HASH_SHA1, 0) +} + +update :: proc "contextless" (ctx: ^Context, data: []byte) { + botan.hash_update(ctx^, len(data) == 0 ? nil : &data[0], uint(len(data))) +} + +final :: proc "contextless" (ctx: ^Context, hash: []byte) { + botan.hash_final(ctx^, &hash[0]) + botan.hash_destroy(ctx^) +} diff --git a/vendor/botan/md5/md5.odin b/vendor/botan/md5/md5.odin deleted file mode 100644 index c2ed7c45c..000000000 --- a/vendor/botan/md5/md5.odin +++ /dev/null @@ -1,118 +0,0 @@ -package vendor_md5 - -/* - Copyright 2021 zhibog - Made available under the BSD-3 license. 
- - List of contributors: - zhibog: Initial implementation. - - Interface for the MD5 hashing algorithm. - The hash will be computed via bindings to the Botan crypto library -*/ - -import "core:os" -import "core:io" - -import botan "../bindings" - -/* - High level API -*/ - -DIGEST_SIZE :: 16 - -// hash_string will hash the given input and return the -// computed hash -hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte { - return hash_bytes(transmute([]byte)(data)) -} - -// hash_bytes will hash the given input and return the -// computed hash -hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer will hash the given input and write the -// computed hash into the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer :: proc(data, hash: []byte) { - assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size") - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) -} - -// hash_stream will read the stream in chunks and compute a -// hash from its contents -hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - buf := make([]byte, 512) - defer delete(buf) - i := 1 - for i > 0 { - i, _ = io.read(s, buf) - if i > 0 { - update(&ctx, buf[:i]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file will read the file provided by the given handle -// and compute a hash -hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { - if !load_at_once { - return hash_stream(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes(buf[:]), ok - } - } - return [DIGEST_SIZE]byte{}, false -} - -hash :: proc { - hash_stream, - hash_file, - hash_bytes, - hash_string, - hash_bytes_to_buffer, - hash_string_to_buffer, -} - -/* - Low level API -*/ - -Context :: botan.hash_t - -init :: proc "contextless" (ctx: ^Context) { - botan.hash_init(ctx, botan.HASH_MD5, 0) -} - -update :: proc "contextless" (ctx: ^Context, data: []byte) { - botan.hash_update(ctx^, len(data) == 0 ? nil : &data[0], uint(len(data))) -} - -final :: proc "contextless" (ctx: ^Context, hash: []byte) { - botan.hash_final(ctx^, &hash[0]) - botan.hash_destroy(ctx^) -} diff --git a/vendor/botan/sha1/sha1.odin b/vendor/botan/sha1/sha1.odin deleted file mode 100644 index 929f4c0be..000000000 --- a/vendor/botan/sha1/sha1.odin +++ /dev/null @@ -1,118 +0,0 @@ -package vendor_sha1 - -/* - Copyright 2021 zhibog - Made available under the BSD-3 license. - - List of contributors: - zhibog: Initial implementation. - - Interface for the SHA-1 hashing algorithm. 
- The hash will be computed via bindings to the Botan crypto library -*/ - -import "core:os" -import "core:io" - -import botan "../bindings" - -/* - High level API -*/ - -DIGEST_SIZE :: 20 - -// hash_string will hash the given input and return the -// computed hash -hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte { - return hash_bytes(transmute([]byte)(data)) -} - -// hash_bytes will hash the given input and return the -// computed hash -hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) - return hash -} - -// hash_string_to_buffer will hash the given input and assign the -// computed hash to the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_string_to_buffer :: proc(data: string, hash: []byte) { - hash_bytes_to_buffer(transmute([]byte)(data), hash) -} - -// hash_bytes_to_buffer will hash the given input and write the -// computed hash into the second parameter. -// It requires that the destination buffer is at least as big as the digest size -hash_bytes_to_buffer :: proc(data, hash: []byte) { - assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size") - ctx: Context - init(&ctx) - update(&ctx, data) - final(&ctx, hash[:]) -} - -// hash_stream will read the stream in chunks and compute a -// hash from its contents -hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) { - hash: [DIGEST_SIZE]byte - ctx: Context - init(&ctx) - buf := make([]byte, 512) - defer delete(buf) - i := 1 - for i > 0 { - i, _ = io.read(s, buf) - if i > 0 { - update(&ctx, buf[:i]) - } - } - final(&ctx, hash[:]) - return hash, true -} - -// hash_file will read the file provided by the given handle -// and compute a hash -hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) { - if !load_at_once { - return hash_stream(os.stream_from_handle(hd)) - } else { - if buf, ok := os.read_entire_file(hd); ok { - return hash_bytes(buf[:]), ok - } - } - return [DIGEST_SIZE]byte{}, false -} - -hash :: proc { - hash_stream, - hash_file, - hash_bytes, - hash_string, - hash_bytes_to_buffer, - hash_string_to_buffer, -} - -/* - Low level API -*/ - -Context :: botan.hash_t - -init :: proc "contextless" (ctx: ^Context) { - botan.hash_init(ctx, botan.HASH_SHA1, 0) -} - -update :: proc "contextless" (ctx: ^Context, data: []byte) { - botan.hash_update(ctx^, len(data) == 0 ? nil : &data[0], uint(len(data))) -} - -final :: proc "contextless" (ctx: ^Context, hash: []byte) { - botan.hash_final(ctx^, &hash[0]) - botan.hash_destroy(ctx^) -} -- cgit v1.2.3
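
For callers, the only required change is the import path; the package APIs are
unchanged. Below is a minimal usage sketch against the relocated core packages,
assuming they keep the same surface the Botan wrappers above show, that is
`hash_string`/`hash_string_512`, a `DIGEST_SIZE` constant, and a low level
`Context` with `init`/`update`/`final`. The moved core files are not reproduced
in full in this patch, so treat those exact names as assumptions.

package legacy_hash_example

import "core:fmt"

// Import paths as introduced by this change; the old
// "core:crypto/{keccak,md5,sha1}" paths no longer exist.
import "core:crypto/legacy/keccak"
import "core:crypto/legacy/md5"
import "core:crypto/legacy/sha1"

main :: proc() {
	data := "The quick brown fox jumps over the lazy dog"

	// High level API: one call per digest, sized by the algorithm.
	md5_digest := md5.hash_string(data)           // [16]byte
	sha1_digest := sha1.hash_string(data)         // [20]byte
	keccak_digest := keccak.hash_string_512(data) // [64]byte

	fmt.printf("MD5:        %x\n", md5_digest)
	fmt.printf("SHA-1:      %x\n", sha1_digest)
	fmt.printf("Keccak-512: %x\n", keccak_digest)

	// Low level API: feed input incrementally, then finalize
	// into a caller-provided buffer.
	ctx: sha1.Context
	sha1.init(&ctx)
	sha1.update(&ctx, transmute([]byte)(data))

	digest: [sha1.DIGEST_SIZE]byte
	sha1.final(&ctx, digest[:])
	fmt.printf("SHA-1 (incremental): %x\n", digest)
}

The same program should compile against the Botan-backed packages by swapping
the three imports for "vendor:botan/legacy/{keccak,md5,sha1}", since for these
digest sizes the wrappers shown above expose an identical surface.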