-rw-r--r--  .github/workflows/ci.yml | 233
-rw-r--r--  .github/workflows/nightly.yml | 41
-rw-r--r--  base/builtin/builtin.odin | 2
-rw-r--r--  base/intrinsics/intrinsics.odin | 4
-rw-r--r--  base/runtime/core.odin | 40
-rw-r--r--  base/runtime/core_builtin.odin | 10
-rw-r--r--  base/runtime/default_allocators_general.odin | 2
-rw-r--r--  base/runtime/docs.odin | 2
-rw-r--r--  base/runtime/entry_wasm.odin | 36
-rw-r--r--  base/runtime/error_checks.odin | 4
-rw-r--r--  base/runtime/heap_allocator_orca.odin | 29
-rw-r--r--  base/runtime/heap_allocator_other.odin | 2
-rw-r--r--  base/runtime/internal.odin | 4
-rw-r--r--  base/runtime/os_specific_orca.odin | 43
-rw-r--r--  base/runtime/procs.odin | 26
-rw-r--r--  base/runtime/wasm_allocator.odin | 28
-rwxr-xr-x  build_odin.sh | 4
-rw-r--r--  ci/create_nightly_json.py | 51
-rw-r--r--  ci/delete_old_binaries.py | 34
-rw-r--r--  ci/nightly.py | 140
-rwxr-xr-x  ci/upload_create_nightly.sh | 25
-rw-r--r--  core/bufio/reader.odin | 4
-rw-r--r--  core/bytes/buffer.odin | 70
-rw-r--r--  core/c/libc/signal.odin | 2
-rw-r--r--  core/c/libc/stdio.odin | 14
-rw-r--r--  core/crypto/_aes/aes.odin | 28
-rw-r--r--  core/crypto/_aes/ct64/api.odin | 96
-rw-r--r--  core/crypto/_aes/ct64/ct64.odin | 265
-rw-r--r--  core/crypto/_aes/ct64/ct64_dec.odin | 135
-rw-r--r--  core/crypto/_aes/ct64/ct64_enc.odin | 95
-rw-r--r--  core/crypto/_aes/ct64/ct64_keysched.odin | 179
-rw-r--r--  core/crypto/_aes/ct64/ghash.odin | 136
-rw-r--r--  core/crypto/_aes/ct64/helpers.odin | 75
-rw-r--r--  core/crypto/aes/aes.odin | 22
-rw-r--r--  core/crypto/aes/aes_ctr.odin | 199
-rw-r--r--  core/crypto/aes/aes_ecb.odin | 57
-rw-r--r--  core/crypto/aes/aes_gcm.odin | 253
-rw-r--r--  core/crypto/aes/aes_impl.odin | 41
-rw-r--r--  core/crypto/aes/aes_impl_hw_gen.odin | 43
-rw-r--r--  core/crypto/rand_darwin.odin | 4
-rw-r--r--  core/crypto/rand_linux.odin | 2
-rw-r--r--  core/crypto/rand_windows.odin | 20
-rw-r--r--  core/encoding/ansi/ansi.odin | 137
-rw-r--r--  core/encoding/ansi/doc.odin | 20
-rw-r--r--  core/encoding/cbor/cbor.odin | 4
-rw-r--r--  core/encoding/cbor/coding.odin | 107
-rw-r--r--  core/encoding/cbor/marshal.odin | 16
-rw-r--r--  core/encoding/cbor/unmarshal.odin | 102
-rw-r--r--  core/encoding/entity/entity.odin | 123
-rw-r--r--  core/encoding/hex/hex.odin | 11
-rw-r--r--  core/encoding/hxa/hxa.odin | 31
-rw-r--r--  core/encoding/hxa/read.odin | 42
-rw-r--r--  core/encoding/ini/ini.odin | 189
-rw-r--r--  core/encoding/json/marshal.odin | 4
-rw-r--r--  core/encoding/json/parser.odin | 65
-rw-r--r--  core/encoding/json/types.odin | 14
-rw-r--r--  core/encoding/xml/tokenizer.odin | 43
-rw-r--r--  core/encoding/xml/xml_reader.odin | 24
-rw-r--r--  core/fmt/fmt.odin | 26
-rw-r--r--  core/fmt/fmt_os.odin | 1
-rw-r--r--  core/image/bmp/bmp.odin | 746
-rw-r--r--  core/image/bmp/bmp_js.odin | 4
-rw-r--r--  core/image/bmp/bmp_os.odin | 34
-rw-r--r--  core/image/common.odin | 162
-rw-r--r--  core/image/png/png.odin | 66
-rw-r--r--  core/log/file_console_logger.odin | 41
-rw-r--r--  core/log/multi_logger.odin | 5
-rw-r--r--  core/math/big/combinatorics.odin | 60
-rw-r--r--  core/math/big/prime.odin | 3
-rw-r--r--  core/math/big/radix.odin | 1
-rw-r--r--  core/math/cmplx/cmplx_trig.odin | 2
-rw-r--r--  core/math/linalg/general.odin | 25
-rw-r--r--  core/math/linalg/specific.odin | 6
-rw-r--r--  core/math/linalg/specific_euler_angles_f16.odin | 2
-rw-r--r--  core/math/linalg/specific_euler_angles_f32.odin | 2
-rw-r--r--  core/math/linalg/specific_euler_angles_f64.odin | 2
-rw-r--r--  core/math/math.odin | 18
-rw-r--r--  core/math/math_gamma.odin | 6
-rw-r--r--  core/math/math_lgamma.odin | 14
-rw-r--r--  core/math/math_sincos.odin | 2
-rw-r--r--  core/math/rand/exp.odin | 6
-rw-r--r--  core/math/rand/normal.odin | 6
-rw-r--r--  core/mem/raw.odin | 15
-rw-r--r--  core/mem/rollback_stack_allocator.odin | 341
-rw-r--r--  core/mem/tlsf/LICENSE | 36
-rw-r--r--  core/mem/tlsf/tlsf.odin | 156
-rw-r--r--  core/mem/tlsf/tlsf_internal.odin | 738
-rw-r--r--  core/mem/tracking_allocator.odin | 14
-rw-r--r--  core/odin/ast/ast.odin | 4
-rw-r--r--  core/odin/ast/clone.odin | 2
-rw-r--r--  core/odin/ast/walk.odin | 1
-rw-r--r--  core/odin/parser/parser.odin | 47
-rw-r--r--  core/os/dir_windows.odin | 6
-rw-r--r--  core/os/os2/internal_util.odin | 2
-rw-r--r--  core/os/os_darwin.odin | 2
-rw-r--r--  core/os/os_freebsd.odin | 61
-rw-r--r--  core/os/os_netbsd.odin | 8
-rw-r--r--  core/path/filepath/path_unix.odin | 2
-rw-r--r--  core/simd/x86/aes.odin | 49
-rw-r--r--  core/slice/permute.odin | 105
-rw-r--r--  core/strconv/generic_float.odin | 2
-rw-r--r--  core/strconv/strconv.odin | 292
-rw-r--r--  core/strings/builder.odin | 8
-rw-r--r--  core/sync/futex_darwin.odin | 2
-rw-r--r--  core/sync/primitives_netbsd.odin | 8
-rw-r--r--  core/sys/info/platform_darwin.odin | 1
-rw-r--r--  core/sys/linux/sys.odin | 5
-rw-r--r--  core/sys/linux/types.odin | 9
-rw-r--r--  core/sys/unix/pthread_freebsd.odin | 4
-rw-r--r--  core/sys/unix/pthread_openbsd.odin | 4
-rw-r--r--  core/sys/unix/pthread_unix.odin | 1
-rwxr-xr-x [-rw-r--r--]  core/sys/windows/kernel32.odin | 17
-rw-r--r--  core/testing/events.odin | 48
-rw-r--r--  core/testing/logging.odin | 71
-rw-r--r--  core/testing/reporting.odin | 329
-rw-r--r--  core/testing/runner.odin | 822
-rw-r--r--  core/testing/runner_other.odin | 14
-rw-r--r--  core/testing/runner_windows.odin | 235
-rw-r--r--  core/testing/signal_handler.odin | 33
-rw-r--r--  core/testing/signal_handler_libc.odin | 149
-rw-r--r--  core/testing/signal_handler_other.odin | 19
-rw-r--r--  core/testing/testing.odin | 69
-rw-r--r--  core/text/i18n/qt_linguist.odin | 2
-rw-r--r--  core/thread/thread_pool.odin | 133
-rw-r--r--  core/thread/thread_unix.odin | 23
-rw-r--r--  core/time/datetime/datetime.odin | 4
-rw-r--r--  core/time/iso8601.odin (renamed from core/time/iso8061.odin) | 0
-rw-r--r--  core/time/time.odin | 1
-rw-r--r--  core/time/time_orca.odin | 24
-rw-r--r--  core/unicode/tables.odin | 10
-rw-r--r--  examples/all/all_main.odin | 2
-rw-r--r--  src/bug_report.cpp | 1
-rw-r--r--  src/build_settings.cpp | 64
-rw-r--r--  src/check_builtin.cpp | 343
-rw-r--r--  src/check_decl.cpp | 67
-rw-r--r--  src/check_expr.cpp | 65
-rw-r--r--  src/check_stmt.cpp | 30
-rw-r--r--  src/check_type.cpp | 28
-rw-r--r--  src/checker.cpp | 252
-rw-r--r--  src/checker.hpp | 24
-rw-r--r--  src/checker_builtin_procs.hpp | 4
-rw-r--r--  src/entity.cpp | 5
-rw-r--r--  src/error.cpp | 69
-rw-r--r--  src/gb/gb.h | 3
-rw-r--r--  src/linker.cpp | 34
-rw-r--r--  src/llvm_abi.cpp | 10
-rw-r--r--  src/llvm_backend.cpp | 264
-rw-r--r--  src/llvm_backend.hpp | 3
-rw-r--r--  src/llvm_backend_debug.cpp | 53
-rw-r--r--  src/llvm_backend_expr.cpp | 55
-rw-r--r--  src/llvm_backend_general.cpp | 7
-rw-r--r--  src/llvm_backend_proc.cpp | 15
-rw-r--r--  src/llvm_backend_stmt.cpp | 4
-rw-r--r--  src/llvm_backend_utility.cpp | 2
-rw-r--r--  src/main.cpp | 187
-rw-r--r--  src/parser.cpp | 94
-rw-r--r--  src/parser.hpp | 4
-rw-r--r--  src/parser_pos.cpp | 2
-rw-r--r--  src/threading.cpp | 2
-rw-r--r--  src/types.cpp | 19
-rw-r--r--  tests/benchmark/all.odin | 4
-rw-r--r--  tests/benchmark/crypto/benchmark_crypto.odin | 356
-rw-r--r--  tests/benchmark/hash/benchmark_hash.odin | 218
-rw-r--r--  tests/common/common.odin | 81
-rw-r--r--  tests/core/.gitignore | 1
-rw-r--r--  tests/core/Makefile | 106
-rw-r--r--  tests/core/assets/XML/attribute-whitespace.xml | 8
-rw-r--r--  tests/core/build.bat | 110
-rw-r--r--  tests/core/c/libc/test_core_libc.odin | 36
-rw-r--r--  tests/core/c/libc/test_core_libc_complex_pow.odin | 15
-rw-r--r--  tests/core/compress/test_core_compress.odin | 103
-rw-r--r--  tests/core/container/test_core_avl.odin | 73
-rw-r--r--  tests/core/container/test_core_container.odin | 26
-rw-r--r--  tests/core/container/test_core_rbtree.odin | 90
-rw-r--r--  tests/core/container/test_core_small_array.odin | 69
-rw-r--r--  tests/core/crypto/test_core_crypto.odin | 95
-rw-r--r--  tests/core/crypto/test_core_crypto_aes.odin | 446
-rw-r--r--  tests/core/crypto/test_core_crypto_ecc25519.odin | 293
-rw-r--r--  tests/core/crypto/test_core_crypto_hash.odin | 98
-rw-r--r--  tests/core/crypto/test_core_crypto_kdf.odin | 54
-rw-r--r--  tests/core/crypto/test_core_crypto_mac.odin | 80
-rw-r--r--  tests/core/crypto/test_core_crypto_sha3_variants.odin | 96
-rw-r--r--  tests/core/crypto/test_crypto_benchmark.odin | 301
-rw-r--r--  tests/core/download_assets.py | 93
-rw-r--r--  tests/core/encoding/base64/base64.odin | 65
-rw-r--r--  tests/core/encoding/cbor/test_core_cbor.odin | 341
-rw-r--r--  tests/core/encoding/hex/test_core_hex.odin | 94
-rw-r--r--  tests/core/encoding/hxa/test_core_hxa.odin | 202
-rw-r--r--  tests/core/encoding/json/test_core_json.odin | 88
-rw-r--r--  tests/core/encoding/varint/test_core_varint.odin | 84
-rw-r--r--  tests/core/encoding/xml/test_core_xml.odin | 248
-rw-r--r--  tests/core/fmt/test_core_fmt.odin | 164
-rw-r--r--  tests/core/hash/test_core_hash.odin | 308
-rw-r--r--  tests/core/hash/test_vectors_xxhash.odin | 6
-rw-r--r--  tests/core/image/build.bat | 4
-rw-r--r--  tests/core/image/test_core_image.odin | 901
-rw-r--r--  tests/core/math/big/build.bat | 2
-rw-r--r--  tests/core/math/big/test.odin | 2
-rw-r--r--  tests/core/math/big/test_core_math_big.odin | 37
-rw-r--r--  tests/core/math/linalg/glsl/test_linalg_glsl_math.odin | 20
-rw-r--r--  tests/core/math/noise/test_core_math_noise.odin | 195
-rw-r--r--  tests/core/math/test_core_math.odin | 258
-rw-r--r--  tests/core/mem/test_core_mem.odin | 41
-rw-r--r--  tests/core/net/test_core_net.odin | 277
-rw-r--r--  tests/core/normal.odin | 39
-rw-r--r--  tests/core/odin/test_parser.odin | 53
-rw-r--r--  tests/core/os/test_core_os_exit.odin | 10
-rw-r--r--  tests/core/path/filepath/test_core_filepath.odin | 73
-rw-r--r--  tests/core/reflect/test_core_reflect.odin | 87
-rw-r--r--  tests/core/runtime/test_core_runtime.odin | 43
-rw-r--r--  tests/core/slice/test_core_slice.odin | 164
-rw-r--r--  tests/core/speed.odin | 6
-rw-r--r--  tests/core/strconv/test_core_strconv.odin | 145
-rw-r--r--  tests/core/strings/test_core_strings.odin | 86
-rw-r--r--  tests/core/text/i18n/test_core_text_i18n.odin | 167
-rw-r--r--  tests/core/text/match/test_core_text_match.odin | 159
-rw-r--r--  tests/core/thread/test_core_thread.odin | 56
-rw-r--r--  tests/core/time/test_core_time.odin | 170
-rw-r--r--  tests/internal/Makefile | 23
-rw-r--r--  tests/internal/build.bat | 10
-rw-r--r--  tests/internal/test_128.odin | 52
-rw-r--r--  tests/internal/test_asan.odin | 47
-rw-r--r--  tests/internal/test_map.odin | 162
-rw-r--r--  tests/internal/test_pow.odin | 48
-rw-r--r--  tests/internal/test_rtti.odin | 71
-rw-r--r--  tests/internal/test_string_compare.odin | 62
-rw-r--r--  tests/issues/run.bat | 19
-rwxr-xr-x  tests/issues/run.sh | 26
-rw-r--r--  tests/issues/test_issue_1592.odin | 293
-rw-r--r--  tests/issues/test_issue_2056.odin | 5
-rw-r--r--  tests/issues/test_issue_2087.odin | 62
-rw-r--r--  tests/issues/test_issue_2395.odin | 2
-rw-r--r--  tests/issues/test_issue_2466.odin | 5
-rw-r--r--  tests/issues/test_issue_829.odin | 7
-rw-r--r--  tests/vendor/Makefile | 10
-rw-r--r--  tests/vendor/all.odin | 3
-rw-r--r--  tests/vendor/build.bat | 8
-rw-r--r--  tests/vendor/glfw/test_vendor_glfw.odin | 45
-rw-r--r--  vendor/cgltf/cgltf.odin | 22
-rw-r--r--  vendor/darwin/Metal/MetalClasses.odin | 8
-rw-r--r--  vendor/directx/d3d11/d3d11.odin | 2
-rw-r--r--  vendor/egl/egl.odin | 2
-rw-r--r--  vendor/microui/microui.odin | 13
-rw-r--r--  vendor/miniaudio/common.odin | 12
-rw-r--r--  vendor/miniaudio/data_conversion.odin | 6
-rw-r--r--  vendor/miniaudio/decoding.odin | 6
-rw-r--r--  vendor/miniaudio/device_io_procs.odin | 6
-rw-r--r--  vendor/miniaudio/effects.odin | 6
-rw-r--r--  vendor/miniaudio/encoding.odin | 6
-rw-r--r--  vendor/miniaudio/engine.odin | 6
-rw-r--r--  vendor/miniaudio/filtering.odin | 6
-rw-r--r--  vendor/miniaudio/generation.odin | 6
-rw-r--r--  vendor/miniaudio/job_queue.odin | 6
-rw-r--r--  vendor/miniaudio/logging.odin | 6
-rw-r--r--  vendor/miniaudio/node_graph.odin | 6
-rw-r--r--  vendor/miniaudio/resource_manager.odin | 6
-rw-r--r--  vendor/miniaudio/synchronization.odin | 6
-rw-r--r--  vendor/miniaudio/utilities.odin | 6
-rw-r--r--  vendor/miniaudio/vfs.odin | 6
-rw-r--r--  vendor/raylib/raygui.odin | 60
-rw-r--r--  vendor/raylib/raylib.odin | 106
-rw-r--r--  vendor/raylib/raymath.odin | 2
-rw-r--r--  vendor/raylib/rlgl.odin | 564
-rw-r--r--  vendor/raylib/rlgl/rlgl.odin | 581
-rw-r--r--  vendor/sdl2/sdl_render.odin | 2
-rw-r--r--  vendor/stb/image/stb_image.odin | 25
-rw-r--r--  vendor/stb/image/stb_image_resize.odin | 23
-rw-r--r--  vendor/stb/image/stb_image_write.odin | 21
-rw-r--r--  vendor/stb/rect_pack/stb_rect_pack.odin | 21
-rw-r--r--  vendor/stb/src/Makefile | 2
-rw-r--r--  vendor/stb/truetype/stb_truetype.odin | 22
-rw-r--r--  vendor/stb/vorbis/stb_vorbis.odin | 24
-rw-r--r--  vendor/wasm/js/runtime.js | 104
-rw-r--r--  vendor/x11/xlib/xlib_const.odin | 5
-rw-r--r--  vendor/x11/xlib/xlib_procs.odin | 1413
275 files changed, 14248 insertions, 7595 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ffb2077d1..c9c453331 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Build, Check, and Test
- timeout-minutes: 25
+ timeout-minutes: 15
uses: vmactions/netbsd-vm@v1
with:
release: "10.0"
@@ -24,157 +24,135 @@ jobs:
/usr/sbin/pkg_add https://github.com/andreas-jonsson/llvm17-netbsd-bin/releases/download/pkgsrc-current/llvm-17.0.6.tgz
/usr/sbin/pkg_add https://github.com/andreas-jonsson/llvm17-netbsd-bin/releases/download/pkgsrc-current/clang-17.0.6.tgz
ln -s /usr/pkg/bin/python3.11 /usr/bin/python3
- ln -s /usr/pkg/bin/bash /bin/bash
run: |
git config --global --add safe.directory $(pwd)
gmake release
./odin version
./odin report
+ gmake -C vendor/stb/src
+ gmake -C vendor/cgltf/src
+ gmake -C vendor/miniaudio/src
./odin check examples/all -vet -strict-style -target:netbsd_amd64
- (cd tests/core; gmake all_bsd)
- (cd tests/internal; gmake all_bsd)
+ ./odin check examples/all -vet -strict-style -target:netbsd_arm64
+ ./odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
+ ./odin test tests/core/speed.odin -file -all-packages -o:speed -define:ODIN_TEST_FANCY=false
+ ./odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
+ ./odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
(cd tests/issues; ./run.sh)
- build_linux:
- name: Ubuntu Build, Check, and Test
+ build_freebsd:
+ name: FreeBSD Build, Check, and Test
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1
- - name: Download LLVM
+ - uses: actions/checkout@v4
+ - name: Build, Check, and Test
+ timeout-minutes: 15
+ uses: vmactions/freebsd-vm@v1
+ with:
+ usesh: true
+ copyback: false
+ prepare: |
+ pkg install -y gmake git bash python3 libxml2 llvm17
+ run: |
+ # `set -e` is needed for test failures to register. https://github.com/vmactions/freebsd-vm/issues/72
+ set -e -x
+ git config --global --add safe.directory $(pwd)
+ gmake release
+ ./odin version
+ ./odin report
+ gmake -C vendor/stb/src
+ gmake -C vendor/cgltf/src
+ gmake -C vendor/miniaudio/src
+ ./odin check examples/all -vet -strict-style -target:freebsd_amd64
+ ./odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
+ ./odin test tests/core/speed.odin -file -all-packages -o:speed -define:ODIN_TEST_FANCY=false
+ ./odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
+ ./odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
+ (cd tests/issues; ./run.sh)
+ ci:
+ strategy:
+ fail-fast: false
+ matrix:
+ # MacOS 13 runs on Intel, 14 runs on ARM
+ os: [ubuntu-latest, macos-13, macos-14]
+ runs-on: ${{ matrix.os }}
+ name: ${{ matrix.os == 'macos-14' && 'MacOS ARM' || (matrix.os == 'macos-13' && 'MacOS Intel' || 'Ubuntu') }} Build, Check, and Test
+ timeout-minutes: 15
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Download LLVM (Linux)
+ if: matrix.os == 'ubuntu-latest'
run: |
wget https://apt.llvm.org/llvm.sh
chmod +x llvm.sh
sudo ./llvm.sh 17
echo "/usr/lib/llvm-17/bin" >> $GITHUB_PATH
- - name: build odin
+
+ - name: Download LLVM (MacOS Intel)
+ if: matrix.os == 'macos-13'
+ run: |
+ brew install llvm@17
+ echo "/usr/local/opt/llvm@17/bin" >> $GITHUB_PATH
+
+ - name: Download LLVM (MacOS ARM)
+ if: matrix.os == 'macos-14'
+ run: |
+ brew install llvm@17
+ echo "/opt/homebrew/opt/llvm@17/bin" >> $GITHUB_PATH
+
+ - name: Build Odin
run: ./build_odin.sh release
- name: Odin version
run: ./odin version
- timeout-minutes: 1
- name: Odin report
run: ./odin report
- timeout-minutes: 1
+ - name: Compile needed Vendor
+ run: |
+ make -C vendor/stb/src
+ make -C vendor/cgltf/src
+ make -C vendor/miniaudio/src
- name: Odin check
run: ./odin check examples/demo -vet
- timeout-minutes: 10
- name: Odin run
run: ./odin run examples/demo
- timeout-minutes: 10
- name: Odin run -debug
run: ./odin run examples/demo -debug
- timeout-minutes: 10
- name: Odin check examples/all
run: ./odin check examples/all -strict-style
- timeout-minutes: 10
- - name: Core library tests
- run: |
- cd tests/core
- make
- timeout-minutes: 10
+ - name: Normal Core library tests
+ run: ./odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
+ - name: Optimized Core library tests
+ run: ./odin test tests/core/speed.odin -o:speed -file -all-packages -define:ODIN_TEST_FANCY=false
- name: Vendor library tests
- run: |
- cd tests/vendor
- make
- timeout-minutes: 10
- - name: Odin internals tests
- run: |
- cd tests/internal
- make
- timeout-minutes: 10
+ run: ./odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
+ - name: Internals tests
+ run: ./odin test tests/internal -all-packages -define:ODIN_TEST_FANCY=false
+ - name: Core library benchmarks
+ run: ./odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
+ - name: GitHub Issue tests
+ run: |
+ cd tests/issues
+ ./run.sh
+
- name: Odin check examples/all for Linux i386
run: ./odin check examples/all -vet -strict-style -target:linux_i386
- timeout-minutes: 10
+ if: matrix.os == 'ubuntu-latest'
- name: Odin check examples/all for Linux arm64
run: ./odin check examples/all -vet -strict-style -target:linux_arm64
- timeout-minutes: 10
+ if: matrix.os == 'ubuntu-latest'
- name: Odin check examples/all for FreeBSD amd64
run: ./odin check examples/all -vet -strict-style -target:freebsd_amd64
- timeout-minutes: 10
+ if: matrix.os == 'ubuntu-latest'
- name: Odin check examples/all for OpenBSD amd64
run: ./odin check examples/all -vet -strict-style -target:openbsd_amd64
- timeout-minutes: 10
- build_macOS:
- name: MacOS Build, Check, and Test
- runs-on: macos-13
- steps:
- - uses: actions/checkout@v1
- - name: Download LLVM, and setup PATH
- run: |
- brew install llvm@17
- echo "/usr/local/opt/llvm@17/bin" >> $GITHUB_PATH
- - name: build odin
- run: ./build_odin.sh release
- - name: Odin version
- run: ./odin version
- timeout-minutes: 1
- - name: Odin report
- run: ./odin report
- timeout-minutes: 1
- - name: Odin check
- run: ./odin check examples/demo -vet
- timeout-minutes: 10
- - name: Odin run
- run: ./odin run examples/demo
- timeout-minutes: 10
- - name: Odin run -debug
- run: ./odin run examples/demo -debug
- timeout-minutes: 10
- - name: Odin check examples/all
- run: ./odin check examples/all -strict-style
- timeout-minutes: 10
- - name: Core library tests
- run: |
- cd tests/core
- make
- timeout-minutes: 10
- - name: Odin internals tests
- run: |
- cd tests/internal
- make
- timeout-minutes: 10
- build_macOS_arm:
- name: MacOS ARM Build, Check, and Test
- runs-on: macos-14 # This is an arm/m1 runner.
- steps:
- - uses: actions/checkout@v1
- - name: Download LLVM and setup PATH
- run: |
- brew install llvm@17
- echo "/opt/homebrew/opt/llvm@17/bin" >> $GITHUB_PATH
- - name: build odin
- run: ./build_odin.sh release
- - name: Odin version
- run: ./odin version
- timeout-minutes: 1
- - name: Odin report
- run: ./odin report
- timeout-minutes: 1
- - name: Odin check
- run: ./odin check examples/demo -vet
- timeout-minutes: 10
- - name: Odin run
- run: ./odin run examples/demo
- timeout-minutes: 10
- - name: Odin run -debug
- run: ./odin run examples/demo -debug
- timeout-minutes: 10
- - name: Odin check examples/all
- run: ./odin check examples/all -strict-style
- timeout-minutes: 10
- - name: Core library tests
- run: |
- cd tests/core
- make
- timeout-minutes: 10
- - name: Odin internals tests
- run: |
- cd tests/internal
- make
- timeout-minutes: 10
+ if: matrix.os == 'ubuntu-latest'
+
build_windows:
name: Windows Build, Check, and Test
runs-on: windows-2022
+ timeout-minutes: 15
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v4
- name: build Odin
shell: cmd
run: |
@@ -182,72 +160,67 @@ jobs:
./build.bat 1
- name: Odin version
run: ./odin version
- timeout-minutes: 1
- name: Odin report
run: ./odin report
- timeout-minutes: 1
- name: Odin check
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin check examples/demo -vet
- timeout-minutes: 10
- name: Odin run
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin run examples/demo
- timeout-minutes: 10
- name: Odin run -debug
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin run examples/demo -debug
- timeout-minutes: 10
- name: Odin check examples/all
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin check examples/all -strict-style
- timeout-minutes: 10
- name: Core library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
- cd tests\core
- call build.bat
- timeout-minutes: 10
+ odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
+ - name: Optimized core library tests
+ shell: cmd
+ run: |
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+ odin test tests/core/speed.odin -o:speed -file -all-packages -define:ODIN_TEST_FANCY=false
+ - name: Core library benchmarks
+ shell: cmd
+ run: |
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+ odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
- name: Vendor library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
- cd tests\vendor
- call build.bat
- timeout-minutes: 10
+ odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
- name: Odin internals tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
- cd tests\internal
- call build.bat
- timeout-minutes: 10
+ odin test tests/internal -all-packages -define:ODIN_TEST_FANCY=false
- name: Odin documentation tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\documentation
call build.bat
- timeout-minutes: 10
- name: core:math/big tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\core\math\big
call build.bat
- timeout-minutes: 10
- name: Odin check examples/all for Windows 32bits
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin check examples/all -strict-style -target:windows_i386
- timeout-minutes: 10
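The test steps above replace the old per-platform Makefile/build.bat harnesses with direct `odin test` invocations against the rewritten core/testing runner; `-all-packages` pulls in every package under the given path, and `-define:ODIN_TEST_FANCY=false` is presumably what disables the runner's animated terminal output so CI logs stay plain. As a rough sketch (package and test names here are hypothetical), a test picked up by these commands looks like:

package example_tests

import "core:testing"

@(test)
addition_works :: proc(t: ^testing.T) {
	// The runner collects every @(test) procedure in the selected packages.
	testing.expect_value(t, 2 + 2, 4)
}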
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index c9a2c821b..0857e99ad 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -11,7 +11,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: windows-2022
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v4
- name: build Odin
shell: cmd
run: |
@@ -45,7 +45,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v4
- name: (Linux) Download LLVM
run: |
wget https://apt.llvm.org/llvm.sh
@@ -79,7 +79,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: macos-13
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v4
- name: Download LLVM and setup PATH
run: |
brew install llvm@17 dylibbundler
@@ -113,7 +113,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: macos-14 # ARM machine
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v4
- name: Download LLVM and setup PATH
run: |
brew install llvm@17 dylibbundler
@@ -146,16 +146,16 @@ jobs:
runs-on: [ubuntu-latest]
needs: [build_windows, build_macos, build_macos_arm, build_ubuntu]
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v4
- uses: actions/setup-python@v2
with:
python-version: '3.8.x'
- - name: Install B2 CLI
+ - name: Install B2 SDK
shell: bash
run: |
python -m pip install --upgrade pip
- pip install --upgrade b2
+ pip install --upgrade b2sdk
- name: Display Python version
run: python -c "import sys; print(sys.version)"
@@ -188,24 +188,9 @@ jobs:
BUCKET: ${{ secrets.B2_BUCKET }}
DAYS_TO_KEEP: ${{ secrets.B2_DAYS_TO_KEEP }}
run: |
- echo Authorizing B2 account
- b2 authorize-account "$APPID" "$APPKEY"
-
- echo Uploading artifcates to B2
- chmod +x ./ci/upload_create_nightly.sh
- ./ci/upload_create_nightly.sh "$BUCKET" windows-amd64 windows_artifacts/
- ./ci/upload_create_nightly.sh "$BUCKET" ubuntu-amd64 ubuntu_artifacts/dist.zip
- ./ci/upload_create_nightly.sh "$BUCKET" macos-amd64 macos_artifacts/dist.zip
- ./ci/upload_create_nightly.sh "$BUCKET" macos-arm64 macos_arm_artifacts/dist.zip
-
- echo Deleting old artifacts in B2
- python3 ci/delete_old_binaries.py "$BUCKET" "$DAYS_TO_KEEP"
-
- echo Creating nightly.json
- python3 ci/create_nightly_json.py "$BUCKET" > nightly.json
-
- echo Uploading nightly.json
- b2 upload-file "$BUCKET" nightly.json nightly.json
-
- echo Clear B2 account info
- b2 clear-account
+ python3 ci/nightly.py artifact windows-amd64 windows_artifacts/
+ python3 ci/nightly.py artifact ubuntu-amd64 ubuntu_artifacts/dist.zip
+ python3 ci/nightly.py artifact macos-amd64 macos_artifacts/dist.zip
+ python3 ci/nightly.py artifact macos-arm64 macos_arm_artifacts/dist.zip
+ python3 ci/nightly.py prune
+ python3 ci/nightly.py json
diff --git a/base/builtin/builtin.odin b/base/builtin/builtin.odin
index 5cba3c8ea..c4a9b141f 100644
--- a/base/builtin/builtin.odin
+++ b/base/builtin/builtin.odin
@@ -126,3 +126,5 @@ clamp :: proc(value, minimum, maximum: T) -> T ---
soa_zip :: proc(slices: ...) -> #soa[]Struct ---
soa_unzip :: proc(value: $S/#soa[]$E) -> (slices: ...) ---
+
+unreachable :: proc() -> ! ---
diff --git a/base/intrinsics/intrinsics.odin b/base/intrinsics/intrinsics.odin
index 8873f3bbc..4f6fa2713 100644
--- a/base/intrinsics/intrinsics.odin
+++ b/base/intrinsics/intrinsics.odin
@@ -295,6 +295,10 @@ simd_rotate_right :: proc(a: #simd[N]T, $offset: int) -> #simd[N]T ---
// if all listed features are supported.
has_target_feature :: proc($test: $T) -> bool where type_is_string(T) || type_is_proc(T) ---
+
+// Returns the value of the procedure where `x` must be a call expression
+procedure_of :: proc(x: $T) -> T where type_is_proc(T) ---
+
// WASM targets only
wasm_memory_grow :: proc(index, delta: uintptr) -> int ---
wasm_memory_size :: proc(index: uintptr) -> int ---
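The new `procedure_of` intrinsic takes a call expression and yields the concrete procedure that the call resolves to, which is mainly useful with procedure groups or polymorphic procedures. A minimal sketch, assuming the usual group-resolution rules (all names below are illustrative):

package example

import "base:intrinsics"

add_int :: proc(a, b: int) -> int { return a + b }
add_f64 :: proc(a, b: f64) -> f64 { return a + b }
add     :: proc{add_int, add_f64}

main :: proc() {
	// The group call `add(1, 2)` resolves to `add_int`; `p` is that procedure value.
	p := intrinsics.procedure_of(add(1, 2))
	assert(p == add_int)
}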
diff --git a/base/runtime/core.odin b/base/runtime/core.odin
index 66099e787..8671920f5 100644
--- a/base/runtime/core.odin
+++ b/base/runtime/core.odin
@@ -470,6 +470,15 @@ Raw_Soa_Pointer :: struct {
index: int,
}
+Raw_Complex32 :: struct {real, imag: f16}
+Raw_Complex64 :: struct {real, imag: f32}
+Raw_Complex128 :: struct {real, imag: f64}
+Raw_Quaternion64 :: struct {imag, jmag, kmag: f16, real: f16}
+Raw_Quaternion128 :: struct {imag, jmag, kmag: f32, real: f32}
+Raw_Quaternion256 :: struct {imag, jmag, kmag: f64, real: f64}
+Raw_Quaternion64_Vector_Scalar :: struct {vector: [3]f16, scalar: f16}
+Raw_Quaternion128_Vector_Scalar :: struct {vector: [3]f32, scalar: f32}
+Raw_Quaternion256_Vector_Scalar :: struct {vector: [3]f64, scalar: f64}
/*
@@ -481,7 +490,9 @@ Raw_Soa_Pointer :: struct {
Linux,
Essence,
FreeBSD,
+ Haiku,
OpenBSD,
+ NetBSD,
WASI,
JS,
Freestanding,
@@ -508,6 +519,7 @@ Odin_Arch_Type :: type_of(ODIN_ARCH)
Odin_Build_Mode_Type :: enum int {
Executable,
Dynamic,
+ Static,
Object,
Assembly,
LLVM_IR,
@@ -548,6 +560,19 @@ Odin_Platform_Subtarget_Type :: type_of(ODIN_PLATFORM_SUBTARGET)
*/
Odin_Sanitizer_Flags :: type_of(ODIN_SANITIZER_FLAGS)
+/*
+ // Defined internally by the compiler
+ Odin_Optimization_Mode :: enum int {
+ None = -1,
+ Minimal = 0,
+ Size = 1,
+ Speed = 2,
+ Aggressive = 3,
+ }
+
+ ODIN_OPTIMIZATION_MODE // is a constant
+*/
+Odin_Optimization_Mode :: type_of(ODIN_OPTIMIZATION_MODE)
/////////////////////////////
// Init Startup Procedures //
@@ -689,7 +714,7 @@ default_assertion_failure_proc :: proc(prefix, message: string, loc: Source_Code
when ODIN_OS == .Freestanding {
// Do nothing
} else {
- when !ODIN_DISABLE_ASSERT {
+ when ODIN_OS != .Orca && !ODIN_DISABLE_ASSERT {
print_caller_location(loc)
print_string(" ")
}
@@ -698,7 +723,18 @@ default_assertion_failure_proc :: proc(prefix, message: string, loc: Source_Code
print_string(": ")
print_string(message)
}
- print_byte('\n')
+
+ when ODIN_OS == .Orca {
+ assert_fail(
+ cstring(raw_data(loc.file_path)),
+ cstring(raw_data(loc.procedure)),
+ loc.line,
+ "",
+ cstring(raw_data(orca_stderr_buffer[:orca_stderr_buffer_idx])),
+ )
+ } else {
+ print_byte('\n')
+ }
}
trap()
}
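Because `ODIN_OPTIMIZATION_MODE` (declared via `Odin_Optimization_Mode` earlier in this file) is a constant mirroring the `-o:` flag, it can drive `when` blocks the same way `ODIN_OS` does elsewhere in this diff. A small sketch, with made-up buffer sizes:

package example

when ODIN_OPTIMIZATION_MODE == .Speed || ODIN_OPTIMIZATION_MODE == .Aggressive {
	SCRATCH_SIZE :: 1 << 20 // spend more memory in optimized builds
} else {
	SCRATCH_SIZE :: 1 << 16
}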
diff --git a/base/runtime/core_builtin.odin b/base/runtime/core_builtin.odin
index 00c30d3fd..a9566c831 100644
--- a/base/runtime/core_builtin.odin
+++ b/base/runtime/core_builtin.odin
@@ -383,7 +383,7 @@ clear_map :: proc "contextless" (m: ^$T/map[$K]$V) {
//
// Note: Prefer the procedure group `reserve`
@builtin
-reserve_map :: proc(m: ^$T/map[$K]$V, capacity: int, loc := #caller_location) -> Allocator_Error {
+reserve_map :: proc(m: ^$T/map[$K]$V, #any_int capacity: int, loc := #caller_location) -> Allocator_Error {
return __dynamic_map_reserve((^Raw_Map)(m), map_info(T), uint(capacity), loc) if m != nil else nil
}
@@ -721,12 +721,12 @@ _reserve_dynamic_array :: #force_inline proc(array: ^$T/[dynamic]$E, capacity: i
}
@builtin
-reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #caller_location) -> Allocator_Error {
+reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int capacity: int, loc := #caller_location) -> Allocator_Error {
return _reserve_dynamic_array(array, capacity, true, loc)
}
@builtin
-non_zero_reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #caller_location) -> Allocator_Error {
+non_zero_reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int capacity: int, loc := #caller_location) -> Allocator_Error {
return _reserve_dynamic_array(array, capacity, false, loc)
}
@@ -773,12 +773,12 @@ _resize_dynamic_array :: #force_inline proc(array: ^$T/[dynamic]$E, length: int,
}
@builtin
-resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller_location) -> Allocator_Error {
+resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int length: int, loc := #caller_location) -> Allocator_Error {
return _resize_dynamic_array(array, length, true, loc=loc)
}
@builtin
-non_zero_resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller_location) -> Allocator_Error {
+non_zero_resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int length: int, loc := #caller_location) -> Allocator_Error {
return _resize_dynamic_array(array, length, false, loc=loc)
}
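Adding `#any_int` to the capacity and length parameters lets callers pass any integer type, with the conversion happening at the call site. A sketch of what that enables, using a hypothetical helper whose sizes arrive as `u32`:

package example

grow_to :: proc(buf: ^[dynamic]byte, n: u32) {
	// No `int(n)` casts are needed once reserve/resize accept #any_int sizes.
	reserve(buf, n)
	resize(buf, n)
}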
diff --git a/base/runtime/default_allocators_general.odin b/base/runtime/default_allocators_general.odin
index ab4dd1db8..64af6c904 100644
--- a/base/runtime/default_allocators_general.odin
+++ b/base/runtime/default_allocators_general.odin
@@ -6,7 +6,7 @@ when ODIN_DEFAULT_TO_NIL_ALLOCATOR {
} else when ODIN_DEFAULT_TO_PANIC_ALLOCATOR {
default_allocator_proc :: panic_allocator_proc
default_allocator :: panic_allocator
-} else when ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32 {
+} else when ODIN_OS != .Orca && (ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32) {
default_allocator :: default_wasm_allocator
default_allocator_proc :: wasm_allocator_proc
} else {
diff --git a/base/runtime/docs.odin b/base/runtime/docs.odin
index 865eeb9ef..f6b439aa0 100644
--- a/base/runtime/docs.odin
+++ b/base/runtime/docs.odin
@@ -157,7 +157,7 @@ __dynamic_map_get // dynamic map calls
__dynamic_map_set // dynamic map calls
-## Dynamic literals ([dymamic]T and map[K]V) (can be disabled with -no-dynamic-literals)
+## Dynamic literals ([dynamic]T and map[K]V) (can be disabled with -no-dynamic-literals)
__dynamic_array_reserve
__dynamic_array_append
diff --git a/base/runtime/entry_wasm.odin b/base/runtime/entry_wasm.odin
index c608942ba..a24c6f4b7 100644
--- a/base/runtime/entry_wasm.odin
+++ b/base/runtime/entry_wasm.odin
@@ -6,15 +6,29 @@ package runtime
import "base:intrinsics"
when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
- @(link_name="_start", linkage="strong", require, export)
- _start :: proc "c" () {
- context = default_context()
- #force_no_inline _startup_runtime()
- intrinsics.__entry_point()
+ when ODIN_OS == .Orca {
+ @(linkage="strong", require, export)
+ oc_on_init :: proc "c" () {
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ }
+ @(linkage="strong", require, export)
+ oc_on_terminate :: proc "c" () {
+ context = default_context()
+ #force_no_inline _cleanup_runtime()
+ }
+ } else {
+ @(link_name="_start", linkage="strong", require, export)
+ _start :: proc "c" () {
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ }
+ @(link_name="_end", linkage="strong", require, export)
+ _end :: proc "c" () {
+ context = default_context()
+ #force_no_inline _cleanup_runtime()
+ }
}
- @(link_name="_end", linkage="strong", require, export)
- _end :: proc "c" () {
- context = default_context()
- #force_no_inline _cleanup_runtime()
- }
-}
\ No newline at end of file
+}
diff --git a/base/runtime/error_checks.odin b/base/runtime/error_checks.odin
index 742e06a71..32a895c3f 100644
--- a/base/runtime/error_checks.odin
+++ b/base/runtime/error_checks.odin
@@ -4,6 +4,8 @@ package runtime
bounds_trap :: proc "contextless" () -> ! {
when ODIN_OS == .Windows {
windows_trap_array_bounds()
+ } else when ODIN_OS == .Orca {
+ abort_ext("", "", 0, "bounds trap")
} else {
trap()
}
@@ -13,6 +15,8 @@ bounds_trap :: proc "contextless" () -> ! {
type_assertion_trap :: proc "contextless" () -> ! {
when ODIN_OS == .Windows {
windows_trap_type_assertion()
+ } else when ODIN_OS == .Orca {
+ abort_ext("", "", 0, "type assertion trap")
} else {
trap()
}
diff --git a/base/runtime/heap_allocator_orca.odin b/base/runtime/heap_allocator_orca.odin
new file mode 100644
index 000000000..c22a67ca1
--- /dev/null
+++ b/base/runtime/heap_allocator_orca.odin
@@ -0,0 +1,29 @@
+//+build orca
+//+private
+package runtime
+
+foreign {
+ @(link_name="malloc") _orca_malloc :: proc "c" (size: int) -> rawptr ---
+ @(link_name="calloc") _orca_calloc :: proc "c" (num, size: int) -> rawptr ---
+ @(link_name="free") _orca_free :: proc "c" (ptr: rawptr) ---
+ @(link_name="realloc") _orca_realloc :: proc "c" (ptr: rawptr, size: int) -> rawptr ---
+}
+
+_heap_alloc :: proc(size: int, zero_memory := true) -> rawptr {
+ if size <= 0 {
+ return nil
+ }
+ if zero_memory {
+ return _orca_calloc(1, size)
+ } else {
+ return _orca_malloc(size)
+ }
+}
+
+_heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+ return _orca_realloc(ptr, new_size)
+}
+
+_heap_free :: proc(ptr: rawptr) {
+ _orca_free(ptr)
+}
diff --git a/base/runtime/heap_allocator_other.odin b/base/runtime/heap_allocator_other.odin
index 45049c7e9..74536ada9 100644
--- a/base/runtime/heap_allocator_other.odin
+++ b/base/runtime/heap_allocator_other.odin
@@ -12,4 +12,4 @@ _heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
_heap_free :: proc(ptr: rawptr) {
unimplemented("base:runtime 'heap_free' procedure is not supported on this platform")
-}
\ No newline at end of file
+}
diff --git a/base/runtime/internal.odin b/base/runtime/internal.odin
index 8e1b3d633..378eea256 100644
--- a/base/runtime/internal.odin
+++ b/base/runtime/internal.odin
@@ -483,7 +483,7 @@ quaternion256_ne :: #force_inline proc "contextless" (a, b: quaternion256) -> bo
string_decode_rune :: #force_inline proc "contextless" (s: string) -> (rune, int) {
// NOTE(bill): Duplicated here to remove dependency on package unicode/utf8
- @static accept_sizes := [256]u8{
+ @(static, rodata) accept_sizes := [256]u8{
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x00-0x0f
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x10-0x1f
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x20-0x2f
@@ -504,7 +504,7 @@ string_decode_rune :: #force_inline proc "contextless" (s: string) -> (rune, int
}
Accept_Range :: struct {lo, hi: u8}
- @static accept_ranges := [5]Accept_Range{
+ @(static, rodata) accept_ranges := [5]Accept_Range{
{0x80, 0xbf},
{0xa0, 0xbf},
{0x80, 0x9f},
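Marking the lookup tables `@(static, rodata)` keeps them as function-local statics while placing them in read-only data, so a stray write traps instead of silently corrupting the table. The same pattern in miniature (table contents are illustrative):

package example

nibble_parity :: proc(b: byte) -> u8 {
	// Read-only static table, analogous to accept_sizes/accept_ranges above.
	@(static, rodata) parity := [4]u8{0, 1, 1, 0}
	return parity[b & 3]
}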
diff --git a/base/runtime/os_specific_orca.odin b/base/runtime/os_specific_orca.odin
new file mode 100644
index 000000000..b6f5930ab
--- /dev/null
+++ b/base/runtime/os_specific_orca.odin
@@ -0,0 +1,43 @@
+//+build orca
+//+private
+package runtime
+
+import "base:intrinsics"
+
+// Constants allowing to specify the level of logging verbosity.
+log_level :: enum u32 {
+ // Only errors are logged.
+ ERROR = 0,
+ // Only warnings and errors are logged.
+ WARNING = 1,
+ // All messages are logged.
+ INFO = 2,
+ COUNT = 3,
+}
+
+@(default_calling_convention="c", link_prefix="oc_")
+foreign {
+ abort_ext :: proc(file: cstring, function: cstring, line: i32, fmt: cstring, #c_vararg args: ..any) -> ! ---
+ assert_fail :: proc(file: cstring, function: cstring, line: i32, src: cstring, fmt: cstring, #c_vararg args: ..any) -> ! ---
+ log_ext :: proc(level: log_level, function: cstring, file: cstring, line: i32, fmt: cstring, #c_vararg args: ..any) ---
+}
+
+// NOTE: This is all pretty gross, don't look.
+
+// WASM is single threaded so this should be fine.
+orca_stderr_buffer: [4096]byte
+orca_stderr_buffer_idx: int
+
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+ for b in data {
+ orca_stderr_buffer[orca_stderr_buffer_idx] = b
+ orca_stderr_buffer_idx += 1
+
+ if b == '\n' || orca_stderr_buffer_idx == len(orca_stderr_buffer)-1 {
+ log_ext(.ERROR, "", "", 0, cstring(raw_data(orca_stderr_buffer[:orca_stderr_buffer_idx])))
+ orca_stderr_buffer_idx = 0
+ }
+ }
+
+ return len(data), 0
+}
diff --git a/base/runtime/procs.odin b/base/runtime/procs.odin
index 454574c35..002a6501f 100644
--- a/base/runtime/procs.odin
+++ b/base/runtime/procs.odin
@@ -25,13 +25,19 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
RtlMoveMemory(dst, src, len)
return dst
}
-} else when ODIN_NO_CRT || (ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32) {
+} else when ODIN_NO_CRT || (ODIN_OS != .Orca && (ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32)) {
+ // NOTE: on wasm, calls to these procs are generated (by LLVM) with type `i32` instead of `int`.
+ //
+ // NOTE: `#any_int` is also needed, because calls that we generate (and package code)
+ // will be using `int` and need to be converted.
+ int_t :: i32 when ODIN_ARCH == .wasm64p32 else int
+
@(link_name="memset", linkage="strong", require)
- memset :: proc "c" (ptr: rawptr, val: i32, len: int) -> rawptr {
+ memset :: proc "c" (ptr: rawptr, val: i32, #any_int len: int_t) -> rawptr {
if ptr != nil && len != 0 {
b := byte(val)
p := ([^]byte)(ptr)
- for i := 0; i < len; i += 1 {
+ for i := int_t(0); i < len; i += 1 {
p[i] = b
}
}
@@ -39,10 +45,10 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
@(link_name="bzero", linkage="strong", require)
- bzero :: proc "c" (ptr: rawptr, len: int) -> rawptr {
+ bzero :: proc "c" (ptr: rawptr, #any_int len: int_t) -> rawptr {
if ptr != nil && len != 0 {
p := ([^]byte)(ptr)
- for i := 0; i < len; i += 1 {
+ for i := int_t(0); i < len; i += 1 {
p[i] = 0
}
}
@@ -50,7 +56,7 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
@(link_name="memmove", linkage="strong", require)
- memmove :: proc "c" (dst, src: rawptr, len: int) -> rawptr {
+ memmove :: proc "c" (dst, src: rawptr, #any_int len: int_t) -> rawptr {
d, s := ([^]byte)(dst), ([^]byte)(src)
if d == s || len == 0 {
return dst
@@ -63,7 +69,7 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
if s > d && uintptr(s)-uintptr(d) < uintptr(len) {
- for i := 0; i < len; i += 1 {
+ for i := int_t(0); i < len; i += 1 {
d[i] = s[i]
}
return dst
@@ -71,10 +77,10 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
return memcpy(dst, src, len)
}
@(link_name="memcpy", linkage="strong", require)
- memcpy :: proc "c" (dst, src: rawptr, len: int) -> rawptr {
+ memcpy :: proc "c" (dst, src: rawptr, #any_int len: int_t) -> rawptr {
d, s := ([^]byte)(dst), ([^]byte)(src)
if d != s {
- for i := 0; i < len; i += 1 {
+ for i := int_t(0); i < len; i += 1 {
d[i] = s[i]
}
}
@@ -92,4 +98,4 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
return ptr
}
-}
\ No newline at end of file
+}
diff --git a/base/runtime/wasm_allocator.odin b/base/runtime/wasm_allocator.odin
index acfc80b0a..6bca0b3d6 100644
--- a/base/runtime/wasm_allocator.odin
+++ b/base/runtime/wasm_allocator.odin
@@ -7,20 +7,20 @@ import "base:intrinsics"
Port of emmalloc, modified for use in Odin.
Invariants:
- - Per-allocation header overhead is 8 bytes, smallest allocated payload
- amount is 8 bytes, and a multiple of 4 bytes.
- - Acquired memory blocks are subdivided into disjoint regions that lie
- next to each other.
- - A region is either in used or free.
- Used regions may be adjacent, and a used and unused region
- may be adjacent, but not two unused ones - they would be
- merged.
- - Memory allocation takes constant time, unless the alloc needs to wasm_memory_grow()
- or memory is very close to being exhausted.
- - Free and used regions are managed inside "root regions", which are slabs
- of memory acquired via wasm_memory_grow().
- - Memory retrieved using wasm_memory_grow() can not be given back to the OS.
- Therefore, frees are internal to the allocator.
+ - Per-allocation header overhead is 8 bytes, smallest allocated payload
+ amount is 8 bytes, and a multiple of 4 bytes.
+ - Acquired memory blocks are subdivided into disjoint regions that lie
+ next to each other.
+ - A region is either in used or free.
+ Used regions may be adjacent, and a used and unused region
+ may be adjacent, but not two unused ones - they would be
+ merged.
+ - Memory allocation takes constant time, unless the alloc needs to wasm_memory_grow()
+ or memory is very close to being exhausted.
+ - Free and used regions are managed inside "root regions", which are slabs
+ of memory acquired via wasm_memory_grow().
+ - Memory retrieved using wasm_memory_grow() can not be given back to the OS.
+ Therefore, frees are internal to the allocator.
Copyright (c) 2010-2014 Emscripten authors, see AUTHORS file.
diff --git a/build_odin.sh b/build_odin.sh
index ec65bb49d..d2f865e24 100755
--- a/build_odin.sh
+++ b/build_odin.sh
@@ -71,8 +71,8 @@ Darwin)
fi
darwin_sysroot=
- if [ $(which xcode-select) ]; then
- darwin_sysroot="--sysroot $(xcode-select -p)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk"
+ if [ $(which xcrun) ]; then
+ darwin_sysroot="--sysroot $(xcrun --sdk macosx --show-sdk-path)"
elif [[ -e "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk" ]]; then
darwin_sysroot="--sysroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk"
else
diff --git a/ci/create_nightly_json.py b/ci/create_nightly_json.py
deleted file mode 100644
index 2ad086a82..000000000
--- a/ci/create_nightly_json.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import subprocess
-import sys
-import json
-import datetime
-import urllib.parse
-import sys
-
-def main():
- files_by_date = {}
- bucket = sys.argv[1]
-
- files_lines = execute_cli(f"b2 ls --long {bucket} nightly").split("\n")
- for x in files_lines:
- parts = x.split(" ", 1)
- if parts[0]:
- json_str = execute_cli(f"b2 get-file-info {parts[0]}")
- data = json.loads(json_str)
- name = remove_prefix(data['fileName'], "nightly/")
- url = f"https://f001.backblazeb2.com/file/{bucket}/nightly/{urllib.parse.quote_plus(name)}"
- sha1 = data['contentSha1']
- size = int(data['size'])
- ts = int(data['fileInfo']['src_last_modified_millis'])
- date = datetime.datetime.fromtimestamp(ts/1000).strftime('%Y-%m-%d')
-
- if date not in files_by_date.keys():
- files_by_date[date] = []
-
- files_by_date[date].append({
- 'name': name,
- 'url': url,
- 'sha1': sha1,
- 'sizeInBytes': size,
- })
-
- now = datetime.datetime.utcnow().isoformat()
-
- print(json.dumps({
- 'last_updated' : now,
- 'files': files_by_date
- }, sort_keys=True, indent=4))
-
-def remove_prefix(text, prefix):
- return text[text.startswith(prefix) and len(prefix):]
-
-def execute_cli(command):
- sb = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
- return sb.stdout.read().decode("utf-8");
-
-if __name__ == '__main__':
- sys.exit(main())
-
diff --git a/ci/delete_old_binaries.py b/ci/delete_old_binaries.py
deleted file mode 100644
index 206d849f5..000000000
--- a/ci/delete_old_binaries.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import subprocess
-import sys
-import json
-import datetime
-import urllib.parse
-import sys
-
-def main():
- files_by_date = {}
- bucket = sys.argv[1]
- days_to_keep = int(sys.argv[2])
- print(f"Looking for binaries to delete older than {days_to_keep} days")
-
- files_lines = execute_cli(f"b2 ls --long --versions {bucket} nightly").split("\n")
- for x in files_lines:
- parts = [y for y in x.split(' ') if y]
-
- if parts and parts[0]:
- date = datetime.datetime.strptime(parts[2], '%Y-%m-%d').replace(hour=0, minute=0, second=0, microsecond=0)
- now = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
- delta = now - date
-
- if delta.days > days_to_keep:
- print(f'Deleting {parts[5]}')
- execute_cli(f'b2 delete-file-version {parts[0]}')
-
-
-def execute_cli(command):
- sb = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
- return sb.stdout.read().decode("utf-8");
-
-if __name__ == '__main__':
- sys.exit(main())
-
diff --git a/ci/nightly.py b/ci/nightly.py
new file mode 100644
index 000000000..7bd32899d
--- /dev/null
+++ b/ci/nightly.py
@@ -0,0 +1,140 @@
+import os
+import sys
+from zipfile import ZipFile, ZIP_DEFLATED
+from b2sdk.v2 import InMemoryAccountInfo, B2Api
+from datetime import datetime
+import json
+
+UPLOAD_FOLDER = "nightly/"
+
+info = InMemoryAccountInfo()
+b2_api = B2Api(info)
+application_key_id = os.environ['APPID']
+application_key = os.environ['APPKEY']
+bucket_name = os.environ['BUCKET']
+days_to_keep = os.environ['DAYS_TO_KEEP']
+
+def auth() -> bool:
+ try:
+ realm = b2_api.account_info.get_realm()
+ return True # Already authenticated
+ except:
+ pass # Not yet authenticated
+
+ err = b2_api.authorize_account("production", application_key_id, application_key)
+ return err == None
+
+def get_bucket():
+ if not auth(): sys.exit(1)
+ return b2_api.get_bucket_by_name(bucket_name)
+
+def remove_prefix(text: str, prefix: str) -> str:
+ return text[text.startswith(prefix) and len(prefix):]
+
+def create_and_upload_artifact_zip(platform: str, artifact: str) -> int:
+ now = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
+ destination_zip_name = "odin-{}-nightly+{}.zip".format(platform, now.strftime("%Y-%m-%d"))
+
+ source_zip_name = artifact
+ if not artifact.endswith(".zip"):
+ print(f"Creating archive {destination_zip_name} from {artifact} and uploading to {bucket_name}")
+
+ source_zip_name = destination_zip_name
+ with ZipFile(source_zip_name, mode='w', compression=ZIP_DEFLATED, compresslevel=9) as z:
+ for root, directory, filenames in os.walk(artifact):
+ for file in filenames:
+ file_path = os.path.join(root, file)
+ zip_path = os.path.join("dist", os.path.relpath(file_path, artifact))
+ z.write(file_path, zip_path)
+
+ if not os.path.exists(source_zip_name):
+ print(f"Error: Newly created ZIP archive {source_zip_name} not found.")
+ return 1
+
+ print("Uploading {} to {}".format(source_zip_name, UPLOAD_FOLDER + destination_zip_name))
+ bucket = get_bucket()
+ res = bucket.upload_local_file(
+ source_zip_name, # Local file to upload
+ "nightly/" + destination_zip_name, # B2 destination path
+ )
+ return 0
+
+def prune_artifacts():
+ print(f"Looking for binaries to delete older than {days_to_keep} days")
+
+ bucket = get_bucket()
+ for file, _ in bucket.ls(UPLOAD_FOLDER, latest_only=False):
+ # Timestamp is in milliseconds
+ date = datetime.fromtimestamp(file.upload_timestamp / 1_000.0).replace(hour=0, minute=0, second=0, microsecond=0)
+ now = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
+ delta = now - date
+
+ if delta.days > int(days_to_keep):
+ print("Deleting {}".format(file.file_name))
+ file.delete()
+
+ return 0
+
+def update_nightly_json():
+ print(f"Updating nightly.json with files {days_to_keep} days or newer")
+
+ files_by_date = {}
+
+ bucket = get_bucket()
+
+ for file, _ in bucket.ls(UPLOAD_FOLDER, latest_only=True):
+ # Timestamp is in milliseconds
+ date = datetime.fromtimestamp(file.upload_timestamp / 1_000.0).replace(hour=0, minute=0, second=0, microsecond=0).strftime('%Y-%m-%d')
+ name = remove_prefix(file.file_name, UPLOAD_FOLDER)
+ sha1 = file.content_sha1
+ size = file.size
+ url = bucket.get_download_url(file.file_name)
+
+ if date not in files_by_date.keys():
+ files_by_date[date] = []
+
+ files_by_date[date].append({
+ 'name': name,
+ 'url': url,
+ 'sha1': sha1,
+ 'sizeInBytes': size,
+ })
+
+ now = datetime.utcnow().isoformat()
+
+ nightly = json.dumps({
+ 'last_updated' : now,
+ 'files': files_by_date
+ }, sort_keys=True, indent=4, ensure_ascii=False).encode('utf-8')
+
+ res = bucket.upload_bytes(
+ nightly, # JSON bytes
+ "nightly.json", # B2 destination path
+ )
+ return 0
+
+if __name__ == "__main__":
+ if len(sys.argv) == 1:
+ print("Usage: {} <verb> [arguments]".format(sys.argv[0]))
+ print("\tartifact <platform prefix> <artifact path>\n\t\tCreates and uploads a platform artifact zip.")
+ print("\tprune\n\t\tDeletes old artifacts from bucket")
+ print("\tjson\n\t\tUpdate and upload nightly.json")
+ sys.exit(1)
+ else:
+ command = sys.argv[1].lower()
+ if command == "artifact":
+ if len(sys.argv) != 4:
+ print("Usage: {} artifact <platform prefix> <artifact path>".format(sys.argv[0]))
+ print("Error: Expected artifact command to be given platform prefix and artifact path.\n")
+ sys.exit(1)
+
+ res = create_and_upload_artifact_zip(sys.argv[2], sys.argv[3])
+ sys.exit(res)
+
+ elif command == "prune":
+ res = prune_artifacts()
+ sys.exit(res)
+
+ elif command == "json":
+ res = update_nightly_json()
+ sys.exit(res) \ No newline at end of file
diff --git a/ci/upload_create_nightly.sh b/ci/upload_create_nightly.sh
deleted file mode 100755
index 065cb13bf..000000000
--- a/ci/upload_create_nightly.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-set -e
-
-bucket=$1
-platform=$2
-artifact=$3
-
-now=$(date +'%Y-%m-%d')
-filename="odin-$platform-nightly+$now.zip"
-
-echo "Creating archive $filename from $artifact and uploading to $bucket"
-
-# If this is already zipped up (done before artifact upload to keep permissions in tact), just move it.
-if [ "${artifact: -4}" == ".zip" ]
-then
- echo "Artifact already a zip"
- mkdir -p "output"
- mv "$artifact" "output/$filename"
-else
- echo "Artifact needs to be zipped"
- 7z a -bd "output/$filename" -r "$artifact"
-fi
-
-b2 upload-file --noProgress "$bucket" "output/$filename" "nightly/$filename"
diff --git a/core/bufio/reader.odin b/core/bufio/reader.odin
index 8ec736a66..a875c732d 100644
--- a/core/bufio/reader.odin
+++ b/core/bufio/reader.odin
@@ -29,12 +29,12 @@ MIN_READ_BUFFER_SIZE :: 16
@(private)
DEFAULT_MAX_CONSECUTIVE_EMPTY_READS :: 128
-reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
+reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator, loc := #caller_location) {
size := size
size = max(size, MIN_READ_BUFFER_SIZE)
reader_reset(b, rd)
b.buf_allocator = allocator
- b.buf = make([]byte, size, allocator)
+ b.buf = make([]byte, size, allocator, loc)
}
reader_init_with_buf :: proc(b: ^Reader, rd: io.Reader, buf: []byte) {
diff --git a/core/bytes/buffer.odin b/core/bytes/buffer.odin
index cb2ef9c62..a7e9b1c64 100644
--- a/core/bytes/buffer.odin
+++ b/core/bytes/buffer.odin
@@ -27,19 +27,19 @@ Read_Op :: enum i8 {
}
-buffer_init :: proc(b: ^Buffer, buf: []byte) {
- resize(&b.buf, len(buf))
+buffer_init :: proc(b: ^Buffer, buf: []byte, loc := #caller_location) {
+ resize(&b.buf, len(buf), loc=loc)
copy(b.buf[:], buf)
}
-buffer_init_string :: proc(b: ^Buffer, s: string) {
- resize(&b.buf, len(s))
+buffer_init_string :: proc(b: ^Buffer, s: string, loc := #caller_location) {
+ resize(&b.buf, len(s), loc=loc)
copy(b.buf[:], s)
}
-buffer_init_allocator :: proc(b: ^Buffer, len, cap: int, allocator := context.allocator) {
+buffer_init_allocator :: proc(b: ^Buffer, len, cap: int, allocator := context.allocator, loc := #caller_location) {
if b.buf == nil {
- b.buf = make([dynamic]byte, len, cap, allocator)
+ b.buf = make([dynamic]byte, len, cap, allocator, loc)
return
}
@@ -96,28 +96,28 @@ buffer_truncate :: proc(b: ^Buffer, n: int) {
}
@(private)
-_buffer_try_grow :: proc(b: ^Buffer, n: int) -> (int, bool) {
+_buffer_try_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) -> (int, bool) {
if l := len(b.buf); n <= cap(b.buf)-l {
- resize(&b.buf, l+n)
+ resize(&b.buf, l+n, loc=loc)
return l, true
}
return 0, false
}
@(private)
-_buffer_grow :: proc(b: ^Buffer, n: int) -> int {
+_buffer_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) -> int {
m := buffer_length(b)
if m == 0 && b.off != 0 {
buffer_reset(b)
}
- if i, ok := _buffer_try_grow(b, n); ok {
+ if i, ok := _buffer_try_grow(b, n, loc=loc); ok {
return i
}
if b.buf == nil && n <= SMALL_BUFFER_SIZE {
// Fixes #2756 by preserving allocator if already set on Buffer via init_buffer_allocator
- reserve(&b.buf, SMALL_BUFFER_SIZE)
- resize(&b.buf, n)
+ reserve(&b.buf, SMALL_BUFFER_SIZE, loc=loc)
+ resize(&b.buf, n, loc=loc)
return 0
}
@@ -127,31 +127,31 @@ _buffer_grow :: proc(b: ^Buffer, n: int) -> int {
} else if c > max(int) - c - n {
panic("bytes.Buffer: too large")
} else {
- resize(&b.buf, 2*c + n)
+ resize(&b.buf, 2*c + n, loc=loc)
copy(b.buf[:], b.buf[b.off:])
}
b.off = 0
- resize(&b.buf, m+n)
+ resize(&b.buf, m+n, loc=loc)
return m
}
-buffer_grow :: proc(b: ^Buffer, n: int) {
+buffer_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) {
if n < 0 {
panic("bytes.buffer_grow: negative count")
}
- m := _buffer_grow(b, n)
- resize(&b.buf, m)
+ m := _buffer_grow(b, n, loc=loc)
+ resize(&b.buf, m, loc=loc)
}
-buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.Error) {
+buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
if offset < 0 {
err = .Invalid_Offset
return
}
- _, ok := _buffer_try_grow(b, offset+len(p))
+ _, ok := _buffer_try_grow(b, offset+len(p), loc=loc)
if !ok {
- _ = _buffer_grow(b, offset+len(p))
+ _ = _buffer_grow(b, offset+len(p), loc=loc)
}
if len(b.buf) <= offset {
return 0, .Short_Write
@@ -160,47 +160,47 @@ buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.
}
-buffer_write :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
+buffer_write :: proc(b: ^Buffer, p: []byte, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, len(p))
+ m, ok := _buffer_try_grow(b, len(p), loc=loc)
if !ok {
- m = _buffer_grow(b, len(p))
+ m = _buffer_grow(b, len(p), loc=loc)
}
return copy(b.buf[m:], p), nil
}
-buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int) -> (n: int, err: io.Error) {
- return buffer_write(b, ([^]byte)(ptr)[:size])
+buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int, loc := #caller_location) -> (n: int, err: io.Error) {
+ return buffer_write(b, ([^]byte)(ptr)[:size], loc=loc)
}
-buffer_write_string :: proc(b: ^Buffer, s: string) -> (n: int, err: io.Error) {
+buffer_write_string :: proc(b: ^Buffer, s: string, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, len(s))
+ m, ok := _buffer_try_grow(b, len(s), loc=loc)
if !ok {
- m = _buffer_grow(b, len(s))
+ m = _buffer_grow(b, len(s), loc=loc)
}
return copy(b.buf[m:], s), nil
}
-buffer_write_byte :: proc(b: ^Buffer, c: byte) -> io.Error {
+buffer_write_byte :: proc(b: ^Buffer, c: byte, loc := #caller_location) -> io.Error {
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, 1)
+ m, ok := _buffer_try_grow(b, 1, loc=loc)
if !ok {
- m = _buffer_grow(b, 1)
+ m = _buffer_grow(b, 1, loc=loc)
}
b.buf[m] = c
return nil
}
-buffer_write_rune :: proc(b: ^Buffer, r: rune) -> (n: int, err: io.Error) {
+buffer_write_rune :: proc(b: ^Buffer, r: rune, loc := #caller_location) -> (n: int, err: io.Error) {
if r < utf8.RUNE_SELF {
- buffer_write_byte(b, byte(r))
+ buffer_write_byte(b, byte(r), loc=loc)
return 1, nil
}
b.last_read = .Invalid
- m, ok := _buffer_try_grow(b, utf8.UTF_MAX)
+ m, ok := _buffer_try_grow(b, utf8.UTF_MAX, loc=loc)
if !ok {
- m = _buffer_grow(b, utf8.UTF_MAX)
+ m = _buffer_grow(b, utf8.UTF_MAX, loc=loc)
}
res: [4]byte
res, n = utf8.encode_rune(r)
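The `loc := #caller_location` parameters threaded through core:bytes follow the usual pattern for allocation-heavy APIs: forwarding `loc` into make/reserve/resize lets tools such as `mem.Tracking_Allocator` attribute allocations and leaks to the user's call site rather than to the library. A minimal sketch of the same idea with a hypothetical helper:

package example

make_scratch :: proc(n: int, loc := #caller_location) -> []byte {
	// Forward `loc` so allocator diagnostics point at make_scratch's caller.
	buf := make([]byte, n, context.allocator, loc)
	return buf
}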
diff --git a/core/c/libc/signal.odin b/core/c/libc/signal.odin
index 186b74d8c..1489779fe 100644
--- a/core/c/libc/signal.odin
+++ b/core/c/libc/signal.odin
@@ -34,7 +34,7 @@ when ODIN_OS == .Windows {
SIGTERM :: 15
}
-when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD || ODIN_OS == .Haiku || ODIN_OS == .OpenBSD || ODIN_OS == .NetBSD {
SIG_ERR :: rawptr(~uintptr(0))
SIG_DFL :: rawptr(uintptr(0))
SIG_IGN :: rawptr(uintptr(1))
diff --git a/core/c/libc/stdio.odin b/core/c/libc/stdio.odin
index f17d3bd06..3e1d0f5a2 100644
--- a/core/c/libc/stdio.odin
+++ b/core/c/libc/stdio.odin
@@ -102,10 +102,12 @@ when ODIN_OS == .OpenBSD || ODIN_OS == .NetBSD {
SEEK_END :: 2
foreign libc {
- stderr: ^FILE
- stdin: ^FILE
- stdout: ^FILE
+ __sF: [3]FILE
}
+
+ stdin: ^FILE = &__sF[0]
+ stdout: ^FILE = &__sF[1]
+ stderr: ^FILE = &__sF[2]
}
when ODIN_OS == .FreeBSD {
@@ -127,9 +129,9 @@ when ODIN_OS == .FreeBSD {
SEEK_END :: 2
foreign libc {
- stderr: ^FILE
- stdin: ^FILE
- stdout: ^FILE
+ @(link_name="__stderrp") stderr: ^FILE
+ @(link_name="__stdinp") stdin: ^FILE
+ @(link_name="__stdoutp") stdout: ^FILE
}
}
diff --git a/core/crypto/_aes/aes.odin b/core/crypto/_aes/aes.odin
new file mode 100644
index 000000000..4f52485d2
--- /dev/null
+++ b/core/crypto/_aes/aes.odin
@@ -0,0 +1,28 @@
+package _aes
+
+// KEY_SIZE_128 is the AES-128 key size in bytes.
+KEY_SIZE_128 :: 16
+// KEY_SIZE_192 is the AES-192 key size in bytes.
+KEY_SIZE_192 :: 24
+// KEY_SIZE_256 is the AES-256 key size in bytes.
+KEY_SIZE_256 :: 32
+
+// BLOCK_SIZE is the AES block size in bytes.
+BLOCK_SIZE :: 16
+
+// ROUNDS_128 is the number of rounds for AES-128.
+ROUNDS_128 :: 10
+// ROUNDS_192 is the number of rounds for AES-192.
+ROUNDS_192 :: 12
+// ROUNDS_256 is the number of rounds for AES-256.
+ROUNDS_256 :: 14
+
+// GHASH_KEY_SIZE is the GHASH key size in bytes.
+GHASH_KEY_SIZE :: 16
+// GHASH_BLOCK_SIZE is the GHASH block size in bytes.
+GHASH_BLOCK_SIZE :: 16
+// GHASH_TAG_SIZE is the GHASH tag size in bytes.
+GHASH_TAG_SIZE :: 16
+
+// RCON holds the AES key schedule round constants.
+RCON := [10]byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36}
diff --git a/core/crypto/_aes/ct64/api.odin b/core/crypto/_aes/ct64/api.odin
new file mode 100644
index 000000000..ae624971c
--- /dev/null
+++ b/core/crypto/_aes/ct64/api.odin
@@ -0,0 +1,96 @@
+package aes_ct64
+
+import "base:intrinsics"
+import "core:mem"
+
+STRIDE :: 4
+
+// Context is a keyed AES (ECB) instance.
+Context :: struct {
+ _sk_exp: [120]u64,
+ _num_rounds: int,
+ _is_initialized: bool,
+}
+
+// init initializes a context for AES with the provided key.
+init :: proc(ctx: ^Context, key: []byte) {
+ skey: [30]u64 = ---
+
+ ctx._num_rounds = keysched(skey[:], key)
+ skey_expand(ctx._sk_exp[:], skey[:], ctx._num_rounds)
+ ctx._is_initialized = true
+}
+
+// encrypt_block sets `dst` to `AES-ECB-Encrypt(src)`.
+encrypt_block :: proc(ctx: ^Context, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64
+ load_blockx1(&q, src)
+ _encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blockx1(dst, &q)
+}
+
+// decrypt_block sets `dst` to `AES-ECB-Decrypt(src)`.
+decrypt_block :: proc(ctx: ^Context, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64
+ load_blockx1(&q, src)
+ _decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blockx1(dst, &q)
+}
+
+// encrypt_blocks sets `dst` to `AES-ECB-Encrypt(src[0], .. src[n])`.
+encrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64 = ---
+ src, dst := src, dst
+
+ n := len(src)
+ for n > 4 {
+ load_blocks(&q, src[0:4])
+ _encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst[0:4], &q)
+
+ src = src[4:]
+ dst = dst[4:]
+ n -= 4
+ }
+ if n > 0 {
+ load_blocks(&q, src)
+ _encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst, &q)
+ }
+}
+
+// decrypt_blocks sets `dst` to `AES-ECB-Decrypt(src[0], .. src[n])`.
+decrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
+ assert(ctx._is_initialized)
+
+ q: [8]u64 = ---
+ src, dst := src, dst
+
+ n := len(src)
+ for n > 4 {
+ load_blocks(&q, src[0:4])
+ _decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst[0:4], &q)
+
+ src = src[4:]
+ dst = dst[4:]
+ n -= 4
+ }
+ if n > 0 {
+ load_blocks(&q, src)
+ _decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
+ store_blocks(dst, &q)
+ }
+}
+
+// reset sanitizes the Context. The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	mem.zero_explicit(ctx, size_of(ctx^))
+}
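
For orientation, a minimal sketch of driving this context directly (a placeholder all-zero AES-128 key; in practice callers go through `core:crypto/aes`, which wraps this internal package):

package example_ct64_usage

import "core:crypto/_aes/ct64"

ecb_roundtrip :: proc() {
	key: [16]byte // Placeholder AES-128 key.
	src: [16]byte // One plaintext block.
	dst: [16]byte

	ctx: ct64.Context
	ct64.init(&ctx, key[:])
	defer ct64.reset(&ctx) // Sanitize the expanded key schedule when done.

	ct64.encrypt_block(&ctx, dst[:], src[:])
	ct64.decrypt_block(&ctx, dst[:], dst[:]) // Exact in-place aliasing is fine.
	assert(dst == src)
}
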
diff --git a/core/crypto/_aes/ct64/ct64.odin b/core/crypto/_aes/ct64/ct64.odin
new file mode 100644
index 000000000..f198cab81
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64.odin
@@ -0,0 +1,265 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+
+// Bitsliced AES for 64-bit general purpose (integer) registers. Each
+// invocation will process up to 4 blocks at a time. This implementation
+// is derived from the BearSSL ct64 code, and distributed under a 1-clause
+// BSD license with permission from the original author.
+//
+// WARNING: "hic sunt dracones"
+//
+// This package also deliberately exposes enough internals to be able to
+// function as a replacement for `AESENC` and `AESDEC` from AES-NI, to
+// allow the implementation of non-AES primitives that use the AES round
+// function such as AEGIS and Deoxys-II. This should ONLY be done when
+// implementing something other than AES itself.
+
+sub_bytes :: proc "contextless" (q: ^[8]u64) {
+ // This S-box implementation is a straightforward translation of
+ // the circuit described by Boyar and Peralta in "A new
+ // combinational logic minimization technique with applications
+ // to cryptology" (https://eprint.iacr.org/2009/191.pdf).
+ //
+ // Note that variables x* (input) and s* (output) are numbered
+ // in "reverse" order (x0 is the high bit, x7 is the low bit).
+
+ x0 := q[7]
+ x1 := q[6]
+ x2 := q[5]
+ x3 := q[4]
+ x4 := q[3]
+ x5 := q[2]
+ x6 := q[1]
+ x7 := q[0]
+
+ // Top linear transformation.
+ y14 := x3 ~ x5
+ y13 := x0 ~ x6
+ y9 := x0 ~ x3
+ y8 := x0 ~ x5
+ t0 := x1 ~ x2
+ y1 := t0 ~ x7
+ y4 := y1 ~ x3
+ y12 := y13 ~ y14
+ y2 := y1 ~ x0
+ y5 := y1 ~ x6
+ y3 := y5 ~ y8
+ t1 := x4 ~ y12
+ y15 := t1 ~ x5
+ y20 := t1 ~ x1
+ y6 := y15 ~ x7
+ y10 := y15 ~ t0
+ y11 := y20 ~ y9
+ y7 := x7 ~ y11
+ y17 := y10 ~ y11
+ y19 := y10 ~ y8
+ y16 := t0 ~ y11
+ y21 := y13 ~ y16
+ y18 := x0 ~ y16
+
+ // Non-linear section.
+ t2 := y12 & y15
+ t3 := y3 & y6
+ t4 := t3 ~ t2
+ t5 := y4 & x7
+ t6 := t5 ~ t2
+ t7 := y13 & y16
+ t8 := y5 & y1
+ t9 := t8 ~ t7
+ t10 := y2 & y7
+ t11 := t10 ~ t7
+ t12 := y9 & y11
+ t13 := y14 & y17
+ t14 := t13 ~ t12
+ t15 := y8 & y10
+ t16 := t15 ~ t12
+ t17 := t4 ~ t14
+ t18 := t6 ~ t16
+ t19 := t9 ~ t14
+ t20 := t11 ~ t16
+ t21 := t17 ~ y20
+ t22 := t18 ~ y19
+ t23 := t19 ~ y21
+ t24 := t20 ~ y18
+
+ t25 := t21 ~ t22
+ t26 := t21 & t23
+ t27 := t24 ~ t26
+ t28 := t25 & t27
+ t29 := t28 ~ t22
+ t30 := t23 ~ t24
+ t31 := t22 ~ t26
+ t32 := t31 & t30
+ t33 := t32 ~ t24
+ t34 := t23 ~ t33
+ t35 := t27 ~ t33
+ t36 := t24 & t35
+ t37 := t36 ~ t34
+ t38 := t27 ~ t36
+ t39 := t29 & t38
+ t40 := t25 ~ t39
+
+ t41 := t40 ~ t37
+ t42 := t29 ~ t33
+ t43 := t29 ~ t40
+ t44 := t33 ~ t37
+ t45 := t42 ~ t41
+ z0 := t44 & y15
+ z1 := t37 & y6
+ z2 := t33 & x7
+ z3 := t43 & y16
+ z4 := t40 & y1
+ z5 := t29 & y7
+ z6 := t42 & y11
+ z7 := t45 & y17
+ z8 := t41 & y10
+ z9 := t44 & y12
+ z10 := t37 & y3
+ z11 := t33 & y4
+ z12 := t43 & y13
+ z13 := t40 & y5
+ z14 := t29 & y2
+ z15 := t42 & y9
+ z16 := t45 & y14
+ z17 := t41 & y8
+
+ // Bottom linear transformation.
+ t46 := z15 ~ z16
+ t47 := z10 ~ z11
+ t48 := z5 ~ z13
+ t49 := z9 ~ z10
+ t50 := z2 ~ z12
+ t51 := z2 ~ z5
+ t52 := z7 ~ z8
+ t53 := z0 ~ z3
+ t54 := z6 ~ z7
+ t55 := z16 ~ z17
+ t56 := z12 ~ t48
+ t57 := t50 ~ t53
+ t58 := z4 ~ t46
+ t59 := z3 ~ t54
+ t60 := t46 ~ t57
+ t61 := z14 ~ t57
+ t62 := t52 ~ t58
+ t63 := t49 ~ t58
+ t64 := z4 ~ t59
+ t65 := t61 ~ t62
+ t66 := z1 ~ t63
+ s0 := t59 ~ t63
+ s6 := t56 ~ ~t62
+ s7 := t48 ~ ~t60
+ t67 := t64 ~ t65
+ s3 := t53 ~ t66
+ s4 := t51 ~ t66
+ s5 := t47 ~ t65
+ s1 := t64 ~ ~s3
+ s2 := t55 ~ ~t67
+
+ q[7] = s0
+ q[6] = s1
+ q[5] = s2
+ q[4] = s3
+ q[3] = s4
+ q[2] = s5
+ q[1] = s6
+ q[0] = s7
+}
+
+orthogonalize :: proc "contextless" (q: ^[8]u64) {
+ CL2 :: 0x5555555555555555
+ CH2 :: 0xAAAAAAAAAAAAAAAA
+ q[0], q[1] = (q[0] & CL2) | ((q[1] & CL2) << 1), ((q[0] & CH2) >> 1) | (q[1] & CH2)
+ q[2], q[3] = (q[2] & CL2) | ((q[3] & CL2) << 1), ((q[2] & CH2) >> 1) | (q[3] & CH2)
+ q[4], q[5] = (q[4] & CL2) | ((q[5] & CL2) << 1), ((q[4] & CH2) >> 1) | (q[5] & CH2)
+ q[6], q[7] = (q[6] & CL2) | ((q[7] & CL2) << 1), ((q[6] & CH2) >> 1) | (q[7] & CH2)
+
+ CL4 :: 0x3333333333333333
+ CH4 :: 0xCCCCCCCCCCCCCCCC
+ q[0], q[2] = (q[0] & CL4) | ((q[2] & CL4) << 2), ((q[0] & CH4) >> 2) | (q[2] & CH4)
+ q[1], q[3] = (q[1] & CL4) | ((q[3] & CL4) << 2), ((q[1] & CH4) >> 2) | (q[3] & CH4)
+ q[4], q[6] = (q[4] & CL4) | ((q[6] & CL4) << 2), ((q[4] & CH4) >> 2) | (q[6] & CH4)
+ q[5], q[7] = (q[5] & CL4) | ((q[7] & CL4) << 2), ((q[5] & CH4) >> 2) | (q[7] & CH4)
+
+ CL8 :: 0x0F0F0F0F0F0F0F0F
+ CH8 :: 0xF0F0F0F0F0F0F0F0
+ q[0], q[4] = (q[0] & CL8) | ((q[4] & CL8) << 4), ((q[0] & CH8) >> 4) | (q[4] & CH8)
+ q[1], q[5] = (q[1] & CL8) | ((q[5] & CL8) << 4), ((q[1] & CH8) >> 4) | (q[5] & CH8)
+ q[2], q[6] = (q[2] & CL8) | ((q[6] & CL8) << 4), ((q[2] & CH8) >> 4) | (q[6] & CH8)
+ q[3], q[7] = (q[3] & CL8) | ((q[7] & CL8) << 4), ((q[3] & CH8) >> 4) | (q[7] & CH8)
+}
+
+@(require_results)
+interleave_in :: proc "contextless" (w: []u32) -> (q0, q1: u64) #no_bounds_check {
+ if len(w) < 4 {
+ intrinsics.trap()
+ }
+ x0, x1, x2, x3 := u64(w[0]), u64(w[1]), u64(w[2]), u64(w[3])
+ x0 |= (x0 << 16)
+ x1 |= (x1 << 16)
+ x2 |= (x2 << 16)
+ x3 |= (x3 << 16)
+ x0 &= 0x0000FFFF0000FFFF
+ x1 &= 0x0000FFFF0000FFFF
+ x2 &= 0x0000FFFF0000FFFF
+ x3 &= 0x0000FFFF0000FFFF
+ x0 |= (x0 << 8)
+ x1 |= (x1 << 8)
+ x2 |= (x2 << 8)
+ x3 |= (x3 << 8)
+ x0 &= 0x00FF00FF00FF00FF
+ x1 &= 0x00FF00FF00FF00FF
+ x2 &= 0x00FF00FF00FF00FF
+ x3 &= 0x00FF00FF00FF00FF
+ q0 = x0 | (x2 << 8)
+ q1 = x1 | (x3 << 8)
+ return
+}
+
+@(require_results)
+interleave_out :: proc "contextless" (q0, q1: u64) -> (w0, w1, w2, w3: u32) {
+ x0 := q0 & 0x00FF00FF00FF00FF
+ x1 := q1 & 0x00FF00FF00FF00FF
+ x2 := (q0 >> 8) & 0x00FF00FF00FF00FF
+ x3 := (q1 >> 8) & 0x00FF00FF00FF00FF
+ x0 |= (x0 >> 8)
+ x1 |= (x1 >> 8)
+ x2 |= (x2 >> 8)
+ x3 |= (x3 >> 8)
+ x0 &= 0x0000FFFF0000FFFF
+ x1 &= 0x0000FFFF0000FFFF
+ x2 &= 0x0000FFFF0000FFFF
+ x3 &= 0x0000FFFF0000FFFF
+ w0 = u32(x0) | u32(x0 >> 16)
+ w1 = u32(x1) | u32(x1 >> 16)
+ w2 = u32(x2) | u32(x2 >> 16)
+ w3 = u32(x3) | u32(x3 >> 16)
+ return
+}
+
+@(private)
+rotr32 :: #force_inline proc "contextless" (x: u64) -> u64 {
+ return (x << 32) | (x >> 32)
+}
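
To illustrate the `AESENC`-replacement use mentioned above, one AES encryption round over the bitsliced state can be composed from the exposed pieces. This is only a sketch: `q` is assumed to hold up to four orthogonalized blocks, and `rk` an orthogonalized round key (8 `u64` values, e.g. produced by `orthogonalize_roundkey`).

package example_aes_round

import "core:crypto/_aes/ct64"

// Equivalent in effect to applying AES-NI AESENC to each block in `q`:
// SubBytes, ShiftRows, MixColumns, then XOR with the round key.
aesenc :: proc "contextless" (q: ^[8]u64, rk: []u64) {
	ct64.sub_bytes(q)
	ct64.shift_rows(q)
	ct64.mix_columns(q)
	ct64.add_round_key(q, rk)
}
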
diff --git a/core/crypto/_aes/ct64/ct64_dec.odin b/core/crypto/_aes/ct64/ct64_dec.odin
new file mode 100644
index 000000000..408ee6002
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64_dec.odin
@@ -0,0 +1,135 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+
+inv_sub_bytes :: proc "contextless" (q: ^[8]u64) {
+ // AES S-box is:
+ // S(x) = A(I(x)) ^ 0x63
+ // where I() is inversion in GF(256), and A() is a linear
+ // transform (0 is formally defined to be its own inverse).
+ // Since inversion is an involution, the inverse S-box can be
+ // computed from the S-box as:
+ // iS(x) = B(S(B(x ^ 0x63)) ^ 0x63)
+ // where B() is the inverse of A(). Indeed, for any y in GF(256):
+ // iS(S(y)) = B(A(I(B(A(I(y)) ^ 0x63 ^ 0x63))) ^ 0x63 ^ 0x63) = y
+ //
+ // Note: we reuse the implementation of the forward S-box,
+ // instead of duplicating it here, so that total code size is
+ // lower. By merging the B() transforms into the S-box circuit
+	// we could make CBC decryption faster, but CBC decryption is
+	// already considerably faster than CBC encryption because we can
+ // process four blocks in parallel.
+
+ q0 := ~q[0]
+ q1 := ~q[1]
+ q2 := q[2]
+ q3 := q[3]
+ q4 := q[4]
+ q5 := ~q[5]
+ q6 := ~q[6]
+ q7 := q[7]
+ q[7] = q1 ~ q4 ~ q6
+ q[6] = q0 ~ q3 ~ q5
+ q[5] = q7 ~ q2 ~ q4
+ q[4] = q6 ~ q1 ~ q3
+ q[3] = q5 ~ q0 ~ q2
+ q[2] = q4 ~ q7 ~ q1
+ q[1] = q3 ~ q6 ~ q0
+ q[0] = q2 ~ q5 ~ q7
+
+ sub_bytes(q)
+
+ q0 = ~q[0]
+ q1 = ~q[1]
+ q2 = q[2]
+ q3 = q[3]
+ q4 = q[4]
+ q5 = ~q[5]
+ q6 = ~q[6]
+ q7 = q[7]
+ q[7] = q1 ~ q4 ~ q6
+ q[6] = q0 ~ q3 ~ q5
+ q[5] = q7 ~ q2 ~ q4
+ q[4] = q6 ~ q1 ~ q3
+ q[3] = q5 ~ q0 ~ q2
+ q[2] = q4 ~ q7 ~ q1
+ q[1] = q3 ~ q6 ~ q0
+ q[0] = q2 ~ q5 ~ q7
+}
+
+inv_shift_rows :: proc "contextless" (q: ^[8]u64) {
+ for x, i in q {
+ q[i] =
+ (x & 0x000000000000FFFF) |
+ ((x & 0x000000000FFF0000) << 4) |
+ ((x & 0x00000000F0000000) >> 12) |
+ ((x & 0x000000FF00000000) << 8) |
+ ((x & 0x0000FF0000000000) >> 8) |
+ ((x & 0x000F000000000000) << 12) |
+ ((x & 0xFFF0000000000000) >> 4)
+ }
+}
+
+inv_mix_columns :: proc "contextless" (q: ^[8]u64) {
+ q0 := q[0]
+ q1 := q[1]
+ q2 := q[2]
+ q3 := q[3]
+ q4 := q[4]
+ q5 := q[5]
+ q6 := q[6]
+ q7 := q[7]
+ r0 := (q0 >> 16) | (q0 << 48)
+ r1 := (q1 >> 16) | (q1 << 48)
+ r2 := (q2 >> 16) | (q2 << 48)
+ r3 := (q3 >> 16) | (q3 << 48)
+ r4 := (q4 >> 16) | (q4 << 48)
+ r5 := (q5 >> 16) | (q5 << 48)
+ r6 := (q6 >> 16) | (q6 << 48)
+ r7 := (q7 >> 16) | (q7 << 48)
+
+ q[0] = q5 ~ q6 ~ q7 ~ r0 ~ r5 ~ r7 ~ rotr32(q0 ~ q5 ~ q6 ~ r0 ~ r5)
+ q[1] = q0 ~ q5 ~ r0 ~ r1 ~ r5 ~ r6 ~ r7 ~ rotr32(q1 ~ q5 ~ q7 ~ r1 ~ r5 ~ r6)
+ q[2] = q0 ~ q1 ~ q6 ~ r1 ~ r2 ~ r6 ~ r7 ~ rotr32(q0 ~ q2 ~ q6 ~ r2 ~ r6 ~ r7)
+ q[3] = q0 ~ q1 ~ q2 ~ q5 ~ q6 ~ r0 ~ r2 ~ r3 ~ r5 ~ rotr32(q0 ~ q1 ~ q3 ~ q5 ~ q6 ~ q7 ~ r0 ~ r3 ~ r5 ~ r7)
+ q[4] = q1 ~ q2 ~ q3 ~ q5 ~ r1 ~ r3 ~ r4 ~ r5 ~ r6 ~ r7 ~ rotr32(q1 ~ q2 ~ q4 ~ q5 ~ q7 ~ r1 ~ r4 ~ r5 ~ r6)
+ q[5] = q2 ~ q3 ~ q4 ~ q6 ~ r2 ~ r4 ~ r5 ~ r6 ~ r7 ~ rotr32(q2 ~ q3 ~ q5 ~ q6 ~ r2 ~ r5 ~ r6 ~ r7)
+ q[6] = q3 ~ q4 ~ q5 ~ q7 ~ r3 ~ r5 ~ r6 ~ r7 ~ rotr32(q3 ~ q4 ~ q6 ~ q7 ~ r3 ~ r6 ~ r7)
+ q[7] = q4 ~ q5 ~ q6 ~ r4 ~ r6 ~ r7 ~ rotr32(q4 ~ q5 ~ q7 ~ r4 ~ r7)
+}
+
+@(private)
+_decrypt :: proc "contextless" (q: ^[8]u64, skey: []u64, num_rounds: int) {
+ add_round_key(q, skey[num_rounds << 3:])
+ for u := num_rounds - 1; u > 0; u -= 1 {
+ inv_shift_rows(q)
+ inv_sub_bytes(q)
+ add_round_key(q, skey[u << 3:])
+ inv_mix_columns(q)
+ }
+ inv_shift_rows(q)
+ inv_sub_bytes(q)
+ add_round_key(q, skey)
+}
diff --git a/core/crypto/_aes/ct64/ct64_enc.odin b/core/crypto/_aes/ct64/ct64_enc.odin
new file mode 100644
index 000000000..36d4aebc8
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64_enc.odin
@@ -0,0 +1,95 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+
+add_round_key :: proc "contextless" (q: ^[8]u64, sk: []u64) #no_bounds_check {
+ if len(sk) < 8 {
+ intrinsics.trap()
+ }
+
+ q[0] ~= sk[0]
+ q[1] ~= sk[1]
+ q[2] ~= sk[2]
+ q[3] ~= sk[3]
+ q[4] ~= sk[4]
+ q[5] ~= sk[5]
+ q[6] ~= sk[6]
+ q[7] ~= sk[7]
+}
+
+shift_rows :: proc "contextless" (q: ^[8]u64) {
+ for x, i in q {
+ q[i] =
+ (x & 0x000000000000FFFF) |
+ ((x & 0x00000000FFF00000) >> 4) |
+ ((x & 0x00000000000F0000) << 12) |
+ ((x & 0x0000FF0000000000) >> 8) |
+ ((x & 0x000000FF00000000) << 8) |
+ ((x & 0xF000000000000000) >> 12) |
+ ((x & 0x0FFF000000000000) << 4)
+ }
+}
+
+mix_columns :: proc "contextless" (q: ^[8]u64) {
+ q0 := q[0]
+ q1 := q[1]
+ q2 := q[2]
+ q3 := q[3]
+ q4 := q[4]
+ q5 := q[5]
+ q6 := q[6]
+ q7 := q[7]
+ r0 := (q0 >> 16) | (q0 << 48)
+ r1 := (q1 >> 16) | (q1 << 48)
+ r2 := (q2 >> 16) | (q2 << 48)
+ r3 := (q3 >> 16) | (q3 << 48)
+ r4 := (q4 >> 16) | (q4 << 48)
+ r5 := (q5 >> 16) | (q5 << 48)
+ r6 := (q6 >> 16) | (q6 << 48)
+ r7 := (q7 >> 16) | (q7 << 48)
+
+ q[0] = q7 ~ r7 ~ r0 ~ rotr32(q0 ~ r0)
+ q[1] = q0 ~ r0 ~ q7 ~ r7 ~ r1 ~ rotr32(q1 ~ r1)
+ q[2] = q1 ~ r1 ~ r2 ~ rotr32(q2 ~ r2)
+ q[3] = q2 ~ r2 ~ q7 ~ r7 ~ r3 ~ rotr32(q3 ~ r3)
+ q[4] = q3 ~ r3 ~ q7 ~ r7 ~ r4 ~ rotr32(q4 ~ r4)
+ q[5] = q4 ~ r4 ~ r5 ~ rotr32(q5 ~ r5)
+ q[6] = q5 ~ r5 ~ r6 ~ rotr32(q6 ~ r6)
+ q[7] = q6 ~ r6 ~ r7 ~ rotr32(q7 ~ r7)
+}
+
+@(private)
+_encrypt :: proc "contextless" (q: ^[8]u64, skey: []u64, num_rounds: int) {
+ add_round_key(q, skey)
+ for u in 1 ..< num_rounds {
+ sub_bytes(q)
+ shift_rows(q)
+ mix_columns(q)
+ add_round_key(q, skey[u << 3:])
+ }
+ sub_bytes(q)
+ shift_rows(q)
+ add_round_key(q, skey[num_rounds << 3:])
+}
diff --git a/core/crypto/_aes/ct64/ct64_keysched.odin b/core/crypto/_aes/ct64/ct64_keysched.odin
new file mode 100644
index 000000000..060a2c03e
--- /dev/null
+++ b/core/crypto/_aes/ct64/ct64_keysched.odin
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+import "core:crypto/_aes"
+import "core:encoding/endian"
+import "core:mem"
+
+@(private, require_results)
+sub_word :: proc "contextless" (x: u32) -> u32 {
+ q := [8]u64{u64(x), 0, 0, 0, 0, 0, 0, 0}
+
+ orthogonalize(&q)
+ sub_bytes(&q)
+ orthogonalize(&q)
+ ret := u32(q[0])
+
+ mem.zero_explicit(&q[0], size_of(u64))
+
+ return ret
+}
+
+@(private, require_results)
+keysched :: proc(comp_skey: []u64, key: []byte) -> int {
+ num_rounds, key_len := 0, len(key)
+ switch key_len {
+ case _aes.KEY_SIZE_128:
+ num_rounds = _aes.ROUNDS_128
+ case _aes.KEY_SIZE_192:
+ num_rounds = _aes.ROUNDS_192
+ case _aes.KEY_SIZE_256:
+ num_rounds = _aes.ROUNDS_256
+ case:
+ panic("crypto/aes: invalid AES key size")
+ }
+
+ skey: [60]u32 = ---
+ nk, nkf := key_len >> 2, (num_rounds + 1) << 2
+ for i in 0 ..< nk {
+ skey[i] = endian.unchecked_get_u32le(key[i << 2:])
+ }
+ tmp := skey[(key_len >> 2) - 1]
+ for i, j, k := nk, 0, 0; i < nkf; i += 1 {
+ if j == 0 {
+ tmp = (tmp << 24) | (tmp >> 8)
+ tmp = sub_word(tmp) ~ u32(_aes.RCON[k])
+ } else if nk > 6 && j == 4 {
+ tmp = sub_word(tmp)
+ }
+ tmp ~= skey[i - nk]
+ skey[i] = tmp
+ if j += 1; j == nk {
+ j = 0
+ k += 1
+ }
+ }
+
+ q: [8]u64 = ---
+ for i, j := 0, 0; i < nkf; i, j = i + 4, j + 2 {
+ q[0], q[4] = interleave_in(skey[i:])
+ q[1] = q[0]
+ q[2] = q[0]
+ q[3] = q[0]
+ q[5] = q[4]
+ q[6] = q[4]
+ q[7] = q[4]
+ orthogonalize(&q)
+ comp_skey[j + 0] =
+ (q[0] & 0x1111111111111111) |
+ (q[1] & 0x2222222222222222) |
+ (q[2] & 0x4444444444444444) |
+ (q[3] & 0x8888888888888888)
+ comp_skey[j + 1] =
+ (q[4] & 0x1111111111111111) |
+ (q[5] & 0x2222222222222222) |
+ (q[6] & 0x4444444444444444) |
+ (q[7] & 0x8888888888888888)
+ }
+
+ mem.zero_explicit(&skey, size_of(skey))
+ mem.zero_explicit(&q, size_of(q))
+
+ return num_rounds
+}
+
+@(private)
+skey_expand :: proc "contextless" (skey, comp_skey: []u64, num_rounds: int) {
+ n := (num_rounds + 1) << 1
+ for u, v := 0, 0; u < n; u, v = u + 1, v + 4 {
+ x0 := comp_skey[u]
+ x1, x2, x3 := x0, x0, x0
+ x0 &= 0x1111111111111111
+ x1 &= 0x2222222222222222
+ x2 &= 0x4444444444444444
+ x3 &= 0x8888888888888888
+ x1 >>= 1
+ x2 >>= 2
+ x3 >>= 3
+ skey[v + 0] = (x0 << 4) - x0
+ skey[v + 1] = (x1 << 4) - x1
+ skey[v + 2] = (x2 << 4) - x2
+ skey[v + 3] = (x3 << 4) - x3
+ }
+}
+
+orthogonalize_roundkey :: proc "contextless" (qq: []u64, key: []byte) {
+ if len(qq) < 8 || len(key) != 16 {
+ intrinsics.trap()
+ }
+
+ skey: [4]u32 = ---
+ skey[0] = endian.unchecked_get_u32le(key[0:])
+ skey[1] = endian.unchecked_get_u32le(key[4:])
+ skey[2] = endian.unchecked_get_u32le(key[8:])
+ skey[3] = endian.unchecked_get_u32le(key[12:])
+
+ q: [8]u64 = ---
+ q[0], q[4] = interleave_in(skey[:])
+ q[1] = q[0]
+ q[2] = q[0]
+ q[3] = q[0]
+ q[5] = q[4]
+ q[6] = q[4]
+ q[7] = q[4]
+ orthogonalize(&q)
+
+ comp_skey: [2]u64 = ---
+ comp_skey[0] =
+ (q[0] & 0x1111111111111111) |
+ (q[1] & 0x2222222222222222) |
+ (q[2] & 0x4444444444444444) |
+ (q[3] & 0x8888888888888888)
+ comp_skey[1] =
+ (q[4] & 0x1111111111111111) |
+ (q[5] & 0x2222222222222222) |
+ (q[6] & 0x4444444444444444) |
+ (q[7] & 0x8888888888888888)
+
+ for x, u in comp_skey {
+ x0 := x
+ x1, x2, x3 := x0, x0, x0
+ x0 &= 0x1111111111111111
+ x1 &= 0x2222222222222222
+ x2 &= 0x4444444444444444
+ x3 &= 0x8888888888888888
+ x1 >>= 1
+ x2 >>= 2
+ x3 >>= 3
+ qq[u * 4 + 0] = (x0 << 4) - x0
+ qq[u * 4 + 1] = (x1 << 4) - x1
+ qq[u * 4 + 2] = (x2 << 4) - x2
+ qq[u * 4 + 3] = (x3 << 4) - x3
+ }
+
+ mem.zero_explicit(&skey, size_of(skey))
+ mem.zero_explicit(&q, size_of(q))
+ mem.zero_explicit(&comp_skey, size_of(comp_skey))
+}
diff --git a/core/crypto/_aes/ct64/ghash.odin b/core/crypto/_aes/ct64/ghash.odin
new file mode 100644
index 000000000..21ac2ca97
--- /dev/null
+++ b/core/crypto/_aes/ct64/ghash.odin
@@ -0,0 +1,136 @@
+// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package aes_ct64
+
+import "base:intrinsics"
+import "core:crypto/_aes"
+import "core:encoding/endian"
+
+@(private = "file")
+bmul64 :: proc "contextless" (x, y: u64) -> u64 {
+ x0 := x & 0x1111111111111111
+ x1 := x & 0x2222222222222222
+ x2 := x & 0x4444444444444444
+ x3 := x & 0x8888888888888888
+ y0 := y & 0x1111111111111111
+ y1 := y & 0x2222222222222222
+ y2 := y & 0x4444444444444444
+ y3 := y & 0x8888888888888888
+ z0 := (x0 * y0) ~ (x1 * y3) ~ (x2 * y2) ~ (x3 * y1)
+ z1 := (x0 * y1) ~ (x1 * y0) ~ (x2 * y3) ~ (x3 * y2)
+ z2 := (x0 * y2) ~ (x1 * y1) ~ (x2 * y0) ~ (x3 * y3)
+ z3 := (x0 * y3) ~ (x1 * y2) ~ (x2 * y1) ~ (x3 * y0)
+ z0 &= 0x1111111111111111
+ z1 &= 0x2222222222222222
+ z2 &= 0x4444444444444444
+ z3 &= 0x8888888888888888
+ return z0 | z1 | z2 | z3
+}
+
+@(private = "file")
+rev64 :: proc "contextless" (x: u64) -> u64 {
+ x := x
+ x = ((x & 0x5555555555555555) << 1) | ((x >> 1) & 0x5555555555555555)
+ x = ((x & 0x3333333333333333) << 2) | ((x >> 2) & 0x3333333333333333)
+ x = ((x & 0x0F0F0F0F0F0F0F0F) << 4) | ((x >> 4) & 0x0F0F0F0F0F0F0F0F)
+ x = ((x & 0x00FF00FF00FF00FF) << 8) | ((x >> 8) & 0x00FF00FF00FF00FF)
+ x = ((x & 0x0000FFFF0000FFFF) << 16) | ((x >> 16) & 0x0000FFFF0000FFFF)
+ return (x << 32) | (x >> 32)
+}
+
+// ghash calculates the GHASH of `data`, with the key `key` and running state
+// `dst`, and stores the resulting digest in `dst`.
+//
+// Note: `dst` is both an input and an output, to support easy implementation
+// of GCM.
+ghash :: proc "contextless" (dst, key, data: []byte) {
+ if len(dst) != _aes.GHASH_BLOCK_SIZE || len(key) != _aes.GHASH_BLOCK_SIZE {
+ intrinsics.trap()
+ }
+
+ buf := data
+ l := len(buf)
+
+ y1 := endian.unchecked_get_u64be(dst[0:])
+ y0 := endian.unchecked_get_u64be(dst[8:])
+ h1 := endian.unchecked_get_u64be(key[0:])
+ h0 := endian.unchecked_get_u64be(key[8:])
+ h0r := rev64(h0)
+ h1r := rev64(h1)
+ h2 := h0 ~ h1
+ h2r := h0r ~ h1r
+
+ src: []byte
+ for l > 0 {
+ if l >= _aes.GHASH_BLOCK_SIZE {
+ src = buf
+ buf = buf[_aes.GHASH_BLOCK_SIZE:]
+ l -= _aes.GHASH_BLOCK_SIZE
+ } else {
+ tmp: [_aes.GHASH_BLOCK_SIZE]byte
+ copy(tmp[:], buf)
+ src = tmp[:]
+ l = 0
+ }
+ y1 ~= endian.unchecked_get_u64be(src)
+ y0 ~= endian.unchecked_get_u64be(src[8:])
+
+ y0r := rev64(y0)
+ y1r := rev64(y1)
+ y2 := y0 ~ y1
+ y2r := y0r ~ y1r
+
+ z0 := bmul64(y0, h0)
+ z1 := bmul64(y1, h1)
+ z2 := bmul64(y2, h2)
+ z0h := bmul64(y0r, h0r)
+ z1h := bmul64(y1r, h1r)
+ z2h := bmul64(y2r, h2r)
+ z2 ~= z0 ~ z1
+ z2h ~= z0h ~ z1h
+ z0h = rev64(z0h) >> 1
+ z1h = rev64(z1h) >> 1
+ z2h = rev64(z2h) >> 1
+
+ v0 := z0
+ v1 := z0h ~ z2
+ v2 := z1 ~ z2h
+ v3 := z1h
+
+ v3 = (v3 << 1) | (v2 >> 63)
+ v2 = (v2 << 1) | (v1 >> 63)
+ v1 = (v1 << 1) | (v0 >> 63)
+ v0 = (v0 << 1)
+
+ v2 ~= v0 ~ (v0 >> 1) ~ (v0 >> 2) ~ (v0 >> 7)
+ v1 ~= (v0 << 63) ~ (v0 << 62) ~ (v0 << 57)
+ v3 ~= v1 ~ (v1 >> 1) ~ (v1 >> 2) ~ (v1 >> 7)
+ v2 ~= (v1 << 63) ~ (v1 << 62) ~ (v1 << 57)
+
+ y0 = v2
+ y1 = v3
+ }
+
+ endian.unchecked_put_u64be(dst[0:], y1)
+ endian.unchecked_put_u64be(dst[8:], y0)
+}
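
As a quick sketch of the calling convention (placeholder key and data; the running digest starts at zero for a fresh GHASH and is updated in place):

package example_ghash

import "core:crypto/_aes/ct64"

ghash_oneshot :: proc() {
	key: [16]byte // GHASH key H; in GCM this is CIPH_K(0^128). Placeholder.
	tag: [16]byte // Running digest, zero-initialized for a fresh GHASH.
	data := [4]byte{0xde, 0xad, 0xbe, 0xef}

	// Partial trailing blocks are zero-padded internally.
	ct64.ghash(tag[:], key[:], data[:])
}
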
diff --git a/core/crypto/_aes/ct64/helpers.odin b/core/crypto/_aes/ct64/helpers.odin
new file mode 100644
index 000000000..169271f6d
--- /dev/null
+++ b/core/crypto/_aes/ct64/helpers.odin
@@ -0,0 +1,75 @@
+package aes_ct64
+
+import "base:intrinsics"
+import "core:crypto/_aes"
+import "core:encoding/endian"
+
+load_blockx1 :: proc "contextless" (q: ^[8]u64, src: []byte) {
+ if len(src) != _aes.BLOCK_SIZE {
+ intrinsics.trap()
+ }
+
+ w: [4]u32 = ---
+ w[0] = endian.unchecked_get_u32le(src[0:])
+ w[1] = endian.unchecked_get_u32le(src[4:])
+ w[2] = endian.unchecked_get_u32le(src[8:])
+ w[3] = endian.unchecked_get_u32le(src[12:])
+ q[0], q[4] = interleave_in(w[:])
+ orthogonalize(q)
+}
+
+store_blockx1 :: proc "contextless" (dst: []byte, q: ^[8]u64) {
+ if len(dst) != _aes.BLOCK_SIZE {
+ intrinsics.trap()
+ }
+
+ orthogonalize(q)
+ w0, w1, w2, w3 := interleave_out(q[0], q[4])
+ endian.unchecked_put_u32le(dst[0:], w0)
+ endian.unchecked_put_u32le(dst[4:], w1)
+ endian.unchecked_put_u32le(dst[8:], w2)
+ endian.unchecked_put_u32le(dst[12:], w3)
+}
+
+load_blocks :: proc "contextless" (q: ^[8]u64, src: [][]byte) {
+ if n := len(src); n > STRIDE || n == 0 {
+ intrinsics.trap()
+ }
+
+ w: [4]u32 = ---
+ for s, i in src {
+ if len(s) != _aes.BLOCK_SIZE {
+ intrinsics.trap()
+ }
+
+ w[0] = endian.unchecked_get_u32le(s[0:])
+ w[1] = endian.unchecked_get_u32le(s[4:])
+ w[2] = endian.unchecked_get_u32le(s[8:])
+ w[3] = endian.unchecked_get_u32le(s[12:])
+ q[i], q[i + 4] = interleave_in(w[:])
+ }
+ orthogonalize(q)
+}
+
+store_blocks :: proc "contextless" (dst: [][]byte, q: ^[8]u64) {
+ if n := len(dst); n > STRIDE || n == 0 {
+ intrinsics.trap()
+ }
+
+ orthogonalize(q)
+ for d, i in dst {
+ // Allow storing [0,4] blocks.
+ if d == nil {
+ break
+ }
+ if len(d) != _aes.BLOCK_SIZE {
+ intrinsics.trap()
+ }
+
+ w0, w1, w2, w3 := interleave_out(q[i], q[i + 4])
+ endian.unchecked_put_u32le(d[0:], w0)
+ endian.unchecked_put_u32le(d[4:], w1)
+ endian.unchecked_put_u32le(d[8:], w2)
+ endian.unchecked_put_u32le(d[12:], w3)
+ }
+}
diff --git a/core/crypto/aes/aes.odin b/core/crypto/aes/aes.odin
new file mode 100644
index 000000000..e895c5fe0
--- /dev/null
+++ b/core/crypto/aes/aes.odin
@@ -0,0 +1,22 @@
+/*
+package aes implements the AES block cipher and some common modes.
+
+See:
+- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197-upd1.pdf
+- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
+- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
+*/
+
+package aes
+
+import "core:crypto/_aes"
+
+// KEY_SIZE_128 is the AES-128 key size in bytes.
+KEY_SIZE_128 :: _aes.KEY_SIZE_128
+// KEY_SIZE_192 is the AES-192 key size in bytes.
+KEY_SIZE_192 :: _aes.KEY_SIZE_192
+// KEY_SIZE_256 is the AES-256 key size in bytes.
+KEY_SIZE_256 :: _aes.KEY_SIZE_256
+
+// BLOCK_SIZE is the AES block size in bytes.
+BLOCK_SIZE :: _aes.BLOCK_SIZE
diff --git a/core/crypto/aes/aes_ctr.odin b/core/crypto/aes/aes_ctr.odin
new file mode 100644
index 000000000..1821a7bdf
--- /dev/null
+++ b/core/crypto/aes/aes_ctr.odin
@@ -0,0 +1,199 @@
+package aes
+
+import "core:crypto/_aes/ct64"
+import "core:encoding/endian"
+import "core:math/bits"
+import "core:mem"
+
+// CTR_IV_SIZE is the size of the CTR mode IV in bytes.
+CTR_IV_SIZE :: 16
+
+// Context_CTR is a keyed AES-CTR instance.
+Context_CTR :: struct {
+ _impl: Context_Impl,
+ _buffer: [BLOCK_SIZE]byte,
+ _off: int,
+ _ctr_hi: u64,
+ _ctr_lo: u64,
+ _is_initialized: bool,
+}
+
+// init_ctr initializes a Context_CTR with the provided key and IV.
+init_ctr :: proc(ctx: ^Context_CTR, key, iv: []byte, impl := Implementation.Hardware) {
+ if len(iv) != CTR_IV_SIZE {
+ panic("crypto/aes: invalid CTR IV size")
+ }
+
+ init_impl(&ctx._impl, key, impl)
+ ctx._off = BLOCK_SIZE
+ ctx._ctr_hi = endian.unchecked_get_u64be(iv[0:])
+ ctx._ctr_lo = endian.unchecked_get_u64be(iv[8:])
+ ctx._is_initialized = true
+}
+
+// xor_bytes_ctr XORs each byte in src with bytes taken from the AES-CTR
+// keystream, and writes the resulting output to dst. dst and src MUST
+// alias exactly or not at all.
+xor_bytes_ctr :: proc(ctx: ^Context_CTR, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ // TODO: Enforcing that dst and src alias exactly or not at all
+ // is a good idea, though odd aliasing should be extremely uncommon.
+
+ src, dst := src, dst
+ if dst_len := len(dst); dst_len < len(src) {
+ src = src[:dst_len]
+ }
+
+ for remaining := len(src); remaining > 0; {
+ // Process multiple blocks at once
+ if ctx._off == BLOCK_SIZE {
+ if nr_blocks := remaining / BLOCK_SIZE; nr_blocks > 0 {
+ direct_bytes := nr_blocks * BLOCK_SIZE
+ ctr_blocks(ctx, dst, src, nr_blocks)
+ remaining -= direct_bytes
+ if remaining == 0 {
+ return
+ }
+ dst = dst[direct_bytes:]
+ src = src[direct_bytes:]
+ }
+
+ // If there is a partial block, generate and buffer 1 block
+ // worth of keystream.
+ ctr_blocks(ctx, ctx._buffer[:], nil, 1)
+ ctx._off = 0
+ }
+
+ // Process partial blocks from the buffered keystream.
+ to_xor := min(BLOCK_SIZE - ctx._off, remaining)
+ buffered_keystream := ctx._buffer[ctx._off:]
+ for i := 0; i < to_xor; i = i + 1 {
+ dst[i] = buffered_keystream[i] ~ src[i]
+ }
+ ctx._off += to_xor
+ dst = dst[to_xor:]
+ src = src[to_xor:]
+ remaining -= to_xor
+ }
+}
+
+// keystream_bytes_ctr fills dst with the raw AES-CTR keystream output.
+keystream_bytes_ctr :: proc(ctx: ^Context_CTR, dst: []byte) {
+ assert(ctx._is_initialized)
+
+ dst := dst
+ for remaining := len(dst); remaining > 0; {
+ // Process multiple blocks at once
+ if ctx._off == BLOCK_SIZE {
+ if nr_blocks := remaining / BLOCK_SIZE; nr_blocks > 0 {
+ direct_bytes := nr_blocks * BLOCK_SIZE
+ ctr_blocks(ctx, dst, nil, nr_blocks)
+ remaining -= direct_bytes
+ if remaining == 0 {
+ return
+ }
+ dst = dst[direct_bytes:]
+ }
+
+ // If there is a partial block, generate and buffer 1 block
+ // worth of keystream.
+ ctr_blocks(ctx, ctx._buffer[:], nil, 1)
+ ctx._off = 0
+ }
+
+ // Process partial blocks from the buffered keystream.
+ to_copy := min(BLOCK_SIZE - ctx._off, remaining)
+ buffered_keystream := ctx._buffer[ctx._off:]
+ copy(dst[:to_copy], buffered_keystream[:to_copy])
+ ctx._off += to_copy
+ dst = dst[to_copy:]
+ remaining -= to_copy
+ }
+}
+
+// reset_ctr sanitizes the Context_CTR. The Context_CTR must be
+// re-initialized to be used again.
+reset_ctr :: proc "contextless" (ctx: ^Context_CTR) {
+ reset_impl(&ctx._impl)
+ ctx._off = 0
+ ctx._ctr_hi = 0
+ ctx._ctr_lo = 0
+ mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
+ ctx._is_initialized = false
+}
+
+@(private)
+ctr_blocks :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
+ // Use the optimized hardware implementation if available.
+ if _, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
+ ctr_blocks_hw(ctx, dst, src, nr_blocks)
+ return
+ }
+
+ // Portable implementation.
+ ct64_inc_ctr := #force_inline proc "contextless" (dst: []byte, hi, lo: u64) -> (u64, u64) {
+ endian.unchecked_put_u64be(dst[0:], hi)
+ endian.unchecked_put_u64be(dst[8:], lo)
+
+ hi, lo := hi, lo
+ carry: u64
+ lo, carry = bits.add_u64(lo, 1, 0)
+ hi, _ = bits.add_u64(hi, 0, carry)
+ return hi, lo
+ }
+
+ impl := &ctx._impl.(ct64.Context)
+ src, dst := src, dst
+ nr_blocks := nr_blocks
+ ctr_hi, ctr_lo := ctx._ctr_hi, ctx._ctr_lo
+
+ tmp: [ct64.STRIDE][BLOCK_SIZE]byte = ---
+ ctrs: [ct64.STRIDE][]byte = ---
+ for i in 0 ..< ct64.STRIDE {
+ ctrs[i] = tmp[i][:]
+ }
+ for nr_blocks > 0 {
+ n := min(ct64.STRIDE, nr_blocks)
+ blocks := ctrs[:n]
+
+ for i in 0 ..< n {
+ ctr_hi, ctr_lo = ct64_inc_ctr(blocks[i], ctr_hi, ctr_lo)
+ }
+ ct64.encrypt_blocks(impl, blocks, blocks)
+
+ xor_blocks(dst, src, blocks)
+
+ if src != nil {
+ src = src[n * BLOCK_SIZE:]
+ }
+ dst = dst[n * BLOCK_SIZE:]
+ nr_blocks -= n
+ }
+
+ // Write back the counter.
+ ctx._ctr_hi, ctx._ctr_lo = ctr_hi, ctr_lo
+
+ mem.zero_explicit(&tmp, size_of(tmp))
+}
+
+@(private)
+xor_blocks :: #force_inline proc "contextless" (dst, src: []byte, blocks: [][]byte) {
+	// Note: This would be faster if `core:simd` were used; however, if
+	// performance of this implementation matters enough for that
+	// optimization to be worth it, use chacha20poly1305, or a
+	// CPU that isn't e-waste.
+ if src != nil {
+ #no_bounds_check {
+ for i in 0 ..< len(blocks) {
+ off := i * BLOCK_SIZE
+ for j in 0 ..< BLOCK_SIZE {
+ blocks[i][j] ~= src[off + j]
+ }
+ }
+ }
+ }
+ for i in 0 ..< len(blocks) {
+ copy(dst[i * BLOCK_SIZE:], blocks[i])
+ }
+}
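
A minimal AES-CTR usage sketch against this API (placeholder key and IV; a real caller must use a strong random key and an IV that never repeats under that key):

package example_aes_ctr

import "core:crypto/aes"

ctr_encrypt :: proc() {
	key: [aes.KEY_SIZE_256]byte // Placeholder key.
	iv:  [aes.CTR_IV_SIZE]byte  // Placeholder IV.

	msg := "hellope"
	plaintext := transmute([]byte)msg
	ciphertext := make([]byte, len(plaintext))
	defer delete(ciphertext)

	ctx: aes.Context_CTR
	aes.init_ctr(&ctx, key[:], iv[:])
	defer aes.reset_ctr(&ctx)

	// CTR is symmetric: applying the same keystream again decrypts.
	aes.xor_bytes_ctr(&ctx, ciphertext, plaintext)
}
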
diff --git a/core/crypto/aes/aes_ecb.odin b/core/crypto/aes/aes_ecb.odin
new file mode 100644
index 000000000..498429e29
--- /dev/null
+++ b/core/crypto/aes/aes_ecb.odin
@@ -0,0 +1,57 @@
+package aes
+
+import "core:crypto/_aes/ct64"
+
+// Context_ECB is a keyed AES-ECB instance.
+//
+// WARNING: Using ECB mode is strongly discouraged unless it is being
+// used to implement higher level constructs.
+Context_ECB :: struct {
+ _impl: Context_Impl,
+ _is_initialized: bool,
+}
+
+// init_ecb initializes a Context_ECB with the provided key.
+init_ecb :: proc(ctx: ^Context_ECB, key: []byte, impl := Implementation.Hardware) {
+ init_impl(&ctx._impl, key, impl)
+ ctx._is_initialized = true
+}
+
+// encrypt_ecb encrypts the BLOCK_SIZE buffer src, and writes the result to dst.
+encrypt_ecb :: proc(ctx: ^Context_ECB, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ if len(dst) != BLOCK_SIZE || len(src) != BLOCK_SIZE {
+ panic("crypto/aes: invalid buffer size(s)")
+ }
+
+ switch &impl in ctx._impl {
+ case ct64.Context:
+ ct64.encrypt_block(&impl, dst, src)
+ case Context_Impl_Hardware:
+ encrypt_block_hw(&impl, dst, src)
+ }
+}
+
+// decrypt_ecb decrypts the BLOCK_SIZE buffer src, and writes the result to dst.
+decrypt_ecb :: proc(ctx: ^Context_ECB, dst, src: []byte) {
+ assert(ctx._is_initialized)
+
+ if len(dst) != BLOCK_SIZE || len(src) != BLOCK_SIZE {
+ panic("crypto/aes: invalid buffer size(s)")
+ }
+
+ switch &impl in ctx._impl {
+ case ct64.Context:
+ ct64.decrypt_block(&impl, dst, src)
+ case Context_Impl_Hardware:
+ decrypt_block_hw(&impl, dst, src)
+ }
+}
+
+// reset_ecb sanitizes the Context_ECB. The Context_ECB must be
+// re-initialized to be used again.
+reset_ecb :: proc "contextless" (ctx: ^Context_ECB) {
+ reset_impl(&ctx._impl)
+ ctx._is_initialized = false
+}
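
Echoing the warning above, ECB should only be used as a building block for higher-level constructs; a single-block sketch with a placeholder key:

package example_aes_ecb

import "core:crypto/aes"

ecb_single_block :: proc() {
	key:   [aes.KEY_SIZE_128]byte // Placeholder key.
	block: [aes.BLOCK_SIZE]byte   // Exactly one block; ECB has no chaining.

	ctx: aes.Context_ECB
	aes.init_ecb(&ctx, key[:])
	defer aes.reset_ecb(&ctx)

	aes.encrypt_ecb(&ctx, block[:], block[:]) // Exact in-place aliasing is fine.
	aes.decrypt_ecb(&ctx, block[:], block[:])
}
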
diff --git a/core/crypto/aes/aes_gcm.odin b/core/crypto/aes/aes_gcm.odin
new file mode 100644
index 000000000..66ef48db2
--- /dev/null
+++ b/core/crypto/aes/aes_gcm.odin
@@ -0,0 +1,253 @@
+package aes
+
+import "core:crypto"
+import "core:crypto/_aes"
+import "core:crypto/_aes/ct64"
+import "core:encoding/endian"
+import "core:mem"
+
+// GCM_NONCE_SIZE is the size of the GCM nonce in bytes.
+GCM_NONCE_SIZE :: 12
+// GCM_TAG_SIZE is the size of a GCM tag in bytes.
+GCM_TAG_SIZE :: _aes.GHASH_TAG_SIZE
+
+@(private)
+GCM_A_MAX :: max(u64) / 8 // 2^64 - 1 bits -> bytes
+@(private)
+GCM_P_MAX :: 0xfffffffe0 // 2^39 - 256 bits -> bytes
+
+// Context_GCM is a keyed AES-GCM instance.
+Context_GCM :: struct {
+ _impl: Context_Impl,
+ _is_initialized: bool,
+}
+
+// init_gcm initializes a Context_GCM with the provided key.
+init_gcm :: proc(ctx: ^Context_GCM, key: []byte, impl := Implementation.Hardware) {
+ init_impl(&ctx._impl, key, impl)
+ ctx._is_initialized = true
+}
+
+// seal_gcm encrypts the plaintext and authenticates the aad and ciphertext,
+// with the provided Context_GCM and nonce, and stores the output in dst and tag.
+//
+// dst and plaintext MUST alias exactly or not at all.
+seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
+ assert(ctx._is_initialized)
+
+ gcm_validate_common_slice_sizes(tag, nonce, aad, plaintext)
+ if len(dst) != len(plaintext) {
+ panic("crypto/aes: invalid destination ciphertext size")
+ }
+
+ if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
+ gcm_seal_hw(&impl, dst, tag, nonce, aad, plaintext)
+ return
+ }
+
+ h: [_aes.GHASH_KEY_SIZE]byte
+ j0: [_aes.GHASH_BLOCK_SIZE]byte
+ s: [_aes.GHASH_TAG_SIZE]byte
+ init_ghash_ct64(ctx, &h, &j0, nonce)
+
+ // Note: Our GHASH implementation handles appending padding.
+ ct64.ghash(s[:], h[:], aad)
+ gctr_ct64(ctx, dst, &s, plaintext, &h, nonce, true)
+ final_ghash_ct64(&s, &h, &j0, len(aad), len(plaintext))
+ copy(tag, s[:])
+
+ mem.zero_explicit(&h, len(h))
+ mem.zero_explicit(&j0, len(j0))
+}
+
+// open_gcm authenticates the aad and ciphertext, and decrypts the ciphertext,
+// with the provided Context_GCM, nonce, and tag, and stores the output in dst,
+// returning true iff the authentication was successful. If authentication
+// fails, the destination buffer will be zeroed.
+//
+// dst and plaintext MUST alias exactly or not at all.
+open_gcm :: proc(ctx: ^Context_GCM, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+ assert(ctx._is_initialized)
+
+ gcm_validate_common_slice_sizes(tag, nonce, aad, ciphertext)
+ if len(dst) != len(ciphertext) {
+ panic("crypto/aes: invalid destination plaintext size")
+ }
+
+ if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
+ return gcm_open_hw(&impl, dst, nonce, aad, ciphertext, tag)
+ }
+
+ h: [_aes.GHASH_KEY_SIZE]byte
+ j0: [_aes.GHASH_BLOCK_SIZE]byte
+ s: [_aes.GHASH_TAG_SIZE]byte
+ init_ghash_ct64(ctx, &h, &j0, nonce)
+
+ ct64.ghash(s[:], h[:], aad)
+ gctr_ct64(ctx, dst, &s, ciphertext, &h, nonce, false)
+ final_ghash_ct64(&s, &h, &j0, len(aad), len(ciphertext))
+
+ ok := crypto.compare_constant_time(s[:], tag) == 1
+ if !ok {
+ mem.zero_explicit(raw_data(dst), len(dst))
+ }
+
+ mem.zero_explicit(&h, len(h))
+ mem.zero_explicit(&j0, len(j0))
+ mem.zero_explicit(&s, len(s))
+
+ return ok
+}
+
+// reset_gcm sanitizes the Context_GCM. The Context_GCM must be
+// re-initialized to be used again.
+reset_gcm :: proc "contextless" (ctx: ^Context_GCM) {
+ reset_impl(&ctx._impl)
+ ctx._is_initialized = false
+}
+
+@(private)
+gcm_validate_common_slice_sizes :: proc(tag, nonce, aad, text: []byte) {
+ if len(tag) != GCM_TAG_SIZE {
+ panic("crypto/aes: invalid GCM tag size")
+ }
+
+ // The specification supports nonces in the range [1, 2^64) bits
+ // however per NIST SP 800-38D 5.2.1.1:
+ //
+ // > For IVs, it is recommended that implementations restrict support
+ // > to the length of 96 bits, to promote interoperability, efficiency,
+ // > and simplicity of design.
+ if len(nonce) != GCM_NONCE_SIZE {
+ panic("crypto/aes: invalid GCM nonce size")
+ }
+
+ if aad_len := u64(len(aad)); aad_len > GCM_A_MAX {
+ panic("crypto/aes: oversized GCM aad")
+ }
+ if text_len := u64(len(text)); text_len > GCM_P_MAX {
+ panic("crypto/aes: oversized GCM src data")
+ }
+}
+
+@(private = "file")
+init_ghash_ct64 :: proc(
+ ctx: ^Context_GCM,
+ h: ^[_aes.GHASH_KEY_SIZE]byte,
+ j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
+ nonce: []byte,
+) {
+ impl := &ctx._impl.(ct64.Context)
+
+ // 1. Let H = CIPH(k, 0^128)
+ ct64.encrypt_block(impl, h[:], h[:])
+
+ // ECB encrypt j0, so that we can just XOR with the tag. In theory
+ // this could be processed along with the final GCTR block, to
+ // potentially save a call to AES-ECB, but... just use AES-NI.
+ copy(j0[:], nonce)
+ j0[_aes.GHASH_BLOCK_SIZE - 1] = 1
+ ct64.encrypt_block(impl, j0[:], j0[:])
+}
+
+@(private = "file")
+final_ghash_ct64 :: proc(
+ s: ^[_aes.GHASH_BLOCK_SIZE]byte,
+ h: ^[_aes.GHASH_KEY_SIZE]byte,
+ j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
+ a_len: int,
+ t_len: int,
+) {
+ blk: [_aes.GHASH_BLOCK_SIZE]byte
+ endian.unchecked_put_u64be(blk[0:], u64(a_len) * 8)
+ endian.unchecked_put_u64be(blk[8:], u64(t_len) * 8)
+
+ ct64.ghash(s[:], h[:], blk[:])
+ for i in 0 ..< len(s) {
+ s[i] ~= j0[i]
+ }
+}
+
+@(private = "file")
+gctr_ct64 :: proc(
+ ctx: ^Context_GCM,
+ dst: []byte,
+ s: ^[_aes.GHASH_BLOCK_SIZE]byte,
+ src: []byte,
+ h: ^[_aes.GHASH_KEY_SIZE]byte,
+ nonce: []byte,
+ is_seal: bool,
+) {
+ ct64_inc_ctr32 := #force_inline proc "contextless" (dst: []byte, ctr: u32) -> u32 {
+ endian.unchecked_put_u32be(dst[12:], ctr)
+ return ctr + 1
+ }
+
+ // 2. Define a block J_0 as follows:
+ // if len(IV) = 96, then let J0 = IV || 0^31 || 1
+ //
+ // Note: We only support 96 bit IVs.
+ tmp, tmp2: [ct64.STRIDE][BLOCK_SIZE]byte = ---, ---
+ ctrs, blks: [ct64.STRIDE][]byte = ---, ---
+ ctr: u32 = 2
+ for i in 0 ..< ct64.STRIDE {
+ // Setup scratch space for the keystream.
+ blks[i] = tmp2[i][:]
+
+ // Pre-copy the IV to all the counter blocks.
+ ctrs[i] = tmp[i][:]
+ copy(ctrs[i], nonce)
+ }
+
+ // We stitch the GCTR and GHASH operations together, so that only
+ // one pass over the ciphertext is required.
+
+ impl := &ctx._impl.(ct64.Context)
+ src, dst := src, dst
+
+ nr_blocks := len(src) / BLOCK_SIZE
+ for nr_blocks > 0 {
+ n := min(ct64.STRIDE, nr_blocks)
+ l := n * BLOCK_SIZE
+
+ if !is_seal {
+ ct64.ghash(s[:], h[:], src[:l])
+ }
+
+ // The keystream is written to a separate buffer, as we will
+	// reuse the first 96 bits of each counter.
+ for i in 0 ..< n {
+ ctr = ct64_inc_ctr32(ctrs[i], ctr)
+ }
+ ct64.encrypt_blocks(impl, blks[:n], ctrs[:n])
+
+ xor_blocks(dst, src, blks[:n])
+
+ if is_seal {
+ ct64.ghash(s[:], h[:], dst[:l])
+ }
+
+ src = src[l:]
+ dst = dst[l:]
+ nr_blocks -= n
+ }
+ if l := len(src); l > 0 {
+ if !is_seal {
+ ct64.ghash(s[:], h[:], src[:l])
+ }
+
+ ct64_inc_ctr32(ctrs[0], ctr)
+ ct64.encrypt_block(impl, ctrs[0], ctrs[0])
+
+ for i in 0 ..< l {
+ dst[i] = src[i] ~ ctrs[0][i]
+ }
+
+ if is_seal {
+ ct64.ghash(s[:], h[:], dst[:l])
+ }
+ }
+
+ mem.zero_explicit(&tmp, size_of(tmp))
+ mem.zero_explicit(&tmp2, size_of(tmp2))
+}
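
Putting seal and open together, a round-trip sketch (placeholder key and nonce; in real use the key must be random and the 96-bit nonce unique per key, and passing `nil` for the aad is allowed):

package example_aes_gcm

import "core:crypto/aes"

gcm_roundtrip :: proc() {
	key:   [aes.KEY_SIZE_256]byte   // Placeholder key.
	nonce: [aes.GCM_NONCE_SIZE]byte // Placeholder nonce.
	tag:   [aes.GCM_TAG_SIZE]byte

	msg := "hellope"
	plaintext := transmute([]byte)msg
	ciphertext := make([]byte, len(plaintext))
	recovered := make([]byte, len(plaintext))
	defer delete(ciphertext)
	defer delete(recovered)

	ctx: aes.Context_GCM
	aes.init_gcm(&ctx, key[:])
	defer aes.reset_gcm(&ctx)

	aes.seal_gcm(&ctx, ciphertext, tag[:], nonce[:], nil, plaintext)
	ok := aes.open_gcm(&ctx, recovered, nonce[:], nil, ciphertext, tag[:])
	assert(ok) // Fails (and zeroes `recovered`) if the tag does not verify.
}
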
diff --git a/core/crypto/aes/aes_impl.odin b/core/crypto/aes/aes_impl.odin
new file mode 100644
index 000000000..03747f1fb
--- /dev/null
+++ b/core/crypto/aes/aes_impl.odin
@@ -0,0 +1,41 @@
+package aes
+
+import "core:crypto/_aes/ct64"
+import "core:mem"
+import "core:reflect"
+
+@(private)
+Context_Impl :: union {
+ ct64.Context,
+ Context_Impl_Hardware,
+}
+
+// Implementation is an AES implementation. Most callers will not need
+// to use this, as the package will automatically select the most performant
+// implementation available (see `is_hardware_accelerated()`).
+Implementation :: enum {
+ Portable,
+ Hardware,
+}
+
+@(private)
+init_impl :: proc(ctx: ^Context_Impl, key: []byte, impl: Implementation) {
+ impl := impl
+ if !is_hardware_accelerated() {
+ impl = .Portable
+ }
+
+ switch impl {
+ case .Portable:
+ reflect.set_union_variant_typeid(ctx^, typeid_of(ct64.Context))
+ ct64.init(&ctx.(ct64.Context), key)
+ case .Hardware:
+ reflect.set_union_variant_typeid(ctx^, typeid_of(Context_Impl_Hardware))
+ init_impl_hw(&ctx.(Context_Impl_Hardware), key)
+ }
+}
+
+@(private)
+reset_impl :: proc "contextless" (ctx: ^Context_Impl) {
+ mem.zero_explicit(ctx, size_of(Context_Impl))
+}
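
For completeness, the selection can be overridden at init time, e.g. to exercise the bitsliced path in tests; a sketch:

package example_aes_impl

import "core:crypto/aes"

force_portable :: proc() {
	key: [aes.KEY_SIZE_128]byte // Placeholder key.

	ctx: aes.Context_ECB
	// Explicitly request the portable (bitsliced) implementation; the
	// package falls back to it anyway when hardware support is absent.
	aes.init_ecb(&ctx, key[:], aes.Implementation.Portable)
	defer aes.reset_ecb(&ctx)
}
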
diff --git a/core/crypto/aes/aes_impl_hw_gen.odin b/core/crypto/aes/aes_impl_hw_gen.odin
new file mode 100644
index 000000000..94815f61c
--- /dev/null
+++ b/core/crypto/aes/aes_impl_hw_gen.odin
@@ -0,0 +1,43 @@
+package aes
+
+@(private = "file")
+ERR_HW_NOT_SUPPORTED :: "crypto/aes: hardware implementation unsupported"
+
+// is_hardware_accelerated returns true iff hardware accelerated AES
+// is supported.
+is_hardware_accelerated :: proc "contextless" () -> bool {
+ return false
+}
+
+@(private)
+Context_Impl_Hardware :: struct {}
+
+@(private)
+init_impl_hw :: proc(ctx: ^Context_Impl_Hardware, key: []byte) {
+ panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+encrypt_block_hw :: proc(ctx: ^Context_Impl_Hardware, dst, src: []byte) {
+ panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+decrypt_block_hw :: proc(ctx: ^Context_Impl_Hardware, dst, src: []byte) {
+ panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+ctr_blocks_hw :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
+ panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, nonce, aad, plaintext: []byte) {
+ panic(ERR_HW_NOT_SUPPORTED)
+}
+
+@(private)
+gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
+ panic(ERR_HW_NOT_SUPPORTED)
+}
diff --git a/core/crypto/rand_darwin.odin b/core/crypto/rand_darwin.odin
index 5355f31c5..56acb5d22 100644
--- a/core/crypto/rand_darwin.odin
+++ b/core/crypto/rand_darwin.odin
@@ -11,7 +11,7 @@ HAS_RAND_BYTES :: true
_rand_bytes :: proc(dst: []byte) {
err := Sec.RandomCopyBytes(count=len(dst), bytes=raw_data(dst))
if err != .Success {
- msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
- panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg))
+ msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
+ fmt.panicf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg)
}
}
diff --git a/core/crypto/rand_linux.odin b/core/crypto/rand_linux.odin
index 43b3b3075..7e0edbb7e 100644
--- a/core/crypto/rand_linux.odin
+++ b/core/crypto/rand_linux.odin
@@ -32,7 +32,7 @@ _rand_bytes :: proc (dst: []byte) {
// All other failures are things that should NEVER happen
// unless the kernel interface changes (ie: the Linux
// developers break userland).
- panic(fmt.tprintf("crypto: getrandom failed: %v", errno))
+ fmt.panicf("crypto: getrandom failed: %v", errno)
}
l -= n_read
dst = dst[n_read:]
diff --git a/core/crypto/rand_windows.odin b/core/crypto/rand_windows.odin
index a92d376cb..9cd647cc1 100644
--- a/core/crypto/rand_windows.odin
+++ b/core/crypto/rand_windows.odin
@@ -11,16 +11,16 @@ _rand_bytes :: proc(dst: []byte) {
ret := (os.Errno)(win32.BCryptGenRandom(nil, raw_data(dst), u32(len(dst)), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG))
if ret != os.ERROR_NONE {
switch ret {
- case os.ERROR_INVALID_HANDLE:
- // The handle to the first parameter is invalid.
- // This should not happen here, since we explicitly pass nil to it
- panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
- case os.ERROR_INVALID_PARAMETER:
- // One of the parameters was invalid
- panic("crypto: BCryptGenRandom Invalid parameter")
- case:
- // Unknown error
- panic(fmt.tprintf("crypto: BCryptGenRandom failed: %d\n", ret))
+ case os.ERROR_INVALID_HANDLE:
+ // The handle to the first parameter is invalid.
+ // This should not happen here, since we explicitly pass nil to it
+ panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
+ case os.ERROR_INVALID_PARAMETER:
+ // One of the parameters was invalid
+ panic("crypto: BCryptGenRandom Invalid parameter")
+ case:
+ // Unknown error
+ fmt.panicf("crypto: BCryptGenRandom failed: %d\n", ret)
}
}
}
diff --git a/core/encoding/ansi/ansi.odin b/core/encoding/ansi/ansi.odin
new file mode 100644
index 000000000..5550a1671
--- /dev/null
+++ b/core/encoding/ansi/ansi.odin
@@ -0,0 +1,137 @@
+package ansi
+
+BEL :: "\a" // Bell
+BS :: "\b" // Backspace
+ESC :: "\e" // Escape
+
+// Fe Escape sequences
+
+CSI :: ESC + "[" // Control Sequence Introducer
+OSC :: ESC + "]" // Operating System Command
+ST :: ESC + "\\" // String Terminator
+
+// CSI sequences
+
+CUU :: "A" // Cursor Up
+CUD :: "B" // Cursor Down
+CUF :: "C" // Cursor Forward
+CUB :: "D" // Cursor Back
+CNL :: "E" // Cursor Next Line
+CPL :: "F" // Cursor Previous Line
+CHA :: "G" // Cursor Horizontal Absolute
+CUP :: "H" // Cursor Position
+ED :: "J" // Erase in Display
+EL :: "K" // Erase in Line
+SU :: "S" // Scroll Up
+SD :: "T" // Scroll Down
+HVP :: "f" // Horizontal Vertical Position
+SGR :: "m" // Select Graphic Rendition
+AUX_ON :: "5i" // AUX Port On
+AUX_OFF :: "4i" // AUX Port Off
+DSR :: "6n" // Device Status Report
+
+// CSI: private sequences
+
+SCP :: "s" // Save Current Cursor Position
+RCP :: "u" // Restore Saved Cursor Position
+DECAWM_ON :: "?7h" // Auto Wrap Mode (Enabled)
+DECAWM_OFF :: "?7l" // Auto Wrap Mode (Disabled)
+DECTCEM_SHOW :: "?25h" // Text Cursor Enable Mode (Visible)
+DECTCEM_HIDE :: "?25l" // Text Cursor Enable Mode (Invisible)
+
+// SGR sequences
+
+RESET :: "0"
+BOLD :: "1"
+FAINT :: "2"
+ITALIC :: "3" // Not widely supported.
+UNDERLINE :: "4"
+BLINK_SLOW :: "5"
+BLINK_RAPID :: "6" // Not widely supported.
+INVERT :: "7" // Also known as reverse video.
+HIDE :: "8" // Not widely supported.
+STRIKE :: "9"
+FONT_PRIMARY :: "10"
+FONT_ALT1 :: "11"
+FONT_ALT2 :: "12"
+FONT_ALT3 :: "13"
+FONT_ALT4 :: "14"
+FONT_ALT5 :: "15"
+FONT_ALT6 :: "16"
+FONT_ALT7 :: "17"
+FONT_ALT8 :: "18"
+FONT_ALT9 :: "19"
+FONT_FRAKTUR :: "20" // Rarely supported.
+UNDERLINE_DOUBLE :: "21" // May be interpreted as "disable bold."
+NO_BOLD_FAINT :: "22"
+NO_ITALIC_BLACKLETTER :: "23"
+NO_UNDERLINE :: "24"
+NO_BLINK :: "25"
+PROPORTIONAL_SPACING :: "26"
+NO_REVERSE :: "27"
+NO_HIDE :: "28"
+NO_STRIKE :: "29"
+
+FG_BLACK :: "30"
+FG_RED :: "31"
+FG_GREEN :: "32"
+FG_YELLOW :: "33"
+FG_BLUE :: "34"
+FG_MAGENTA :: "35"
+FG_CYAN :: "36"
+FG_WHITE :: "37"
+FG_COLOR :: "38"
+FG_COLOR_8_BIT :: "38;5" // Followed by ";n" where n is in 0..=255
+FG_COLOR_24_BIT :: "38;2" // Followed by ";r;g;b" where r,g,b are in 0..=255
+FG_DEFAULT :: "39"
+
+BG_BLACK :: "40"
+BG_RED :: "41"
+BG_GREEN :: "42"
+BG_YELLOW :: "43"
+BG_BLUE :: "44"
+BG_MAGENTA :: "45"
+BG_CYAN :: "46"
+BG_WHITE :: "47"
+BG_COLOR :: "48"
+BG_COLOR_8_BIT :: "48;5" // Followed by ";n" where n is in 0..=255
+BG_COLOR_24_BIT :: "48;2" // Followed by ";r;g;b" where r,g,b are in 0..=255
+BG_DEFAULT :: "49"
+
+NO_PROPORTIONAL_SPACING :: "50"
+FRAMED :: "51"
+ENCIRCLED :: "52"
+OVERLINED :: "53"
+NO_FRAME_ENCIRCLE :: "54"
+NO_OVERLINE :: "55"
+
+// SGR: non-standard bright colors
+
+FG_BRIGHT_BLACK :: "90" // Also known as grey.
+FG_BRIGHT_RED :: "91"
+FG_BRIGHT_GREEN :: "92"
+FG_BRIGHT_YELLOW :: "93"
+FG_BRIGHT_BLUE :: "94"
+FG_BRIGHT_MAGENTA :: "95"
+FG_BRIGHT_CYAN :: "96"
+FG_BRIGHT_WHITE :: "97"
+
+BG_BRIGHT_BLACK :: "100" // Also known as grey.
+BG_BRIGHT_RED :: "101"
+BG_BRIGHT_GREEN :: "102"
+BG_BRIGHT_YELLOW :: "103"
+BG_BRIGHT_BLUE :: "104"
+BG_BRIGHT_MAGENTA :: "105"
+BG_BRIGHT_CYAN :: "106"
+BG_BRIGHT_WHITE :: "107"
+
+// Fp Escape sequences
+
+DECSC :: ESC + "7" // DEC Save Cursor
+DECRC :: ESC + "8" // DEC Restore Cursor
+
+// OSC sequences
+
+WINDOW_TITLE :: "2" // Followed by ";<text>" ST.
+HYPERLINK :: "8" // Followed by ";[params];<URI>" ST. Closed by OSC HYPERLINK ";;" ST.
+CLIPBOARD :: "52" // Followed by ";c;<Base64-encoded string>" ST.
diff --git a/core/encoding/ansi/doc.odin b/core/encoding/ansi/doc.odin
new file mode 100644
index 000000000..a0945c581
--- /dev/null
+++ b/core/encoding/ansi/doc.odin
@@ -0,0 +1,20 @@
+/*
+package ansi implements constant references to many widely-supported ANSI
+escape codes, primarily used in terminal emulators for enhanced graphics, such
+as colors, text styling, and animated displays.
+
+For example, you can print out a line of cyan text like this:
+ fmt.println(ansi.CSI + ansi.FG_CYAN + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
+
+Multiple SGR (Select Graphic Rendition) codes can be joined by semicolons:
+ fmt.println(ansi.CSI + ansi.BOLD + ";" + ansi.FG_BLUE + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
+
+If your terminal supports 24-bit true color mode, you can also do this:
+ fmt.println(ansi.CSI + ansi.FG_COLOR_24_BIT + ";0;255;255" + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
+
+For more information, see:
+ 1. https://en.wikipedia.org/wiki/ANSI_escape_code
+ 2. https://www.vt100.net/docs/vt102-ug/chapter5.html
+ 3. https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+*/
+package ansi
diff --git a/core/encoding/cbor/cbor.odin b/core/encoding/cbor/cbor.odin
index d0e406ab1..7897b2a37 100644
--- a/core/encoding/cbor/cbor.odin
+++ b/core/encoding/cbor/cbor.odin
@@ -320,8 +320,8 @@ to_diagnostic_format :: proc {
// Turns the given CBOR value into a human-readable string.
// See docs on the proc group `diagnose` for more info.
-to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator) -> (string, mem.Allocator_Error) #optional_allocator_error {
- b := strings.builder_make(allocator)
+to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator, loc := #caller_location) -> (string, mem.Allocator_Error) #optional_allocator_error {
+ b := strings.builder_make(allocator, loc)
w := strings.to_stream(&b)
err := to_diagnostic_format_writer(w, val, padding)
if err == .EOF {
diff --git a/core/encoding/cbor/coding.odin b/core/encoding/cbor/coding.odin
index 0d276a7a1..07f0637a6 100644
--- a/core/encoding/cbor/coding.odin
+++ b/core/encoding/cbor/coding.odin
@@ -95,24 +95,25 @@ decode :: decode_from
// Decodes the given string as CBOR.
// See docs on the proc group `decode` for more information.
-decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
r: strings.Reader
strings.reader_init(&r, s)
- return decode_from_reader(strings.reader_to_stream(&r), flags, allocator)
+ return decode_from_reader(strings.reader_to_stream(&r), flags, allocator, loc)
}
// Reads a CBOR value from the given reader.
// See docs on the proc group `decode` for more information.
-decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
return decode_from_decoder(
Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r },
allocator=allocator,
+ loc = loc,
)
}
// Reads a CBOR value from the given decoder.
// See docs on the proc group `decode` for more information.
-decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+decode_from_decoder :: proc(d: Decoder, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
context.allocator = allocator
d := d
@@ -121,13 +122,13 @@ decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: V
d.max_pre_alloc = DEFAULT_MAX_PRE_ALLOC
}
- v, err = _decode_from_decoder(d)
+ v, err = _decode_from_decoder(d, {}, allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
return
}
-_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value, err: Decode_Error) {
+_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0), allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
hdr := hdr
r := d.reader
if hdr == Header(0) { hdr = _decode_header(r) or_return }
@@ -161,11 +162,11 @@ _decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value,
switch maj {
case .Unsigned: return _decode_tiny_u8(add)
case .Negative: return Negative_U8(_decode_tiny_u8(add) or_return), nil
- case .Bytes: return _decode_bytes_ptr(d, add)
- case .Text: return _decode_text_ptr(d, add)
- case .Array: return _decode_array_ptr(d, add)
- case .Map: return _decode_map_ptr(d, add)
- case .Tag: return _decode_tag_ptr(d, add)
+ case .Bytes: return _decode_bytes_ptr(d, add, .Bytes, allocator, loc)
+ case .Text: return _decode_text_ptr(d, add, allocator, loc)
+ case .Array: return _decode_array_ptr(d, add, allocator, loc)
+ case .Map: return _decode_map_ptr(d, add, allocator, loc)
+ case .Tag: return _decode_tag_ptr(d, add, allocator, loc)
case .Other: return _decode_tiny_simple(add)
case: return nil, .Bad_Major
}
@@ -203,27 +204,27 @@ encode :: encode_into
// Encodes the CBOR value into binary CBOR allocated on the given allocator.
// See the docs on the proc group `encode_into` for more info.
-encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (data: []byte, err: Encode_Error) {
- b := strings.builder_make(allocator) or_return
+encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (data: []byte, err: Encode_Error) {
+ b := strings.builder_make(allocator, loc) or_return
encode_into_builder(&b, v, flags, temp_allocator) or_return
return b.buf[:], nil
}
// Encodes the CBOR value into binary CBOR written to the given builder.
// See the docs on the proc group `encode_into` for more info.
-encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
- return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator)
+encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Encode_Error {
+ return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator, loc=loc)
}
// Encodes the CBOR value into binary CBOR written to the given writer.
// See the docs on the proc group `encode_into` for more info.
-encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
- return encode_into_encoder(Encoder{flags, w, temp_allocator}, v)
+encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Encode_Error {
+ return encode_into_encoder(Encoder{flags, w, temp_allocator}, v, loc=loc)
}
// Encodes the CBOR value into binary CBOR written to the given encoder.
// See the docs on the proc group `encode_into` for more info.
-encode_into_encoder :: proc(e: Encoder, v: Value) -> Encode_Error {
+encode_into_encoder :: proc(e: Encoder, v: Value, loc := #caller_location) -> Encode_Error {
e := e
if e.temp_allocator.procedure == nil {
@@ -366,21 +367,21 @@ _encode_u64_exact :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> (er
return
}
-_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes) -> (v: ^Bytes, err: Decode_Error) {
- v = new(Bytes) or_return
- defer if err != nil { free(v) }
+_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator, loc := #caller_location) -> (v: ^Bytes, err: Decode_Error) {
+ v = new(Bytes, allocator, loc) or_return
+ defer if err != nil { free(v, allocator, loc) }
- v^ = _decode_bytes(d, add, type) or_return
+ v^ = _decode_bytes(d, add, type, allocator, loc) or_return
return
}
-_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator) -> (v: Bytes, err: Decode_Error) {
+_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator, loc := #caller_location) -> (v: Bytes, err: Decode_Error) {
context.allocator = allocator
add := add
n, scap := _decode_len_str(d, add) or_return
- buf := strings.builder_make(0, scap) or_return
+ buf := strings.builder_make(0, scap, allocator, loc) or_return
defer if err != nil { strings.builder_destroy(&buf) }
buf_stream := strings.to_stream(&buf)
@@ -426,40 +427,40 @@ _encode_bytes :: proc(e: Encoder, val: Bytes, major: Major = .Bytes) -> (err: En
return
}
-_decode_text_ptr :: proc(d: Decoder, add: Add) -> (v: ^Text, err: Decode_Error) {
- v = new(Text) or_return
+_decode_text_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Text, err: Decode_Error) {
+ v = new(Text, allocator, loc) or_return
defer if err != nil { free(v) }
- v^ = _decode_text(d, add) or_return
+ v^ = _decode_text(d, add, allocator, loc) or_return
return
}
-_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator) -> (v: Text, err: Decode_Error) {
- return (Text)(_decode_bytes(d, add, .Text, allocator) or_return), nil
+_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Text, err: Decode_Error) {
+ return (Text)(_decode_bytes(d, add, .Text, allocator, loc) or_return), nil
}
_encode_text :: proc(e: Encoder, val: Text) -> Encode_Error {
return _encode_bytes(e, transmute([]byte)val, .Text)
}
-_decode_array_ptr :: proc(d: Decoder, add: Add) -> (v: ^Array, err: Decode_Error) {
- v = new(Array) or_return
+_decode_array_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Array, err: Decode_Error) {
+ v = new(Array, allocator, loc) or_return
defer if err != nil { free(v) }
- v^ = _decode_array(d, add) or_return
+ v^ = _decode_array(d, add, allocator, loc) or_return
return
}
-_decode_array :: proc(d: Decoder, add: Add) -> (v: Array, err: Decode_Error) {
+_decode_array :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Array, err: Decode_Error) {
n, scap := _decode_len_container(d, add) or_return
- array := make([dynamic]Value, 0, scap) or_return
+ array := make([dynamic]Value, 0, scap, allocator, loc) or_return
defer if err != nil {
- for entry in array { destroy(entry) }
- delete(array)
+ for entry in array { destroy(entry, allocator) }
+ delete(array, loc)
}
for i := 0; n == -1 || i < n; i += 1 {
- val, verr := _decode_from_decoder(d)
+ val, verr := _decode_from_decoder(d, {}, allocator, loc)
if n == -1 && verr == .Break {
break
} else if verr != nil {
@@ -485,39 +486,39 @@ _encode_array :: proc(e: Encoder, arr: Array) -> Encode_Error {
return nil
}
-_decode_map_ptr :: proc(d: Decoder, add: Add) -> (v: ^Map, err: Decode_Error) {
- v = new(Map) or_return
+_decode_map_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Map, err: Decode_Error) {
+ v = new(Map, allocator, loc) or_return
defer if err != nil { free(v) }
- v^ = _decode_map(d, add) or_return
+ v^ = _decode_map(d, add, allocator, loc) or_return
return
}
-_decode_map :: proc(d: Decoder, add: Add) -> (v: Map, err: Decode_Error) {
+_decode_map :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Map, err: Decode_Error) {
n, scap := _decode_len_container(d, add) or_return
- items := make([dynamic]Map_Entry, 0, scap) or_return
+ items := make([dynamic]Map_Entry, 0, scap, allocator, loc) or_return
defer if err != nil {
for entry in items {
destroy(entry.key)
destroy(entry.value)
}
- delete(items)
+ delete(items, loc)
}
for i := 0; n == -1 || i < n; i += 1 {
- key, kerr := _decode_from_decoder(d)
+ key, kerr := _decode_from_decoder(d, {}, allocator, loc)
if n == -1 && kerr == .Break {
break
} else if kerr != nil {
return nil, kerr
}
- value := _decode_from_decoder(d) or_return
+ value := _decode_from_decoder(d, {}, allocator, loc) or_return
append(&items, Map_Entry{
key = key,
value = value,
- }) or_return
+ }, loc) or_return
}
if .Shrink_Excess in d.flags { shrink(&items) }
@@ -578,20 +579,20 @@ _encode_map :: proc(e: Encoder, m: Map) -> (err: Encode_Error) {
return nil
}
-_decode_tag_ptr :: proc(d: Decoder, add: Add) -> (v: Value, err: Decode_Error) {
- tag := _decode_tag(d, add) or_return
+_decode_tag_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
+ tag := _decode_tag(d, add, allocator, loc) or_return
if t, ok := tag.?; ok {
defer if err != nil { destroy(t.value) }
- tp := new(Tag) or_return
+ tp := new(Tag, allocator, loc) or_return
tp^ = t
return tp, nil
}
// no error, no tag, this was the self described CBOR tag, skip it.
- return _decode_from_decoder(d)
+ return _decode_from_decoder(d, {}, allocator, loc)
}
-_decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error) {
+_decode_tag :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Maybe(Tag), err: Decode_Error) {
num := _decode_uint_as_u64(d.reader, add) or_return
// CBOR can be wrapped in a tag that decoders can use to see/check if the binary data is CBOR.
@@ -602,7 +603,7 @@ _decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error)
t := Tag{
number = num,
- value = _decode_from_decoder(d) or_return,
+ value = _decode_from_decoder(d, {}, allocator, loc) or_return,
}
if nested, ok := t.value.(^Tag); ok {
@@ -883,4 +884,4 @@ _encode_deterministic_f64 :: proc(w: io.Writer, v: f64) -> io.Error {
}
return _encode_f64_exact(w, v)
-}
+}
\ No newline at end of file
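A hedged usage sketch of the `decode` proc group whose internals are threaded with `allocator` and `loc` above; the one-byte input encodes the unsigned integer 1, and `cbor.destroy` is the package's own cleanup routine:

	package cbor_decode_sketch

	import "core:encoding/cbor"
	import "core:fmt"

	main :: proc() {
		// Decode a tiny CBOR value; allocations made during decoding are now
		// attributed to this call site via the new `loc` parameter.
		v, err := cbor.decode("\x01")
		if err != nil {
			return
		}
		defer cbor.destroy(v)
		fmt.println(v) // 1
	}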
diff --git a/core/encoding/cbor/marshal.odin b/core/encoding/cbor/marshal.odin
index 37c9dd180..775eafd9c 100644
--- a/core/encoding/cbor/marshal.odin
+++ b/core/encoding/cbor/marshal.odin
@@ -45,8 +45,8 @@ marshal :: marshal_into
// Marshals the given value into a CBOR byte stream (allocated using the given allocator).
// See docs on the `marshal_into` proc group for more info.
-marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (bytes: []byte, err: Marshal_Error) {
- b, alloc_err := strings.builder_make(allocator)
+marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (bytes: []byte, err: Marshal_Error) {
+ b, alloc_err := strings.builder_make(allocator, loc=loc)
// The builder as a stream also returns .EOF if it ran out of memory so this is consistent.
if alloc_err != nil {
return nil, .EOF
@@ -54,7 +54,7 @@ marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.a
defer if err != nil { strings.builder_destroy(&b) }
- if err = marshal_into_builder(&b, v, flags, temp_allocator); err != nil {
+ if err = marshal_into_builder(&b, v, flags, temp_allocator, loc=loc); err != nil {
return
}
@@ -63,20 +63,20 @@ marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.a
// Marshals the given value into a CBOR byte stream written to the given builder.
// See docs on the `marshal_into` proc group for more info.
-marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
- return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator)
+marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Marshal_Error {
+ return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator, loc=loc)
}
// Marshals the given value into a CBOR byte stream written to the given writer.
// See docs on the `marshal_into` proc group for more info.
-marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
+marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Marshal_Error {
encoder := Encoder{flags, w, temp_allocator}
- return marshal_into_encoder(encoder, v)
+ return marshal_into_encoder(encoder, v, loc=loc)
}
// Marshals the given value into a CBOR byte stream written to the given encoder.
// See docs on the `marshal_into` proc group for more info.
-marshal_into_encoder :: proc(e: Encoder, v: any) -> (err: Marshal_Error) {
+marshal_into_encoder :: proc(e: Encoder, v: any, loc := #caller_location) -> (err: Marshal_Error) {
e := e
if e.temp_allocator.procedure == nil {
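And a sketch of the marshalling side touched in this file; the `Point` struct is made up for illustration:

	package cbor_marshal_sketch

	import "core:encoding/cbor"
	import "core:fmt"

	Point :: struct {
		x, y: int,
	}

	main :: proc() {
		// With one argument, the `marshal` proc group resolves to
		// marshal_into_bytes and allocates the result from context.allocator.
		data, err := cbor.marshal(Point{x = 1, y = 2})
		if err != nil {
			fmt.eprintln("marshal failed:", err)
			return
		}
		defer delete(data)
		fmt.println(len(data), "bytes of CBOR")
	}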
diff --git a/core/encoding/cbor/unmarshal.odin b/core/encoding/cbor/unmarshal.odin
index a1524d9f4..c31ba1d92 100644
--- a/core/encoding/cbor/unmarshal.odin
+++ b/core/encoding/cbor/unmarshal.odin
@@ -31,8 +31,8 @@ unmarshal :: proc {
unmarshal_from_string,
}
-unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
- err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator)
+unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
+ err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
@@ -40,21 +40,21 @@ unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{},
}
// Unmarshals from a string, see docs on the proc group `Unmarshal` for more info.
-unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
sr: strings.Reader
r := strings.to_reader(&sr, s)
- err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator)
+ err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
return
}
-unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
d := d
- err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator)
+ err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
@@ -62,7 +62,7 @@ unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.alloca
}
-_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator) -> Unmarshal_Error {
+_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> Unmarshal_Error {
context.allocator = allocator
context.temp_allocator = temp_allocator
v := v
@@ -78,10 +78,10 @@ _unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocat
}
data := any{(^rawptr)(v.data)^, ti.variant.(reflect.Type_Info_Pointer).elem.id}
- return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return))
+ return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return), allocator, temp_allocator, loc)
}
-_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Error) {
+_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
v := v
ti := reflect.type_info_base(type_info_of(v.id))
r := d.reader
@@ -104,7 +104,7 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
// Allow generic unmarshal by doing it into a `Value`.
switch &dst in v {
case Value:
- dst = err_conv(_decode_from_decoder(d, hdr)) or_return
+ dst = err_conv(_decode_from_decoder(d, hdr, allocator, loc)) or_return
return
}
@@ -308,7 +308,7 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
if impl, ok := _tag_implementations_nr[nr]; ok {
return impl->unmarshal(d, nr, v)
} else if nr == TAG_OBJECT_TYPE {
- return _unmarshal_union(d, v, ti, hdr)
+ return _unmarshal_union(d, v, ti, hdr, loc=loc)
} else {
// Discard the tag info and unmarshal as its value.
return _unmarshal_value(d, v, _decode_header(r) or_return)
@@ -316,19 +316,19 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
return _unsupported(v, hdr, add)
- case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add)
- case .Text: return _unmarshal_string(d, v, ti, hdr, add)
- case .Array: return _unmarshal_array(d, v, ti, hdr, add)
- case .Map: return _unmarshal_map(d, v, ti, hdr, add)
+ case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add, allocator=allocator, loc=loc)
+ case .Text: return _unmarshal_string(d, v, ti, hdr, add, allocator=allocator, loc=loc)
+ case .Array: return _unmarshal_array(d, v, ti, hdr, add, allocator=allocator, loc=loc)
+ case .Map: return _unmarshal_map(d, v, ti, hdr, add, allocator=allocator, loc=loc)
case: return .Bad_Major
}
}
-_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
#partial switch t in ti.variant {
case reflect.Type_Info_String:
- bytes := err_conv(_decode_bytes(d, add)) or_return
+ bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
if t.is_cstring {
raw := (^cstring)(v.data)
@@ -347,7 +347,7 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if elem_base.id != byte { return _unsupported(v, hdr) }
- bytes := err_conv(_decode_bytes(d, add)) or_return
+ bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
raw := (^mem.Raw_Slice)(v.data)
raw^ = transmute(mem.Raw_Slice)bytes
return
@@ -357,12 +357,12 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if elem_base.id != byte { return _unsupported(v, hdr) }
- bytes := err_conv(_decode_bytes(d, add)) or_return
+ bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
raw := (^mem.Raw_Dynamic_Array)(v.data)
raw.data = raw_data(bytes)
raw.len = len(bytes)
raw.cap = len(bytes)
- raw.allocator = context.allocator
+ raw.allocator = allocator
return
case reflect.Type_Info_Array:
@@ -385,10 +385,10 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
-_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
#partial switch t in ti.variant {
case reflect.Type_Info_String:
- text := err_conv(_decode_text(d, add)) or_return
+ text := err_conv(_decode_text(d, add, allocator, loc)) or_return
if t.is_cstring {
raw := (^cstring)(v.data)
@@ -403,8 +403,8 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
// Enum by its variant name.
case reflect.Type_Info_Enum:
- text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
- defer delete(text, context.temp_allocator)
+ text := err_conv(_decode_text(d, add, allocator=temp_allocator, loc=loc)) or_return
+ defer delete(text, temp_allocator, loc)
for name, i in t.names {
if name == text {
@@ -414,8 +414,8 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
}
case reflect.Type_Info_Rune:
- text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
- defer delete(text, context.temp_allocator)
+ text := err_conv(_decode_text(d, add, allocator=temp_allocator, loc=loc)) or_return
+ defer delete(text, temp_allocator, loc)
r := (^rune)(v.data)
dr, n := utf8.decode_rune(text)
@@ -430,13 +430,15 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
return _unsupported(v, hdr)
}
-_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
assign_array :: proc(
d: Decoder,
da: ^mem.Raw_Dynamic_Array,
elemt: ^reflect.Type_Info,
length: int,
growable := true,
+ allocator := context.allocator,
+ loc := #caller_location,
) -> (out_of_space: bool, err: Unmarshal_Error) {
for idx: uintptr = 0; length == -1 || idx < uintptr(length); idx += 1 {
elem_ptr := rawptr(uintptr(da.data) + idx*uintptr(elemt.size))
@@ -450,13 +452,13 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if !growable { return true, .Out_Of_Memory }
cap := 2 * da.cap
- ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap)
+ ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap, loc)
// NOTE: Might be lying here, but it is at least an allocator error.
if !ok { return false, .Out_Of_Memory }
}
- err = _unmarshal_value(d, elem, hdr)
+ err = _unmarshal_value(d, elem, hdr, allocator=allocator, loc=loc)
if length == -1 && err == .Break { break }
if err != nil { return }
@@ -469,10 +471,10 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
// Allow generically storing the values array.
switch &dst in v {
case ^Array:
- dst = err_conv(_decode_array_ptr(d, add)) or_return
+ dst = err_conv(_decode_array_ptr(d, add, allocator=allocator, loc=loc)) or_return
return
case Array:
- dst = err_conv(_decode_array(d, add)) or_return
+ dst = err_conv(_decode_array(d, add, allocator=allocator, loc=loc)) or_return
return
}
@@ -480,8 +482,8 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Slice:
length, scap := err_conv(_decode_len_container(d, add)) or_return
- data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
- defer if err != nil { mem.free_bytes(data) }
+ data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align, allocator=allocator, loc=loc) or_return
+ defer if err != nil { mem.free_bytes(data, allocator=allocator, loc=loc) }
da := mem.Raw_Dynamic_Array{raw_data(data), 0, length, context.allocator }
@@ -489,7 +491,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if .Shrink_Excess in d.flags {
// Ignoring an error here, but this is not critical to succeed.
- _ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len)
+ _ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len, loc=loc)
}
raw := (^mem.Raw_Slice)(v.data)
@@ -500,8 +502,8 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Dynamic_Array:
length, scap := err_conv(_decode_len_container(d, add)) or_return
- data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
- defer if err != nil { mem.free_bytes(data) }
+ data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align, loc=loc) or_return
+ defer if err != nil { mem.free_bytes(data, allocator=allocator, loc=loc) }
raw := (^mem.Raw_Dynamic_Array)(v.data)
raw.data = raw_data(data)
@@ -513,7 +515,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if .Shrink_Excess in d.flags {
// Ignoring an error here, but this is not critical to succeed.
- _ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len)
+ _ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len, loc=loc)
}
return
@@ -525,7 +527,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, allocator }
out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
if out_of_space { return _unsupported(v, hdr) }
@@ -539,7 +541,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, allocator }
out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
if out_of_space { return _unsupported(v, hdr) }
@@ -553,7 +555,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, allocator }
info: ^runtime.Type_Info
switch ti.id {
@@ -575,7 +577,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
- da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, context.allocator }
+ da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, allocator }
info: ^runtime.Type_Info
switch ti.id {
@@ -593,17 +595,17 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
}
}
-_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
r := d.reader
- decode_key :: proc(d: Decoder, v: any, allocator := context.allocator) -> (k: string, err: Unmarshal_Error) {
+ decode_key :: proc(d: Decoder, v: any, allocator := context.allocator, loc := #caller_location) -> (k: string, err: Unmarshal_Error) {
entry_hdr := _decode_header(d.reader) or_return
entry_maj, entry_add := _header_split(entry_hdr)
#partial switch entry_maj {
case .Text:
- k = err_conv(_decode_text(d, entry_add, allocator)) or_return
+ k = err_conv(_decode_text(d, entry_add, allocator=allocator, loc=loc)) or_return
return
case .Bytes:
- bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator)) or_return
+ bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator, loc=loc)) or_return
k = string(bytes)
return
case:
@@ -615,10 +617,10 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
// Allow generically storing the map array.
switch &dst in v {
case ^Map:
- dst = err_conv(_decode_map_ptr(d, add)) or_return
+ dst = err_conv(_decode_map_ptr(d, add, allocator=allocator, loc=loc)) or_return
return
case Map:
- dst = err_conv(_decode_map(d, add)) or_return
+ dst = err_conv(_decode_map(d, add, allocator=allocator, loc=loc)) or_return
return
}
@@ -754,7 +756,7 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
// Unmarshal into a union, based on the `TAG_OBJECT_TYPE` tag of the spec, it denotes a tag which
// contains an array of exactly two elements, the first is a textual representation of the following
// CBOR value's type.
-_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header) -> (err: Unmarshal_Error) {
+_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, loc := #caller_location) -> (err: Unmarshal_Error) {
r := d.reader
#partial switch t in ti.variant {
case reflect.Type_Info_Union:
@@ -792,7 +794,7 @@ _unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Named:
if vti.name == target_name {
reflect.set_union_variant_raw_tag(v, tag)
- return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+ return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return, loc=loc)
}
case:
@@ -804,7 +806,7 @@ _unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if variant_name == target_name {
reflect.set_union_variant_raw_tag(v, tag)
- return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+ return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return, loc=loc)
}
}
}
diff --git a/core/encoding/entity/entity.odin b/core/encoding/entity/entity.odin
index cee6230ef..f5208ad6f 100644
--- a/core/encoding/entity/entity.odin
+++ b/core/encoding/entity/entity.odin
@@ -56,38 +56,27 @@ CDATA_END :: "]]>"
COMMENT_START :: "<!--"
COMMENT_END :: "-->"
-/*
- Default: CDATA and comments are passed through unchanged.
-*/
+// Default: CDATA and comments are passed through unchanged.
XML_Decode_Option :: enum u8 {
- /*
- Do not decode & entities. It decodes by default.
- If given, overrides `Decode_CDATA`.
- */
+ // Do not decode & entities. It decodes by default. If given, overrides `Decode_CDATA`.
No_Entity_Decode,
- /*
- CDATA is unboxed.
- */
+ // CDATA is unboxed.
Unbox_CDATA,
- /*
- Unboxed CDATA is decoded as well.
- Ignored if `.Unbox_CDATA` is not given.
- */
+ // Unboxed CDATA is decoded as well. Ignored if `.Unbox_CDATA` is not given.
Decode_CDATA,
- /*
- Comments are stripped.
- */
+ // Comments are stripped.
Comment_Strip,
+
+	// Normalize runs of whitespace into single spaces.
+ Normalize_Whitespace,
}
XML_Decode_Options :: bit_set[XML_Decode_Option; u8]
-/*
- Decode a string that may include SGML/XML/HTML entities.
- The caller has to free the result.
-*/
+// Decode a string that may include SGML/XML/HTML entities.
+// The caller has to free the result.
decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator := context.allocator) -> (decoded: string, err: Error) {
context.allocator = allocator
@@ -100,14 +89,14 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
t := Tokenizer{src=input}
in_data := false
+ prev: rune = ' '
+
loop: for {
advance(&t) or_return
if t.r < 0 { break loop }
- /*
- Below here we're never inside a CDATA tag.
- At most we'll see the start of one, but that doesn't affect the logic.
- */
+ // Below here we're never inside a CDATA tag. At most we'll see the start of one,
+ // but that doesn't affect the logic.
switch t.r {
case '<':
/*
@@ -126,9 +115,7 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
in_data = _handle_xml_special(&t, &builder, options) or_return
case ']':
- /*
- If we're unboxing _and_ decoding CDATA, we'll have to check for the end tag.
- */
+ // If we're unboxing _and_ decoding CDATA, we'll have to check for the end tag.
if in_data {
if t.read_offset + len(CDATA_END) < len(t.src) {
if string(t.src[t.offset:][:len(CDATA_END)]) == CDATA_END {
@@ -143,22 +130,16 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
case:
if in_data && .Decode_CDATA not_in options {
- /*
- Unboxed, but undecoded.
- */
+ // Unboxed, but undecoded.
write_rune(&builder, t.r)
continue
}
if t.r == '&' {
if entity, entity_err := _extract_xml_entity(&t); entity_err != .None {
- /*
- We read to the end of the string without closing the entity.
- Pass through as-is.
- */
+ // We read to the end of the string without closing the entity. Pass through as-is.
write_string(&builder, entity)
} else {
-
if .No_Entity_Decode not_in options {
if decoded, ok := xml_decode_entity(entity); ok {
write_rune(&builder, decoded)
@@ -166,19 +147,41 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
}
}
- /*
- Literal passthrough because the decode failed or we want entities not decoded.
- */
+ // Literal passthrough because the decode failed or we want entities not decoded.
write_string(&builder, "&")
write_string(&builder, entity)
write_string(&builder, ";")
}
} else {
- write_rune(&builder, t.r)
+ // Handle AV Normalization: https://www.w3.org/TR/2006/REC-xml11-20060816/#AVNormalize
+ if .Normalize_Whitespace in options {
+ switch t.r {
+ case ' ', '\r', '\n', '\t':
+ if prev != ' ' {
+ write_rune(&builder, ' ')
+ prev = ' '
+ }
+ case:
+ write_rune(&builder, t.r)
+ prev = t.r
+ }
+ } else {
+ // https://www.w3.org/TR/2006/REC-xml11-20060816/#sec-line-ends
+ switch t.r {
+ case '\n', 0x85, 0x2028:
+ write_rune(&builder, '\n')
+ case '\r': // Do nothing until next character
+ case:
+ if prev == '\r' { // Turn a single carriage return into a \n
+ write_rune(&builder, '\n')
+ }
+ write_rune(&builder, t.r)
+ }
+ prev = t.r
+ }
}
}
}
-
return strings.clone(strings.to_string(builder), allocator), err
}
@@ -253,24 +256,18 @@ xml_decode_entity :: proc(entity: string) -> (decoded: rune, ok: bool) {
return rune(val), true
case:
- /*
- Named entity.
- */
+ // Named entity.
return named_xml_entity_to_rune(entity)
}
}
-/*
- Private XML helper to extract `&<stuff>;` entity.
-*/
+// Private XML helper to extract `&<stuff>;` entity.
@(private="file")
_extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
assert(t != nil && t.r == '&')
- /*
- All of these would be in the ASCII range.
- Even if one is not, it doesn't matter. All characters we need to compare to extract are.
- */
+ // All of these would be in the ASCII range.
+ // Even if one is not, it doesn't matter. All characters we need to compare to extract are.
length := len(t.src)
found := false
@@ -292,9 +289,7 @@ _extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
return string(t.src[t.offset : t.read_offset]), .Invalid_Entity_Encoding
}
-/*
- Private XML helper for CDATA and comments.
-*/
+// Private XML helper for CDATA and comments.
@(private="file")
_handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: XML_Decode_Options) -> (in_data: bool, err: Error) {
assert(t != nil && t.r == '<')
@@ -304,20 +299,14 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
t.read_offset += len(CDATA_START) - 1
if .Unbox_CDATA in options && .Decode_CDATA in options {
- /*
- We're unboxing _and_ decoding CDATA
- */
+ // We're unboxing _and_ decoding CDATA
return true, .None
}
- /*
- CDATA is passed through.
- */
+ // CDATA is passed through.
offset := t.offset
- /*
- Scan until end of CDATA.
- */
+ // Scan until end of CDATA.
for {
advance(t) or_return
if t.r < 0 { return true, .CDATA_Not_Terminated }
@@ -341,14 +330,10 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
} else if string(t.src[t.offset:][:len(COMMENT_START)]) == COMMENT_START {
t.read_offset += len(COMMENT_START)
- /*
- Comment is passed through by default.
- */
+ // Comment is passed through by default.
offset := t.offset
- /*
- Scan until end of Comment.
- */
+ // Scan until end of Comment.
for {
advance(t) or_return
if t.r < 0 { return true, .Comment_Not_Terminated }
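A brief sketch of the new `Normalize_Whitespace` option in use with `decode_xml`; the input string is illustrative:

	package entity_sketch

	import "core:encoding/entity"
	import "core:fmt"

	main :: proc() {
		// Runs of spaces, tabs and newlines collapse to single spaces,
		// and `&` entities are still decoded by default.
		decoded, err := entity.decode_xml("Hellope\t  &amp;   friends", {.Normalize_Whitespace})
		if err == .None {
			defer delete(decoded)
			fmt.println(decoded)
		}
	}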
diff --git a/core/encoding/hex/hex.odin b/core/encoding/hex/hex.odin
index dbffe216b..c2cd89c5b 100644
--- a/core/encoding/hex/hex.odin
+++ b/core/encoding/hex/hex.odin
@@ -2,8 +2,8 @@ package encoding_hex
import "core:strings"
-encode :: proc(src: []byte, allocator := context.allocator) -> []byte #no_bounds_check {
- dst := make([]byte, len(src) * 2, allocator)
+encode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> []byte #no_bounds_check {
+ dst := make([]byte, len(src) * 2, allocator, loc)
for i, j := 0, 0; i < len(src); i += 1 {
v := src[i]
dst[j] = HEXTABLE[v>>4]
@@ -15,12 +15,12 @@ encode :: proc(src: []byte, allocator := context.allocator) -> []byte #no_bounds
}
-decode :: proc(src: []byte, allocator := context.allocator) -> (dst: []byte, ok: bool) #no_bounds_check {
+decode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (dst: []byte, ok: bool) #no_bounds_check {
if len(src) % 2 == 1 {
return
}
- dst = make([]byte, len(src) / 2, allocator)
+ dst = make([]byte, len(src) / 2, allocator, loc)
for i, j := 0, 1; j < len(src); j += 2 {
p := src[j-1]
q := src[j]
@@ -69,5 +69,4 @@ hex_digit :: proc(char: byte) -> (u8, bool) {
case 'A' ..= 'F': return char - 'A' + 10, true
case: return 0, false
}
-}
-
+}
\ No newline at end of file
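As a small usage sketch of the two procedures above, round-tripping a byte slice through its hex form (output casing depends on `HEXTABLE`, defined elsewhere in the package):

	package hex_sketch

	import "core:encoding/hex"
	import "core:fmt"

	main :: proc() {
		data := []byte{0xde, 0xad, 0xbe, 0xef}

		encoded := hex.encode(data)
		defer delete(encoded)
		fmt.println(string(encoded)) // hex text, e.g. "deadbeef"

		decoded, ok := hex.decode(encoded)
		if ok {
			defer delete(decoded)
			fmt.println(decoded) // the original bytes
		}
	}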
diff --git a/core/encoding/hxa/hxa.odin b/core/encoding/hxa/hxa.odin
index 9b24ede9c..9d0c58196 100644
--- a/core/encoding/hxa/hxa.odin
+++ b/core/encoding/hxa/hxa.odin
@@ -160,34 +160,35 @@ CONVENTION_SOFT_TRANSFORM :: "transform"
/* destroy procedures */
-meta_destroy :: proc(meta: Meta, allocator := context.allocator) {
+meta_destroy :: proc(meta: Meta, allocator := context.allocator, loc := #caller_location) {
if nested, ok := meta.value.([]Meta); ok {
for m in nested {
- meta_destroy(m)
+ meta_destroy(m, loc=loc)
}
- delete(nested, allocator)
+ delete(nested, allocator, loc=loc)
}
}
-nodes_destroy :: proc(nodes: []Node, allocator := context.allocator) {
+nodes_destroy :: proc(nodes: []Node, allocator := context.allocator, loc := #caller_location) {
for node in nodes {
for meta in node.meta_data {
- meta_destroy(meta)
+ meta_destroy(meta, loc=loc)
}
- delete(node.meta_data, allocator)
+ delete(node.meta_data, allocator, loc=loc)
switch n in node.content {
case Node_Geometry:
- delete(n.corner_stack, allocator)
- delete(n.edge_stack, allocator)
- delete(n.face_stack, allocator)
+ delete(n.corner_stack, allocator, loc=loc)
+ delete(n.vertex_stack, allocator, loc=loc)
+ delete(n.edge_stack, allocator, loc=loc)
+ delete(n.face_stack, allocator, loc=loc)
case Node_Image:
- delete(n.image_stack, allocator)
+ delete(n.image_stack, allocator, loc=loc)
}
}
- delete(nodes, allocator)
+ delete(nodes, allocator, loc=loc)
}
-file_destroy :: proc(file: File) {
- nodes_destroy(file.nodes, file.allocator)
- delete(file.backing, file.allocator)
-}
+file_destroy :: proc(file: File, loc := #caller_location) {
+ nodes_destroy(file.nodes, file.allocator, loc=loc)
+ delete(file.backing, file.allocator, loc=loc)
+}
\ No newline at end of file
diff --git a/core/encoding/hxa/read.odin b/core/encoding/hxa/read.odin
index f37dc3193..5c8503229 100644
--- a/core/encoding/hxa/read.odin
+++ b/core/encoding/hxa/read.odin
@@ -11,24 +11,21 @@ Read_Error :: enum {
Unable_To_Read_File,
}
-read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
+read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
context.allocator = allocator
- data, ok := os.read_entire_file(filename)
+ data, ok := os.read_entire_file(filename, allocator, loc)
if !ok {
err = .Unable_To_Read_File
+ delete(data, allocator, loc)
return
}
- defer if !ok {
- delete(data)
- } else {
- file.backing = data
- }
- file, err = read(data, filename, print_error, allocator)
+ file, err = read(data, filename, print_error, allocator, loc)
+ file.backing = data
return
}
-read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
+read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
Reader :: struct {
filename: string,
data: []byte,
@@ -79,8 +76,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
return string(data[:len]), nil
}
- read_meta :: proc(r: ^Reader, capacity: u32le) -> (meta_data: []Meta, err: Read_Error) {
- meta_data = make([]Meta, int(capacity))
+ read_meta :: proc(r: ^Reader, capacity: u32le, allocator := context.allocator, loc := #caller_location) -> (meta_data: []Meta, err: Read_Error) {
+ meta_data = make([]Meta, int(capacity), allocator=allocator)
count := 0
defer meta_data = meta_data[:count]
for &m in meta_data {
@@ -111,10 +108,10 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
return
}
- read_layer_stack :: proc(r: ^Reader, capacity: u32le) -> (layers: Layer_Stack, err: Read_Error) {
+ read_layer_stack :: proc(r: ^Reader, capacity: u32le, allocator := context.allocator, loc := #caller_location) -> (layers: Layer_Stack, err: Read_Error) {
stack_count := read_value(r, u32le) or_return
layer_count := 0
- layers = make(Layer_Stack, stack_count)
+ layers = make(Layer_Stack, stack_count, allocator=allocator, loc=loc)
defer layers = layers[:layer_count]
for &layer in layers {
layer.name = read_name(r) or_return
@@ -170,7 +167,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
node_count := 0
file.header = header^
- file.nodes = make([]Node, header.internal_node_count)
+ file.nodes = make([]Node, header.internal_node_count, allocator=allocator, loc=loc)
+ file.allocator = allocator
defer if err != nil {
nodes_destroy(file.nodes)
file.nodes = nil
@@ -198,15 +196,15 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
case .Geometry:
g: Node_Geometry
- g.vertex_count = read_value(r, u32le) or_return
- g.vertex_stack = read_layer_stack(r, g.vertex_count) or_return
- g.edge_corner_count = read_value(r, u32le) or_return
- g.corner_stack = read_layer_stack(r, g.edge_corner_count) or_return
+ g.vertex_count = read_value(r, u32le) or_return
+ g.vertex_stack = read_layer_stack(r, g.vertex_count, loc=loc) or_return
+ g.edge_corner_count = read_value(r, u32le) or_return
+ g.corner_stack = read_layer_stack(r, g.edge_corner_count, loc=loc) or_return
if header.version > 2 {
- g.edge_stack = read_layer_stack(r, g.edge_corner_count) or_return
+ g.edge_stack = read_layer_stack(r, g.edge_corner_count, loc=loc) or_return
}
- g.face_count = read_value(r, u32le) or_return
- g.face_stack = read_layer_stack(r, g.face_count) or_return
+ g.face_count = read_value(r, u32le) or_return
+ g.face_stack = read_layer_stack(r, g.face_count, loc=loc) or_return
node.content = g
@@ -233,4 +231,4 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
}
return
-}
+}
\ No newline at end of file
diff --git a/core/encoding/ini/ini.odin b/core/encoding/ini/ini.odin
new file mode 100644
index 000000000..eb0ad9e7c
--- /dev/null
+++ b/core/encoding/ini/ini.odin
@@ -0,0 +1,189 @@
+package encoding_ini
+
+import "base:runtime"
+import "base:intrinsics"
+import "core:strings"
+import "core:strconv"
+import "core:io"
+import "core:os"
+import "core:fmt"
+_ :: fmt
+
+Options :: struct {
+ comment: string,
+ key_lower_case: bool,
+}
+
+DEFAULT_OPTIONS :: Options {
+ comment = ";",
+ key_lower_case = false,
+}
+
+Iterator :: struct {
+ section: string,
+ _src: string,
+ options: Options,
+}
+
+iterator_from_string :: proc(src: string, options := DEFAULT_OPTIONS) -> Iterator {
+ return {
+ section = "",
+ options = options,
+ _src = src,
+ }
+}
+
+
+// Returns the raw `key` and `value`. `ok` will be false if no more key=value pairs can be found.
+// The key and value may be quoted, which may require the use of `strconv.unquote_string`.
+iterate :: proc(it: ^Iterator) -> (key, value: string, ok: bool) {
+ for line_ in strings.split_lines_iterator(&it._src) {
+ line := strings.trim_space(line_)
+
+ if len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '[' {
+ end_idx := strings.index_byte(line, ']')
+ if end_idx < 0 {
+ end_idx = len(line)
+ }
+ it.section = line[1:end_idx]
+ continue
+ }
+
+ if it.options.comment != "" && strings.has_prefix(line, it.options.comment) {
+ continue
+ }
+
+		equal := strings.index(line, " =") // check for keys that themselves contain '=', e.g. `ctrl+= = zoom_in`
+ quote := strings.index_byte(line, '"')
+ if equal < 0 || quote > 0 && quote < equal {
+ equal = strings.index_byte(line, '=')
+ if equal < 0 {
+ continue
+ }
+ } else {
+ equal += 1
+ }
+
+ key = strings.trim_space(line[:equal])
+ value = strings.trim_space(line[equal+1:])
+ ok = true
+ return
+ }
+
+ it.section = ""
+ return
+}
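A short, self-contained sketch of driving this iterator (section and key names are made up):

	package ini_iter_sketch

	import "core:encoding/ini"
	import "core:fmt"

	main :: proc() {
		it := ini.iterator_from_string("[window]\nwidth = 1280\nheight = 720\n")
		for key, value in ini.iterate(&it) {
			fmt.printf("[%s] %s = %s\n", it.section, key, value)
		}
	}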
+
+Map :: distinct map[string]map[string]string
+
+load_map_from_string :: proc(src: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error) {
+ unquote :: proc(val: string) -> (string, runtime.Allocator_Error) {
+ v, allocated, ok := strconv.unquote_string(val)
+ if !ok {
+ return strings.clone(val)
+ }
+ if allocated {
+ return v, nil
+ }
+ return strings.clone(v)
+
+ }
+
+ context.allocator = allocator
+
+ it := iterator_from_string(src, options)
+
+ for key, value in iterate(&it) {
+ section := it.section
+ if section not_in m {
+ section = strings.clone(section) or_return
+ m[section] = {}
+ }
+
+ // store key-value pair
+ pairs := &m[section]
+ new_key := unquote(key) or_return
+ if options.key_lower_case {
+ old_key := new_key
+ new_key = strings.to_lower(key) or_return
+ delete(old_key) or_return
+ }
+ pairs[new_key] = unquote(value) or_return
+ }
+ return
+}
+
+load_map_from_path :: proc(path: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error, ok: bool) {
+ data := os.read_entire_file(path, allocator) or_return
+ defer delete(data, allocator)
+ m, err = load_map_from_string(string(data), allocator, options)
+	ok = err == nil
+ defer if !ok {
+ delete_map(m)
+ }
+ return
+}
+
+save_map_to_string :: proc(m: Map, allocator: runtime.Allocator) -> (data: string) {
+ b := strings.builder_make(allocator)
+ _, _ = write_map(strings.to_writer(&b), m)
+ return strings.to_string(b)
+}
+
+delete_map :: proc(m: Map) {
+ allocator := m.allocator
+ for section, pairs in m {
+ for key, value in pairs {
+ delete(key, allocator)
+ delete(value, allocator)
+ }
+ delete(section)
+ }
+ delete(m)
+}
+
+write_section :: proc(w: io.Writer, name: string, n_written: ^int = nil) -> (n: int, err: io.Error) {
+ defer if n_written != nil { n_written^ += n }
+ io.write_byte (w, '[', &n) or_return
+ io.write_string(w, name, &n) or_return
+ io.write_byte (w, ']', &n) or_return
+ return
+}
+
+write_pair :: proc(w: io.Writer, key: string, value: $T, n_written: ^int = nil) -> (n: int, err: io.Error) {
+ defer if n_written != nil { n_written^ += n }
+ io.write_string(w, key, &n) or_return
+ io.write_string(w, " = ", &n) or_return
+ when intrinsics.type_is_string(T) {
+ val := string(value)
+ if len(val) > 0 && (val[0] == ' ' || val[len(val)-1] == ' ') {
+ io.write_quoted_string(w, val, n_written=&n) or_return
+ } else {
+ io.write_string(w, val, &n) or_return
+ }
+ } else {
+ n += fmt.wprint(w, value)
+ }
+ io.write_byte(w, '\n', &n) or_return
+ return
+}
+
+write_map :: proc(w: io.Writer, m: Map) -> (n: int, err: io.Error) {
+ section_index := 0
+ for section, pairs in m {
+ if section_index == 0 && section == "" {
+ // ignore section
+ } else {
+ write_section(w, section, &n) or_return
+ }
+ for key, value in pairs {
+ write_pair(w, key, value, &n) or_return
+ }
+ section_index += 1
+ }
+ return
+}
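And a sketch of the map-based API above: load a config, read one value, serialise it back (all names illustrative):

	package ini_map_sketch

	import "core:encoding/ini"
	import "core:fmt"

	main :: proc() {
		m, err := ini.load_map_from_string("[window]\nwidth = 1280\n", context.allocator)
		if err != nil {
			return
		}
		defer ini.delete_map(m)

		window := m["window"]
		fmt.println(window["width"]) // "1280"

		out := ini.save_map_to_string(m, context.allocator)
		defer delete(out)
		fmt.print(out)
	}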
diff --git a/core/encoding/json/marshal.odin b/core/encoding/json/marshal.odin
index b41a76856..dfca8b9db 100644
--- a/core/encoding/json/marshal.odin
+++ b/core/encoding/json/marshal.odin
@@ -62,8 +62,8 @@ Marshal_Options :: struct {
mjson_skipped_first_braces_end: bool,
}
-marshal :: proc(v: any, opt: Marshal_Options = {}, allocator := context.allocator) -> (data: []byte, err: Marshal_Error) {
- b := strings.builder_make(allocator)
+marshal :: proc(v: any, opt: Marshal_Options = {}, allocator := context.allocator, loc := #caller_location) -> (data: []byte, err: Marshal_Error) {
+ b := strings.builder_make(allocator, loc)
defer if err != nil {
strings.builder_destroy(&b)
}
diff --git a/core/encoding/json/parser.odin b/core/encoding/json/parser.odin
index 3973725dc..38f71edf6 100644
--- a/core/encoding/json/parser.odin
+++ b/core/encoding/json/parser.odin
@@ -28,27 +28,27 @@ make_parser_from_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, par
}
-parse :: proc(data: []byte, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
- return parse_string(string(data), spec, parse_integers, allocator)
+parse :: proc(data: []byte, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator, loc := #caller_location) -> (Value, Error) {
+ return parse_string(string(data), spec, parse_integers, allocator, loc)
}
-parse_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
+parse_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator, loc := #caller_location) -> (Value, Error) {
context.allocator = allocator
p := make_parser_from_string(data, spec, parse_integers, allocator)
switch p.spec {
case .JSON:
- return parse_object(&p)
+ return parse_object(&p, loc)
case .JSON5:
- return parse_value(&p)
+ return parse_value(&p, loc)
case .SJSON:
#partial switch p.curr_token.kind {
case .Ident, .String:
- return parse_object_body(&p, .EOF)
+ return parse_object_body(&p, .EOF, loc)
}
- return parse_value(&p)
+ return parse_value(&p, loc)
}
- return parse_object(&p)
+ return parse_object(&p, loc)
}
token_end_pos :: proc(tok: Token) -> Pos {
@@ -106,7 +106,7 @@ parse_comma :: proc(p: ^Parser) -> (do_break: bool) {
return false
}
-parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
+parse_value :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
err = .None
token := p.curr_token
#partial switch token.kind {
@@ -142,13 +142,13 @@ parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
case .String:
advance_token(p)
- return unquote_string(token, p.spec, p.allocator)
+ return unquote_string(token, p.spec, p.allocator, loc)
case .Open_Brace:
- return parse_object(p)
+ return parse_object(p, loc)
case .Open_Bracket:
- return parse_array(p)
+ return parse_array(p, loc)
case:
if p.spec != .JSON {
@@ -176,7 +176,7 @@ parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
return
}
-parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) {
+parse_array :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
err = .None
expect_token(p, .Open_Bracket) or_return
@@ -184,14 +184,14 @@ parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) {
array.allocator = p.allocator
defer if err != nil {
for elem in array {
- destroy_value(elem)
+ destroy_value(elem, loc=loc)
}
- delete(array)
+ delete(array, loc)
}
for p.curr_token.kind != .Close_Bracket {
- elem := parse_value(p) or_return
- append(&array, elem)
+ elem := parse_value(p, loc) or_return
+ append(&array, elem, loc)
if parse_comma(p) {
break
@@ -228,38 +228,39 @@ clone_string :: proc(s: string, allocator: mem.Allocator, loc := #caller_locatio
return
}
-parse_object_key :: proc(p: ^Parser, key_allocator: mem.Allocator) -> (key: string, err: Error) {
+parse_object_key :: proc(p: ^Parser, key_allocator: mem.Allocator, loc := #caller_location) -> (key: string, err: Error) {
tok := p.curr_token
if p.spec != .JSON {
if allow_token(p, .Ident) {
- return clone_string(tok.text, key_allocator)
+ return clone_string(tok.text, key_allocator, loc)
}
}
if tok_err := expect_token(p, .String); tok_err != nil {
err = .Expected_String_For_Object_Key
return
}
- return unquote_string(tok, p.spec, key_allocator)
+ return unquote_string(tok, p.spec, key_allocator, loc)
}
-parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, err: Error) {
- obj.allocator = p.allocator
+parse_object_body :: proc(p: ^Parser, end_token: Token_Kind, loc := #caller_location) -> (obj: Object, err: Error) {
+ obj = make(Object, allocator=p.allocator, loc=loc)
+
defer if err != nil {
for key, elem in obj {
- delete(key, p.allocator)
- destroy_value(elem)
+ delete(key, p.allocator, loc)
+ destroy_value(elem, loc=loc)
}
- delete(obj)
+ delete(obj, loc)
}
for p.curr_token.kind != end_token {
- key := parse_object_key(p, p.allocator) or_return
+ key := parse_object_key(p, p.allocator, loc) or_return
parse_colon(p) or_return
- elem := parse_value(p) or_return
+ elem := parse_value(p, loc) or_return
if key in obj {
err = .Duplicate_Object_Key
- delete(key, p.allocator)
+ delete(key, p.allocator, loc)
return
}
@@ -267,7 +268,7 @@ parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, er
// inserting empty key/values into the object and for those we do not
// want to allocate anything
if key != "" {
- reserve_error := reserve(&obj, len(obj) + 1)
+ reserve_error := reserve(&obj, len(obj) + 1, loc)
if reserve_error == mem.Allocator_Error.Out_Of_Memory {
return nil, .Out_Of_Memory
}
@@ -281,9 +282,9 @@ parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, er
return obj, .None
}
-parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) {
+parse_object :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
expect_token(p, .Open_Brace) or_return
- obj := parse_object_body(p, .Close_Brace) or_return
+ obj := parse_object_body(p, .Close_Brace, loc) or_return
expect_token(p, .Close_Brace) or_return
return obj, .None
}
@@ -480,4 +481,4 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
}
return string(b[:w]), nil
-}
+}
\ No newline at end of file
diff --git a/core/encoding/json/types.odin b/core/encoding/json/types.odin
index 73e183615..41eb21377 100644
--- a/core/encoding/json/types.odin
+++ b/core/encoding/json/types.odin
@@ -89,22 +89,22 @@ Error :: enum {
-destroy_value :: proc(value: Value, allocator := context.allocator) {
+destroy_value :: proc(value: Value, allocator := context.allocator, loc := #caller_location) {
context.allocator = allocator
#partial switch v in value {
case Object:
for key, elem in v {
- delete(key)
- destroy_value(elem)
+ delete(key, loc=loc)
+ destroy_value(elem, loc=loc)
}
- delete(v)
+ delete(v, loc=loc)
case Array:
for elem in v {
- destroy_value(elem)
+ destroy_value(elem, loc=loc)
}
- delete(v)
+ delete(v, loc=loc)
case String:
- delete(v)
+ delete(v, loc=loc)
}
}
diff --git a/core/encoding/xml/tokenizer.odin b/core/encoding/xml/tokenizer.odin
index 0f87c366b..2d06038b7 100644
--- a/core/encoding/xml/tokenizer.odin
+++ b/core/encoding/xml/tokenizer.odin
@@ -218,9 +218,7 @@ scan_identifier :: proc(t: ^Tokenizer) -> string {
for is_valid_identifier_rune(t.ch) {
advance_rune(t)
if t.ch == ':' {
- /*
- A namespaced attr can have at most two parts, `namespace:ident`.
- */
+ // A namespaced attr can have at most two parts, `namespace:ident`.
if namespaced {
break
}
@@ -268,14 +266,10 @@ scan_comment :: proc(t: ^Tokenizer) -> (comment: string, err: Error) {
return string(t.src[offset : t.offset - 1]), .None
}
-/*
- Skip CDATA
-*/
+// Skip CDATA
skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
if t.read_offset + len(CDATA_START) >= len(t.src) {
- /*
- Can't be the start of a CDATA tag.
- */
+ // Can't be the start of a CDATA tag.
return .None
}
@@ -290,9 +284,7 @@ skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
return .Premature_EOF
}
- /*
- Scan until the end of a CDATA tag.
- */
+ // Scan until the end of a CDATA tag.
if t.read_offset + len(CDATA_END) < len(t.src) {
if string(t.src[t.offset:][:len(CDATA_END)]) == CDATA_END {
t.read_offset += len(CDATA_END)
@@ -319,14 +311,10 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
case '<':
if peek_byte(t) == '!' {
if peek_byte(t, 1) == '[' {
- /*
- Might be the start of a CDATA tag.
- */
+ // Might be the start of a CDATA tag.
skip_cdata(t) or_return
} else if peek_byte(t, 1) == '-' && peek_byte(t, 2) == '-' {
- /*
- Comment start. Eat comment.
- */
+ // Comment start. Eat comment.
t.read_offset += 3
_ = scan_comment(t) or_return
}
@@ -342,17 +330,13 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
}
if t.ch == close {
- /*
- If it's not a CDATA or comment, it's the end of this body.
- */
+ // If it's not a CDATA or comment, it's the end of this body.
break loop
}
advance_rune(t)
}
- /*
- Strip trailing whitespace.
- */
+ // Strip trailing whitespace.
lit := string(t.src[offset : t.offset])
end := len(lit)
@@ -369,11 +353,6 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
if consume_close {
advance_rune(t)
}
-
- /*
- TODO: Handle decoding escape characters and unboxing CDATA.
- */
-
return lit, err
}
@@ -384,7 +363,7 @@ peek :: proc(t: ^Tokenizer) -> (token: Token) {
return token
}
-scan :: proc(t: ^Tokenizer) -> Token {
+scan :: proc(t: ^Tokenizer, multiline_string := false) -> Token {
skip_whitespace(t)
offset := t.offset
@@ -418,7 +397,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
case '"', '\'':
kind = .Invalid
- lit, err = scan_string(t, t.offset, ch, true, false)
+ lit, err = scan_string(t, t.offset, ch, true, multiline_string)
if err == .None {
kind = .String
}
@@ -435,4 +414,4 @@ scan :: proc(t: ^Tokenizer) -> Token {
lit = string(t.src[offset : t.offset])
}
return Token{kind, lit, pos}
-}
+}
\ No newline at end of file
diff --git a/core/encoding/xml/xml_reader.odin b/core/encoding/xml/xml_reader.odin
index 5b4b12948..b9656900f 100644
--- a/core/encoding/xml/xml_reader.odin
+++ b/core/encoding/xml/xml_reader.odin
@@ -203,9 +203,7 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
doc.elements = make([dynamic]Element, 1024, 1024, allocator)
- // strings.intern_init(&doc.intern, allocator, allocator)
-
- err = .Unexpected_Token
+ err = .Unexpected_Token
element, parent: Element_ID
open: Token
@@ -259,8 +257,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
case .Slash:
// Empty tag. Close it.
expect(t, .Gt) or_return
- parent = doc.elements[element].parent
- element = parent
+ parent = doc.elements[element].parent
+ element = parent
case:
error(t, t.offset, "Expected close tag, got: %#v\n", end_token)
@@ -276,8 +274,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
error(t, t.offset, "Mismatched Closing Tag. Expected %v, got %v\n", doc.elements[element].ident, ident.text)
return doc, .Mismatched_Closing_Tag
}
- parent = doc.elements[element].parent
- element = parent
+ parent = doc.elements[element].parent
+ element = parent
} else if open.kind == .Exclaim {
// <!
@@ -463,8 +461,8 @@ validate_options :: proc(options: Options) -> (validated: Options, err: Error) {
return validated, .None
}
-expect :: proc(t: ^Tokenizer, kind: Token_Kind) -> (tok: Token, err: Error) {
- tok = scan(t)
+expect :: proc(t: ^Tokenizer, kind: Token_Kind, multiline_string := false) -> (tok: Token, err: Error) {
+ tok = scan(t, multiline_string=multiline_string)
if tok.kind == kind { return tok, .None }
error(t, t.offset, "Expected \"%v\", got \"%v\".", kind, tok.kind)
@@ -480,7 +478,13 @@ parse_attribute :: proc(doc: ^Document) -> (attr: Attribute, offset: int, err: E
offset = t.offset - len(key.text)
_ = expect(t, .Eq) or_return
- value := expect(t, .String) or_return
+ value := expect(t, .String, multiline_string=true) or_return
+
+ normalized, normalize_err := entity.decode_xml(value.text, {.Normalize_Whitespace}, doc.allocator)
+ if normalize_err == .None {
+ append(&doc.strings_to_free, normalized)
+ value.text = normalized
+ }
attr.key = key.text
attr.val = value.text
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index 62cd95968..f9113a7a7 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -2,6 +2,7 @@ package fmt
import "base:intrinsics"
import "base:runtime"
+import "core:math"
import "core:math/bits"
import "core:mem"
import "core:io"
@@ -1494,7 +1495,7 @@ fmt_pointer :: proc(fi: ^Info, p: rawptr, verb: rune) {
u := u64(uintptr(p))
switch verb {
case 'p', 'v', 'w':
- if !fi.hash && verb == 'v' {
+ if !fi.hash {
io.write_string(fi.writer, "0x", &fi.n)
}
_fmt_int(fi, u, 16, false, 8*size_of(rawptr), __DIGITS_UPPER)
@@ -2968,6 +2969,21 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
fmt_bit_field(fi, v, verb, info, "")
}
}
+// This proc keeps the logic for deciding whether to print an intermediate
+// plus sign in complexes and quaternions in one place, for readability.
+@(private)
+_cq_should_print_intermediate_plus :: proc "contextless" (fi: ^Info, f: f64) -> bool {
+ if !fi.plus && f >= 0 {
+ #partial switch math.classify(f) {
+ case .Neg_Zero, .Inf:
+ // These two classes print their own signs.
+ return false
+ case:
+ return true
+ }
+ }
+ return false
+}
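+// With the helper above, a component that prints its own sign (+Inf or -0.0)
+// no longer gets an extra '+' in front of it, e.g. avoiding output like
+// `1++Infi` for a quaternion with an infinite imaginary part. (Illustrative example.)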
// Formats a complex number based on the given formatting verb
//
// Inputs:
@@ -2981,7 +2997,7 @@ fmt_complex :: proc(fi: ^Info, c: complex128, bits: int, verb: rune) {
case 'f', 'F', 'v', 'h', 'H', 'w':
r, i := real(c), imag(c)
fmt_float(fi, r, bits/2, verb)
- if !fi.plus && i >= 0 {
+ if _cq_should_print_intermediate_plus(fi, i) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, i, bits/2, verb)
@@ -3007,19 +3023,19 @@ fmt_quaternion :: proc(fi: ^Info, q: quaternion256, bits: int, verb: rune) {
fmt_float(fi, r, bits/4, verb)
- if !fi.plus && i >= 0 {
+ if _cq_should_print_intermediate_plus(fi, i) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, i, bits/4, verb)
io.write_rune(fi.writer, 'i', &fi.n)
- if !fi.plus && j >= 0 {
+ if _cq_should_print_intermediate_plus(fi, j) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, j, bits/4, verb)
io.write_rune(fi.writer, 'j', &fi.n)
- if !fi.plus && k >= 0 {
+ if _cq_should_print_intermediate_plus(fi, k) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, k, bits/4, verb)
diff --git a/core/fmt/fmt_os.odin b/core/fmt/fmt_os.odin
index a403dcd65..9de0d43be 100644
--- a/core/fmt/fmt_os.odin
+++ b/core/fmt/fmt_os.odin
@@ -1,5 +1,6 @@
//+build !freestanding
//+build !js
+//+build !orca
package fmt
import "base:runtime"
diff --git a/core/image/bmp/bmp.odin b/core/image/bmp/bmp.odin
new file mode 100644
index 000000000..64fc1d5a8
--- /dev/null
+++ b/core/image/bmp/bmp.odin
@@ -0,0 +1,746 @@
+// package bmp implements a Microsoft BMP image reader
+package core_image_bmp
+
+import "core:image"
+import "core:bytes"
+import "core:compress"
+import "core:mem"
+import "base:intrinsics"
+import "base:runtime"
+
+Error :: image.Error
+Image :: image.Image
+Options :: image.Options
+
+RGB_Pixel :: image.RGB_Pixel
+RGBA_Pixel :: image.RGBA_Pixel
+
+FILE_HEADER_SIZE :: 14
+INFO_STUB_SIZE :: FILE_HEADER_SIZE + size_of(image.BMP_Version)
+
+save_to_buffer :: proc(output: ^bytes.Buffer, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+ context.allocator = allocator
+
+ if img == nil {
+ return .Invalid_Input_Image
+ }
+
+ if output == nil {
+ return .Invalid_Output
+ }
+
+ pixels := img.width * img.height
+ if pixels == 0 || pixels > image.MAX_DIMENSIONS {
+ return .Invalid_Input_Image
+ }
+
+ // While the BMP spec (and our loader) supports more fanciful image types,
+ // `bmp.save` supports only 3- and 4-channel images with a bit depth of 8.
+ if img.depth != 8 || img.channels < 3 || img.channels > 4 {
+ return .Invalid_Input_Image
+ }
+
+ if img.channels * pixels != len(img.pixels.buf) {
+ return .Invalid_Input_Image
+ }
+
+ // Calculate and allocate size.
+ header_size := u32le(image.BMP_Version.V3)
+ total_header_size := header_size + 14 // file header = 14
+ pixel_count_bytes := u32le(align4(img.width * img.channels) * img.height)
+
+ header := image.BMP_Header{
+ // File header
+ magic = .Bitmap,
+ size = total_header_size + pixel_count_bytes,
+ _res1 = 0,
+ _res2 = 0,
+ pixel_offset = total_header_size,
+ // V3
+ info_size = .V3,
+ width = i32le(img.width),
+ height = i32le(img.height),
+ planes = 1,
+ bpp = u16le(8 * img.channels),
+ compression = .RGB,
+ image_size = pixel_count_bytes,
+ pels_per_meter = {2835, 2835}, // 72 DPI
+ colors_used = 0,
+ colors_important = 0,
+ }
+ written := 0
+
+ if resize(&output.buf, int(header.size)) != nil {
+ return .Unable_To_Allocate_Or_Resize
+ }
+
+ header_bytes := transmute([size_of(image.BMP_Header)]u8)header
+ written += int(total_header_size)
+ copy(output.buf[:], header_bytes[:written])
+
+ switch img.channels {
+ case 3:
+ row_bytes := img.width * img.channels
+ row_padded := align4(row_bytes)
+ pixels := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ for y in 0..<img.height {
+ row_offset := row_padded * (img.height - y - 1) + written
+ for x in 0..<img.width {
+ pix_offset := 3 * x
+ output.buf[row_offset + pix_offset + 0] = pixels[0].b
+ output.buf[row_offset + pix_offset + 1] = pixels[0].g
+ output.buf[row_offset + pix_offset + 2] = pixels[0].r
+ pixels = pixels[1:]
+ }
+ }
+
+ case 4:
+ row_bytes := img.width * img.channels
+ pixels := mem.slice_data_cast([]RGBA_Pixel, img.pixels.buf[:])
+ for y in 0..<img.height {
+ row_offset := row_bytes * (img.height - y - 1) + written
+ for x in 0..<img.width {
+ pix_offset := 4 * x
+ output.buf[row_offset + pix_offset + 0] = pixels[0].b
+ output.buf[row_offset + pix_offset + 1] = pixels[0].g
+ output.buf[row_offset + pix_offset + 2] = pixels[0].r
+ output.buf[row_offset + pix_offset + 3] = pixels[0].a
+ pixels = pixels[1:]
+ }
+ }
+ }
+ return
+}
+
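+// A minimal usage sketch (the `img` and `buf` names are illustrative, not part
+// of this package):
+//
+//	buf: bytes.Buffer
+//	defer bytes.buffer_destroy(&buf)
+//	if err := save_to_buffer(&buf, img); err == nil {
+//		// buf.buf[:] now holds a V3 BMP (14-byte file header + 40-byte info header).
+//	}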
+
+load_from_bytes :: proc(data: []byte, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ ctx := &compress.Context_Memory_Input{
+ input_data = data,
+ }
+
+ img, err = load_from_context(ctx, options, allocator)
+ return img, err
+}
+
+@(optimization_mode="speed")
+load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ context.allocator = allocator
+ options := options
+
+ // For compress.read_slice(), until that's rewritten to not use temp allocator
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+
+ if .info in options {
+ options |= {.return_metadata, .do_not_decompress_image}
+ options -= {.info}
+ }
+
+ if .return_header in options && .return_metadata in options {
+ options -= {.return_header}
+ }
+
+ info_buf: [size_of(image.BMP_Header)]u8
+
+ // Read file header (14) + info size (4)
+ stub_data := compress.read_slice(ctx, INFO_STUB_SIZE) or_return
+ copy(info_buf[:], stub_data[:])
+ stub_info := transmute(image.BMP_Header)info_buf
+
+ if stub_info.magic != .Bitmap {
+ for v in image.BMP_Magic {
+ if stub_info.magic == v {
+ return img, .Unsupported_OS2_File
+ }
+ }
+ return img, .Invalid_Signature
+ }
+
+ info: image.BMP_Header
+ switch stub_info.info_size {
+ case .OS2_v1:
+ // Read the remainder of the header
+ os2_data := compress.read_data(ctx, image.OS2_Header) or_return
+
+ info = transmute(image.BMP_Header)info_buf
+ info.width = i32le(os2_data.width)
+ info.height = i32le(os2_data.height)
+ info.planes = os2_data.planes
+ info.bpp = os2_data.bpp
+
+ switch info.bpp {
+ case 1, 4, 8, 24:
+ case:
+ return img, .Unsupported_BPP
+ }
+
+ case .ABBR_16 ..= .V5:
+ // This size range covers V3, V4, V5 and OS2v2 outright, but we can also handle truncated headers.
+ // Sometimes called BITMAPV2INFOHEADER or BITMAPV3INFOHEADER.
+ // Let's just try to process it.
+
+ to_read := int(stub_info.info_size) - size_of(image.BMP_Version)
+ info_data := compress.read_slice(ctx, to_read) or_return
+ copy(info_buf[INFO_STUB_SIZE:], info_data[:])
+
+ // Update info struct with the rest of the data we read
+ info = transmute(image.BMP_Header)info_buf
+
+ case:
+ return img, .Unsupported_BMP_Version
+ }
+
+ /* TODO(Jeroen): Add a "strict" option to catch these non-issues that violate spec?
+ if info.planes != 1 {
+ return img, .Invalid_Planes_Value
+ }
+ */
+
+ if img == nil {
+ img = new(Image)
+ }
+ img.which = .BMP
+
+ img.metadata = new_clone(image.BMP_Info{
+ info = info,
+ })
+
+ img.width = abs(int(info.width))
+ img.height = abs(int(info.height))
+ img.channels = 3
+ img.depth = 8
+
+ if img.width == 0 || img.height == 0 {
+ return img, .Invalid_Image_Dimensions
+ }
+
+ total_pixels := abs(img.width * img.height)
+ if total_pixels > image.MAX_DIMENSIONS {
+ return img, .Image_Dimensions_Too_Large
+ }
+
+ // TODO(Jeroen): Handle RGBA.
+ switch info.compression {
+ case .Bit_Fields, .Alpha_Bit_Fields:
+ switch info.bpp {
+ case 16, 32:
+ make_output(img, allocator) or_return
+ decode_rgb(ctx, img, info, allocator) or_return
+ case:
+ if is_os2(info.info_size) {
+ return img, .Unsupported_Compression
+ }
+ return img, .Unsupported_BPP
+ }
+ case .RGB:
+ make_output(img, allocator) or_return
+ decode_rgb(ctx, img, info, allocator) or_return
+ case .RLE4, .RLE8:
+ make_output(img, allocator) or_return
+ decode_rle(ctx, img, info, allocator) or_return
+ case .CMYK, .CMYK_RLE4, .CMYK_RLE8: fallthrough
+ case .PNG, .JPEG: fallthrough
+ case: return img, .Unsupported_Compression
+ }
+
+ // Flipped vertically
+ if info.height < 0 {
+ pixels := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ for y in 0..<img.height / 2 {
+ for x in 0..<img.width {
+ top := y * img.width + x
+ bot := (img.height - y - 1) * img.width + x
+
+ pixels[top], pixels[bot] = pixels[bot], pixels[top]
+ }
+ }
+ }
+ return
+}
+
+is_os2 :: proc(version: image.BMP_Version) -> (res: bool) {
+ #partial switch version {
+ case .OS2_v1, .OS2_v2: return true
+ case: return false
+ }
+}
+
+make_output :: proc(img: ^Image, allocator := context.allocator) -> (err: Error) {
+ assert(img != nil)
+ bytes_needed := img.channels * img.height * img.width
+ img.pixels.buf = make([dynamic]u8, bytes_needed, allocator)
+ if len(img.pixels.buf) != bytes_needed {
+ return .Unable_To_Allocate_Or_Resize
+ }
+ return
+}
+
+write :: proc(img: ^Image, x, y: int, pix: RGB_Pixel) -> (err: Error) {
+ if y >= img.height || x >= img.width {
+ return .Corrupt
+ }
+ out := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ assert(img.height >= 1 && img.width >= 1)
+ out[(img.height - y - 1) * img.width + x] = pix
+ return
+}
+
+Bitmask :: struct {
+ mask: [4]u32le `fmt:"b"`,
+ shift: [4]u32le,
+ bits: [4]u32le,
+}
+
+read_or_make_bit_masks :: proc(ctx: ^$C, info: image.BMP_Header) -> (res: Bitmask, read: int, err: Error) {
+ ctz :: intrinsics.count_trailing_zeros
+ c1s :: intrinsics.count_ones
+
+ #partial switch info.compression {
+ case .RGB:
+ switch info.bpp {
+ case 16:
+ return {
+ mask = {31 << 10, 31 << 5, 31, 0},
+ shift = { 10, 5, 0, 0},
+ bits = { 5, 5, 5, 0},
+ }, int(4 * info.colors_used), nil
+
+ case 32:
+ return {
+ mask = {255 << 16, 255 << 8, 255, 255 << 24},
+ shift = { 16, 8, 0, 24},
+ bits = { 8, 8, 8, 8},
+ }, int(4 * info.colors_used), nil
+
+ case: return {}, 0, .Unsupported_BPP
+ }
+ case .Bit_Fields, .Alpha_Bit_Fields:
+ bf := info.masks
+ alpha_mask := false
+ bit_count: u32le
+
+ #partial switch info.info_size {
+ case .ABBR_52 ..= .V5:
+ // All possible BMP header sizes 52+ bytes long, includes V4 + V5
+ // Bit fields were read as part of the header
+ // V3 header is 40 bytes. We need 56 at a minimum for RGBA bit fields in the next section.
+ if info.info_size >= .ABBR_56 {
+ alpha_mask = true
+ }
+
+ case .V3:
+ // Version 3 doesn't have a bit field embedded, but can still have a 3 or 4 color bit field.
+ // Because it wasn't read as part of the header, we need to read it now.
+
+ if info.compression == .Alpha_Bit_Fields {
+ bf = compress.read_data(ctx, [4]u32le) or_return
+ alpha_mask = true
+ read = 16
+ } else {
+ bf.xyz = compress.read_data(ctx, [3]u32le) or_return
+ read = 12
+ }
+
+ case:
+ // Bit fields are unhandled for this BMP version
+ return {}, 0, .Bitfield_Version_Unhandled
+ }
+
+ if alpha_mask {
+ res = {
+ mask = {bf.r, bf.g, bf.b, bf.a},
+ shift = {ctz(bf.r), ctz(bf.g), ctz(bf.b), ctz(bf.a)},
+ bits = {c1s(bf.r), c1s(bf.g), c1s(bf.b), c1s(bf.a)},
+ }
+
+ bit_count = res.bits.r + res.bits.g + res.bits.b + res.bits.a
+ } else {
+ res = {
+ mask = {bf.r, bf.g, bf.b, 0},
+ shift = {ctz(bf.r), ctz(bf.g), ctz(bf.b), 0},
+ bits = {c1s(bf.r), c1s(bf.g), c1s(bf.b), 0},
+ }
+
+ bit_count = res.bits.r + res.bits.g + res.bits.b
+ }
+
+ if bit_count > u32le(info.bpp) {
+ err = .Bitfield_Sum_Exceeds_BPP
+ }
+
+ overlapped := res.mask.r | res.mask.g | res.mask.b | res.mask.a
+ if c1s(overlapped) < bit_count {
+ err = .Bitfield_Overlapped
+ }
+ return res, read, err
+
+ case:
+ return {}, 0, .Unsupported_Compression
+ }
+ return
+}
+
+scale :: proc(val: $T, mask, shift, bits: u32le) -> (res: u8) {
+ if bits == 0 { return 0 } // Guard against malformed bit fields
+ v := (u32le(val) & mask) >> shift
+ mask_in := u32le(1 << bits) - 1
+ return u8(v * 255 / mask_in)
+}
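+// Worked example for illustration: for a 5-bit channel (mask 0b11111 << 10,
+// shift 10, bits 5), a raw value of 31 scales to 31 * 255 / 31 == 255.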
+
+decode_rgb :: proc(ctx: ^$C, img: ^Image, info: image.BMP_Header, allocator := context.allocator) -> (err: Error) {
+ pixel_offset := int(info.pixel_offset)
+ pixel_offset -= int(info.info_size) + FILE_HEADER_SIZE
+
+ palette: [256]RGBA_Pixel
+
+ // Palette size is info.colors_used if populated. If not, it's min(1 << bpp, offset to the pixels / channel count)
+ colors_used := min(256, 1 << info.bpp if info.colors_used == 0 else info.colors_used)
+ max_colors := pixel_offset / 3 if info.info_size == .OS2_v1 else pixel_offset / 4
+ colors_used = min(colors_used, u32le(max_colors))
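+ // (Worked example with a hypothetical file: an 8 bpp V3 image with colors_used == 0
+ // and 1024 bytes between the headers and the pixels gives min(256, 1 << 8, 1024 / 4) == 256 entries.)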
+
+ switch info.bpp {
+ case 1:
+ if info.info_size == .OS2_v1 {
+ // 2 x RGB palette instead of a variable RGBA palette
+ for i in 0..<colors_used {
+ palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
+ }
+ pixel_offset -= int(3 * colors_used)
+ } else {
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ }
+ skip_space(ctx, pixel_offset)
+
+ stride := (img.width + 7) / 8
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ shift := u8(7 - (x & 0x07))
+ p := (data[x / 8] >> shift) & 0x01
+ write(img, x, y, palette[p].bgr) or_return
+ }
+ }
+
+ case 2: // Non-standard on modern Windows, but was allowed on WinCE
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ skip_space(ctx, pixel_offset)
+
+ stride := (img.width + 3) / 4
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ shift := 6 - (x & 0x03) << 1
+ p := (data[x / 4] >> u8(shift)) & 0x03
+ write(img, x, y, palette[p].bgr) or_return
+ }
+ }
+
+ case 4:
+ if info.info_size == .OS2_v1 {
+ // 16 x RGB palette instead of a variable RGBA palette
+ for i in 0..<colors_used {
+ palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
+ }
+ pixel_offset -= int(3 * colors_used)
+ } else {
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ }
+ skip_space(ctx, pixel_offset)
+
+ stride := (img.width + 1) / 2
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ p := data[x / 2] >> 4 if x & 1 == 0 else data[x / 2]
+ write(img, x, y, palette[p & 0x0f].bgr) or_return
+ }
+ }
+
+ case 8:
+ if info.info_size == .OS2_v1 {
+ // 256 x RGB palette instead of a variable RGBA palette
+ for i in 0..<colors_used {
+ palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
+ }
+ pixel_offset -= int(3 * colors_used)
+ } else {
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ }
+ pixel_offset -= int(4 * colors_used)
+ }
+ skip_space(ctx, pixel_offset)
+
+ stride := align4(img.width)
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ for x in 0..<img.width {
+ write(img, x, y, palette[data[x]].bgr) or_return
+ }
+ }
+
+ case 16:
+ bm, read := read_or_make_bit_masks(ctx, info) or_return
+ // Skip optional palette and other data
+ pixel_offset -= read
+ skip_space(ctx, pixel_offset)
+
+ stride := align4(img.width * 2)
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ pixels := mem.slice_data_cast([]u16le, data)
+ for x in 0..<img.width {
+ v := pixels[x]
+ r := scale(v, bm.mask.r, bm.shift.r, bm.bits.r)
+ g := scale(v, bm.mask.g, bm.shift.g, bm.bits.g)
+ b := scale(v, bm.mask.b, bm.shift.b, bm.bits.b)
+ write(img, x, y, RGB_Pixel{r, g, b}) or_return
+ }
+ }
+
+ case 24:
+ // Eat useless palette and other padding
+ skip_space(ctx, pixel_offset)
+
+ stride := align4(img.width * 3)
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, stride) or_return
+ pixels := mem.slice_data_cast([]RGB_Pixel, data)
+ for x in 0..<img.width {
+ write(img, x, y, pixels[x].bgr) or_return
+ }
+ }
+
+ case 32:
+ bm, read := read_or_make_bit_masks(ctx, info) or_return
+ // Skip optional palette and other data
+ pixel_offset -= read
+ skip_space(ctx, pixel_offset)
+
+ for y in 0..<img.height {
+ data := compress.read_slice(ctx, img.width * size_of(RGBA_Pixel)) or_return
+ pixels := mem.slice_data_cast([]u32le, data)
+ for x in 0..<img.width {
+ v := pixels[x]
+ r := scale(v, bm.mask.r, bm.shift.r, bm.bits.r)
+ g := scale(v, bm.mask.g, bm.shift.g, bm.bits.g)
+ b := scale(v, bm.mask.b, bm.shift.b, bm.bits.b)
+ write(img, x, y, RGB_Pixel{r, g, b}) or_return
+ }
+ }
+
+ case:
+ return .Unsupported_BPP
+ }
+ return nil
+}
+
+decode_rle :: proc(ctx: ^$C, img: ^Image, info: image.BMP_Header, allocator := context.allocator) -> (err: Error) {
+ pixel_offset := int(info.pixel_offset)
+ pixel_offset -= int(info.info_size) + FILE_HEADER_SIZE
+
+ bytes_needed := size_of(RGB_Pixel) * img.height * img.width
+ if resize(&img.pixels.buf, bytes_needed) != nil {
+ return .Unable_To_Allocate_Or_Resize
+ }
+ out := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+ assert(len(out) == img.height * img.width)
+
+ palette: [256]RGBA_Pixel
+
+ switch info.bpp {
+ case 4:
+ colors_used := info.colors_used if info.colors_used > 0 else 16
+ colors_used = min(colors_used, 16)
+
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ pixel_offset -= size_of(RGBA_Pixel)
+ }
+ skip_space(ctx, pixel_offset)
+
+ pixel_size := info.size - info.pixel_offset
+ remaining := compress.input_size(ctx) or_return
+ if remaining < i64(pixel_size) {
+ return .Corrupt
+ }
+
+ data := make([]u8, int(pixel_size) + 4)
+ defer delete(data)
+
+ for i in 0..<pixel_size {
+ data[i] = image.read_u8(ctx) or_return
+ }
+
+ y, x := 0, 0
+ index := 0
+ for {
+ if len(data[index:]) < 2 {
+ return .Corrupt
+ }
+
+ if data[index] > 0 {
+ for count in 0..<data[index] {
+ if count & 1 == 1 {
+ write(img, x, y, palette[(data[index + 1] >> 0) & 0x0f].bgr)
+ } else {
+ write(img, x, y, palette[(data[index + 1] >> 4) & 0x0f].bgr)
+ }
+ x += 1
+ }
+ index += 2
+ } else {
+ switch data[index + 1] {
+ case 0: // EOL
+ x = 0; y += 1
+ index += 2
+ case 1: // EOB
+ return
+ case 2: // MOVE
+ x += int(data[index + 2])
+ y += int(data[index + 3])
+ index += 4
+ case: // Literals
+ run_length := int(data[index + 1])
+ aligned := (align4(run_length) >> 1) + 2
+
+ if index + aligned >= len(data) {
+ return .Corrupt
+ }
+
+ for count in 0..<run_length {
+ val := data[index + 2 + count / 2]
+ if count & 1 == 1 {
+ val &= 0xf
+ } else {
+ val = val >> 4
+ }
+ write(img, x, y, palette[val].bgr)
+ x += 1
+ }
+ index += aligned
+ }
+ }
+ }
+
+ case 8:
+ colors_used := info.colors_used if info.colors_used > 0 else 256
+ colors_used = min(colors_used, 256)
+
+ for i in 0..<colors_used {
+ palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
+ pixel_offset -= size_of(RGBA_Pixel)
+ }
+ skip_space(ctx, pixel_offset)
+
+ pixel_size := info.size - info.pixel_offset
+ remaining := compress.input_size(ctx) or_return
+ if remaining < i64(pixel_size) {
+ return .Corrupt
+ }
+
+ data := make([]u8, int(pixel_size) + 4)
+ defer delete(data)
+
+ for i in 0..<pixel_size {
+ data[i] = image.read_u8(ctx) or_return
+ }
+
+ y, x := 0, 0
+ index := 0
+ for {
+ if len(data[index:]) < 2 {
+ return .Corrupt
+ }
+
+ if data[index] > 0 {
+ for _ in 0..<data[index] {
+ write(img, x, y, palette[data[index + 1]].bgr)
+ x += 1
+ }
+ index += 2
+ } else {
+ switch data[index + 1] {
+ case 0: // EOL
+ x = 0; y += 1
+ index += 2
+ case 1: // EOB
+ return
+ case 2: // MOVE
+ x += int(data[index + 2])
+ y += int(data[index + 3])
+ index += 4
+ case: // Literals
+ run_length := int(data[index + 1])
+ aligned := align2(run_length) + 2
+
+ if index + aligned >= len(data) {
+ return .Corrupt
+ }
+ for count in 0..<run_length {
+ write(img, x, y, palette[data[index + 2 + count]].bgr)
+ x += 1
+ }
+ index += aligned
+ }
+ }
+ }
+
+ case:
+ return .Unsupported_BPP
+ }
+ return nil
+}
+
+align2 :: proc(width: int) -> (stride: int) {
+ stride = width
+ if width & 1 != 0 {
+ stride += 2 - (width & 1)
+ }
+ return
+}
+
+align4 :: proc(width: int) -> (stride: int) {
+ stride = width
+ if width & 3 != 0 {
+ stride += 4 - (width & 3)
+ }
+ return
+}
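+// e.g. align4(3) == 4 and align4(8) == 8; BMP rows (and RLE literal runs) are
+// padded to these boundaries, which is why the strides above use align4/align2.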
+
+skip_space :: proc(ctx: ^$C, bytes_to_skip: int) -> (err: Error) {
+ if bytes_to_skip < 0 {
+ return .Corrupt
+ }
+ for _ in 0..<bytes_to_skip {
+ image.read_u8(ctx) or_return
+ }
+ return
+}
+
+// Cleanup of image-specific data.
+destroy :: proc(img: ^Image) {
+ if img == nil {
+ // Nothing to do. Load must've returned with an error.
+ return
+ }
+
+ bytes.buffer_destroy(&img.pixels)
+ if v, ok := img.metadata.(^image.BMP_Info); ok {
+ free(v)
+ }
+ free(img)
+}
+
+@(init, private)
+_register :: proc() {
+ image.register(.BMP, load_from_bytes, destroy)
+}
\ No newline at end of file
diff --git a/core/image/bmp/bmp_js.odin b/core/image/bmp/bmp_js.odin
new file mode 100644
index 000000000..d87a7d2d5
--- /dev/null
+++ b/core/image/bmp/bmp_js.odin
@@ -0,0 +1,4 @@
+//+build js
+package core_image_bmp
+
+load :: proc{load_from_bytes, load_from_context}
diff --git a/core/image/bmp/bmp_os.odin b/core/image/bmp/bmp_os.odin
new file mode 100644
index 000000000..d20abc685
--- /dev/null
+++ b/core/image/bmp/bmp_os.odin
@@ -0,0 +1,34 @@
+//+build !js
+package core_image_bmp
+
+import "core:os"
+import "core:bytes"
+
+load :: proc{load_from_file, load_from_bytes, load_from_context}
+
+load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ context.allocator = allocator
+
+ data, ok := os.read_entire_file(filename)
+ defer delete(data)
+
+ if ok {
+ return load_from_bytes(data, options)
+ } else {
+ return nil, .Unable_To_Read_File
+ }
+}
+
+save :: proc{save_to_buffer, save_to_file}
+
+save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+ context.allocator = allocator
+
+ out := &bytes.Buffer{}
+ defer bytes.buffer_destroy(out)
+
+ save_to_buffer(out, img, options) or_return
+ write_ok := os.write_entire_file(output, out.buf[:])
+
+ return nil if write_ok else .Unable_To_Write_File
+}
\ No newline at end of file
diff --git a/core/image/common.odin b/core/image/common.odin
index b576a9521..fed2c1470 100644
--- a/core/image/common.odin
+++ b/core/image/common.odin
@@ -12,6 +12,7 @@ package image
import "core:bytes"
import "core:mem"
+import "core:io"
import "core:compress"
import "base:runtime"
@@ -62,6 +63,7 @@ Image_Metadata :: union #shared_nil {
^PNG_Info,
^QOI_Info,
^TGA_Info,
+ ^BMP_Info,
}
@@ -159,11 +161,13 @@ Error :: union #shared_nil {
Netpbm_Error,
PNG_Error,
QOI_Error,
+ BMP_Error,
compress.Error,
compress.General_Error,
compress.Deflate_Error,
compress.ZLIB_Error,
+ io.Error,
runtime.Allocator_Error,
}
@@ -197,6 +201,128 @@ General_Image_Error :: enum {
}
/*
+ BMP-specific
+*/
+BMP_Error :: enum {
+ None = 0,
+ Invalid_File_Size,
+ Unsupported_BMP_Version,
+ Unsupported_OS2_File,
+ Unsupported_Compression,
+ Unsupported_BPP,
+ Invalid_Stride,
+ Invalid_Color_Count,
+ Implausible_File_Size,
+ Bitfield_Version_Unhandled, // We don't (yet) handle bit fields for this BMP version.
+ Bitfield_Sum_Exceeds_BPP, // Total mask bit count > bpp
+ Bitfield_Overlapped, // Channel masks overlap
+}
+
+// img.metadata is wrapped in a struct in case we need to add to it later
+// without putting it in BMP_Header
+BMP_Info :: struct {
+ info: BMP_Header,
+}
+
+BMP_Magic :: enum u16le {
+ Bitmap = 0x4d42, // 'BM'
+ OS2_Bitmap_Array = 0x4142, // 'BA'
+ OS2_Icon = 0x4349, // 'IC'
+ OS2_Color_Icon = 0x4943, // 'CI'
+ OS2_Pointer = 0x5450, // 'PT'
+ OS2_Color_Pointer = 0x5043, // 'CP'
+}
+
+// See: http://justsolve.archiveteam.org/wiki/BMP#Well-known_versions
+BMP_Version :: enum u32le {
+ OS2_v1 = 12, // BITMAPCOREHEADER (Windows V2 / OS/2 version 1.0)
+ OS2_v2 = 64, // BITMAPCOREHEADER2 (OS/2 version 2.x)
+ V3 = 40, // BITMAPINFOHEADER
+ V4 = 108, // BITMAPV4HEADER
+ V5 = 124, // BITMAPV5HEADER
+
+ ABBR_16 = 16, // Abbreviated
+ ABBR_24 = 24, // ..
+ ABBR_48 = 48, // ..
+ ABBR_52 = 52, // ..
+ ABBR_56 = 56, // ..
+}
+
+BMP_Header :: struct #packed {
+ // File header
+ magic: BMP_Magic,
+ size: u32le,
+ _res1: u16le, // Reserved; must be zero
+ _res2: u16le, // Reserved; must be zero
+ pixel_offset: u32le, // Offset in bytes, from the beginning of BMP_Header to the pixel data
+ // V3
+ info_size: BMP_Version,
+ width: i32le,
+ height: i32le,
+ planes: u16le,
+ bpp: u16le,
+ compression: BMP_Compression,
+ image_size: u32le,
+ pels_per_meter: [2]u32le,
+ colors_used: u32le,
+ colors_important: u32le, // OS2_v2 is equal up to here
+ // V4
+ masks: [4]u32le `fmt:"32b"`,
+ colorspace: BMP_Logical_Color_Space,
+ endpoints: BMP_CIEXYZTRIPLE,
+ gamma: [3]BMP_GAMMA16_16,
+ // V5
+ intent: BMP_Gamut_Mapping_Intent,
+ profile_data: u32le,
+ profile_size: u32le,
+ reserved: u32le,
+}
+#assert(size_of(BMP_Header) == 138)
+
+OS2_Header :: struct #packed {
+ // BITMAPCOREHEADER minus info_size field
+ width: i16le,
+ height: i16le,
+ planes: u16le,
+ bpp: u16le,
+}
+#assert(size_of(OS2_Header) == 8)
+
+BMP_Compression :: enum u32le {
+ RGB = 0x0000,
+ RLE8 = 0x0001,
+ RLE4 = 0x0002,
+ Bit_Fields = 0x0003, // If Windows
+ Huffman1D = 0x0003, // If OS2v2
+ JPEG = 0x0004, // If Windows
+ RLE24 = 0x0004, // If OS2v2
+ PNG = 0x0005,
+ Alpha_Bit_Fields = 0x0006,
+ CMYK = 0x000B,
+ CMYK_RLE8 = 0x000C,
+ CMYK_RLE4 = 0x000D,
+}
+
+BMP_Logical_Color_Space :: enum u32le {
+ CALIBRATED_RGB = 0x00000000,
+ sRGB = 0x73524742, // 'sRGB'
+ WINDOWS_COLOR_SPACE = 0x57696E20, // 'Win '
+}
+
+BMP_FXPT2DOT30 :: u32le
+BMP_CIEXYZ :: [3]BMP_FXPT2DOT30
+BMP_CIEXYZTRIPLE :: [3]BMP_CIEXYZ
+BMP_GAMMA16_16 :: [2]u16le
+
+BMP_Gamut_Mapping_Intent :: enum u32le {
+ INVALID = 0x00000000, // If not V5, this field will just be zero-initialized and not valid.
+ ABS_COLORIMETRIC = 0x00000008,
+ BUSINESS = 0x00000001,
+ GRAPHICS = 0x00000002,
+ IMAGES = 0x00000004,
+}
+
+/*
Netpbm-specific definitions
*/
Netpbm_Format :: enum {
@@ -1133,6 +1259,40 @@ apply_palette_rgba :: proc(img: ^Image, palette: [256]RGBA_Pixel, allocator := c
}
apply_palette :: proc{apply_palette_rgb, apply_palette_rgba}
+blend_single_channel :: #force_inline proc(fg, alpha, bg: $T) -> (res: T) where T == u8 || T == u16 {
+ MAX :: 256 when T == u8 else 65536
+
+ c := u32(fg) * (MAX - u32(alpha)) + u32(bg) * (1 + u32(alpha))
+ return T(c & (MAX - 1))
+}
+
+blend_pixel :: #force_inline proc(fg: [$N]$T, alpha: T, bg: [N]T) -> (res: [N]T) where (T == u8 || T == u16), N >= 1 && N <= 4 {
+ MAX :: 256 when T == u8 else 65536
+
+ when N == 1 {
+ r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
+ return {T(r & (MAX - 1))}
+ }
+ when N == 2 {
+ r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
+ g := u32(fg.g) * (MAX - u32(alpha)) + u32(bg.g) * (1 + u32(alpha))
+ return {T(r & (MAX - 1)), T(g & (MAX - 1))}
+ }
+ when N == 3 || N == 4 {
+ r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
+ g := u32(fg.g) * (MAX - u32(alpha)) + u32(bg.g) * (1 + u32(alpha))
+ b := u32(fg.b) * (MAX - u32(alpha)) + u32(bg.b) * (1 + u32(alpha))
+
+ when N == 3 {
+ return {T(r & (MAX - 1)), T(g & (MAX - 1)), T(b & (MAX - 1))}
+ } else {
+ return {T(r & (MAX - 1)), T(g & (MAX - 1)), T(b & (MAX - 1)), MAX - 1}
+ }
+ }
+ unreachable()
+}
+blend :: proc{blend_single_channel, blend_pixel}
+
// Replicates grayscale values into RGB(A) 8- or 16-bit images as appropriate.
// Returns early with `false` if already an RGB(A) image.
@@ -1245,4 +1405,4 @@ write_bytes :: proc(buf: ^bytes.Buffer, data: []u8) -> (err: compress.General_Er
return .Resize_Failed
}
return nil
-}
+}
\ No newline at end of file
diff --git a/core/image/png/png.odin b/core/image/png/png.odin
index 4bb070da8..aa1c5f781 100644
--- a/core/image/png/png.odin
+++ b/core/image/png/png.odin
@@ -597,7 +597,7 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
dsc := depth_scale_table
scale := dsc[info.header.bit_depth]
if scale != 1 {
- key := mem.slice_data_cast([]u16be, c.data)[0] * u16be(scale)
+ key := (^u16be)(raw_data(c.data))^ * u16be(scale)
c.data = []u8{0, u8(key & 255)}
}
}
@@ -735,59 +735,48 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
return {}, .Unable_To_Allocate_Or_Resize
}
- i := 0; j := 0
-
// If we don't have transparency or drop it without applying it, we can do this:
if (!seen_trns || (seen_trns && .alpha_drop_if_present in options && .alpha_premultiply not_in options)) && .alpha_add_if_missing not_in options {
- for h := 0; h < int(img.height); h += 1 {
- for w := 0; w < int(img.width); w += 1 {
- c := _plte.entries[temp.buf[i]]
- t.buf[j ] = c.r
- t.buf[j+1] = c.g
- t.buf[j+2] = c.b
- i += 1; j += 3
- }
+ output := mem.slice_data_cast([]image.RGB_Pixel, t.buf[:])
+ for pal_idx, idx in temp.buf {
+ output[idx] = _plte.entries[pal_idx]
}
} else if add_alpha || .alpha_drop_if_present in options {
- bg := [3]f32{0, 0, 0}
+ bg := PLTE_Entry{0, 0, 0}
if premultiply && seen_bkgd {
c16 := img.background.([3]u16)
- bg = [3]f32{f32(c16.r), f32(c16.g), f32(c16.b)}
+ bg = {u8(c16.r), u8(c16.g), u8(c16.b)}
}
no_alpha := (.alpha_drop_if_present in options || premultiply) && .alpha_add_if_missing not_in options
blend_background := seen_bkgd && .blend_background in options
- for h := 0; h < int(img.height); h += 1 {
- for w := 0; w < int(img.width); w += 1 {
- index := temp.buf[i]
+ if no_alpha {
+ output := mem.slice_data_cast([]image.RGB_Pixel, t.buf[:])
+ for orig, idx in temp.buf {
+ c := _plte.entries[orig]
+ a := int(orig) < len(trns.data) ? trns.data[orig] : 255
- c := _plte.entries[index]
- a := int(index) < len(trns.data) ? trns.data[index] : 255
- alpha := f32(a) / 255.0
+ if blend_background {
+ output[idx] = image.blend(c, a, bg)
+ } else if premultiply {
+ output[idx] = image.blend(PLTE_Entry{}, a, c)
+ }
+ }
+ } else {
+ output := mem.slice_data_cast([]image.RGBA_Pixel, t.buf[:])
+ for orig, idx in temp.buf {
+ c := _plte.entries[orig]
+ a := int(orig) < len(trns.data) ? trns.data[orig] : 255
if blend_background {
- c.r = u8((1.0 - alpha) * bg[0] + f32(c.r) * alpha)
- c.g = u8((1.0 - alpha) * bg[1] + f32(c.g) * alpha)
- c.b = u8((1.0 - alpha) * bg[2] + f32(c.b) * alpha)
+ c = image.blend(c, a, bg)
a = 255
} else if premultiply {
- c.r = u8(f32(c.r) * alpha)
- c.g = u8(f32(c.g) * alpha)
- c.b = u8(f32(c.b) * alpha)
+ c = image.blend(PLTE_Entry{}, a, c)
}
- t.buf[j ] = c.r
- t.buf[j+1] = c.g
- t.buf[j+2] = c.b
- i += 1
-
- if no_alpha {
- j += 3
- } else {
- t.buf[j+3] = u8(a)
- j += 4
- }
+ output[idx] = {c.r, c.g, c.b, u8(a)}
}
}
} else {
@@ -1015,8 +1004,8 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
return {}, .Unable_To_Allocate_Or_Resize
}
- p := mem.slice_data_cast([]u8, temp.buf[:])
- o := mem.slice_data_cast([]u8, t.buf[:])
+ p := temp.buf[:]
+ o := t.buf[:]
switch raw_image_channels {
case 1:
@@ -1627,7 +1616,6 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^image.PNG_IH
return nil
}
-
@(init, private)
_register :: proc() {
image.register(.PNG, load_from_bytes, destroy)
diff --git a/core/log/file_console_logger.odin b/core/log/file_console_logger.odin
index bcce67578..fb968ccb6 100644
--- a/core/log/file_console_logger.odin
+++ b/core/log/file_console_logger.odin
@@ -1,6 +1,7 @@
//+build !freestanding
package log
+import "core:encoding/ansi"
import "core:fmt"
import "core:strings"
import "core:os"
@@ -70,18 +71,10 @@ file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string
backing: [1024]byte //NOTE(Hoej): 1024 might be too much for a header backing, unless somebody has really long paths.
buf := strings.builder_from_bytes(backing[:])
- do_level_header(options, level, &buf)
+ do_level_header(options, &buf, level)
when time.IS_SUPPORTED {
- if Full_Timestamp_Opts & options != nil {
- fmt.sbprint(&buf, "[")
- t := time.now()
- y, m, d := time.date(t)
- h, min, s := time.clock(t)
- if .Date in options { fmt.sbprintf(&buf, "%d-%02d-%02d ", y, m, d) }
- if .Time in options { fmt.sbprintf(&buf, "%02d:%02d:%02d", h, min, s) }
- fmt.sbprint(&buf, "] ")
- }
+ do_time_header(options, &buf, time.now())
}
do_location_header(options, &buf, location)
@@ -99,12 +92,12 @@ file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string
fmt.fprintf(h, "%s%s\n", strings.to_string(buf), text)
}
-do_level_header :: proc(opts: Options, level: Level, str: ^strings.Builder) {
+do_level_header :: proc(opts: Options, str: ^strings.Builder, level: Level) {
- RESET :: "\x1b[0m"
- RED :: "\x1b[31m"
- YELLOW :: "\x1b[33m"
- DARK_GREY :: "\x1b[90m"
+ RESET :: ansi.CSI + ansi.RESET + ansi.SGR
+ RED :: ansi.CSI + ansi.FG_RED + ansi.SGR
+ YELLOW :: ansi.CSI + ansi.FG_YELLOW + ansi.SGR
+ DARK_GREY :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR
col := RESET
switch level {
@@ -125,6 +118,24 @@ do_level_header :: proc(opts: Options, level: Level, str: ^strings.Builder) {
}
}
+do_time_header :: proc(opts: Options, buf: ^strings.Builder, t: time.Time) {
+ when time.IS_SUPPORTED {
+ if Full_Timestamp_Opts & opts != nil {
+ fmt.sbprint(buf, "[")
+ y, m, d := time.date(t)
+ h, min, s := time.clock(t)
+ if .Date in opts {
+ fmt.sbprintf(buf, "%d-%02d-%02d", y, m, d)
+ if .Time in opts {
+ fmt.sbprint(buf, " ")
+ }
+ }
+ if .Time in opts { fmt.sbprintf(buf, "%02d:%02d:%02d", h, min, s) }
+ fmt.sbprint(buf, "] ")
+ }
+ }
+}
+
do_location_header :: proc(opts: Options, buf: ^strings.Builder, location := #caller_location) {
if Location_Header_Opts & opts == nil {
return
diff --git a/core/log/multi_logger.odin b/core/log/multi_logger.odin
index 55c0f1436..96d0f3dbd 100644
--- a/core/log/multi_logger.odin
+++ b/core/log/multi_logger.odin
@@ -12,11 +12,10 @@ create_multi_logger :: proc(logs: ..Logger) -> Logger {
return Logger{multi_logger_proc, data, Level.Debug, nil}
}
-destroy_multi_logger :: proc(log : ^Logger) {
+destroy_multi_logger :: proc(log: Logger) {
data := (^Multi_Logger_Data)(log.data)
delete(data.loggers)
- free(log.data)
- log^ = nil_logger()
+ free(data)
}
multi_logger_proc :: proc(logger_data: rawptr, level: Level, text: string,
diff --git a/core/math/big/combinatorics.odin b/core/math/big/combinatorics.odin
new file mode 100644
index 000000000..87c76d830
--- /dev/null
+++ b/core/math/big/combinatorics.odin
@@ -0,0 +1,60 @@
+package math_big
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be ordered.
+*/
+permutations_with_repetition :: int_pow_int
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be ordered without any repeats.
+*/
+permutations_without_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
+ if n == r {
+ return factorial(dest, n)
+ }
+
+ tmp := &Int{}
+ defer internal_destroy(tmp)
+
+ // n!
+ // --------
+ // (n - r)!
+ factorial(dest, n) or_return
+ factorial(tmp, n - r) or_return
+ div(dest, dest, tmp) or_return
+
+ return
+}
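+// e.g. permutations_without_repetition(dest, 5, 2) sets dest to 5! / 3! == 20.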
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be chosen.
+
+ Also known as the multiset coefficient or (n multichoose k).
+*/
+combinations_with_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
+ // (n + r - 1)!
+ // ------------
+ // r! (n - 1)!
+ return combinations_without_repetition(dest, n + r - 1, r)
+}
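+// e.g. combinations_with_repetition(dest, 5, 2) sets dest to C(5 + 2 - 1, 2) == 15.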
+
+/*
+ With `n` items, calculate how many ways that `r` of them can be chosen without any repeats.
+
+ Also known as the binomial coefficient or (n choose k).
+*/
+combinations_without_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
+ tmp_a, tmp_b := &Int{}, &Int{}
+ defer internal_destroy(tmp_a, tmp_b)
+
+ // n!
+ // ------------
+ // r! (n - r)!
+ factorial(dest, n) or_return
+ factorial(tmp_a, r) or_return
+ factorial(tmp_b, n - r) or_return
+ mul(tmp_a, tmp_a, tmp_b) or_return
+ div(dest, dest, tmp_a) or_return
+
+ return
+}
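+// e.g. combinations_without_repetition(dest, 5, 2) sets dest to 5! / (2! * 3!) == 10.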
diff --git a/core/math/big/prime.odin b/core/math/big/prime.odin
index 5e7c02f37..7fc78c7e5 100644
--- a/core/math/big/prime.odin
+++ b/core/math/big/prime.odin
@@ -1188,9 +1188,6 @@ internal_random_prime :: proc(a: ^Int, size_in_bits: int, trials: int, flags :=
flags := flags
trials := trials
- t := &Int{}
- defer internal_destroy(t)
-
/*
Sanity check the input.
*/
diff --git a/core/math/big/radix.odin b/core/math/big/radix.odin
index f4eed879f..a5100e478 100644
--- a/core/math/big/radix.odin
+++ b/core/math/big/radix.odin
@@ -315,6 +315,7 @@ int_atoi :: proc(res: ^Int, input: string, radix := i8(10), allocator := context
atoi :: proc { int_atoi, }
+string_to_int :: int_atoi
/*
We size for `string` by default.
diff --git a/core/math/cmplx/cmplx_trig.odin b/core/math/cmplx/cmplx_trig.odin
index 7ca404fab..15e757506 100644
--- a/core/math/cmplx/cmplx_trig.odin
+++ b/core/math/cmplx/cmplx_trig.odin
@@ -350,7 +350,7 @@ _reduce_pi_f64 :: proc "contextless" (x: f64) -> f64 #no_bounds_check {
// that is, 1/PI = SUM bdpi[i]*2^(-64*i).
// 19 64-bit digits give 1216 bits of precision
// to handle the largest possible f64 exponent.
- @static bdpi := [?]u64{
+ @(static, rodata) bdpi := [?]u64{
0x0000000000000000,
0x517cc1b727220a94,
0xfe13abe8fa9a6ee0,
diff --git a/core/math/linalg/general.odin b/core/math/linalg/general.odin
index 51dfd2360..37c0447cb 100644
--- a/core/math/linalg/general.odin
+++ b/core/math/linalg/general.odin
@@ -3,6 +3,7 @@ package linalg
import "core:math"
import "base:builtin"
import "base:intrinsics"
+import "base:runtime"
// Generic
@@ -223,33 +224,27 @@ quaternion_mul_quaternion :: proc "contextless" (q1, q2: $Q) -> Q where IS_QUATE
@(require_results)
quaternion64_mul_vector3 :: proc "contextless" (q: $Q/quaternion64, v: $V/[3]$F/f16) -> V {
- Raw_Quaternion :: struct {xyz: [3]f16, r: f16}
-
- q := transmute(Raw_Quaternion)q
+ q := transmute(runtime.Raw_Quaternion64_Vector_Scalar)q
v := v
- t := cross(2*q.xyz, v)
- return V(v + q.r*t + cross(q.xyz, t))
+ t := cross(2*q.vector, v)
+ return V(v + q.scalar*t + cross(q.vector, t))
}
@(require_results)
quaternion128_mul_vector3 :: proc "contextless" (q: $Q/quaternion128, v: $V/[3]$F/f32) -> V {
- Raw_Quaternion :: struct {xyz: [3]f32, r: f32}
-
- q := transmute(Raw_Quaternion)q
+ q := transmute(runtime.Raw_Quaternion128_Vector_Scalar)q
v := v
- t := cross(2*q.xyz, v)
- return V(v + q.r*t + cross(q.xyz, t))
+ t := cross(2*q.vector, v)
+ return V(v + q.scalar*t + cross(q.vector, t))
}
@(require_results)
quaternion256_mul_vector3 :: proc "contextless" (q: $Q/quaternion256, v: $V/[3]$F/f64) -> V {
- Raw_Quaternion :: struct {xyz: [3]f64, r: f64}
-
- q := transmute(Raw_Quaternion)q
+ q := transmute(runtime.Raw_Quaternion256_Vector_Scalar)q
v := v
- t := cross(2*q.xyz, v)
- return V(v + q.r*t + cross(q.xyz, t))
+ t := cross(2*q.vector, v)
+ return V(v + q.scalar*t + cross(q.vector, t))
}
quaternion_mul_vector3 :: proc{quaternion64_mul_vector3, quaternion128_mul_vector3, quaternion256_mul_vector3}
diff --git a/core/math/linalg/specific.odin b/core/math/linalg/specific.odin
index 41d0e5344..b841f0610 100644
--- a/core/math/linalg/specific.odin
+++ b/core/math/linalg/specific.odin
@@ -527,7 +527,7 @@ angle_from_quaternion :: proc{
@(require_results)
axis_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> Vector3f16 {
t1 := 1 - q.w*q.w
- if t1 < 0 {
+ if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
@@ -536,7 +536,7 @@ axis_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> Vector3f16
@(require_results)
axis_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> Vector3f32 {
t1 := 1 - q.w*q.w
- if t1 < 0 {
+ if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
@@ -545,7 +545,7 @@ axis_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> Vector3f32
@(require_results)
axis_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> Vector3f64 {
t1 := 1 - q.w*q.w
- if t1 < 0 {
+ if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
diff --git a/core/math/linalg/specific_euler_angles_f16.odin b/core/math/linalg/specific_euler_angles_f16.odin
index bacda163e..1e9ded9ab 100644
--- a/core/math/linalg/specific_euler_angles_f16.odin
+++ b/core/math/linalg/specific_euler_angles_f16.odin
@@ -159,7 +159,7 @@ roll_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> f16 {
@(require_results)
pitch_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> f16 {
- y := 2 * (q.y*q.z + q.w*q.w)
+ y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F16_EPSILON && abs(y) <= F16_EPSILON {
diff --git a/core/math/linalg/specific_euler_angles_f32.odin b/core/math/linalg/specific_euler_angles_f32.odin
index b9957034f..e33b1f095 100644
--- a/core/math/linalg/specific_euler_angles_f32.odin
+++ b/core/math/linalg/specific_euler_angles_f32.odin
@@ -159,7 +159,7 @@ roll_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> f32 {
@(require_results)
pitch_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> f32 {
- y := 2 * (q.y*q.z + q.w*q.w)
+ y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F32_EPSILON && abs(y) <= F32_EPSILON {
diff --git a/core/math/linalg/specific_euler_angles_f64.odin b/core/math/linalg/specific_euler_angles_f64.odin
index 8001d080a..9b5cf4b56 100644
--- a/core/math/linalg/specific_euler_angles_f64.odin
+++ b/core/math/linalg/specific_euler_angles_f64.odin
@@ -159,7 +159,7 @@ roll_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> f64 {
@(require_results)
pitch_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> f64 {
- y := 2 * (q.y*q.z + q.w*q.w)
+ y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F64_EPSILON && abs(y) <= F64_EPSILON {
diff --git a/core/math/math.odin b/core/math/math.odin
index 8d85c2381..3d0ab3c4e 100644
--- a/core/math/math.odin
+++ b/core/math/math.odin
@@ -130,10 +130,10 @@ pow10 :: proc{
@(require_results)
pow10_f16 :: proc "contextless" (n: f16) -> f16 {
- @static pow10_pos_tab := [?]f16{
+ @(static, rodata) pow10_pos_tab := [?]f16{
1e00, 1e01, 1e02, 1e03, 1e04,
}
- @static pow10_neg_tab := [?]f16{
+ @(static, rodata) pow10_neg_tab := [?]f16{
1e-00, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 1e-06, 1e-07,
}
@@ -151,13 +151,13 @@ pow10_f16 :: proc "contextless" (n: f16) -> f16 {
@(require_results)
pow10_f32 :: proc "contextless" (n: f32) -> f32 {
- @static pow10_pos_tab := [?]f32{
+ @(static, rodata) pow10_pos_tab := [?]f32{
1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
1e30, 1e31, 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38,
}
- @static pow10_neg_tab := [?]f32{
+ @(static, rodata) pow10_neg_tab := [?]f32{
1e-00, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 1e-06, 1e-07, 1e-08, 1e-09,
1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 1e-15, 1e-16, 1e-17, 1e-18, 1e-19,
1e-20, 1e-21, 1e-22, 1e-23, 1e-24, 1e-25, 1e-26, 1e-27, 1e-28, 1e-29,
@@ -179,16 +179,16 @@ pow10_f32 :: proc "contextless" (n: f32) -> f32 {
@(require_results)
pow10_f64 :: proc "contextless" (n: f64) -> f64 {
- @static pow10_tab := [?]f64{
+ @(static, rodata) pow10_tab := [?]f64{
1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
1e30, 1e31,
}
- @static pow10_pos_tab32 := [?]f64{
+ @(static, rodata) pow10_pos_tab32 := [?]f64{
1e00, 1e32, 1e64, 1e96, 1e128, 1e160, 1e192, 1e224, 1e256, 1e288,
}
- @static pow10_neg_tab32 := [?]f64{
+ @(static, rodata) pow10_neg_tab32 := [?]f64{
1e-00, 1e-32, 1e-64, 1e-96, 1e-128, 1e-160, 1e-192, 1e-224, 1e-256, 1e-288, 1e-320,
}
@@ -1274,7 +1274,7 @@ binomial :: proc "contextless" (n, k: int) -> int {
@(require_results)
factorial :: proc "contextless" (n: int) -> int {
when size_of(int) == size_of(i64) {
- @static table := [21]int{
+ @(static, rodata) table := [21]int{
1,
1,
2,
@@ -1298,7 +1298,7 @@ factorial :: proc "contextless" (n: int) -> int {
2_432_902_008_176_640_000,
}
} else {
- @static table := [13]int{
+ @(static, rodata) table := [13]int{
1,
1,
2,
diff --git a/core/math/math_gamma.odin b/core/math/math_gamma.odin
index 00d4b7316..9f5a364d3 100644
--- a/core/math/math_gamma.odin
+++ b/core/math/math_gamma.odin
@@ -67,7 +67,7 @@ package math
// masks any imprecision in the polynomial.
@(private="file", require_results)
stirling :: proc "contextless" (x: f64) -> (f64, f64) {
- @(static) gamS := [?]f64{
+ @(static, rodata) gamS := [?]f64{
+7.87311395793093628397e-04,
-2.29549961613378126380e-04,
-2.68132617805781232825e-03,
@@ -103,7 +103,7 @@ gamma_f64 :: proc "contextless" (x: f64) -> f64 {
return false
}
- @(static) gamP := [?]f64{
+ @(static, rodata) gamP := [?]f64{
1.60119522476751861407e-04,
1.19135147006586384913e-03,
1.04213797561761569935e-02,
@@ -112,7 +112,7 @@ gamma_f64 :: proc "contextless" (x: f64) -> f64 {
4.94214826801497100753e-01,
9.99999999999999996796e-01,
}
- @(static) gamQ := [?]f64{
+ @(static, rodata) gamQ := [?]f64{
-2.31581873324120129819e-05,
+5.39605580493303397842e-04,
-4.45641913851797240494e-03,
diff --git a/core/math/math_lgamma.odin b/core/math/math_lgamma.odin
index 0705d8564..828f17178 100644
--- a/core/math/math_lgamma.odin
+++ b/core/math/math_lgamma.odin
@@ -123,7 +123,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
return -x
}
- @static lgamA := [?]f64{
+ @(static, rodata) lgamA := [?]f64{
0h3FB3C467E37DB0C8,
0h3FD4A34CC4A60FAD,
0h3FB13E001A5562A7,
@@ -137,7 +137,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3EFA7074428CFA52,
0h3F07858E90A45837,
}
- @static lgamR := [?]f64{
+ @(static, rodata) lgamR := [?]f64{
1.0,
0h3FF645A762C4AB74,
0h3FE71A1893D3DCDC,
@@ -146,7 +146,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3F497DDACA41A95B,
0h3EDEBAF7A5B38140,
}
- @static lgamS := [?]f64{
+ @(static, rodata) lgamS := [?]f64{
0hBFB3C467E37DB0C8,
0h3FCB848B36E20878,
0h3FD4D98F4F139F59,
@@ -155,7 +155,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3F5E26B67368F239,
0h3F00BFECDD17E945,
}
- @static lgamT := [?]f64{
+ @(static, rodata) lgamT := [?]f64{
0h3FDEF72BC8EE38A2,
0hBFC2E4278DC6C509,
0h3FB08B4294D5419B,
@@ -172,7 +172,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0hBF347F24ECC38C38,
0h3F35FD3EE8C2D3F4,
}
- @static lgamU := [?]f64{
+ @(static, rodata) lgamU := [?]f64{
0hBFB3C467E37DB0C8,
0h3FE4401E8B005DFF,
0h3FF7475CD119BD6F,
@@ -180,7 +180,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3FCD4EAEF6010924,
0h3F8B678BBF2BAB09,
}
- @static lgamV := [?]f64{
+ @(static, rodata) lgamV := [?]f64{
1.0,
0h4003A5D7C2BD619C,
0h40010725A42B18F5,
@@ -188,7 +188,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3FBAAE55D6537C88,
0h3F6A5ABB57D0CF61,
}
- @static lgamW := [?]f64{
+ @(static, rodata) lgamW := [?]f64{
0h3FDACFE390C97D69,
0h3FB555555555553B,
0hBF66C16C16B02E5C,
diff --git a/core/math/math_sincos.odin b/core/math/math_sincos.odin
index 578876ac5..b616f410d 100644
--- a/core/math/math_sincos.odin
+++ b/core/math/math_sincos.odin
@@ -234,7 +234,7 @@ _trig_reduce_f64 :: proc "contextless" (x: f64) -> (j: u64, z: f64) #no_bounds_c
// that is, 4/pi = Sum bd_pi4[i]*2^(-64*i)
// 19 64-bit digits and the leading one bit give 1217 bits
// of precision to handle the largest possible f64 exponent.
- @static bd_pi4 := [?]u64{
+ @(static, rodata) bd_pi4 := [?]u64{
0x0000000000000001,
0x45f306dc9c882a53,
0xf84eafa3ea69bb81,
diff --git a/core/math/rand/exp.odin b/core/math/rand/exp.odin
index 719debe75..ebc849b2f 100644
--- a/core/math/rand/exp.odin
+++ b/core/math/rand/exp.odin
@@ -19,7 +19,7 @@ import "core:math"
exp_float64 :: proc(r: ^Rand = nil) -> f64 {
re :: 7.69711747013104972
- @(static)
+ @(static, rodata)
ke := [256]u32{
0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
@@ -74,7 +74,7 @@ exp_float64 :: proc(r: ^Rand = nil) -> f64 {
0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
0xe6da6ecf,
}
- @(static)
+ @(static, rodata)
we := [256]f32{
2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
@@ -141,7 +141,7 @@ exp_float64 :: proc(r: ^Rand = nil) -> f64 {
1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
}
- @(static)
+ @(static, rodata)
fe := [256]f32{
1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
diff --git a/core/math/rand/normal.odin b/core/math/rand/normal.odin
index f96163fe9..c8681db80 100644
--- a/core/math/rand/normal.odin
+++ b/core/math/rand/normal.odin
@@ -21,7 +21,7 @@ import "core:math"
norm_float64 :: proc(r: ^Rand = nil) -> f64 {
rn :: 3.442619855899
- @(static)
+ @(static, rodata)
kn := [128]u32{
0x76ad2212, 0x00000000, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
@@ -50,7 +50,7 @@ norm_float64 :: proc(r: ^Rand = nil) -> f64 {
0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
0x7ba90bdc, 0x7a722176, 0x77d664e5,
}
- @(static)
+ @(static, rodata)
wn := [128]f32{
1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
@@ -85,7 +85,7 @@ norm_float64 :: proc(r: ^Rand = nil) -> f64 {
1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
}
- @(static)
+ @(static, rodata)
fn := [128]f32{
1.00000000, 0.9635997, 0.9362827, 0.9130436, 0.89228165,
0.87324303, 0.8555006, 0.8387836, 0.8229072, 0.8077383,
diff --git a/core/mem/raw.odin b/core/mem/raw.odin
index 56790e959..f56206957 100644
--- a/core/mem/raw.odin
+++ b/core/mem/raw.odin
@@ -11,12 +11,15 @@ Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array
Raw_Map :: runtime.Raw_Map
Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
-Raw_Complex64 :: struct {real, imag: f32}
-Raw_Complex128 :: struct {real, imag: f64}
-Raw_Quaternion128 :: struct {imag, jmag, kmag: f32, real: f32}
-Raw_Quaternion256 :: struct {imag, jmag, kmag: f64, real: f64}
-Raw_Quaternion128_Vector_Scalar :: struct {vector: [3]f32, scalar: f32}
-Raw_Quaternion256_Vector_Scalar :: struct {vector: [3]f64, scalar: f64}
+Raw_Complex32 :: runtime.Raw_Complex32
+Raw_Complex64 :: runtime.Raw_Complex64
+Raw_Complex128 :: runtime.Raw_Complex128
+Raw_Quaternion64 :: runtime.Raw_Quaternion64
+Raw_Quaternion128 :: runtime.Raw_Quaternion128
+Raw_Quaternion256 :: runtime.Raw_Quaternion256
+Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar
+Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar
+Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar
make_any :: proc "contextless" (data: rawptr, id: typeid) -> any {
return transmute(any)Raw_Any{data, id}
diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin
new file mode 100644
index 000000000..f5e428d87
--- /dev/null
+++ b/core/mem/rollback_stack_allocator.odin
@@ -0,0 +1,341 @@
+package mem
+
+// The Rollback Stack Allocator was designed for the test runner to be fast,
+// able to grow, and respect the Tracking Allocator's requirement for
+// individual frees. It is not overly concerned with fragmentation, however.
+//
+// It has support for expansion when configured with a block allocator and
+// limited support for out-of-order frees.
+//
+// Allocation has constant-time best and usual case performance.
+// At worst, it is linear according to the number of memory blocks.
+//
+// Allocation follows a first-fit strategy when there are multiple memory
+// blocks.
+//
+// Freeing has constant-time best and usual case performance.
+// At worst, it is linear according to the number of memory blocks and number
+// of freed items preceding the last item in a block.
+//
+// Resizing has constant-time performance if the target is the last item in a
+// block or the new size is smaller. Naturally, this becomes linear-time if
+// there are multiple blocks to search for the pointer's owning block. In any
+// other case, the allocator falls back to a combined alloc & free operation
+// internally.
+//
+// Out-of-order freeing is accomplished by collapsing a run of freed items
+// from the last allocation backwards.
+//
+// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
+// the requested alignment.
+
+import "base:runtime"
+
+ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
+
+// This limitation is due to the size of `prev_ptr`, but it is only for the
+// head block; any allocation in excess of the allocator's `block_size` is
+// valid, so long as the block allocator can handle it.
+//
+// This is because allocations over the block size are not split up if the item
+// within is freed; they are immediately returned to the block allocator.
+ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
+
+
+Rollback_Stack_Header :: bit_field u64 {
+ prev_offset: uintptr | 32,
+ is_free: bool | 1,
+ prev_ptr: uintptr | 31,
+}
+
+Rollback_Stack_Block :: struct {
+ next_block: ^Rollback_Stack_Block,
+ last_alloc: rawptr,
+ offset: uintptr,
+ buffer: []byte,
+}
+
+Rollback_Stack :: struct {
+ head: ^Rollback_Stack_Block,
+ block_size: int,
+ block_allocator: Allocator,
+}
+
+
+@(private="file", require_results)
+rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
+ start := raw_data(block.buffer)
+ end := start[block.offset:]
+ return start < ptr && ptr <= end
+}
+
+@(private="file", require_results)
+rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
+ parent: ^Rollback_Stack_Block,
+ block: ^Rollback_Stack_Block,
+ header: ^Rollback_Stack_Header,
+ err: Allocator_Error,
+) {
+ for block = stack.head; block != nil; block = block.next_block {
+ if rb_ptr_in_bounds(block, ptr) {
+ header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
+ return
+ }
+ parent = block
+ }
+ return nil, nil, nil, .Invalid_Pointer
+}
+
+@(private="file", require_results)
+rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
+ block: ^Rollback_Stack_Block,
+ header: ^Rollback_Stack_Header,
+ ok: bool,
+) {
+ for block = stack.head; block != nil; block = block.next_block {
+ if block.last_alloc == ptr {
+ header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
+ return block, header, true
+ }
+ }
+ return nil, nil, false
+}
+
+@(private="file")
+rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
+ header := header
+ for block.offset > 0 && header.is_free {
+ block.offset = header.prev_offset
+ block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
+ header = cast(^Rollback_Stack_Header)(raw_data(block.buffer)[header.prev_ptr - size_of(Rollback_Stack_Header):])
+ }
+}
+
+@(private="file", require_results)
+rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
+ parent, block, header := rb_find_ptr(stack, ptr) or_return
+ if header.is_free {
+ return .Invalid_Pointer
+ }
+ header.is_free = true
+ if block.last_alloc == ptr {
+ block.offset = header.prev_offset
+ rb_rollback_block(block, header)
+ }
+ if parent != nil && block.offset == 0 {
+ parent.next_block = block.next_block
+ runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
+ }
+ return nil
+}
+
+@(private="file")
+rb_free_all :: proc(stack: ^Rollback_Stack) {
+ for block := stack.head.next_block; block != nil; /**/ {
+ next_block := block.next_block
+ runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
+ block = next_block
+ }
+
+ stack.head.next_block = nil
+ stack.head.last_alloc = nil
+ stack.head.offset = 0
+}
+
+@(private="file", require_results)
+rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+ if ptr != nil {
+ if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
+ // `block.offset` should never underflow because it is contingent
+ // on `old_size` in the first place, assuming sane arguments.
+ assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
+
+ if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
+ // Prevent singleton allocations from fragmenting by forbidding
+ // them to shrink, removing the possibility of overflow bugs.
+ if len(block.buffer) <= stack.block_size {
+ block.offset += cast(uintptr)size - cast(uintptr)old_size
+ }
+ #no_bounds_check return (cast([^]byte)ptr)[:size], nil
+ }
+ }
+ }
+
+ result = rb_alloc(stack, size, alignment) or_return
+	runtime.mem_copy_non_overlapping(raw_data(result), ptr, min(old_size, size)) // copy no more than the new allocation holds
+ err = rb_free(stack, ptr)
+
+ return
+}
+
+@(private="file", require_results)
+rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+ parent: ^Rollback_Stack_Block
+ for block := stack.head; /**/; block = block.next_block {
+ when !ODIN_DISABLE_ASSERT {
+ allocated_new_block: bool
+ }
+
+ if block == nil {
+ if stack.block_allocator.procedure == nil {
+ return nil, .Out_Of_Memory
+ }
+
+ minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
+ new_block_size := max(minimum_size_required, stack.block_size)
+ block = rb_make_block(new_block_size, stack.block_allocator) or_return
+ parent.next_block = block
+ when !ODIN_DISABLE_ASSERT {
+ allocated_new_block = true
+ }
+ }
+
+ start := raw_data(block.buffer)[block.offset:]
+ padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
+
+ if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
+ when !ODIN_DISABLE_ASSERT {
+ if allocated_new_block {
+ panic("Rollback Stack Allocator allocated a new block but did not use it.")
+ }
+ }
+ parent = block
+ continue
+ }
+
+ header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
+ ptr := start[padding:]
+
+ header^ = {
+ prev_offset = block.offset,
+ prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
+ is_free = false,
+ }
+
+ block.last_alloc = ptr
+ block.offset += padding + cast(uintptr)size
+
+ if len(block.buffer) > stack.block_size {
+ // This block exceeds the allocator's standard block size and is considered a singleton.
+ // Prevent any further allocations on it.
+ block.offset = cast(uintptr)len(block.buffer)
+ }
+
+ #no_bounds_check return ptr[:size], nil
+ }
+
+ return nil, .Out_Of_Memory
+}
+
+@(private="file", require_results)
+rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
+ buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
+
+ block = cast(^Rollback_Stack_Block)raw_data(buffer)
+ #no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
+ return
+}
+
+
+rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
+ MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
+ assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
+
+ block := cast(^Rollback_Stack_Block)raw_data(buffer)
+ block^ = {}
+ #no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
+
+ stack^ = {}
+ stack.head = block
+ stack.block_size = len(block.buffer)
+}
+
+rollback_stack_init_dynamic :: proc(
+ stack: ^Rollback_Stack,
+ block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
+ block_allocator := context.allocator,
+ location := #caller_location,
+) -> Allocator_Error {
+ assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location)
+ when size_of(int) > 4 {
+ // It's impossible to specify an argument in excess when your integer
+ // size is insufficient; check only on platforms with big enough ints.
+ assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
+ }
+
+ block := rb_make_block(block_size, block_allocator) or_return
+
+ stack^ = {}
+ stack.head = block
+ stack.block_size = block_size
+ stack.block_allocator = block_allocator
+
+ return nil
+}
+
+rollback_stack_init :: proc {
+ rollback_stack_init_buffered,
+ rollback_stack_init_dynamic,
+}
+
+rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
+ if stack.block_allocator.procedure != nil {
+ rb_free_all(stack)
+ free(stack.head, stack.block_allocator)
+ }
+ stack^ = {}
+}
+
+@(require_results)
+rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
+ return Allocator {
+ data = stack,
+ procedure = rollback_stack_allocator_proc,
+ }
+}
+
+@(require_results)
+rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr, old_size: int, location := #caller_location,
+) -> (result: []byte, err: Allocator_Error) {
+ stack := cast(^Rollback_Stack)allocator_data
+
+ switch mode {
+ case .Alloc, .Alloc_Non_Zeroed:
+ assert(size >= 0, "Size must be positive or zero.", location)
+ assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
+ result = rb_alloc(stack, size, alignment) or_return
+
+ if mode == .Alloc {
+ zero_slice(result)
+ }
+
+ case .Free:
+ err = rb_free(stack, old_memory)
+
+ case .Free_All:
+ rb_free_all(stack)
+
+ case .Resize, .Resize_Non_Zeroed:
+ assert(size >= 0, "Size must be positive or zero.", location)
+ assert(old_size >= 0, "Old size must be positive or zero.", location)
+ assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
+ result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
+
+ #no_bounds_check if mode == .Resize && size > old_size {
+ zero_slice(result[old_size:])
+ }
+
+ case .Query_Features:
+ set := (^Allocator_Mode_Set)(old_memory)
+ if set != nil {
+ set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
+ }
+ return nil, nil
+
+ case .Query_Info:
+ return nil, .Mode_Not_Implemented
+ }
+
+ return
+}
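A minimal usage sketch of the allocator introduced above, not part of the patch; it uses only the procedures declared in this file:

import "core:mem"

rollback_example :: proc() {
	stack: mem.Rollback_Stack
	// The growable variant: new blocks come from context.allocator on demand.
	_ = mem.rollback_stack_init_dynamic(&stack)
	defer mem.rollback_stack_destroy(&stack)

	context.allocator = mem.rollback_stack_allocator(&stack)

	xs := make([dynamic]int)
	append(&xs, 1, 2, 3)
	delete(xs) // individual frees are supported, which is what the Tracking Allocator expects
}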
diff --git a/core/mem/tlsf/LICENSE b/core/mem/tlsf/LICENSE
new file mode 100644
index 000000000..9d668ce02
--- /dev/null
+++ b/core/mem/tlsf/LICENSE
@@ -0,0 +1,36 @@
+Original BSD-3 license:
+
+Two Level Segregated Fit memory allocator, version 3.1.
+Written by Matthew Conte
+ http://tlsf.baisoku.org
+
+Based on the original documentation by Miguel Masmano:
+ http://www.gii.upv.es/tlsf/main/docs
+
+This implementation was written to the specification
+of the document, therefore no GPL restrictions apply.
+
+Copyright (c) 2006-2016, Matthew Conte
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/core/mem/tlsf/tlsf.odin b/core/mem/tlsf/tlsf.odin
new file mode 100644
index 000000000..76ecbb4b1
--- /dev/null
+++ b/core/mem/tlsf/tlsf.odin
@@ -0,0 +1,156 @@
+/*
+ Copyright 2024 Jeroen van Rijn <nom@duclavier.com>.
+ Made available under Odin's BSD-3 license.
+
+ List of contributors:
+ Matt Conte: Original C implementation, see LICENSE file in this package
+ Jeroen van Rijn: Source port
+*/
+
+// package mem_tlsf implements a Two Level Segregated Fit memory allocator.
+package mem_tlsf
+
+import "base:runtime"
+
+Error :: enum byte {
+ None = 0,
+ Invalid_Backing_Allocator = 1,
+ Invalid_Alignment = 2,
+ Backing_Buffer_Too_Small = 3,
+ Backing_Buffer_Too_Large = 4,
+ Backing_Allocator_Error = 5,
+}
+
+
+Allocator :: struct {
+ // Empty lists point at this block to indicate they are free.
+ block_null: Block_Header,
+
+ // Bitmaps for free lists.
+ fl_bitmap: u32 `fmt:"-"`,
+ sl_bitmap: [FL_INDEX_COUNT]u32 `fmt:"-"`,
+
+ // Head of free lists.
+ blocks: [FL_INDEX_COUNT][SL_INDEX_COUNT]^Block_Header `fmt:"-"`,
+
+ // Keep track of pools so we can deallocate them.
+ // If `pool.allocator` is blank, we don't do anything.
+ // We also use this linked list of pools to report
+ // statistics like how much memory is still available,
+ // fragmentation, etc.
+ pool: Pool,
+}
+#assert(size_of(Allocator) % ALIGN_SIZE == 0)
+
+
+
+
+@(require_results)
+allocator :: proc(t: ^Allocator) -> runtime.Allocator {
+ return runtime.Allocator{
+ procedure = allocator_proc,
+ data = t,
+ }
+}
+
+@(require_results)
+init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
+ assert(control != nil)
+ if uintptr(raw_data(buf)) % ALIGN_SIZE != 0 {
+ return .Invalid_Alignment
+ }
+
+ pool_bytes := align_down(len(buf) - POOL_OVERHEAD, ALIGN_SIZE)
+ if pool_bytes < BLOCK_SIZE_MIN {
+ return .Backing_Buffer_Too_Small
+ } else if pool_bytes > BLOCK_SIZE_MAX {
+ return .Backing_Buffer_Too_Large
+ }
+
+ clear(control)
+ return pool_add(control, buf[:])
+}
+
+@(require_results)
+init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
+ assert(control != nil)
+ pool_bytes := align_up(uint(initial_pool_size) + POOL_OVERHEAD, ALIGN_SIZE)
+ if pool_bytes < BLOCK_SIZE_MIN {
+ return .Backing_Buffer_Too_Small
+ } else if pool_bytes > BLOCK_SIZE_MAX {
+ return .Backing_Buffer_Too_Large
+ }
+
+ buf, backing_err := runtime.make_aligned([]byte, pool_bytes, ALIGN_SIZE, backing)
+ if backing_err != nil {
+ return .Backing_Allocator_Error
+ }
+ err := init_from_buffer(control, buf)
+ control.pool = Pool{
+ data = buf,
+ allocator = backing,
+ }
+ return err
+}
+init :: proc{init_from_buffer, init_from_allocator}
+
+destroy :: proc(control: ^Allocator) {
+ if control == nil { return }
+
+	// No need to call `pool_remove` or anything, as the pools are embedded in the backing memory.
+ // We do however need to free the `Pool` tracking entities and the backing memory itself.
+ // As `Allocator` is embedded in the first backing slice, the `control` pointer will be
+ // invalid after this call.
+ for p := control.pool.next; p != nil; {
+ next := p.next
+
+ // Free the allocation on the backing allocator
+ runtime.delete(p.data, p.allocator)
+ free(p, p.allocator)
+
+ p = next
+ }
+}
+
+allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
+ size, alignment: int,
+ old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, runtime.Allocator_Error) {
+
+ control := (^Allocator)(allocator_data)
+ if control == nil {
+ return nil, .Invalid_Argument
+ }
+
+ switch mode {
+ case .Alloc:
+ return alloc_bytes(control, uint(size), uint(alignment))
+ case .Alloc_Non_Zeroed:
+ return alloc_bytes_non_zeroed(control, uint(size), uint(alignment))
+
+ case .Free:
+ free_with_size(control, old_memory, uint(old_size))
+ return nil, nil
+
+ case .Free_All:
+ clear(control)
+ return nil, nil
+
+ case .Resize:
+ return resize(control, old_memory, uint(old_size), uint(size), uint(alignment))
+
+ case .Resize_Non_Zeroed:
+ return resize_non_zeroed(control, old_memory, uint(old_size), uint(size), uint(alignment))
+
+ case .Query_Features:
+ set := (^runtime.Allocator_Mode_Set)(old_memory)
+ if set != nil {
+ set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
+ }
+ return nil, nil
+
+ case .Query_Info:
+ return nil, .Mode_Not_Implemented
+ }
+
+ return nil, nil
+} \ No newline at end of file
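A minimal usage sketch of the public API above, not part of the patch. `Error.None` is the zero value, so the comparison below is a plain success check:

import tlsf "core:mem/tlsf"

tlsf_example :: proc() {
	control: tlsf.Allocator
	// Carve a single 1 MiB pool out of context.allocator.
	if tlsf.init_from_allocator(&control, context.allocator, 1 << 20) != .None {
		return
	}
	defer tlsf.destroy(&control)

	context.allocator = tlsf.allocator(&control)
	buf := make([]byte, 4096)
	delete(buf)
}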
diff --git a/core/mem/tlsf/tlsf_internal.odin b/core/mem/tlsf/tlsf_internal.odin
new file mode 100644
index 000000000..6f33e516c
--- /dev/null
+++ b/core/mem/tlsf/tlsf_internal.odin
@@ -0,0 +1,738 @@
+/*
+ Copyright 2024 Jeroen van Rijn <nom@duclavier.com>.
+ Made available under Odin's BSD-3 license.
+
+ List of contributors:
+ Matt Conte: Original C implementation, see LICENSE file in this package
+ Jeroen van Rijn: Source port
+*/
+
+
+package mem_tlsf
+
+import "base:intrinsics"
+import "base:runtime"
+// import "core:fmt"
+
+// log2 of number of linear subdivisions of block sizes.
+// Larger values require more memory in the control structure.
+// Values of 4 or 5 are typical.
+TLSF_SL_INDEX_COUNT_LOG2 :: #config(TLSF_SL_INDEX_COUNT_LOG2, 5)
+
+// All allocation sizes and addresses are aligned to 4/8 bytes
+ALIGN_SIZE_LOG2 :: 3 when size_of(uintptr) == 8 else 2
+
+// We can increase this to support larger allocation sizes,
+// at the expense of more overhead in the TLSF structure
+FL_INDEX_MAX :: 32 when size_of(uintptr) == 8 else 30
+#assert(FL_INDEX_MAX < 36)
+
+ALIGN_SIZE :: 1 << ALIGN_SIZE_LOG2
+SL_INDEX_COUNT :: 1 << TLSF_SL_INDEX_COUNT_LOG2
+FL_INDEX_SHIFT :: TLSF_SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2
+FL_INDEX_COUNT :: FL_INDEX_MAX - FL_INDEX_SHIFT + 1
+SMALL_BLOCK_SIZE :: 1 << FL_INDEX_SHIFT
+
+/*
+We support allocations of sizes up to (1 << `FL_INDEX_MAX`) bits.
+However, because we linearly subdivide the second-level lists, and
+our minimum size granularity is 4 bytes, it doesn't make sense to
+create first-level lists for sizes smaller than `SL_INDEX_COUNT` * 4,
+or (1 << (`TLSF_SL_INDEX_COUNT_LOG2` + 2)) bytes, as there we will be
+trying to split size ranges into more slots than we have available.
+Instead, we calculate the minimum threshold size, and place all
+blocks below that size into the 0th first-level list.
+*/
+
+// SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type
+#assert(size_of(uint) * 8 >= SL_INDEX_COUNT)
+
+// Ensure we've properly tuned our sizes.
+#assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
+
+#assert(size_of(Allocator) % ALIGN_SIZE == 0)
+
+Pool :: struct {
+ data: []u8 `fmt:"-"`,
+ allocator: runtime.Allocator,
+ next: ^Pool,
+}
+
+
+/*
+Block header structure.
+
+There are several implementation subtleties involved:
+- The `prev_phys_block` field is only valid if the previous block is free.
+- The `prev_phys_block` field is actually stored at the end of the
+ previous block. It appears at the beginning of this structure only to
+ simplify the implementation.
+- The `next_free` / `prev_free` fields are only valid if the block is free.
+*/
+Block_Header :: struct {
+ prev_phys_block: ^Block_Header,
+ size: uint, // The size of this block, excluding the block header
+
+ // Next and previous free blocks.
+ next_free: ^Block_Header,
+ prev_free: ^Block_Header,
+}
+#assert(offset_of(Block_Header, prev_phys_block) == 0)
+
+/*
+Since block sizes are always at least a multiple of 4, the two least
+significant bits of the size field are used to store the block status:
+- bit 0: whether block is busy or free
+- bit 1: whether previous block is busy or free
+*/
+BLOCK_HEADER_FREE :: uint(1 << 0)
+BLOCK_HEADER_PREV_FREE :: uint(1 << 1)
+
+/*
+The size of the block header exposed to used blocks is the `size` field.
+The `prev_phys_block` field is stored *inside* the previous free block.
+*/
+BLOCK_HEADER_OVERHEAD :: uint(size_of(uint))
+
+POOL_OVERHEAD :: 2 * BLOCK_HEADER_OVERHEAD
+
+// User data starts directly after the size field in a used block.
+BLOCK_START_OFFSET :: offset_of(Block_Header, size) + size_of(Block_Header{}.size)
+
+/*
+A free block must be large enough to store its header minus the size of
+the `prev_phys_block` field, and no larger than the number of addressable
+bits for `FL_INDEX`.
+*/
+BLOCK_SIZE_MIN :: uint(size_of(Block_Header) - size_of(^Block_Header))
+BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
+
+/*
+ TLSF achieves O(1) cost for `alloc` and `free` operations by limiting
+ the search for a free block to a free list of guaranteed size
+ adequate to fulfill the request, combined with efficient free list
+ queries using bitmasks and architecture-specific bit-manipulation
+ routines.
+
+ NOTE: TLSF spec relies on ffs/fls returning value 0..31.
+*/
+
+@(require_results)
+ffs :: proc "contextless" (word: u32) -> (bit: i32) {
+ return -1 if word == 0 else i32(intrinsics.count_trailing_zeros(word))
+}
+
+@(require_results)
+fls :: proc "contextless" (word: u32) -> (bit: i32) {
+ N :: (size_of(u32) * 8) - 1
+ return i32(N - intrinsics.count_leading_zeros(word))
+}
+
+@(require_results)
+fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
+ N :: (size_of(uint) * 8) - 1
+ return i32(N - intrinsics.count_leading_zeros(size))
+}
+
+@(require_results)
+block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
+ return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
+}
+
+block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
+ old_size := block.size
+ block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
+}
+
+@(require_results)
+block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
+ return block_size(block) == 0
+}
+
+@(require_results)
+block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
+ return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
+}
+
+block_set_free :: proc "contextless" (block: ^Block_Header) {
+ block.size |= BLOCK_HEADER_FREE
+}
+
+block_set_used :: proc "contextless" (block: ^Block_Header) {
+ block.size &~= BLOCK_HEADER_FREE
+}
+
+@(require_results)
+block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
+ return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
+}
+
+block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
+ block.size |= BLOCK_HEADER_PREV_FREE
+}
+
+block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
+ block.size &~= BLOCK_HEADER_PREV_FREE
+}
+
+@(require_results)
+block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
+ return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
+}
+
+@(require_results)
+block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) {
+ return rawptr(uintptr(block) + BLOCK_START_OFFSET)
+}
+
+// Return location of next block after block of given size.
+@(require_results)
+offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
+ return (^Block_Header)(uintptr(ptr) + uintptr(size))
+}
+
+@(require_results)
+offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
+ return (^Block_Header)(uintptr(ptr) - uintptr(size))
+}
+
+// Return location of previous block.
+@(require_results)
+block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
+ assert(block_is_prev_free(block), "previous block must be free")
+ return block.prev_phys_block
+}
+
+// Return location of next existing block.
+@(require_results)
+block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
+ return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
+}
+
+// Link a new block with its physical neighbor, return the neighbor.
+@(require_results)
+block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
+ next = block_next(block)
+ next.prev_phys_block = block
+ return
+}
+
+block_mark_as_free :: proc(block: ^Block_Header) {
+ // Link the block to the next block, first.
+ next := block_link_next(block)
+ block_set_prev_free(next)
+ block_set_free(block)
+}
+
+block_mark_as_used :: proc(block: ^Block_Header) {
+ next := block_next(block)
+ block_set_prev_used(next)
+ block_set_used(block)
+}
+
+@(require_results)
+align_up :: proc(x, align: uint) -> (aligned: uint) {
+ assert(0 == (align & (align - 1)), "must align to a power of two")
+ return (x + (align - 1)) &~ (align - 1)
+}
+
+@(require_results)
+align_down :: proc(x, align: uint) -> (aligned: uint) {
+ assert(0 == (align & (align - 1)), "must align to a power of two")
+ return x - (x & (align - 1))
+}
+
+@(require_results)
+align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
+ assert(0 == (align & (align - 1)), "must align to a power of two")
+ align_mask := uintptr(align) - 1
+ _ptr := uintptr(ptr)
+ _aligned := (_ptr + align_mask) &~ (align_mask)
+ return rawptr(_aligned)
+}
+
+// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
+@(require_results)
+adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
+ if size == 0 {
+ return 0
+ }
+
+ // aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
+ if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
+ adjusted = min(aligned, BLOCK_SIZE_MAX)
+ }
+ return
+}
+
+// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
+@(require_results)
+adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
+ if size == 0 {
+ return 0, nil
+ }
+
+ // aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
+ if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
+ adjusted = min(aligned, BLOCK_SIZE_MAX)
+ } else {
+ err = .Out_Of_Memory
+ }
+ return
+}
+
+// TLSF utility functions. In most cases these are direct translations of
+// the documentation in the research paper.
+
+@(optimization_mode="speed", require_results)
+mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
+ if size < SMALL_BLOCK_SIZE {
+ // Store small blocks in first list.
+ sl = i32(size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
+ } else {
+ fl = fls_uint(size)
+ sl = i32(size >> (uint(fl) - TLSF_SL_INDEX_COUNT_LOG2)) ~ (1 << TLSF_SL_INDEX_COUNT_LOG2)
+ fl -= (FL_INDEX_SHIFT - 1)
+ }
+ return
+}
+
+@(optimization_mode="speed", require_results)
+mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
+ rounded = size
+ if size >= SMALL_BLOCK_SIZE {
+ round := uint(1 << (uint(fls_uint(size) - TLSF_SL_INDEX_COUNT_LOG2))) - 1
+ rounded += round
+ }
+ return
+}
+
+// This version rounds up to the next block size (for allocations)
+@(optimization_mode="speed", require_results)
+mapping_search :: proc(size: uint) -> (fl, sl: i32) {
+ return mapping_insert(mapping_round(size))
+}
+
+@(require_results)
+search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
+ // First, search for a block in the list associated with the given fl/sl index.
+ fl := fli^; sl := sli^
+
+ sl_map := control.sl_bitmap[fli^] & (~u32(0) << uint(sl))
+ if sl_map == 0 {
+ // No block exists. Search in the next largest first-level list.
+ fl_map := control.fl_bitmap & (~u32(0) << uint(fl + 1))
+ if fl_map == 0 {
+ // No free blocks available, memory has been exhausted.
+ return {}
+ }
+
+ fl = ffs(fl_map)
+ fli^ = fl
+ sl_map = control.sl_bitmap[fl]
+ }
+ assert(sl_map != 0, "internal error - second level bitmap is null")
+ sl = ffs(sl_map)
+ sli^ = sl
+
+ // Return the first block in the free list.
+ return control.blocks[fl][sl]
+}
+
+// Remove a free block from the free list.
+remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
+ prev := block.prev_free
+ next := block.next_free
+ assert(prev != nil, "prev_free can not be nil")
+ assert(next != nil, "next_free can not be nil")
+ next.prev_free = prev
+ prev.next_free = next
+
+ // If this block is the head of the free list, set new head.
+ if control.blocks[fl][sl] == block {
+ control.blocks[fl][sl] = next
+
+ // If the new head is nil, clear the bitmap
+ if next == &control.block_null {
+ control.sl_bitmap[fl] &~= (u32(1) << uint(sl))
+
+ // If the second bitmap is now empty, clear the fl bitmap
+ if control.sl_bitmap[fl] == 0 {
+ control.fl_bitmap &~= (u32(1) << uint(fl))
+ }
+ }
+ }
+}
+
+// Insert a free block into the free block list.
+insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
+ current := control.blocks[fl][sl]
+ assert(current != nil, "free lists cannot have a nil entry")
+ assert(block != nil, "cannot insert a nil entry into the free list")
+ block.next_free = current
+ block.prev_free = &control.block_null
+ current.prev_free = block
+
+ assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE), "block not properly aligned")
+
+ // Insert the new block at the head of the list, and mark the first- and second-level bitmaps appropriately.
+ control.blocks[fl][sl] = block
+ control.fl_bitmap |= (u32(1) << uint(fl))
+ control.sl_bitmap[fl] |= (u32(1) << uint(sl))
+}
+
+// Remove a given block from the free list.
+block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
+ fl, sl := mapping_insert(block_size(block))
+ remove_free_block(control, block, fl, sl)
+}
+
+// Insert a given block into the free list.
+block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
+ fl, sl := mapping_insert(block_size(block))
+ insert_free_block(control, block, fl, sl)
+}
+
+@(require_results)
+block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
+ return block_size(block) >= size_of(Block_Header) + size
+}
+
+// Split a block into two, the second of which is free.
+@(require_results)
+block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
+ // Calculate the amount of space left in the remaining block.
+ remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
+
+ remain_size := block_size(block) - (size + BLOCK_HEADER_OVERHEAD)
+
+ assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE),
+ "remaining block not aligned properly")
+
+ assert(block_size(block) == remain_size + size + BLOCK_HEADER_OVERHEAD)
+ block_set_size(remaining, remain_size)
+ assert(block_size(remaining) >= BLOCK_SIZE_MIN, "block split with invalid size")
+
+ block_set_size(block, size)
+ block_mark_as_free(remaining)
+
+ return remaining
+}
+
+// Absorb a free block's storage into an adjacent previous free block.
+@(require_results)
+block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
+ assert(!block_is_last(prev), "previous block can't be last")
+ // Note: Leaves flags untouched.
+ prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD
+ _ = block_link_next(prev)
+ return prev
+}
+
+// Merge a just-freed block with an adjacent previous free block.
+@(require_results)
+block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
+ merged = block
+ if (block_is_prev_free(block)) {
+ prev := block_prev(block)
+ assert(prev != nil, "prev physical block can't be nil")
+ assert(block_is_free(prev), "prev block is not free though marked as such")
+ block_remove(control, prev)
+ merged = block_absorb(prev, block)
+ }
+ return merged
+}
+
+// Merge a just-freed block with an adjacent free block.
+@(require_results)
+block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
+ merged = block
+ next := block_next(block)
+ assert(next != nil, "next physical block can't be nil")
+
+ if (block_is_free(next)) {
+ assert(!block_is_last(block), "previous block can't be last")
+ block_remove(control, next)
+ merged = block_absorb(block, next)
+ }
+ return merged
+}
+
+// Trim any trailing block space off the end of a free block, return to pool.
+block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
+ assert(block_is_free(block), "block must be free")
+ if (block_can_split(block, size)) {
+ remaining_block := block_split(block, size)
+ _ = block_link_next(block)
+ block_set_prev_free(remaining_block)
+ block_insert(control, remaining_block)
+ }
+}
+
+// Trim any trailing block space off the end of a used block, return to pool.
+block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
+ assert(!block_is_free(block), "Block must be used")
+ if (block_can_split(block, size)) {
+ // If the next block is free, we must coalesce.
+ remaining_block := block_split(block, size)
+ block_set_prev_used(remaining_block)
+
+ remaining_block = block_merge_next(control, remaining_block)
+ block_insert(control, remaining_block)
+ }
+}
+
+// Trim leading block space, return to pool.
+@(require_results)
+block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
+ remaining = block
+ if block_can_split(block, size) {
+ // We want the 2nd block.
+ remaining = block_split(block, size - BLOCK_HEADER_OVERHEAD)
+ block_set_prev_free(remaining)
+
+ _ = block_link_next(block)
+ block_insert(control, block)
+ }
+ return remaining
+}
+
+@(require_results)
+block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
+ fl, sl: i32
+ if size != 0 {
+ fl, sl = mapping_search(size)
+
+ /*
+ `mapping_search` can futz with the size, so for excessively large sizes it can sometimes wind up
+ with indices that are off the end of the block array. So, we protect against that here,
+ since this is the only call site of `mapping_search`. Note that we don't need to check `sl`,
+ as it comes from a modulo operation that guarantees it's always in range.
+ */
+ if fl < FL_INDEX_COUNT {
+ block = search_suitable_block(control, &fl, &sl)
+ }
+ }
+
+ if block != nil {
+ assert(block_size(block) >= size)
+ remove_free_block(control, block, fl, sl)
+ }
+ return block
+}
+
+@(require_results)
+block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ if block != nil {
+ assert(size != 0, "Size must be non-zero")
+ block_trim_free(control, block, size)
+ block_mark_as_used(block)
+ res = ([^]byte)(block_to_ptr(block))[:size]
+ }
+ return
+}
+
+// Clear control structure and point all empty lists at the null block
+clear :: proc(control: ^Allocator) {
+ control.block_null.next_free = &control.block_null
+ control.block_null.prev_free = &control.block_null
+
+ control.fl_bitmap = 0
+ for i in 0..<FL_INDEX_COUNT {
+ control.sl_bitmap[i] = 0
+ for j in 0..<SL_INDEX_COUNT {
+ control.blocks[i][j] = &control.block_null
+ }
+ }
+}
+
+@(require_results)
+pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
+ assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
+
+ pool_overhead := POOL_OVERHEAD
+ pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
+
+ if pool_bytes < BLOCK_SIZE_MIN {
+ return .Backing_Buffer_Too_Small
+ } else if pool_bytes > BLOCK_SIZE_MAX {
+ return .Backing_Buffer_Too_Large
+ }
+
+ // Create the main free block. Offset the start of the block slightly,
+ // so that the `prev_phys_block` field falls outside of the pool -
+ // it will never be used.
+ block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+ block_set_size(block, pool_bytes)
+ block_set_free(block)
+ block_set_prev_used(block)
+ block_insert(control, block)
+
+ // Split the block to create a zero-size sentinel block
+ next := block_link_next(block)
+ block_set_size(next, 0)
+ block_set_used(next)
+ block_set_prev_free(next)
+ return
+}
+
+pool_remove :: proc(control: ^Allocator, pool: []u8) {
+ block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
+
+ assert(block_is_free(block), "Block should be free")
+ assert(!block_is_free(block_next(block)), "Next block should not be free")
+ assert(block_size(block_next(block)) == 0, "Next block size should be zero")
+
+ fl, sl := mapping_insert(block_size(block))
+ remove_free_block(control, block, fl, sl)
+}
+
+@(require_results)
+alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ assert(control != nil)
+ adjust := adjust_request_size(size, ALIGN_SIZE)
+
+ GAP_MINIMUM :: size_of(Block_Header)
+ size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
+
+ aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
+ if aligned_size == 0 && size > 0 {
+ return nil, .Out_Of_Memory
+ }
+
+ block := block_locate_free(control, aligned_size)
+ if block == nil {
+ return nil, .Out_Of_Memory
+ }
+ ptr := block_to_ptr(block)
+ aligned := align_ptr(ptr, align)
+ gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+
+ if gap != 0 && gap < GAP_MINIMUM {
+ gap_remain := GAP_MINIMUM - gap
+ offset := uintptr(max(gap_remain, align))
+ next_aligned := rawptr(uintptr(aligned) + offset)
+
+ aligned = align_ptr(next_aligned, align)
+
+ gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
+ }
+
+ if gap != 0 {
+ assert(gap >= GAP_MINIMUM, "gap size too small")
+ block = block_trim_free_leading(control, block, gap)
+ }
+
+ return block_prepare_used(control, block, adjust)
+}
+
+@(require_results)
+alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ res, err = alloc_bytes_non_zeroed(control, size, align)
+	if err == nil {
+ intrinsics.mem_zero(raw_data(res), len(res))
+ }
+ return
+}
+
+
+free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
+ assert(control != nil)
+ // `size` is currently ignored
+ if ptr == nil {
+ return
+ }
+
+ block := block_from_ptr(ptr)
+ assert(!block_is_free(block), "block already marked as free") // double free
+ block_mark_as_free(block)
+ block = block_merge_prev(control, block)
+ block = block_merge_next(control, block)
+ block_insert(control, block)
+}
+
+
+@(require_results)
+resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ assert(control != nil)
+ if ptr != nil && new_size == 0 {
+ free_with_size(control, ptr, old_size)
+ return
+ } else if ptr == nil {
+ return alloc_bytes(control, new_size, alignment)
+ }
+
+ block := block_from_ptr(ptr)
+ next := block_next(block)
+
+ curr_size := block_size(block)
+ combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+ adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+ assert(!block_is_free(block), "block already marked as free") // double free
+
+ min_size := min(curr_size, new_size, old_size)
+
+ if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+ res = alloc_bytes(control, new_size, alignment) or_return
+ if res != nil {
+ copy(res, ([^]byte)(ptr)[:min_size])
+ free_with_size(control, ptr, curr_size)
+ }
+ return
+ }
+ if adjust > curr_size {
+ _ = block_merge_next(control, block)
+ block_mark_as_used(block)
+ }
+
+ block_trim_used(control, block, adjust)
+ res = ([^]byte)(ptr)[:new_size]
+
+ if min_size < new_size {
+ to_zero := ([^]byte)(ptr)[min_size:new_size]
+ runtime.mem_zero(raw_data(to_zero), len(to_zero))
+ }
+ return
+}
+
+@(require_results)
+resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
+ assert(control != nil)
+ if ptr != nil && new_size == 0 {
+ free_with_size(control, ptr, old_size)
+ return
+ } else if ptr == nil {
+ return alloc_bytes_non_zeroed(control, new_size, alignment)
+ }
+
+ block := block_from_ptr(ptr)
+ next := block_next(block)
+
+ curr_size := block_size(block)
+ combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
+ adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
+
+ assert(!block_is_free(block), "block already marked as free") // double free
+
+ min_size := min(curr_size, new_size, old_size)
+
+ if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
+ res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
+ if res != nil {
+ copy(res, ([^]byte)(ptr)[:min_size])
+ free_with_size(control, ptr, old_size)
+ }
+ return
+ }
+
+ if adjust > curr_size {
+ _ = block_merge_next(control, block)
+ block_mark_as_used(block)
+ }
+
+ block_trim_used(control, block, adjust)
+ res = ([^]byte)(ptr)[:new_size]
+ return
+} \ No newline at end of file
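As a sanity check of the two-level mapping above, a worked example under the 64-bit defaults (ALIGN_SIZE = 8, SMALL_BLOCK_SIZE = 256, SL_INDEX_COUNT = 32, FL_INDEX_SHIFT = 8); the numbers follow directly from `mapping_insert` and are illustration only:

// size = 100  (< SMALL_BLOCK_SIZE):  fl = 0, sl = 100 / (256/32) = 12
// size = 1000 (>= SMALL_BLOCK_SIZE): fl = fls(1000) = 9, since 1000 lies in [512, 1024)
//   sl  = (1000 >> (9-5)) ~ (1<<5) = 62 ~ 32 = 30   // slot (1000-512)/16 within that range
//   fl -= FL_INDEX_SHIFT - 1, giving 9 - 7 = 2
// A freed 1000-byte block is therefore filed under control.blocks[2][30].
// `mapping_search` rounds the size up first, so an allocation request of
// 1000 bytes starts its search one slot higher, at [2][31].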
diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin
index bc624617d..1b57e5fb4 100644
--- a/core/mem/tracking_allocator.odin
+++ b/core/mem/tracking_allocator.odin
@@ -47,6 +47,7 @@ tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
}
+// Clear only the current allocation data while keeping the totals intact.
tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -55,6 +56,19 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_unlock(&t.mutex)
}
+// Reset all of a Tracking Allocator's allocation data back to zero.
+tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
+ sync.mutex_lock(&t.mutex)
+ clear(&t.allocation_map)
+ clear(&t.bad_free_array)
+ t.total_memory_allocated = 0
+ t.total_allocation_count = 0
+ t.total_memory_freed = 0
+ t.total_free_count = 0
+ t.peak_memory_allocated = 0
+ t.current_memory_allocated = 0
+ sync.mutex_unlock(&t.mutex)
+}
@(require_results)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
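A short sketch of how the two procedures above differ, not part of the patch; it assumes the package's existing `tracking_allocator_init` entry point:

import "core:mem"

tracking_example :: proc() {
	track: mem.Tracking_Allocator
	mem.tracking_allocator_init(&track, context.allocator)
	defer mem.tracking_allocator_destroy(&track)

	context.allocator = mem.tracking_allocator(&track)

	// ... first batch of work ...
	mem.tracking_allocator_clear(&track) // drop the per-allocation records, keep the running totals
	// ... second batch of work ...
	mem.tracking_allocator_reset(&track) // zero the totals as well, as if freshly initialised
}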
diff --git a/core/odin/ast/ast.odin b/core/odin/ast/ast.odin
index be541befa..229f03d3d 100644
--- a/core/odin/ast/ast.odin
+++ b/core/odin/ast/ast.odin
@@ -538,7 +538,7 @@ Foreign_Import_Decl :: struct {
import_tok: tokenizer.Token,
name: ^Ident,
collection_name: string,
- fullpaths: []string,
+ fullpaths: []^Expr,
comment: ^Comment_Group,
}
@@ -753,7 +753,7 @@ Array_Type :: struct {
using node: Expr,
open: tokenizer.Pos,
tag: ^Expr,
- len: ^Expr, // Ellipsis node for [?]T arrray types, nil for slice types
+ len: ^Expr, // Ellipsis node for [?]T array types, nil for slice types
close: tokenizer.Pos,
elem: ^Expr,
}
diff --git a/core/odin/ast/clone.odin b/core/odin/ast/clone.odin
index bca740dd4..b0a1673b2 100644
--- a/core/odin/ast/clone.odin
+++ b/core/odin/ast/clone.odin
@@ -278,7 +278,9 @@ clone_node :: proc(node: ^Node) -> ^Node {
r.foreign_library = clone(r.foreign_library)
r.body = clone(r.body)
case ^Foreign_Import_Decl:
+ r.attributes = clone_dynamic_array(r.attributes)
r.name = auto_cast clone(r.name)
+ r.fullpaths = clone_array(r.fullpaths)
case ^Proc_Group:
r.args = clone(r.args)
case ^Attribute:
diff --git a/core/odin/ast/walk.odin b/core/odin/ast/walk.odin
index 63107a2e2..7304f237c 100644
--- a/core/odin/ast/walk.odin
+++ b/core/odin/ast/walk.odin
@@ -320,6 +320,7 @@ walk :: proc(v: ^Visitor, node: ^Node) {
if n.comment != nil {
walk(v, n.comment)
}
+ walk_expr_list(v, n.fullpaths)
case ^Proc_Group:
walk_expr_list(v, n.args)
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index e32fbdced..6b0aa2888 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -1190,12 +1190,12 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
error(p, name.pos, "illegal foreign import name: '_'")
}
- fullpaths: [dynamic]string
+ fullpaths: [dynamic]^ast.Expr
if allow_token(p, .Open_Brace) {
for p.curr_tok.kind != .Close_Brace &&
p.curr_tok.kind != .EOF {
- path := expect_token(p, .String)
- append(&fullpaths, path.text)
+ path := parse_expr(p, false)
+ append(&fullpaths, path)
allow_token(p, .Comma) or_break
}
@@ -1203,7 +1203,9 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
} else {
path := expect_token(p, .String)
reserve(&fullpaths, 1)
- append(&fullpaths, path.text)
+ bl := ast.new(ast.Basic_Lit, path.pos, end_pos(path))
+ bl.tok = path
+ append(&fullpaths, bl)
}
if len(fullpaths) == 0 {
@@ -1453,7 +1455,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
case "unroll":
return parse_unrolled_for_loop(p, tag)
case "reverse":
- stmt := parse_for_stmt(p)
+ stmt := parse_stmt(p)
if range, is_range := stmt.derived.(^ast.Range_Stmt); is_range {
if range.reverse {
@@ -3513,6 +3515,25 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
case op.kind == .Colon:
expect_token_after(p, .Colon, "identifier list")
if .Label in flags && len(lhs) == 1 {
+ is_partial := false
+ is_reverse := false
+
+ partial_token: tokenizer.Token
+ if p.curr_tok.kind == .Hash {
+ name := peek_token(p)
+ if name.kind == .Ident && name.text == "partial" &&
+ peek_token(p, 1).kind == .Switch {
+ partial_token = expect_token(p, .Hash)
+ expect_token(p, .Ident)
+ is_partial = true
+ } else if name.kind == .Ident && name.text == "reverse" &&
+ peek_token(p, 1).kind == .For {
+ partial_token = expect_token(p, .Hash)
+ expect_token(p, .Ident)
+ is_reverse = true
+ }
+ }
+
#partial switch p.curr_tok.kind {
case .Open_Brace, .If, .For, .Switch:
label := lhs[0]
@@ -3527,6 +3548,22 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
case ^ast.Type_Switch_Stmt: n.label = label
case ^ast.Range_Stmt: n.label = label
}
+
+ if is_partial {
+ #partial switch n in stmt.derived_stmt {
+ case ^ast.Switch_Stmt: n.partial = true
+ case ^ast.Type_Switch_Stmt: n.partial = true
+ case:
+ error(p, partial_token.pos, "incorrect use of directive, use '%s: #partial switch'", partial_token.text)
+ }
+ }
+ if is_reverse {
+ #partial switch n in stmt.derived_stmt {
+ case ^ast.Range_Stmt: n.reverse = true
+ case:
+ error(p, partial_token.pos, "incorrect use of directive, use '%s: #reverse for'", partial_token.text)
+ }
+ }
}
return stmt
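The new branch above lets a label precede the `#partial` and `#reverse` directives. A small sketch of the statement forms this accepts, with hypothetical `kind`, `items`, and `needle`:

check: #partial switch kind {
case .Identifier:
	break check
}

scan: #reverse for item, i in items {
	if item == needle { break scan }
}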
diff --git a/core/os/dir_windows.odin b/core/os/dir_windows.odin
index 491507313..9ca78948e 100644
--- a/core/os/dir_windows.odin
+++ b/core/os/dir_windows.odin
@@ -87,8 +87,12 @@ read_dir :: proc(fd: Handle, n: int, allocator := context.allocator) -> (fi: []F
find_data := &win32.WIN32_FIND_DATAW{}
find_handle := win32.FindFirstFileW(raw_data(wpath_search), find_data)
+ if find_handle == win32.INVALID_HANDLE_VALUE {
+ err = Errno(win32.GetLastError())
+ return dfi[:], err
+ }
defer win32.FindClose(find_handle)
- for n != 0 && find_handle != nil {
+ for n != 0 {
fi: File_Info
fi = find_data_to_file_info(path, find_data)
if fi.name != "" {
diff --git a/core/os/os2/internal_util.odin b/core/os/os2/internal_util.odin
index 59d845350..e26cf7439 100644
--- a/core/os/os2/internal_util.odin
+++ b/core/os/os2/internal_util.odin
@@ -111,7 +111,7 @@ next_random :: proc(r: ^[2]u64) -> u64 {
@(require_results)
random_string :: proc(buf: []byte) -> string {
- @static digits := "0123456789"
+ @(static, rodata) digits := "0123456789"
u := next_random(&random_string_seed)
diff --git a/core/os/os_darwin.odin b/core/os/os_darwin.odin
index a688e1ac3..877a90bf1 100644
--- a/core/os/os_darwin.odin
+++ b/core/os/os_darwin.odin
@@ -442,7 +442,7 @@ F_GETPATH :: 50 // return the full path of the fd
foreign libc {
@(link_name="__error") __error :: proc() -> ^c.int ---
- @(link_name="open") _unix_open :: proc(path: cstring, flags: i32, mode: u16) -> Handle ---
+ @(link_name="open") _unix_open :: proc(path: cstring, flags: i32, #c_vararg args: ..any) -> Handle ---
@(link_name="close") _unix_close :: proc(handle: Handle) -> c.int ---
@(link_name="read") _unix_read :: proc(handle: Handle, buffer: rawptr, count: c.size_t) -> int ---
@(link_name="write") _unix_write :: proc(handle: Handle, buffer: rawptr, count: c.size_t) -> int ---
diff --git a/core/os/os_freebsd.odin b/core/os/os_freebsd.odin
index cdd44d301..8fe179478 100644
--- a/core/os/os_freebsd.odin
+++ b/core/os/os_freebsd.odin
@@ -112,15 +112,15 @@ EOWNERDEAD: Errno : 96
O_RDONLY :: 0x00000
O_WRONLY :: 0x00001
O_RDWR :: 0x00002
-O_CREATE :: 0x00040
-O_EXCL :: 0x00080
-O_NOCTTY :: 0x00100
-O_TRUNC :: 0x00200
-O_NONBLOCK :: 0x00800
-O_APPEND :: 0x00400
-O_SYNC :: 0x01000
-O_ASYNC :: 0x02000
-O_CLOEXEC :: 0x80000
+O_NONBLOCK :: 0x00004
+O_APPEND :: 0x00008
+O_ASYNC :: 0x00040
+O_SYNC :: 0x00080
+O_CREATE :: 0x00200
+O_TRUNC :: 0x00400
+O_EXCL :: 0x00800
+O_NOCTTY :: 0x08000
+O_CLOEXEC :: 0x100000
SEEK_DATA :: 3
@@ -140,6 +140,8 @@ RTLD_NOLOAD :: 0x02000
MAX_PATH :: 1024
+KINFO_FILE_SIZE :: 1392
+
args := _alloc_command_line_arguments()
Unix_File_Time :: struct {
@@ -191,6 +193,21 @@ OS_Stat :: struct {
lspare: [10]u64,
}
+KInfo_File :: struct {
+ structsize: c.int,
+ type: c.int,
+ fd: c.int,
+ ref_count: c.int,
+ flags: c.int,
+ pad0: c.int,
+ offset: i64,
+
+ // NOTE(Feoramund): This field represents a complicated union that I am
+ // avoiding implementing for now. I only need the path data below.
+ _union: [336]byte,
+
+ path: [MAX_PATH]c.char,
+}
// since FreeBSD v12
Dirent :: struct {
@@ -254,6 +271,8 @@ X_OK :: 1 // Test for execute permission
W_OK :: 2 // Test for write permission
R_OK :: 4 // Test for read permission
+F_KINFO :: 22
+
foreign libc {
@(link_name="__error") __errno_location :: proc() -> ^c.int ---
@@ -274,6 +293,7 @@ foreign libc {
@(link_name="unlink") _unix_unlink :: proc(path: cstring) -> c.int ---
@(link_name="rmdir") _unix_rmdir :: proc(path: cstring) -> c.int ---
@(link_name="mkdir") _unix_mkdir :: proc(path: cstring, mode: mode_t) -> c.int ---
+ @(link_name="fcntl") _unix_fcntl :: proc(fd: Handle, cmd: c.int, arg: uintptr) -> c.int ---
@(link_name="fdopendir") _unix_fdopendir :: proc(fd: Handle) -> Dir ---
@(link_name="closedir") _unix_closedir :: proc(dirp: Dir) -> c.int ---
@@ -365,7 +385,7 @@ seek :: proc(fd: Handle, offset: i64, whence: int) -> (i64, Errno) {
}
file_size :: proc(fd: Handle) -> (i64, Errno) {
- s, err := fstat(fd)
+ s, err := _fstat(fd)
if err != ERROR_NONE {
return -1, err
}
@@ -591,9 +611,26 @@ _readlink :: proc(path: string) -> (string, Errno) {
return "", Errno{}
}
-// XXX FreeBSD
absolute_path_from_handle :: proc(fd: Handle) -> (string, Errno) {
- return "", Errno(ENOSYS)
+ // NOTE(Feoramund): The situation isn't ideal, but this was the best way I
+ // could find to implement this. There are a couple outstanding bug reports
+ // regarding the desire to retrieve an absolute path from a handle, but to
+ // my knowledge, there hasn't been any work done on it.
+ //
+ // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=198570
+ //
+ // This may be unreliable, according to a comment from 2023.
+
+ kinfo: KInfo_File
+ kinfo.structsize = KINFO_FILE_SIZE
+
+ res := _unix_fcntl(fd, F_KINFO, cast(uintptr)&kinfo)
+ if res == -1 {
+ return "", Errno(get_last_error())
+ }
+
+ path := strings.clone_from_cstring_bounded(cast(cstring)&kinfo.path[0], len(kinfo.path))
+ return path, ERROR_NONE
}
absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
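A brief usage sketch of the implementation above, not part of the patch; the file name is hypothetical:

import "core:os"

absolute_path_example :: proc() {
	fd, open_err := os.open("/tmp/example.txt")
	if open_err != os.ERROR_NONE {
		return
	}
	defer os.close(fd)

	if path, path_err := os.absolute_path_from_handle(fd); path_err == os.ERROR_NONE {
		delete(path) // the implementation above clones the path onto the current allocator
	}
}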
diff --git a/core/os/os_netbsd.odin b/core/os/os_netbsd.odin
index e8e551340..c0f237bf5 100644
--- a/core/os/os_netbsd.odin
+++ b/core/os/os_netbsd.odin
@@ -5,7 +5,6 @@ foreign import libc "system:c"
import "base:runtime"
import "core:strings"
-import "core:sys/unix"
import "core:c"
Handle :: distinct i32
@@ -328,6 +327,11 @@ foreign dl {
@(link_name="dlerror") _unix_dlerror :: proc() -> cstring ---
}
+@(private)
+foreign libc {
+ _lwp_self :: proc() -> i32 ---
+}
+
// NOTE(phix): Perhaps share the following functions with FreeBSD if they turn out to be the same in the end.
is_path_separator :: proc(r: rune) -> bool {
@@ -721,7 +725,7 @@ exit :: proc "contextless" (code: int) -> ! {
}
current_thread_id :: proc "contextless" () -> int {
- return cast(int) unix.pthread_self()
+ return int(_lwp_self())
}
dlopen :: proc(filename: string, flags: int) -> rawptr {
diff --git a/core/path/filepath/path_unix.odin b/core/path/filepath/path_unix.odin
index a4b27b027..b44a6a344 100644
--- a/core/path/filepath/path_unix.odin
+++ b/core/path/filepath/path_unix.odin
@@ -56,7 +56,7 @@ foreign libc {
@(link_name="free") _unix_free :: proc(ptr: rawptr) ---
}
-when ODIN_OS == .Darwin {
+when ODIN_OS == .Darwin || ODIN_OS == .FreeBSD {
@(private)
foreign libc {
@(link_name="__error") __error :: proc() -> ^i32 ---
diff --git a/core/simd/x86/aes.odin b/core/simd/x86/aes.odin
new file mode 100644
index 000000000..3a32de0d6
--- /dev/null
+++ b/core/simd/x86/aes.odin
@@ -0,0 +1,49 @@
+//+build i386, amd64
+package simd_x86
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesdec :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesdec(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesdeclast :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesdeclast(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesenc :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesenc(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesenclast :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
+ return aesenclast(a, b)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aesimc :: #force_inline proc "c" (a: __m128i) -> __m128i {
+ return aesimc(a)
+}
+
+@(require_results, enable_target_feature = "aes")
+_mm_aeskeygenassist :: #force_inline proc "c" (a: __m128i, $IMM8: u8) -> __m128i {
+ return aeskeygenassist(a, u8(IMM8))
+}
+
+
+@(private, default_calling_convention = "none")
+foreign _ {
+ @(link_name = "llvm.x86.aesni.aesdec")
+ aesdec :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesdeclast")
+ aesdeclast :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesenc")
+ aesenc :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesenclast")
+ aesenclast :: proc(a, b: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aesimc")
+ aesimc :: proc(a: __m128i) -> __m128i ---
+ @(link_name = "llvm.x86.aesni.aeskeygenassist")
+ aeskeygenassist :: proc(a: __m128i, imm8: u8) -> __m128i ---
+}
diff --git a/core/slice/permute.odin b/core/slice/permute.odin
new file mode 100644
index 000000000..42b6d4129
--- /dev/null
+++ b/core/slice/permute.odin
@@ -0,0 +1,105 @@
+package slice
+
+import "base:runtime"
+
+// An in-place permutation iterator.
+Permutation_Iterator :: struct($T: typeid) {
+ index: int,
+ slice: []T,
+ counters: []int,
+}
+
+/*
+Make an iterator to permute a slice in-place.
+
+*Allocates Using Provided Allocator*
+
+This procedure allocates some state to assist in permutation and does not make
+a copy of the underlying slice. If you want to permute a slice without altering
+the underlying data, use `clone` to create a copy, then permute that instead.
+
+Inputs:
+- slice: The slice to permute.
+- allocator: (default is context.allocator)
+
+Returns:
+- iter: The iterator, to be passed to `permute`.
+- error: An `Allocator_Error`, if allocation failed.
+*/
+make_permutation_iterator :: proc(
+ slice: []$T,
+ allocator := context.allocator,
+) -> (
+ iter: Permutation_Iterator(T),
+ error: runtime.Allocator_Error,
+) #optional_allocator_error {
+ iter.slice = slice
+ iter.counters = make([]int, len(iter.slice), allocator) or_return
+
+ return
+}
+/*
+Free the state allocated by `make_permutation_iterator`.
+
+Inputs:
+- iter: The iterator created by `make_permutation_iterator`.
+- allocator: The allocator used to create the iterator. (default is context.allocator)
+*/
+destroy_permutation_iterator :: proc(
+ iter: Permutation_Iterator($T),
+ allocator := context.allocator,
+) {
+ delete(iter.counters, allocator = allocator)
+}
+/*
+Permute a slice in-place.
+
+Note that the first iteration will always be the original, unpermuted slice.
+
+Inputs:
+- iter: The iterator created by `make_permutation_iterator`.
+
+Returns:
+- ok: True if the permutation succeeded, false if the iteration is complete.
+*/
+permute :: proc(iter: ^Permutation_Iterator($T)) -> (ok: bool) {
+ // This is an iterative, resumable implementation of Heap's algorithm.
+ //
+ // The original algorithm was described by B. R. Heap as "Permutations by
+ // interchanges" in The Computer Journal, 1963.
+ //
+ // This implementation is based on the nonrecursive version described by
+ // Robert Sedgewick in "Permutation Generation Methods" which was published
+ // in ACM Computing Surveys in 1977.
+
+ i := iter.index
+
+ if i == 0 {
+ iter.index = 1
+ return true
+ }
+
+ n := len(iter.counters)
+ #no_bounds_check for i < n {
+ if iter.counters[i] < i {
+ if i & 1 == 0 {
+ iter.slice[0], iter.slice[i] = iter.slice[i], iter.slice[0]
+ } else {
+ iter.slice[iter.counters[i]], iter.slice[i] = iter.slice[i], iter.slice[iter.counters[i]]
+ }
+
+ iter.counters[i] += 1
+ i = 1
+
+ break
+ } else {
+ iter.counters[i] = 0
+ i += 1
+ }
+ }
+ if i == n {
+ return false
+ }
+ iter.index = i
+ return true
+}
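Taken together, the iterator is driven by looping on `permute` until it reports completion. A small usage sketch, assuming the package is imported as `core:slice`:

	import "core:fmt"
	import "core:slice"

	print_permutations :: proc() {
		values := []int{1, 2, 3}

		iter := slice.make_permutation_iterator(values)
		defer slice.destroy_permutation_iterator(iter)

		// The first iteration yields the original, unpermuted slice.
		for slice.permute(&iter) {
			fmt.println(values)
		}
	}

Because the slice is permuted in place, clone it first if the original ordering must be preserved, as the documentation above notes.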
diff --git a/core/strconv/generic_float.odin b/core/strconv/generic_float.odin
index 6dc11c0be..b049f0fe1 100644
--- a/core/strconv/generic_float.odin
+++ b/core/strconv/generic_float.odin
@@ -375,7 +375,7 @@ decimal_to_float_bits :: proc(d: ^decimal.Decimal, info: ^Float_Info) -> (b: u64
return
}
- @static power_table := [?]int{1, 3, 6, 9, 13, 16, 19, 23, 26}
+ @(static, rodata) power_table := [?]int{1, 3, 6, 9, 13, 16, 19, 23, 26}
exp = 0
for d.decimal_point > 0 {
diff --git a/core/strconv/strconv.odin b/core/strconv/strconv.odin
index 94842617e..902f1cdc5 100644
--- a/core/strconv/strconv.odin
+++ b/core/strconv/strconv.odin
@@ -835,17 +835,21 @@ Example:
n, _, ok = strconv.parse_f64_prefix("12.34e2")
fmt.printfln("%.3f %v", n, ok)
+
+ n, _, ok = strconv.parse_f64_prefix("13.37 hellope")
+ fmt.printfln("%.3f %v", n, ok)
}
Output:
0.000 false
1234.000 true
+ 13.370 true
**Returns**
- value: The parsed 64-bit floating point number.
- nr: The length of the parsed substring.
-- ok: `false` if a base 10 float could not be found, or if the input string contained more than just the number.
+- ok: `false` if a base 10 float could not be found.
*/
parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
common_prefix_len_ignore_case :: proc "contextless" (s, prefix: string) -> int {
@@ -878,13 +882,16 @@ parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
s = s[1:]
fallthrough
case 'i', 'I':
- n = common_prefix_len_ignore_case(s, "infinity")
- if 3 < n && n < 8 { // "inf" or "infinity"
- n = 3
- }
- if n == 3 || n == 8 {
+ m := common_prefix_len_ignore_case(s, "infinity")
+ if 3 <= m && m < 9 { // "inf" to "infinity"
f = 0h7ff00000_00000000 if sign == 1 else 0hfff00000_00000000
- n = nsign + 3
+ if m == 8 {
+ // We only count the entire prefix if it is precisely "infinity".
+ n = nsign + m
+ } else {
+ // The string was either only "inf" or incomplete.
+ n = nsign + 3
+ }
ok = true
return
}
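With this change, a complete "infinity" consumes all eight characters, while any partial match beyond "inf" counts only the three characters of "inf"; in both cases the parse succeeds. A short illustration of the new behaviour, derived from the logic above:

	import "core:fmt"
	import "core:strconv"

	inf_prefix_example :: proc() {
		// The whole word matches: value is +inf, n == 8, ok == true.
		v, n, ok := strconv.parse_f64_prefix("infinity")
		fmt.println(v, n, ok)

		// "infinit" is an incomplete match, so only "inf" is counted:
		// value is +inf, n == 3, ok == true.
		v, n, ok = strconv.parse_f64_prefix("infinite")
		fmt.println(v, n, ok)
	}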
@@ -1088,7 +1095,7 @@ parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
}
trunc_block: if !trunc {
- @static pow10 := [?]f64{
+ @(static, rodata) pow10 := [?]f64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22,
@@ -1124,6 +1131,275 @@ parse_f64_prefix :: proc(str: string) -> (value: f64, nr: int, ok: bool) {
ok = !overflow
return
}
+/*
+Parses a 128-bit complex number from a string
+
+**Inputs**
+- str: The input string containing a 128-bit complex number.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_complex128_example :: proc() {
+ n: int
+ c, ok := strconv.parse_complex128("3+1i", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+
+ c, ok = strconv.parse_complex128("5+7i hellope", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+ }
+
+Output:
+
+ 3+1i 4 true
+ 5+7i 4 false
+
+**Returns**
+- value: The parsed 128-bit complex number.
+- ok: `false` if a complex number could not be found, or if the input string contained more than just the number.
+*/
+parse_complex128 :: proc(str: string, n: ^int = nil) -> (value: complex128, ok: bool) {
+ real_value, imag_value: f64
+ nr_r, nr_i: int
+
+ real_value, nr_r, _ = parse_f64_prefix(str)
+ imag_value, nr_i, _ = parse_f64_prefix(str[nr_r:])
+
+ i_parsed := len(str) >= nr_r + nr_i + 1 && str[nr_r + nr_i] == 'i'
+ if !i_parsed {
+ // No `i` means we refuse to treat the second float we parsed as an
+ // imaginary value.
+ imag_value = 0
+ nr_i = 0
+ }
+
+ ok = i_parsed && len(str) == nr_r + nr_i + 1
+
+ if n != nil {
+ n^ = nr_r + nr_i + (1 if i_parsed else 0)
+ }
+
+ value = complex(real_value, imag_value)
+ return
+}
+/*
+Parses a 64-bit complex number from a string
+
+**Inputs**
+- str: The input string containing a 64-bit complex number.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_complex64_example :: proc() {
+ n: int
+ c, ok := strconv.parse_complex64("3+1i", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+
+ c, ok = strconv.parse_complex64("5+7i hellope", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+ }
+
+Output:
+
+ 3+1i 4 true
+ 5+7i 4 false
+
+**Returns**
+- value: The parsed 64-bit complex number.
+- ok: `false` if a complex number could not be found, or if the input string contained more than just the number.
+*/
+parse_complex64 :: proc(str: string, n: ^int = nil) -> (value: complex64, ok: bool) {
+ v: complex128 = ---
+ v, ok = parse_complex128(str, n)
+ return cast(complex64)v, ok
+}
+/*
+Parses a 32-bit complex number from a string
+
+**Inputs**
+- str: The input string containing a 32-bit complex number.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_complex32_example :: proc() {
+ n: int
+ c, ok := strconv.parse_complex32("3+1i", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+
+ c, ok = strconv.parse_complex32("5+7i hellope", &n)
+ fmt.printfln("%v %i %t", c, n, ok)
+ }
+
+Output:
+
+ 3+1i 4 true
+ 5+7i 4 false
+
+**Returns**
+- value: The parsed 32-bit complex number.
+- ok: `false` if a complex number could not be found, or if the input string contained more than just the number.
+*/
+parse_complex32 :: proc(str: string, n: ^int = nil) -> (value: complex32, ok: bool) {
+ v: complex128 = ---
+ v, ok = parse_complex128(str, n)
+ return cast(complex32)v, ok
+}
+/*
+Parses a 256-bit quaternion from a string
+
+**Inputs**
+- str: The input string containing a 256-bit quaternion.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_quaternion256_example :: proc() {
+ n: int
+ q, ok := strconv.parse_quaternion256("1+2i+3j+4k", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+
+ q, ok = strconv.parse_quaternion256("1+2i+3j+4k hellope", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+ }
+
+Output:
+
+ 1+2i+3j+4k 10 true
+ 1+2i+3j+4k 10 false
+
+**Returns**
+- value: The parsed 256-bit quaternion.
+- ok: `false` if a quaternion could not be found, or if the input string contained more than just the quaternion.
+*/
+parse_quaternion256 :: proc(str: string, n: ^int = nil) -> (value: quaternion256, ok: bool) {
+ iterate_and_assign :: proc (iter: ^string, terminator: byte, nr_total: ^int, state: bool) -> (value: f64, ok: bool) {
+ if !state {
+ return
+ }
+
+ nr: int
+ value, nr, _ = parse_f64_prefix(iter^)
+ iter^ = iter[nr:]
+
+ if len(iter) > 0 && iter[0] == terminator {
+ iter^ = iter[1:]
+ nr_total^ += nr + 1
+ ok = true
+ } else {
+ value = 0
+ }
+
+ return
+ }
+
+ real_value, imag_value, jmag_value, kmag_value: f64
+ nr: int
+
+ real_value, nr, _ = parse_f64_prefix(str)
+ iter := str[nr:]
+
+ // Need to have parsed at least something in order to get started.
+ ok = nr > 0
+
+ // Quaternion parsing is done this way to honour the rest of the API with
+ // regards to partial parsing. Otherwise, we could error out early.
+ imag_value, ok = iterate_and_assign(&iter, 'i', &nr, ok)
+ jmag_value, ok = iterate_and_assign(&iter, 'j', &nr, ok)
+ kmag_value, ok = iterate_and_assign(&iter, 'k', &nr, ok)
+
+ if len(iter) != 0 {
+ ok = false
+ }
+
+ if n != nil {
+ n^ = nr
+ }
+
+ value = quaternion(
+ real = real_value,
+ imag = imag_value,
+ jmag = jmag_value,
+ kmag = kmag_value)
+ return
+}
+/*
+Parses a 128-bit quaternion from a string
+
+**Inputs**
+- str: The input string containing a 128-bit quaternion.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_quaternion128_example :: proc() {
+ n: int
+ q, ok := strconv.parse_quaternion128("1+2i+3j+4k", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+
+ q, ok = strconv.parse_quaternion128("1+2i+3j+4k hellope", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+ }
+
+Output:
+
+ 1+2i+3j+4k 10 true
+ 1+2i+3j+4k 10 false
+
+**Returns**
+- value: The parsed 128-bit quaternion.
+- ok: `false` if a quaternion could not be found, or if the input string contained more than just the quaternion.
+*/
+parse_quaternion128 :: proc(str: string, n: ^int = nil) -> (value: quaternion128, ok: bool) {
+ v: quaternion256 = ---
+ v, ok = parse_quaternion256(str, n)
+ return cast(quaternion128)v, ok
+}
+/*
+Parses a 64-bit quaternion from a string
+
+**Inputs**
+- str: The input string containing a 64-bit quaternion.
+- n: An optional pointer to an int to store the length of the parsed substring (default: nil).
+
+Example:
+
+ import "core:fmt"
+ import "core:strconv"
+ parse_quaternion64_example :: proc() {
+ n: int
+ q, ok := strconv.parse_quaternion64("1+2i+3j+4k", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+
+ q, ok = strconv.parse_quaternion64("1+2i+3j+4k hellope", &n)
+ fmt.printfln("%v %i %t", q, n, ok)
+ }
+
+Output:
+
+ 1+2i+3j+4k 10 true
+ 1+2i+3j+4k 10 false
+
+**Returns**
+- value: The parsed 64-bit quaternion.
+- ok: `false` if a quaternion could not be found, or if the input string contained more than just the quaternion.
+*/
+parse_quaternion64 :: proc(str: string, n: ^int = nil) -> (value: quaternion64, ok: bool) {
+ v: quaternion256 = ---
+ v, ok = parse_quaternion256(str, n)
+ return cast(quaternion64)v, ok
+}
/*
Appends a boolean value as a string to the given buffer

diff --git a/core/strings/builder.odin b/core/strings/builder.odin
index 72eb815f9..11885b689 100644
--- a/core/strings/builder.odin
+++ b/core/strings/builder.odin
@@ -350,9 +350,9 @@ Output:
ab
*/
-write_byte :: proc(b: ^Builder, x: byte) -> (n: int) {
+write_byte :: proc(b: ^Builder, x: byte, loc := #caller_location) -> (n: int) {
n0 := len(b.buf)
- append(&b.buf, x)
+ append(&b.buf, x, loc)
n1 := len(b.buf)
return n1-n0
}
@@ -380,9 +380,9 @@ NOTE: The backing dynamic array may be fixed in capacity or fail to resize, `n`
Returns:
- n: The number of bytes appended
*/
-write_bytes :: proc(b: ^Builder, x: []byte) -> (n: int) {
+write_bytes :: proc(b: ^Builder, x: []byte, loc := #caller_location) -> (n: int) {
n0 := len(b.buf)
- append(&b.buf, ..x)
+ append(&b.buf, ..x, loc=loc)
n1 := len(b.buf)
return n1-n0
}
diff --git a/core/sync/futex_darwin.odin b/core/sync/futex_darwin.odin
index 6ea177d1b..fca9aadfe 100644
--- a/core/sync/futex_darwin.odin
+++ b/core/sync/futex_darwin.odin
@@ -52,7 +52,7 @@ _futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expected: u32, durati
}
} else {
- timeout_ns := u32(duration) * 1000
+ timeout_ns := u32(duration)
s := __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, u64(expected), timeout_ns)
if s >= 0 {
return true
diff --git a/core/sync/primitives_netbsd.odin b/core/sync/primitives_netbsd.odin
index 042e744e8..594f2ff5c 100644
--- a/core/sync/primitives_netbsd.odin
+++ b/core/sync/primitives_netbsd.odin
@@ -1,8 +1,12 @@
//+private
package sync
-import "core:sys/unix"
+foreign import libc "system:c"
+
+foreign libc {
+ _lwp_self :: proc "c" () -> i32 ---
+}
_current_thread_id :: proc "contextless" () -> int {
- return cast(int) unix.pthread_self()
+ return int(_lwp_self())
}
diff --git a/core/sys/info/platform_darwin.odin b/core/sys/info/platform_darwin.odin
index 122dd42ee..0cae0aa98 100644
--- a/core/sys/info/platform_darwin.odin
+++ b/core/sys/info/platform_darwin.odin
@@ -527,6 +527,7 @@ macos_release_map: map[string]Darwin_To_Release = {
"23D60" = {{23, 3, 0}, "macOS", {"Sonoma", {14, 3, 1}}},
"23E214" = {{23, 4, 0}, "macOS", {"Sonoma", {14, 4, 0}}},
"23E224" = {{23, 4, 0}, "macOS", {"Sonoma", {14, 4, 1}}},
+ "23F79" = {{23, 5, 0}, "macOS", {"Sonoma", {14, 5, 0}}},
}
@(private)
diff --git a/core/sys/linux/sys.odin b/core/sys/linux/sys.odin
index 413c8742b..171829cde 100644
--- a/core/sys/linux/sys.odin
+++ b/core/sys/linux/sys.odin
@@ -487,6 +487,7 @@ connect :: proc "contextless" (sock: Fd, addr: ^$T) -> (Errno)
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
ret := syscall(SYS_connect, sock, addr, size_of(T))
@@ -502,6 +503,7 @@ accept :: proc "contextless" (sock: Fd, addr: ^$T, sockflags: Socket_FD_Flags =
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
addr_len: i32 = size_of(T)
@@ -514,6 +516,7 @@ recvfrom :: proc "contextless" (sock: Fd, buf: []u8, flags: Socket_Msg, addr: ^$
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
addr_len: i32 = size_of(T)
@@ -531,6 +534,7 @@ sendto :: proc "contextless" (sock: Fd, buf: []u8, flags: Socket_Msg, addr: ^$T)
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
ret := syscall(SYS_sendto, sock, raw_data(buf), len(buf), transmute(i32) flags, addr, size_of(T))
@@ -590,6 +594,7 @@ bind :: proc "contextless" (sock: Fd, addr: ^$T) -> (Errno)
where
T == Sock_Addr_In ||
T == Sock_Addr_In6 ||
+ T == Sock_Addr_Un ||
T == Sock_Addr_Any
{
ret := syscall(SYS_bind, sock, addr, size_of(T))
diff --git a/core/sys/linux/types.odin b/core/sys/linux/types.odin
index 677bac7e0..5053e1e1c 100644
--- a/core/sys/linux/types.odin
+++ b/core/sys/linux/types.odin
@@ -632,6 +632,14 @@ Sock_Addr_In6 :: struct #packed {
}
/*
+ Struct representing Unix Domain Socket address
+*/
+Sock_Addr_Un :: struct #packed {
+ sun_family: Address_Family,
+ sun_path: [108]u8,
+}
+
+/*
Struct representing an arbitrary socket address.
*/
Sock_Addr_Any :: struct #raw_union {
@@ -641,6 +649,7 @@ Sock_Addr_Any :: struct #raw_union {
},
using ipv4: Sock_Addr_In,
using ipv6: Sock_Addr_In6,
+ using uds: Sock_Addr_Un,
}
/*
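The new `Sock_Addr_Un` variant lets Unix domain socket addresses be passed to `bind`, `connect`, `accept`, `sendto`, and `recvfrom`. A hedged sketch of binding such an address; the `.UNIX` member of `Address_Family` and the already-created `sock: linux.Fd` are assumptions here, not part of this patch:

	import "core:sys/linux"

	bind_uds_sketch :: proc(sock: linux.Fd) -> linux.Errno {
		addr: linux.Sock_Addr_Un
		addr.sun_family = .UNIX // assumed enum member name
		copy(addr.sun_path[:], "/tmp/example.sock")
		return linux.bind(sock, &addr)
	}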
diff --git a/core/sys/unix/pthread_freebsd.odin b/core/sys/unix/pthread_freebsd.odin
index 3417d3943..5f4dac289 100644
--- a/core/sys/unix/pthread_freebsd.odin
+++ b/core/sys/unix/pthread_freebsd.odin
@@ -95,7 +95,7 @@ sem_t :: struct {
PTHREAD_CANCEL_ENABLE :: 0
PTHREAD_CANCEL_DISABLE :: 1
PTHREAD_CANCEL_DEFERRED :: 0
-PTHREAD_CANCEL_ASYNCHRONOUS :: 1
+PTHREAD_CANCEL_ASYNCHRONOUS :: 2
foreign import "system:pthread"
@@ -119,4 +119,4 @@ foreign pthread {
pthread_setcancelstate :: proc (state: c.int, old_state: ^c.int) -> c.int ---
pthread_setcanceltype :: proc (type: c.int, old_type: ^c.int) -> c.int ---
pthread_cancel :: proc (thread: pthread_t) -> c.int ---
-} \ No newline at end of file
+}
diff --git a/core/sys/unix/pthread_openbsd.odin b/core/sys/unix/pthread_openbsd.odin
index 7ae82e662..855e7d99c 100644
--- a/core/sys/unix/pthread_openbsd.odin
+++ b/core/sys/unix/pthread_openbsd.odin
@@ -49,7 +49,7 @@ sem_t :: distinct rawptr
PTHREAD_CANCEL_ENABLE :: 0
PTHREAD_CANCEL_DISABLE :: 1
PTHREAD_CANCEL_DEFERRED :: 0
-PTHREAD_CANCEL_ASYNCHRONOUS :: 1
+PTHREAD_CANCEL_ASYNCHRONOUS :: 2
foreign import libc "system:c"
@@ -71,4 +71,4 @@ foreign libc {
pthread_setcancelstate :: proc (state: c.int, old_state: ^c.int) -> c.int ---
pthread_setcanceltype :: proc (type: c.int, old_type: ^c.int) -> c.int ---
pthread_cancel :: proc (thread: pthread_t) -> c.int ---
-} \ No newline at end of file
+}
diff --git a/core/sys/unix/pthread_unix.odin b/core/sys/unix/pthread_unix.odin
index 5760560ee..c876a214a 100644
--- a/core/sys/unix/pthread_unix.odin
+++ b/core/sys/unix/pthread_unix.odin
@@ -116,4 +116,5 @@ foreign pthread {
pthread_mutexattr_setpshared :: proc(attrs: ^pthread_mutexattr_t, value: c.int) -> c.int ---
pthread_mutexattr_getpshared :: proc(attrs: ^pthread_mutexattr_t, result: ^c.int) -> c.int ---
+ pthread_testcancel :: proc () ---
}
diff --git a/core/sys/windows/kernel32.odin b/core/sys/windows/kernel32.odin
index eba275522..3c60cfc43 100644..100755
--- a/core/sys/windows/kernel32.odin
+++ b/core/sys/windows/kernel32.odin
@@ -453,9 +453,9 @@ foreign kernel32 {
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setfilecompletionnotificationmodes)
SetFileCompletionNotificationModes :: proc(FileHandle: HANDLE, Flags: u8) -> BOOL ---
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-createiocompletionport)
- CreateIoCompletionPort :: proc(FileHandle: HANDLE, ExistingCompletionPort: HANDLE, CompletionKey: ^uintptr, NumberOfConcurrentThreads: DWORD) -> HANDLE ---
+ CreateIoCompletionPort :: proc(FileHandle: HANDLE, ExistingCompletionPort: HANDLE, CompletionKey: ULONG_PTR, NumberOfConcurrentThreads: DWORD) -> HANDLE ---
//[MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus)
- GetQueuedCompletionStatus :: proc(CompletionPort: HANDLE, lpNumberOfBytesTransferred: ^DWORD, lpCompletionKey: uintptr, lpOverlapped: ^^OVERLAPPED, dwMilliseconds: DWORD) -> BOOL ---
+ GetQueuedCompletionStatus :: proc(CompletionPort: HANDLE, lpNumberOfBytesTransferred: ^DWORD, lpCompletionKey: PULONG_PTR, lpOverlapped: ^^OVERLAPPED, dwMilliseconds: DWORD) -> BOOL ---
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatusex)
GetQueuedCompletionStatusEx :: proc(CompletionPort: HANDLE, lpCompletionPortEntries: ^OVERLAPPED_ENTRY, ulCount: c_ulong, ulNumEntriesRemoved: ^c_ulong, dwMilliseconds: DWORD, fAlertable: BOOL) -> BOOL ---
// [MS-Docs](https://learn.microsoft.com/en-us/windows/win32/api/ioapiset/nf-ioapiset-postqueuedcompletionstatus)
@@ -1153,6 +1153,19 @@ foreign kernel32 {
SetCommState :: proc(handle: HANDLE, dcb: ^DCB) -> BOOL ---
}
+COMMTIMEOUTS :: struct {
+ ReadIntervalTimeout: DWORD,
+ ReadTotalTimeoutMultiplier: DWORD,
+ ReadTotalTimeoutConstant: DWORD,
+ WriteTotalTimeoutMultiplier: DWORD,
+ WriteTotalTimeoutConstant: DWORD,
+}
+
+@(default_calling_convention="system")
+foreign kernel32 {
+ GetCommTimeouts :: proc(handle: HANDLE, timeouts: ^COMMTIMEOUTS) -> BOOL ---
+ SetCommTimeouts :: proc(handle: HANDLE, timeouts: ^COMMTIMEOUTS) -> BOOL ---
+}
LPFIBER_START_ROUTINE :: #type proc "system" (lpFiberParameter: LPVOID)
diff --git a/core/testing/events.odin b/core/testing/events.odin
new file mode 100644
index 000000000..bab35aaad
--- /dev/null
+++ b/core/testing/events.odin
@@ -0,0 +1,48 @@
+//+private
+package testing
+
+import "base:runtime"
+import "core:sync/chan"
+import "core:time"
+
+Test_State :: enum {
+ Ready,
+ Running,
+ Successful,
+ Failed,
+}
+
+Update_Channel :: chan.Chan(Channel_Event)
+Update_Channel_Sender :: chan.Chan(Channel_Event, .Send)
+
+Task_Channel :: struct {
+ channel: Update_Channel,
+ test_index: int,
+}
+
+Event_New_Test :: struct {
+ test_index: int,
+}
+
+Event_State_Change :: struct {
+ new_state: Test_State,
+}
+
+Event_Set_Fail_Timeout :: struct {
+ at_time: time.Time,
+ location: runtime.Source_Code_Location,
+}
+
+Event_Log_Message :: struct {
+ level: runtime.Logger_Level,
+ text: string,
+ time: time.Time,
+ formatted_text: string,
+}
+
+Channel_Event :: union {
+ Event_New_Test,
+ Event_State_Change,
+ Event_Set_Fail_Timeout,
+ Event_Log_Message,
+}
diff --git a/core/testing/logging.odin b/core/testing/logging.odin
new file mode 100644
index 000000000..5bbbffeae
--- /dev/null
+++ b/core/testing/logging.odin
@@ -0,0 +1,71 @@
+//+private
+package testing
+
+import "base:runtime"
+import "core:fmt"
+import pkg_log "core:log"
+import "core:strings"
+import "core:sync/chan"
+import "core:time"
+
+Default_Test_Logger_Opts :: runtime.Logger_Options {
+ .Level,
+ .Terminal_Color,
+ .Short_File_Path,
+ .Line,
+ .Procedure,
+ .Date, .Time,
+}
+
+Log_Message :: struct {
+ level: runtime.Logger_Level,
+ text: string,
+ time: time.Time,
+ // `text` may be allocated differently, depending on where a log message
+ // originates from.
+ allocator: runtime.Allocator,
+}
+
+test_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) {
+ t := cast(^T)logger_data
+
+ if level >= .Error {
+ t.error_count += 1
+ }
+
+ cloned_text, clone_error := strings.clone(text, t._log_allocator)
+ assert(clone_error == nil, "Error while cloning string in test thread logger proc.")
+
+ now := time.now()
+
+ chan.send(t.channel, Event_Log_Message {
+ level = level,
+ text = cloned_text,
+ time = now,
+ formatted_text = format_log_text(level, text, options, location, now, t._log_allocator),
+ })
+}
+
+runner_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) {
+ log_messages := cast(^[dynamic]Log_Message)logger_data
+
+ now := time.now()
+
+ append(log_messages, Log_Message {
+ level = level,
+ text = format_log_text(level, text, options, location, now),
+ time = now,
+ allocator = context.allocator,
+ })
+}
+
+format_log_text :: proc(level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location: runtime.Source_Code_Location, at_time: time.Time, allocator := context.allocator) -> string{
+ backing: [1024]byte
+ buf := strings.builder_from_bytes(backing[:])
+
+ pkg_log.do_level_header(options, &buf, level)
+ pkg_log.do_time_header(options, &buf, at_time)
+ pkg_log.do_location_header(options, &buf, location)
+
+ return fmt.aprintf("%s%s", strings.to_string(buf), text, allocator = allocator)
+}
diff --git a/core/testing/reporting.odin b/core/testing/reporting.odin
new file mode 100644
index 000000000..92e144ccc
--- /dev/null
+++ b/core/testing/reporting.odin
@@ -0,0 +1,329 @@
+//+private
+package testing
+
+import "base:runtime"
+import "core:encoding/ansi"
+import "core:fmt"
+import "core:io"
+import "core:mem"
+import "core:path/filepath"
+import "core:strings"
+
+// Definitions of colors for use in the test runner.
+SGR_RESET :: ansi.CSI + ansi.RESET + ansi.SGR
+SGR_READY :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR
+SGR_RUNNING :: ansi.CSI + ansi.FG_YELLOW + ansi.SGR
+SGR_SUCCESS :: ansi.CSI + ansi.FG_GREEN + ansi.SGR
+SGR_FAILED :: ansi.CSI + ansi.FG_RED + ansi.SGR
+
+MAX_PROGRESS_WIDTH :: 100
+
+// More than enough bytes to cover long package names, long test names, dozens
+// of ANSI codes, et cetera.
+LINE_BUFFER_SIZE :: (MAX_PROGRESS_WIDTH * 8 + 224) * runtime.Byte
+
+PROGRESS_COLUMN_SPACING :: 2
+
+Package_Run :: struct {
+ name: string,
+ header: string,
+
+ frame_ready: bool,
+
+ redraw_buffer: [LINE_BUFFER_SIZE]byte,
+ redraw_string: string,
+
+ last_change_state: Test_State,
+ last_change_name: string,
+
+ tests: []Internal_Test,
+ test_states: []Test_State,
+}
+
+Report :: struct {
+ packages: []Package_Run,
+ packages_by_name: map[string]^Package_Run,
+
+ pkg_column_len: int,
+ test_column_len: int,
+ progress_width: int,
+
+ all_tests: []Internal_Test,
+ all_test_states: []Test_State,
+}
+
+// Organize all tests by package and sort out test state data.
+make_report :: proc(internal_tests: []Internal_Test) -> (report: Report, error: runtime.Allocator_Error) {
+ assert(len(internal_tests) > 0, "make_report called with no tests")
+
+ packages: [dynamic]Package_Run
+
+ report.all_tests = internal_tests
+ report.all_test_states = make([]Test_State, len(internal_tests)) or_return
+
+ // First, figure out what belongs where.
+ #no_bounds_check cur_pkg := internal_tests[0].pkg
+ pkg_start: int
+
+ // This loop assumes the tests are sorted by package already.
+ for it, index in internal_tests {
+ if cur_pkg != it.pkg {
+ #no_bounds_check {
+ append(&packages, Package_Run {
+ name = cur_pkg,
+ tests = report.all_tests[pkg_start:index],
+ test_states = report.all_test_states[pkg_start:index],
+ }) or_return
+ }
+
+ when PROGRESS_WIDTH == 0 {
+ report.progress_width = max(report.progress_width, index - pkg_start)
+ }
+
+ pkg_start = index
+ report.pkg_column_len = max(report.pkg_column_len, len(cur_pkg))
+ cur_pkg = it.pkg
+ }
+ report.test_column_len = max(report.test_column_len, len(it.name))
+ }
+
+ // Handle the last (or only) package.
+ #no_bounds_check {
+ append(&packages, Package_Run {
+ name = cur_pkg,
+ header = cur_pkg,
+ tests = report.all_tests[pkg_start:],
+ test_states = report.all_test_states[pkg_start:],
+ }) or_return
+ }
+ when PROGRESS_WIDTH == 0 {
+ report.progress_width = max(report.progress_width, len(internal_tests) - pkg_start)
+ } else {
+ report.progress_width = PROGRESS_WIDTH
+ }
+ report.progress_width = min(report.progress_width, MAX_PROGRESS_WIDTH)
+
+ report.pkg_column_len = PROGRESS_COLUMN_SPACING + max(report.pkg_column_len, len(cur_pkg))
+
+ shrink(&packages) or_return
+
+ for &pkg in packages {
+ pkg.header = fmt.aprintf("%- *[1]s[", pkg.name, report.pkg_column_len)
+ assert(len(pkg.header) > 0, "Error allocating package header string.")
+
+ // This is safe because the array is done resizing, and it has the same
+ // lifetime as the map.
+ report.packages_by_name[pkg.name] = &pkg
+ }
+
+ // It's okay to discard the dynamic array's allocator information here,
+ // because its capacity has been shrunk to its length, it was allocated by
+ // the caller's context allocator, and it will be deallocated by the same.
+ //
+ // `delete_slice` is equivalent to `delete_dynamic_array` in this case.
+ report.packages = packages[:]
+
+ return
+}
+
+destroy_report :: proc(report: ^Report) {
+ for pkg in report.packages {
+ delete(pkg.header)
+ }
+
+ delete(report.packages)
+ delete(report.packages_by_name)
+ delete(report.all_test_states)
+}
+
+redraw_package :: proc(w: io.Writer, report: Report, pkg: ^Package_Run) {
+ if pkg.frame_ready {
+ io.write_string(w, pkg.redraw_string)
+ return
+ }
+
+ // Write the output line here so we can cache it.
+ line_builder := strings.builder_from_bytes(pkg.redraw_buffer[:])
+ line_writer := strings.to_writer(&line_builder)
+
+ highest_run_index: int
+ failed_count: int
+ done_count: int
+ #no_bounds_check for i := 0; i < len(pkg.test_states); i += 1 {
+ switch pkg.test_states[i] {
+ case .Ready:
+ continue
+ case .Running:
+ highest_run_index = max(highest_run_index, i)
+ case .Successful:
+ done_count += 1
+ case .Failed:
+ failed_count += 1
+ done_count += 1
+ }
+ }
+
+ start := max(0, highest_run_index - (report.progress_width - 1))
+ end := min(start + report.progress_width, len(pkg.test_states))
+
+ // This variable is to keep track of the last ANSI code emitted, in
+ // order to avoid repeating the same code over in a sequence.
+ //
+ // This should help reduce screen flicker.
+ last_state := Test_State(-1)
+
+ io.write_string(line_writer, pkg.header)
+
+ #no_bounds_check for state in pkg.test_states[start:end] {
+ switch state {
+ case .Ready:
+ if last_state != state {
+ io.write_string(line_writer, SGR_READY)
+ last_state = state
+ }
+ case .Running:
+ if last_state != state {
+ io.write_string(line_writer, SGR_RUNNING)
+ last_state = state
+ }
+ case .Successful:
+ if last_state != state {
+ io.write_string(line_writer, SGR_SUCCESS)
+ last_state = state
+ }
+ case .Failed:
+ if last_state != state {
+ io.write_string(line_writer, SGR_FAILED)
+ last_state = state
+ }
+ }
+ io.write_byte(line_writer, '|')
+ }
+
+ for _ in 0 ..< report.progress_width - (end - start) {
+ io.write_byte(line_writer, ' ')
+ }
+
+ io.write_string(line_writer, SGR_RESET + "] ")
+
+ ticker: string
+ if done_count == len(pkg.test_states) {
+ ticker = "[package done]"
+ if failed_count > 0 {
+ ticker = fmt.tprintf("%s (" + SGR_FAILED + "%i" + SGR_RESET + " failed)", ticker, failed_count)
+ }
+ } else {
+ if len(pkg.last_change_name) == 0 {
+ #no_bounds_check pkg.last_change_name = pkg.tests[0].name
+ }
+
+ switch pkg.last_change_state {
+ case .Ready:
+ ticker = fmt.tprintf(SGR_READY + "%s" + SGR_RESET, pkg.last_change_name)
+ case .Running:
+ ticker = fmt.tprintf(SGR_RUNNING + "%s" + SGR_RESET, pkg.last_change_name)
+ case .Failed:
+ ticker = fmt.tprintf(SGR_FAILED + "%s" + SGR_RESET, pkg.last_change_name)
+ case .Successful:
+ ticker = fmt.tprintf(SGR_SUCCESS + "%s" + SGR_RESET, pkg.last_change_name)
+ }
+ }
+
+ if done_count == len(pkg.test_states) {
+ fmt.wprintfln(line_writer, " % 4i :: %s",
+ len(pkg.test_states),
+ ticker,
+ )
+ } else {
+ fmt.wprintfln(line_writer, "% 4i/% 4i :: %s",
+ done_count,
+ len(pkg.test_states),
+ ticker,
+ )
+ }
+
+ pkg.redraw_string = strings.to_string(line_builder)
+ pkg.frame_ready = true
+ io.write_string(w, pkg.redraw_string)
+}
+
+redraw_report :: proc(w: io.Writer, report: Report) {
+ // If we print a line longer than the user's terminal can handle, it may
+ // wrap around, shifting the progress report out of alignment.
+ //
+ // There are ways to get the current terminal width, and that would be the
+ // ideal way to handle this, but it would require system-specific code such
+ // as setting STDIN to be non-blocking in order to read the response from
+ // the ANSI DSR escape code, or reading environment variables.
+ //
+ // The DECAWM escape codes control whether or not the terminal will wrap
+ // long lines or overwrite the last visible character.
+ // This should be fine for now.
+ //
+ // Note that we only do this for the animated summary; log messages are
+ // still perfectly fine to wrap, as they're printed in their own batch,
+ // whereas the animation depends on each package being only on one line.
+ //
+ // Of course, if you resize your terminal while it's printing, things can
+ // still break...
+ fmt.wprint(w, ansi.CSI + ansi.DECAWM_OFF)
+ for &pkg in report.packages {
+ redraw_package(w, report, &pkg)
+ }
+ fmt.wprint(w, ansi.CSI + ansi.DECAWM_ON)
+}
+
+needs_to_redraw :: proc(report: Report) -> bool {
+ for pkg in report.packages {
+ if !pkg.frame_ready {
+ return true
+ }
+ }
+
+ return false
+}
+
+draw_status_bar :: proc(w: io.Writer, threads_string: string, total_done_count, total_test_count: int) {
+ if total_done_count == total_test_count {
+ // All tests are done; print a blank line to maintain the same height
+ // of the progress report.
+ fmt.wprintln(w)
+ } else {
+ fmt.wprintfln(w,
+ "%s % 4i/% 4i :: total",
+ threads_string,
+ total_done_count,
+ total_test_count)
+ }
+}
+
+write_memory_report :: proc(w: io.Writer, tracker: ^mem.Tracking_Allocator, pkg, name: string) {
+ fmt.wprintf(w,
+ "<% 10M/% 10M> <% 10M> (% 5i/% 5i) :: %s.%s",
+ tracker.current_memory_allocated,
+ tracker.total_memory_allocated,
+ tracker.peak_memory_allocated,
+ tracker.total_free_count,
+ tracker.total_allocation_count,
+ pkg,
+ name)
+
+ for ptr, entry in tracker.allocation_map {
+ fmt.wprintf(w,
+ "\n +++ leak % 10M @ %p [%s:%i:%s()]",
+ entry.size,
+ ptr,
+ filepath.base(entry.location.file_path),
+ entry.location.line,
+ entry.location.procedure)
+ }
+
+ for entry in tracker.bad_free_array {
+ fmt.wprintf(w,
+ "\n +++ bad free @ %p [%s:%i:%s()]",
+ entry.memory,
+ filepath.base(entry.location.file_path),
+ entry.location.line,
+ entry.location.procedure)
+ }
+}
diff --git a/core/testing/runner.odin b/core/testing/runner.odin
index 0039f1939..328186c35 100644
--- a/core/testing/runner.odin
+++ b/core/testing/runner.odin
@@ -1,73 +1,823 @@
//+private
package testing
+import "base:intrinsics"
+import "base:runtime"
+import "core:bytes"
+import "core:encoding/ansi"
+@require import "core:encoding/base64"
+import "core:fmt"
import "core:io"
+@require import pkg_log "core:log"
+import "core:mem"
import "core:os"
import "core:slice"
+@require import "core:strings"
+import "core:sync/chan"
+import "core:thread"
+import "core:time"
-reset_t :: proc(t: ^T) {
- clear(&t.cleanups)
- t.error_count = 0
+// Specify how many threads to use when running tests.
+TEST_THREADS : int : #config(ODIN_TEST_THREADS, 0)
+// Track the memory used by each test.
+TRACKING_MEMORY : bool : #config(ODIN_TEST_TRACK_MEMORY, true)
+// Always report how much memory is used, even when there are no leaks or bad frees.
+ALWAYS_REPORT_MEMORY : bool : #config(ODIN_TEST_ALWAYS_REPORT_MEMORY, false)
+// Specify how much memory each thread allocator starts with.
+PER_THREAD_MEMORY : int : #config(ODIN_TEST_THREAD_MEMORY, mem.ROLLBACK_STACK_DEFAULT_BLOCK_SIZE)
+// Select a specific set of tests to run by name.
+// Each test is separated by a comma and may optionally include the package name.
+// This may be useful when running tests on multiple packages with `-all-packages`.
+// The format is: `package.test_name,test_name_only,...`
+TEST_NAMES : string : #config(ODIN_TEST_NAMES, "")
+// Show the fancy animated progress report.
+FANCY_OUTPUT : bool : #config(ODIN_TEST_FANCY, true)
+// Copy failed tests to the clipboard when done.
+USE_CLIPBOARD : bool : #config(ODIN_TEST_CLIPBOARD, false)
+// How many test results to show at a time per package.
+PROGRESS_WIDTH : int : #config(ODIN_TEST_PROGRESS_WIDTH, 24)
+// This is the random seed that will be sent to each test.
+// If it is unspecified, it will be set to the system cycle counter at startup.
+SHARED_RANDOM_SEED : u64 : #config(ODIN_TEST_RANDOM_SEED, 0)
+// Set the lowest log level for this test run.
+LOG_LEVEL : string : #config(ODIN_TEST_LOG_LEVEL, "info")
+
+
+get_log_level :: #force_inline proc() -> runtime.Logger_Level {
+ when ODIN_DEBUG {
+ // Always use .Debug in `-debug` mode.
+ return .Debug
+ } else {
+ when LOG_LEVEL == "debug" { return .Debug }
+ else when LOG_LEVEL == "info" { return .Info }
+ else when LOG_LEVEL == "warning" { return .Warning }
+ else when LOG_LEVEL == "error" { return .Error }
+ else when LOG_LEVEL == "fatal" { return .Fatal }
+ else {
+ #panic("Unknown `ODIN_TEST_LOG_LEVEL`: \"" + LOG_LEVEL + "\", possible levels are: \"debug\", \"info\", \"warning\", \"error\", or \"fatal\".")
+ }
+ }
}
+
end_t :: proc(t: ^T) {
for i := len(t.cleanups)-1; i >= 0; i -= 1 {
- c := t.cleanups[i]
+ #no_bounds_check c := t.cleanups[i]
+ context = c.ctx
c.procedure(c.user_data)
}
+
+ delete(t.cleanups)
+ t.cleanups = {}
+}
+
+Task_Data :: struct {
+ it: Internal_Test,
+ t: T,
+ allocator_index: int,
+}
+
+Task_Timeout :: struct {
+ test_index: int,
+ at_time: time.Time,
+ location: runtime.Source_Code_Location,
+}
+
+run_test_task :: proc(task: thread.Task) {
+ data := cast(^Task_Data)(task.data)
+
+ setup_task_signal_handler(task.user_index)
+
+ chan.send(data.t.channel, Event_New_Test {
+ test_index = task.user_index,
+ })
+
+ chan.send(data.t.channel, Event_State_Change {
+ new_state = .Running,
+ })
+
+ context.assertion_failure_proc = test_assertion_failure_proc
+
+ context.logger = {
+ procedure = test_logger_proc,
+ data = &data.t,
+ lowest_level = get_log_level(),
+ options = Default_Test_Logger_Opts,
+ }
+
+ free_all(context.temp_allocator)
+
+ data.it.p(&data.t)
+
+ end_t(&data.t)
+
+ new_state : Test_State = .Failed if failed(&data.t) else .Successful
+
+ chan.send(data.t.channel, Event_State_Change {
+ new_state = new_state,
+ })
}
runner :: proc(internal_tests: []Internal_Test) -> bool {
- stream := os.stream_from_handle(os.stdout)
- w := io.to_writer(stream)
+ BATCH_BUFFER_SIZE :: 32 * mem.Kilobyte
+ POOL_BLOCK_SIZE :: 16 * mem.Kilobyte
+ CLIPBOARD_BUFFER_SIZE :: 16 * mem.Kilobyte
+
+ BUFFERED_EVENTS_PER_CHANNEL :: 16
+ RESERVED_LOG_MESSAGES :: 64
+ RESERVED_TEST_FAILURES :: 64
+
+ ERROR_STRING_TIMEOUT : string : "Test timed out."
+ ERROR_STRING_UNKNOWN : string : "Test failed for unknown reasons."
+ OSC_WINDOW_TITLE : string : ansi.OSC + ansi.WINDOW_TITLE + ";Odin test runner (%i/%i)" + ansi.ST
+
+ safe_delete_string :: proc(s: string, allocator := context.allocator) {
+ // Guard against bad frees on static strings.
+ switch raw_data(s) {
+ case raw_data(ERROR_STRING_TIMEOUT), raw_data(ERROR_STRING_UNKNOWN):
+ return
+ case:
+ delete(s, allocator)
+ }
+ }
+
+ stdout := io.to_writer(os.stream_from_handle(os.stdout))
+ stderr := io.to_writer(os.stream_from_handle(os.stderr))
+
+ // -- Prepare test data.
+
+ alloc_error: mem.Allocator_Error
+
+ when TEST_NAMES != "" {
+ select_internal_tests: [dynamic]Internal_Test
+ defer delete(select_internal_tests)
+
+ {
+ index_list := TEST_NAMES
+ for selector in strings.split_iterator(&index_list, ",") {
+ // Temp allocator is fine since we just need to identify which test it's referring to.
+ split_selector := strings.split(selector, ".", context.temp_allocator)
- t := &T{}
- t.w = w
- reserve(&t.cleanups, 1024)
- defer delete(t.cleanups)
+ found := false
+ switch len(split_selector) {
+ case 1:
+ // Only the test name?
+ #no_bounds_check name := split_selector[0]
+ find_test_by_name: for it in internal_tests {
+ if it.name == name {
+ found = true
+ _, alloc_error = append(&select_internal_tests, it)
+ fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
+ break find_test_by_name
+ }
+ }
+ case 2:
+ #no_bounds_check pkg := split_selector[0]
+ #no_bounds_check name := split_selector[1]
+ find_test_by_pkg_and_name: for it in internal_tests {
+ if it.pkg == pkg && it.name == name {
+ found = true
+ _, alloc_error = append(&select_internal_tests, it)
+ fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
+ break find_test_by_pkg_and_name
+ }
+ }
+ }
+ if !found {
+ fmt.wprintfln(stderr, "No test found for the name: %q", selector)
+ }
+ }
+ }
+
+ // Intentional shadow with user-specified tests.
+ internal_tests := select_internal_tests[:]
+ }
+ total_failure_count := 0
total_success_count := 0
- total_test_count := len(internal_tests)
+ total_done_count := 0
+ total_test_count := len(internal_tests)
+
+ when !FANCY_OUTPUT {
+ // This is strictly for updating the window title when the progress
+ // report is disabled. We're otherwise able to depend on the call to
+ // `needs_to_redraw`.
+ last_done_count := -1
+ }
+
+ if total_test_count == 0 {
+ // Exit early.
+ fmt.wprintln(stdout, "No tests to run.")
+ return true
+ }
+
+ for it in internal_tests {
+ // NOTE(Feoramund): The old test runner skipped over tests with nil
+ // procedures, but I couldn't find any case where they occurred.
+ // This assert stands to prevent any oversight on my part.
+ fmt.assertf(it.p != nil, "Test %s.%s has <nil> procedure.", it.pkg, it.name)
+ }
slice.sort_by(internal_tests, proc(a, b: Internal_Test) -> bool {
- if a.pkg < b.pkg {
- return true
+ if a.pkg == b.pkg {
+ return a.name < b.name
+ } else {
+ return a.pkg < b.pkg
}
- return a.name < b.name
})
- prev_pkg := ""
+ // -- Set thread count.
- for it in internal_tests {
- if it.p == nil {
- total_test_count -= 1
- continue
+ when TEST_THREADS == 0 {
+ thread_count := os.processor_core_count()
+ } else {
+ thread_count := max(1, TEST_THREADS)
+ }
+
+ thread_count = min(thread_count, total_test_count)
+
+ // -- Allocate.
+
+ pool_stack: mem.Rollback_Stack
+ alloc_error = mem.rollback_stack_init(&pool_stack, POOL_BLOCK_SIZE)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for thread pool: %v", alloc_error)
+ defer mem.rollback_stack_destroy(&pool_stack)
+
+ pool: thread.Pool
+ thread.pool_init(&pool, mem.rollback_stack_allocator(&pool_stack), thread_count)
+ defer thread.pool_destroy(&pool)
+
+ task_channels: []Task_Channel = ---
+ task_channels, alloc_error = make([]Task_Channel, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for update channels: %v", alloc_error)
+ defer delete(task_channels)
+
+ for &task_channel, index in task_channels {
+ task_channel.channel, alloc_error = chan.create_buffered(Update_Channel, BUFFERED_EVENTS_PER_CHANNEL, context.allocator)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for update channel #%i: %v", index, alloc_error)
+ }
+ defer for &task_channel in task_channels {
+ chan.destroy(&task_channel.channel)
+ }
+
+ // This buffer is used to batch writes to STDOUT or STDERR, to help reduce
+ // screen flickering.
+ batch_buffer: bytes.Buffer
+ bytes.buffer_init_allocator(&batch_buffer, 0, BATCH_BUFFER_SIZE)
+ batch_writer := io.to_writer(bytes.buffer_to_stream(&batch_buffer))
+ defer bytes.buffer_destroy(&batch_buffer)
+
+ report: Report = ---
+ report, alloc_error = make_report(internal_tests)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for test report: %v", alloc_error)
+ defer destroy_report(&report)
+
+ when FANCY_OUTPUT {
+ // We cannot make use of the ANSI save/restore cursor codes, because they
+ // work by absolute screen coordinates. This will cause unnecessary
+ // scrollback if we print at the bottom of someone's terminal.
+ ansi_redraw_string := fmt.aprintf(
+ // ANSI for "go up N lines then erase the screen from the cursor forward."
+ ansi.CSI + "%i" + ansi.CPL + ansi.CSI + ansi.ED +
+ // We'll combine this with the window title format string, since it
+ // can be printed at the same time.
+ "%s",
+ // 1 extra line for the status bar.
+ 1 + len(report.packages), OSC_WINDOW_TITLE)
+ assert(len(ansi_redraw_string) > 0, "Error allocating ANSI redraw string.")
+ defer delete(ansi_redraw_string)
+
+ thread_count_status_string: string = ---
+ {
+ PADDING :: PROGRESS_COLUMN_SPACING + PROGRESS_WIDTH
+
+ unpadded := fmt.tprintf("%i thread%s", thread_count, "" if thread_count == 1 else "s")
+ thread_count_status_string = fmt.aprintf("%- *[1]s", unpadded, report.pkg_column_len + PADDING)
+ assert(len(thread_count_status_string) > 0, "Error allocating thread count status string.")
}
+ defer delete(thread_count_status_string)
+ }
+
+ task_data_slots: []Task_Data = ---
+ task_data_slots, alloc_error = make([]Task_Data, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task data slots: %v", alloc_error)
+ defer delete(task_data_slots)
- free_all(context.temp_allocator)
- reset_t(t)
- defer end_t(t)
+ // Tests rotate through these allocators as they finish.
+ task_allocators: []mem.Rollback_Stack = ---
+ task_allocators, alloc_error = make([]mem.Rollback_Stack, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task allocators: %v", alloc_error)
+ defer delete(task_allocators)
- if prev_pkg != it.pkg {
- prev_pkg = it.pkg
- logf(t, "[Package: %s]", it.pkg)
+ when TRACKING_MEMORY {
+ task_memory_trackers: []mem.Tracking_Allocator = ---
+ task_memory_trackers, alloc_error = make([]mem.Tracking_Allocator, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for memory trackers: %v", alloc_error)
+ defer delete(task_memory_trackers)
+ }
+
+ #no_bounds_check for i in 0 ..< thread_count {
+ alloc_error = mem.rollback_stack_init(&task_allocators[i], PER_THREAD_MEMORY)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error)
+ when TRACKING_MEMORY {
+ mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i]))
}
+ }
- logf(t, "[Test: %s]", it.name)
+ defer #no_bounds_check for i in 0 ..< thread_count {
+ when TRACKING_MEMORY {
+ mem.tracking_allocator_destroy(&task_memory_trackers[i])
+ }
+ mem.rollback_stack_destroy(&task_allocators[i])
+ }
- run_internal_test(t, it)
+ task_timeouts: [dynamic]Task_Timeout = ---
+ task_timeouts, alloc_error = make([dynamic]Task_Timeout, 0, thread_count)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for task timeouts: %v", alloc_error)
+ defer delete(task_timeouts)
- if failed(t) {
- logf(t, "[%s : FAILURE]", it.name)
- } else {
- logf(t, "[%s : SUCCESS]", it.name)
- total_success_count += 1
+ failed_test_reason_map: map[int]string = ---
+ failed_test_reason_map, alloc_error = make(map[int]string, RESERVED_TEST_FAILURES)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for failed test reasons: %v", alloc_error)
+ defer delete(failed_test_reason_map)
+
+ log_messages: [dynamic]Log_Message = ---
+ log_messages, alloc_error = make([dynamic]Log_Message, 0, RESERVED_LOG_MESSAGES)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for log message queue: %v", alloc_error)
+ defer delete(log_messages)
+
+ sorted_failed_test_reasons: [dynamic]int = ---
+ sorted_failed_test_reasons, alloc_error = make([dynamic]int, 0, RESERVED_TEST_FAILURES)
+ fmt.assertf(alloc_error == nil, "Error allocating memory for sorted failed test reasons: %v", alloc_error)
+ defer delete(sorted_failed_test_reasons)
+
+ when USE_CLIPBOARD {
+ clipboard_buffer: bytes.Buffer
+ bytes.buffer_init_allocator(&clipboard_buffer, 0, CLIPBOARD_BUFFER_SIZE)
+ defer bytes.buffer_destroy(&clipboard_buffer)
+ }
+
+ when SHARED_RANDOM_SEED == 0 {
+ shared_random_seed := cast(u64)intrinsics.read_cycle_counter()
+ } else {
+ shared_random_seed := SHARED_RANDOM_SEED
+ }
+
+ // -- Setup initial tasks.
+
+ // NOTE(Feoramund): This is the allocator that will be used by threads to
+ // persist log messages past their lifetimes. It has its own variable name
+ // in the event it needs to be changed from `context.allocator` without
+ // digging through the source to divine everywhere it is used for that.
+ shared_log_allocator := context.allocator
+
+ context.logger = {
+ procedure = runner_logger_proc,
+ data = &log_messages,
+ lowest_level = get_log_level(),
+ options = Default_Test_Logger_Opts - {.Short_File_Path, .Line, .Procedure},
+ }
+
+ run_index: int
+
+ setup_tasks: for &data, task_index in task_data_slots {
+ setup_next_test: for run_index < total_test_count {
+ #no_bounds_check it := internal_tests[run_index]
+ defer run_index += 1
+
+ data.it = it
+ data.t.seed = shared_random_seed
+ #no_bounds_check data.t.channel = chan.as_send(task_channels[task_index].channel)
+ data.t._log_allocator = shared_log_allocator
+ data.allocator_index = task_index
+
+ #no_bounds_check when TRACKING_MEMORY {
+ task_allocator := mem.tracking_allocator(&task_memory_trackers[task_index])
+ } else {
+ task_allocator := mem.rollback_stack_allocator(&task_allocators[task_index])
+ }
+
+ thread.pool_add_task(&pool, task_allocator, run_test_task, &data, run_index)
+
+ continue setup_tasks
}
}
- logf(t, "----------------------------------------")
- if total_test_count == 0 {
- log(t, "NO TESTS RAN")
+
+ // -- Run tests.
+
+ setup_signal_handler()
+
+ fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_HIDE)
+
+ when FANCY_OUTPUT {
+ signals_were_raised := false
+
+ redraw_report(stdout, report)
+ draw_status_bar(stdout, thread_count_status_string, total_done_count, total_test_count)
+ }
+
+ when TEST_THREADS == 0 {
+ pkg_log.infof("Starting test runner with %i thread%s. Set with -define:ODIN_TEST_THREADS=n.",
+ thread_count,
+ "" if thread_count == 1 else "s")
+ } else {
+ pkg_log.infof("Starting test runner with %i thread%s.",
+ thread_count,
+ "" if thread_count == 1 else "s")
+ }
+
+ when SHARED_RANDOM_SEED == 0 {
+ pkg_log.infof("The random seed sent to every test is: %v. Set with -define:ODIN_TEST_RANDOM_SEED=n.", shared_random_seed)
} else {
- logf(t, "%d/%d SUCCESSFUL", total_success_count, total_test_count)
+ pkg_log.infof("The random seed sent to every test is: %v.", shared_random_seed)
+ }
+
+ when TRACKING_MEMORY {
+ when ALWAYS_REPORT_MEMORY {
+ pkg_log.info("Memory tracking is enabled. Tests will log their memory usage when complete.")
+ } else {
+ pkg_log.info("Memory tracking is enabled. Tests will log their memory usage if there's an issue.")
+ }
+ pkg_log.info("< Final Mem/ Total Mem> < Peak Mem> (#Free/Alloc) :: [package.test_name]")
+ } else when ALWAYS_REPORT_MEMORY {
+ pkg_log.warn("ODIN_TEST_ALWAYS_REPORT_MEMORY is true, but ODIN_TRACK_MEMORY is false.")
+ }
+
+ start_time := time.now()
+
+ thread.pool_start(&pool)
+ main_loop: for !thread.pool_is_empty(&pool) {
+ {
+ events_pending := thread.pool_num_done(&pool) > 0
+
+ if !events_pending {
+ poll_tasks: for &task_channel in task_channels {
+ if chan.len(task_channel.channel) > 0 {
+ events_pending = true
+ break poll_tasks
+ }
+ }
+ }
+
+ if !events_pending {
+ // Keep the main thread from pegging a core at 100% usage.
+ time.sleep(1 * time.Microsecond)
+ }
+ }
+
+ cycle_pool: for task in thread.pool_pop_done(&pool) {
+ data := cast(^Task_Data)(task.data)
+
+ when TRACKING_MEMORY {
+ #no_bounds_check tracker := &task_memory_trackers[data.allocator_index]
+
+ memory_is_in_bad_state := len(tracker.allocation_map) + len(tracker.bad_free_array) > 0
+
+ when ALWAYS_REPORT_MEMORY {
+ should_report := true
+ } else {
+ should_report := memory_is_in_bad_state
+ }
+
+ if should_report {
+ write_memory_report(batch_writer, tracker, data.it.pkg, data.it.name)
+
+ pkg_log.log(.Warning if memory_is_in_bad_state else .Info, bytes.buffer_to_string(&batch_buffer))
+ bytes.buffer_reset(&batch_buffer)
+ }
+
+ mem.tracking_allocator_reset(tracker)
+ }
+
+ free_all(task.allocator)
+
+ if run_index < total_test_count {
+ #no_bounds_check it := internal_tests[run_index]
+ defer run_index += 1
+
+ data.it = it
+ data.t.seed = shared_random_seed
+ data.t.error_count = 0
+
+ thread.pool_add_task(&pool, task.allocator, run_test_task, data, run_index)
+ }
+ }
+
+ handle_events: for &task_channel in task_channels {
+ for ev in chan.try_recv(task_channel.channel) {
+ switch event in ev {
+ case Event_New_Test:
+ task_channel.test_index = event.test_index
+
+ case Event_State_Change:
+ #no_bounds_check report.all_test_states[task_channel.test_index] = event.new_state
+
+ #no_bounds_check it := internal_tests[task_channel.test_index]
+ #no_bounds_check pkg := report.packages_by_name[it.pkg]
+
+ #partial switch event.new_state {
+ case .Failed:
+ if task_channel.test_index not_in failed_test_reason_map {
+ failed_test_reason_map[task_channel.test_index] = ERROR_STRING_UNKNOWN
+ }
+ total_failure_count += 1
+ total_done_count += 1
+ case .Successful:
+ total_success_count += 1
+ total_done_count += 1
+ }
+
+ when ODIN_DEBUG {
+ pkg_log.debugf("Test #%i %s.%s changed state to %v.", task_channel.test_index, it.pkg, it.name, event.new_state)
+ }
+
+ pkg.last_change_state = event.new_state
+ pkg.last_change_name = it.name
+ pkg.frame_ready = false
+
+ case Event_Set_Fail_Timeout:
+ _, alloc_error = append(&task_timeouts, Task_Timeout {
+ test_index = task_channel.test_index,
+ at_time = event.at_time,
+ location = event.location,
+ })
+ fmt.assertf(alloc_error == nil, "Error appending to task timeouts: %v", alloc_error)
+
+ case Event_Log_Message:
+ _, alloc_error = append(&log_messages, Log_Message {
+ level = event.level,
+ text = event.formatted_text,
+ time = event.time,
+ allocator = shared_log_allocator,
+ })
+ fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
+
+ if event.level >= .Error {
+ // Save the message for the final summary.
+ if old_error, ok := failed_test_reason_map[task_channel.test_index]; ok {
+ safe_delete_string(old_error, shared_log_allocator)
+ }
+ failed_test_reason_map[task_channel.test_index] = event.text
+ } else {
+ delete(event.text, shared_log_allocator)
+ }
+ }
+ }
+ }
+
+ check_timeouts: for i := len(task_timeouts) - 1; i >= 0; i -= 1 {
+ #no_bounds_check timeout := &task_timeouts[i]
+
+ if time.since(timeout.at_time) < 0 {
+ continue check_timeouts
+ }
+
+ defer unordered_remove(&task_timeouts, i)
+
+ #no_bounds_check if report.all_test_states[timeout.test_index] > .Running {
+ continue check_timeouts
+ }
+
+ if !thread.pool_stop_task(&pool, timeout.test_index) {
+ // The task may have stopped a split second after we started
+ // checking, but we haven't handled the new state yet.
+ continue check_timeouts
+ }
+
+ #no_bounds_check report.all_test_states[timeout.test_index] = .Failed
+ #no_bounds_check it := internal_tests[timeout.test_index]
+ #no_bounds_check pkg := report.packages_by_name[it.pkg]
+ pkg.frame_ready = false
+
+ if old_error, ok := failed_test_reason_map[timeout.test_index]; ok {
+ safe_delete_string(old_error, shared_log_allocator)
+ }
+ failed_test_reason_map[timeout.test_index] = ERROR_STRING_TIMEOUT
+ total_failure_count += 1
+ total_done_count += 1
+
+ now := time.now()
+ _, alloc_error = append(&log_messages, Log_Message {
+ level = .Error,
+ text = format_log_text(.Error, ERROR_STRING_TIMEOUT, Default_Test_Logger_Opts, timeout.location, now),
+ time = now,
+ allocator = context.allocator,
+ })
+ fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
+
+ find_task_data: for &data in task_data_slots {
+ if data.it.pkg == it.pkg && data.it.name == it.name {
+ end_t(&data.t)
+ break find_task_data
+ }
+ }
+ }
+
+ if should_stop_runner() {
+ fmt.wprintln(stderr, "\nCaught interrupt signal. Stopping all tests.")
+ thread.pool_shutdown(&pool)
+ break main_loop
+ }
+
+ when FANCY_OUTPUT {
+ // Because the bounds checking procs send directly to STDERR with
+ // no way to redirect or handle them, we need to at least try to
+ // let the user see those messages when using the animated progress
+ // report. This flag may be set by the block of code below if a
+ // signal is raised.
+ //
+ // It'll be purely by luck if the output is interleaved properly,
+ // given the nature of non-thread-safe printing.
+ //
+ // At worst, if Odin did not print any error for this signal, we'll
+ // just re-display the progress report. The fatal log error message
+ // should be enough to clue the user in that something dire has
+ // occurred.
+ bypass_progress_overwrite := false
+ }
+
+ if test_index, reason, ok := should_stop_test(); ok {
+ #no_bounds_check report.all_test_states[test_index] = .Failed
+ #no_bounds_check it := internal_tests[test_index]
+ #no_bounds_check pkg := report.packages_by_name[it.pkg]
+ pkg.frame_ready = false
+
+ fmt.assertf(thread.pool_stop_task(&pool, test_index),
+ "A signal (%v) was raised to stop test #%i %s.%s, but it was unable to be found.",
+ reason, test_index, it.pkg, it.name)
+
+ if test_index not_in failed_test_reason_map {
+ // We only write a new error message here if there wasn't one
+ // already, because the message we can provide based only on
+ // the signal won't be very useful, whereas asserts and panics
+ // will provide a user-written error message.
+ failed_test_reason_map[test_index] = fmt.aprintf("Signal caught: %v", reason, allocator = shared_log_allocator)
+ pkg_log.fatalf("Caught signal to stop test #%i %s.%s for: %v.", test_index, it.pkg, it.name, reason)
+
+ }
+
+ when FANCY_OUTPUT {
+ bypass_progress_overwrite = true
+ signals_were_raised = true
+ }
+
+ total_failure_count += 1
+ total_done_count += 1
+ }
+
+ // -- Redraw.
+
+ when FANCY_OUTPUT {
+ if len(log_messages) == 0 && !needs_to_redraw(report) {
+ continue main_loop
+ }
+
+ if !bypass_progress_overwrite {
+ fmt.wprintf(stdout, ansi_redraw_string, total_done_count, total_test_count)
+ }
+ } else {
+ if total_done_count != last_done_count {
+ fmt.wprintf(stdout, OSC_WINDOW_TITLE, total_done_count, total_test_count)
+ last_done_count = total_done_count
+ }
+
+ if len(log_messages) == 0 {
+ continue main_loop
+ }
+ }
+
+ // Because each thread has its own messenger channel, log messages
+ // arrive in chunks that are in-order, but when they're merged with the
+ // logs from other threads, they become out-of-order.
+ slice.stable_sort_by(log_messages[:], proc(a, b: Log_Message) -> bool {
+ return time.diff(a.time, b.time) > 0
+ })
+
+ for message in log_messages {
+ fmt.wprintln(batch_writer, message.text)
+ delete(message.text, message.allocator)
+ }
+
+ fmt.wprint(stderr, bytes.buffer_to_string(&batch_buffer))
+ clear(&log_messages)
+ bytes.buffer_reset(&batch_buffer)
+
+ when FANCY_OUTPUT {
+ redraw_report(batch_writer, report)
+ draw_status_bar(batch_writer, thread_count_status_string, total_done_count, total_test_count)
+ fmt.wprint(stdout, bytes.buffer_to_string(&batch_buffer))
+ bytes.buffer_reset(&batch_buffer)
+ }
+ }
+
+ // -- All tests are complete, or the runner has been interrupted.
+
+ // NOTE(Feoramund): If you've arrived here after receiving signal 11 or
+ // SIGSEGV on the main runner thread, while using a UNIX-like platform,
+ // there is the possibility that you may have encountered a rare edge case
+ // involving the joining of threads.
+ //
+ // At the time of writing, the thread library is undergoing a rewrite that
+ // should solve this problem; it is not an issue with the test runner itself.
+ thread.pool_join(&pool)
+
+ finished_in := time.since(start_time)
+
+ when !FANCY_OUTPUT {
+ // One line to space out the results, since we don't have the status
+ // bar in plain mode.
+ fmt.wprintln(batch_writer)
+ }
+
+ fmt.wprintf(batch_writer,
+ "Finished %i test%s in %v.",
+ total_done_count,
+ "" if total_done_count == 1 else "s",
+ finished_in)
+
+ if total_done_count != total_test_count {
+ not_run_count := total_test_count - total_done_count
+ fmt.wprintf(batch_writer,
+ " " + SGR_READY + "%i" + SGR_RESET + " %s left undone.",
+ not_run_count,
+ "test was" if not_run_count == 1 else "tests were")
+ }
+
+ if total_success_count == total_test_count {
+ fmt.wprintfln(batch_writer,
+ " %s " + SGR_SUCCESS + "successful." + SGR_RESET,
+ "The test was" if total_test_count == 1 else "All tests were")
+ } else if total_failure_count > 0 {
+ if total_failure_count == total_test_count {
+ fmt.wprintfln(batch_writer,
+ " %s " + SGR_FAILED + "failed." + SGR_RESET,
+ "The test" if total_test_count == 1 else "All tests")
+ } else {
+ fmt.wprintfln(batch_writer,
+ " " + SGR_FAILED + "%i" + SGR_RESET + " test%s failed.",
+ total_failure_count,
+ "" if total_failure_count == 1 else "s")
+ }
+
+ for test_index in failed_test_reason_map {
+ _, alloc_error = append(&sorted_failed_test_reasons, test_index)
+ fmt.assertf(alloc_error == nil, "Error appending to sorted failed test reasons: %v", alloc_error)
+ }
+
+ slice.sort(sorted_failed_test_reasons[:])
+
+ for test_index in sorted_failed_test_reasons {
+ #no_bounds_check last_error := failed_test_reason_map[test_index]
+ #no_bounds_check it := internal_tests[test_index]
+ pkg_and_name := fmt.tprintf("%s.%s", it.pkg, it.name)
+ fmt.wprintfln(batch_writer, " - %- *[1]s\t%s",
+ pkg_and_name,
+ report.pkg_column_len + report.test_column_len,
+ last_error)
+ safe_delete_string(last_error, shared_log_allocator)
+ }
+
+ if total_success_count > 0 {
+ when USE_CLIPBOARD {
+ clipboard_writer := io.to_writer(bytes.buffer_to_stream(&clipboard_buffer))
+ fmt.wprint(clipboard_writer, "-define:ODIN_TEST_NAMES=")
+ for test_index in sorted_failed_test_reasons {
+ #no_bounds_check it := internal_tests[test_index]
+ fmt.wprintf(clipboard_writer, "%s.%s,", it.pkg, it.name)
+ }
+
+ encoded_names := base64.encode(bytes.buffer_to_bytes(&clipboard_buffer), allocator = context.temp_allocator)
+
+ fmt.wprintf(batch_writer,
+ ansi.OSC + ansi.CLIPBOARD + ";c;%s" + ansi.ST +
+ "\nThe name%s of the failed test%s been copied to your clipboard.",
+ encoded_names,
+ "" if total_failure_count == 1 else "s",
+ " has" if total_failure_count == 1 else "s have")
+ } else {
+ fmt.wprintf(batch_writer, "\nTo run only the failed test%s, use:\n\t-define:ODIN_TEST_NAMES=",
+ "" if total_failure_count == 1 else "s")
+ for test_index in sorted_failed_test_reasons {
+ #no_bounds_check it := internal_tests[test_index]
+ fmt.wprintf(batch_writer, "%s.%s,", it.pkg, it.name)
+ }
+ fmt.wprint(batch_writer, "\n\nIf your terminal supports OSC 52, you may use -define:ODIN_TEST_CLIPBOARD to have this copied directly to your clipboard.")
+ }
+
+ fmt.wprintln(batch_writer)
+ }
}
+
+ fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_SHOW)
+
+ when FANCY_OUTPUT {
+ if signals_were_raised {
+ fmt.wprintln(batch_writer, `
+Signals were raised during this test run. Log messages are likely to have collided with each other.
+To partly mitigate this, redirect STDERR to a file or use the -define:ODIN_TEST_FANCY=false option.`)
+ }
+ }
+
+ fmt.wprintln(stderr, bytes.buffer_to_string(&batch_buffer))
+
return total_success_count == total_test_count
}
diff --git a/core/testing/runner_other.odin b/core/testing/runner_other.odin
deleted file mode 100644
index f3271d209..000000000
--- a/core/testing/runner_other.odin
+++ /dev/null
@@ -1,14 +0,0 @@
-//+private
-//+build !windows
-package testing
-
-import "core:time"
-
-run_internal_test :: proc(t: ^T, it: Internal_Test) {
- // TODO(bill): Catch panics on other platforms
- it.p(t)
-}
-
-_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
-
-} \ No newline at end of file
diff --git a/core/testing/runner_windows.odin b/core/testing/runner_windows.odin
deleted file mode 100644
index 15264355b..000000000
--- a/core/testing/runner_windows.odin
+++ /dev/null
@@ -1,235 +0,0 @@
-//+private
-//+build windows
-package testing
-
-import win32 "core:sys/windows"
-import "base:runtime"
-import "base:intrinsics"
-import "core:time"
-
-Sema :: struct {
- count: i32,
-}
-
-sema_reset :: proc "contextless" (s: ^Sema) {
- intrinsics.atomic_store(&s.count, 0)
-}
-sema_wait :: proc "contextless" (s: ^Sema) {
- for {
- original_count := s.count
- for original_count == 0 {
- win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), win32.INFINITE)
- original_count = s.count
- }
- if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
- return
- }
- }
-}
-sema_wait_with_timeout :: proc "contextless" (s: ^Sema, duration: time.Duration) -> bool {
- if duration <= 0 {
- return false
- }
- for {
-
- original_count := intrinsics.atomic_load(&s.count)
- for start := time.tick_now(); original_count == 0; /**/ {
- if intrinsics.atomic_load(&s.count) != original_count {
- remaining := duration - time.tick_since(start)
- if remaining < 0 {
- return false
- }
- ms := u32(remaining/time.Millisecond)
- if !win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), ms) {
- return false
- }
- }
- original_count = s.count
- }
- if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
- return true
- }
- }
-}
-
-sema_post :: proc "contextless" (s: ^Sema, count := 1) {
- intrinsics.atomic_add(&s.count, i32(count))
- if count == 1 {
- win32.WakeByAddressSingle(&s.count)
- } else {
- win32.WakeByAddressAll(&s.count)
- }
-}
-
-
-
-Thread_Proc :: #type proc(^Thread)
-
-MAX_USER_ARGUMENTS :: 8
-
-Thread :: struct {
- using specific: Thread_Os_Specific,
- procedure: Thread_Proc,
-
- t: ^T,
- it: Internal_Test,
- success: bool,
-
- init_context: Maybe(runtime.Context),
-
- creation_allocator: runtime.Allocator,
-
- internal_fail_timeout: time.Duration,
- internal_fail_timeout_loc: runtime.Source_Code_Location,
-}
-
-Thread_Os_Specific :: struct {
- win32_thread: win32.HANDLE,
- win32_thread_id: win32.DWORD,
- done: bool, // see note in `is_done`
-}
-
-thread_create :: proc(procedure: Thread_Proc) -> ^Thread {
- __windows_thread_entry_proc :: proc "system" (t_: rawptr) -> win32.DWORD {
- t := (^Thread)(t_)
- context = t.init_context.? or_else runtime.default_context()
-
- t.procedure(t)
-
- if t.init_context == nil {
- if context.temp_allocator.data == &runtime.global_default_temp_allocator_data {
- runtime.default_temp_allocator_destroy(auto_cast context.temp_allocator.data)
- }
- }
-
- intrinsics.atomic_store(&t.done, true)
- return 0
- }
-
-
- thread := new(Thread)
- if thread == nil {
- return nil
- }
- thread.creation_allocator = context.allocator
-
- win32_thread_id: win32.DWORD
- win32_thread := win32.CreateThread(nil, 0, __windows_thread_entry_proc, thread, win32.CREATE_SUSPENDED, &win32_thread_id)
- if win32_thread == nil {
- free(thread, thread.creation_allocator)
- return nil
- }
- thread.procedure = procedure
- thread.win32_thread = win32_thread
- thread.win32_thread_id = win32_thread_id
- thread.init_context = context
-
- return thread
-}
-
-thread_start :: proc "contextless" (thread: ^Thread) {
- win32.ResumeThread(thread.win32_thread)
-}
-
-thread_join_and_destroy :: proc(thread: ^Thread) {
- if thread.win32_thread != win32.INVALID_HANDLE {
- win32.WaitForSingleObject(thread.win32_thread, win32.INFINITE)
- win32.CloseHandle(thread.win32_thread)
- thread.win32_thread = win32.INVALID_HANDLE
- }
- free(thread, thread.creation_allocator)
-}
-
-thread_terminate :: proc "contextless" (thread: ^Thread, exit_code: int) {
- win32.TerminateThread(thread.win32_thread, u32(exit_code))
-}
-
-
-_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
- assert(global_fail_timeout_thread == nil, "set_fail_timeout previously called", loc)
-
- thread := thread_create(proc(thread: ^Thread) {
- t := thread.t
- timeout := thread.internal_fail_timeout
- if !sema_wait_with_timeout(&global_fail_timeout_semaphore, timeout) {
- fail_now(t, "TIMEOUT", thread.internal_fail_timeout_loc)
- }
- })
- thread.internal_fail_timeout = duration
- thread.internal_fail_timeout_loc = loc
- thread.t = t
- global_fail_timeout_thread = thread
- thread_start(thread)
-}
-
-global_fail_timeout_thread: ^Thread
-global_fail_timeout_semaphore: Sema
-
-global_threaded_runner_semaphore: Sema
-global_exception_handler: rawptr
-global_current_thread: ^Thread
-global_current_t: ^T
-
-run_internal_test :: proc(t: ^T, it: Internal_Test) {
- thread := thread_create(proc(thread: ^Thread) {
- exception_handler_proc :: proc "system" (ExceptionInfo: ^win32.EXCEPTION_POINTERS) -> win32.LONG {
- switch ExceptionInfo.ExceptionRecord.ExceptionCode {
- case
- win32.EXCEPTION_DATATYPE_MISALIGNMENT,
- win32.EXCEPTION_BREAKPOINT,
- win32.EXCEPTION_ACCESS_VIOLATION,
- win32.EXCEPTION_ILLEGAL_INSTRUCTION,
- win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED,
- win32.EXCEPTION_STACK_OVERFLOW:
-
- sema_post(&global_threaded_runner_semaphore)
- return win32.EXCEPTION_EXECUTE_HANDLER
- }
-
- return win32.EXCEPTION_CONTINUE_SEARCH
- }
- global_exception_handler = win32.AddVectoredExceptionHandler(0, exception_handler_proc)
-
- context.assertion_failure_proc = proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! {
- errorf(global_current_t, "%s %s", prefix, message, loc=loc)
- intrinsics.trap()
- }
-
- t := thread.t
-
- global_fail_timeout_thread = nil
- sema_reset(&global_fail_timeout_semaphore)
-
- thread.it.p(t)
-
- sema_post(&global_fail_timeout_semaphore)
- if global_fail_timeout_thread != nil do thread_join_and_destroy(global_fail_timeout_thread)
-
- thread.success = true
- sema_post(&global_threaded_runner_semaphore)
- })
-
- sema_reset(&global_threaded_runner_semaphore)
- global_current_t = t
-
- t._fail_now = proc() -> ! {
- intrinsics.trap()
- }
-
- thread.t = t
- thread.it = it
- thread.success = false
- thread_start(thread)
-
- sema_wait(&global_threaded_runner_semaphore)
- thread_terminate(thread, int(!thread.success))
- thread_join_and_destroy(thread)
-
- win32.RemoveVectoredExceptionHandler(global_exception_handler)
-
- if !thread.success && t.error_count == 0 {
- t.error_count += 1
- }
-
- return
-}
diff --git a/core/testing/signal_handler.odin b/core/testing/signal_handler.odin
new file mode 100644
index 000000000..891f6bbb6
--- /dev/null
+++ b/core/testing/signal_handler.odin
@@ -0,0 +1,33 @@
+//+private
+package testing
+
+import "base:runtime"
+import pkg_log "core:log"
+
+Stop_Reason :: enum {
+ Unknown,
+ Illegal_Instruction,
+ Arithmetic_Error,
+ Segmentation_Fault,
+}
+
+test_assertion_failure_proc :: proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! {
+ pkg_log.fatalf("%s: %s", prefix, message, location = loc)
+ runtime.trap()
+}
+
+setup_signal_handler :: proc() {
+ _setup_signal_handler()
+}
+
+setup_task_signal_handler :: proc(test_index: int) {
+ _setup_task_signal_handler(test_index)
+}
+
+should_stop_runner :: proc() -> bool {
+ return _should_stop_runner()
+}
+
+should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) {
+ return _should_stop_test()
+}
diff --git a/core/testing/signal_handler_libc.odin b/core/testing/signal_handler_libc.odin
new file mode 100644
index 000000000..0ab34776e
--- /dev/null
+++ b/core/testing/signal_handler_libc.odin
@@ -0,0 +1,149 @@
+//+private
+//+build windows, linux, darwin, freebsd, openbsd, netbsd, haiku
+package testing
+
+import "base:intrinsics"
+import "core:c/libc"
+import "core:encoding/ansi"
+import "core:sync"
+import "core:os"
+@require import "core:sys/unix"
+
+@(private="file") stop_runner_flag: libc.sig_atomic_t
+
+@(private="file") stop_test_gate: sync.Mutex
+@(private="file") stop_test_index: libc.sig_atomic_t
+@(private="file") stop_test_reason: libc.sig_atomic_t
+@(private="file") stop_test_alert: libc.sig_atomic_t
+
+@(private="file", thread_local)
+local_test_index: libc.sig_atomic_t
+
+@(private="file")
+stop_runner_callback :: proc "c" (sig: libc.int) {
+ prev := intrinsics.atomic_add(&stop_runner_flag, 1)
+
+ // If the flag was already set (if this is the second signal sent for example),
+ // consider this a forced (not graceful) exit.
+ if prev > 0 {
+ os.exit(int(sig))
+ }
+}
+
+@(private="file")
+stop_test_callback :: proc "c" (sig: libc.int) {
+ if local_test_index == -1 {
+ // We're the test runner, and we ourselves have caught a signal from
+ // which there is no recovery.
+ //
+ // The most we can do now is make sure the user's cursor is visible,
+ // nuke the entire process, and hope a useful core dump survives.
+
+ // NOTE(Feoramund): Using these write calls in a signal handler is
+ // undefined behavior in C99 but possibly tolerated in POSIX 2008.
+ // Either way, we may as well try to salvage what we can.
+ show_cursor := ansi.CSI + ansi.DECTCEM_SHOW
+ libc.fwrite(raw_data(show_cursor), size_of(byte), len(show_cursor), libc.stdout)
+ libc.fflush(libc.stdout)
+
+ // This is an attempt at being compliant by avoiding printf.
+ sigbuf: [8]byte
+ sigstr: string
+ {
+ signum := cast(int)sig
+ i := len(sigbuf) - 2
+ for signum > 0 {
+ m := signum % 10
+ signum /= 10
+ sigbuf[i] = cast(u8)('0' + m)
+ i -= 1
+ }
+ sigstr = cast(string)sigbuf[1 + i:len(sigbuf) - 1]
+ }
+
+ advisory_a := `
+The test runner's main thread has caught an unrecoverable error (signal `
+ advisory_b := `) and will now forcibly terminate.
+This is a dire bug and should be reported to the Odin developers.
+`
+ libc.fwrite(raw_data(advisory_a), size_of(byte), len(advisory_a), libc.stderr)
+ libc.fwrite(raw_data(sigstr), size_of(byte), len(sigstr), libc.stderr)
+ libc.fwrite(raw_data(advisory_b), size_of(byte), len(advisory_b), libc.stderr)
+
+ // Try to get a core dump.
+ libc.abort()
+ }
+
+ if sync.mutex_guard(&stop_test_gate) {
+ intrinsics.atomic_store(&stop_test_index, local_test_index)
+ intrinsics.atomic_store(&stop_test_reason, cast(libc.sig_atomic_t)sig)
+ intrinsics.atomic_store(&stop_test_alert, 1)
+
+ for {
+ // Idle until this thread is terminated by the runner,
+ // otherwise we may continue to generate signals.
+ intrinsics.cpu_relax()
+
+ when ODIN_OS != .Windows {
+ // NOTE(Feoramund): Some UNIX-like platforms may require this.
+ //
+ // During testing, I found that NetBSD 10.0 refused to
+ // terminate a task thread, even when it had been
+ // properly set to PTHREAD_CANCEL_ASYNCHRONOUS.
+ //
+ // The runner would stall after returning from `pthread_cancel`.
+
+ unix.pthread_testcancel()
+ }
+ }
+ }
+}
+
+_setup_signal_handler :: proc() {
+ local_test_index = -1
+
+ // Catch user interrupt / CTRL-C.
+ libc.signal(libc.SIGINT, stop_runner_callback)
+ // Catch polite termination request.
+ libc.signal(libc.SIGTERM, stop_runner_callback)
+
+ // For tests:
+ // Catch asserts and panics.
+ libc.signal(libc.SIGILL, stop_test_callback)
+ // Catch arithmetic errors.
+ libc.signal(libc.SIGFPE, stop_test_callback)
+ // Catch segmentation faults (illegal memory access).
+ libc.signal(libc.SIGSEGV, stop_test_callback)
+}
+
+_setup_task_signal_handler :: proc(test_index: int) {
+ local_test_index = cast(libc.sig_atomic_t)test_index
+}
+
+_should_stop_runner :: proc() -> bool {
+ return intrinsics.atomic_load(&stop_runner_flag) == 1
+}
+
+@(private="file")
+unlock_stop_test_gate :: proc(_: int, _: Stop_Reason, ok: bool) {
+ if ok {
+ sync.mutex_unlock(&stop_test_gate)
+ }
+}
+
+@(deferred_out=unlock_stop_test_gate)
+_should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) {
+ if intrinsics.atomic_load(&stop_test_alert) == 1 {
+ intrinsics.atomic_store(&stop_test_alert, 0)
+
+ test_index = cast(int)intrinsics.atomic_load(&stop_test_index)
+ switch intrinsics.atomic_load(&stop_test_reason) {
+ case libc.SIGFPE: reason = .Arithmetic_Error
+ case libc.SIGILL: reason = .Illegal_Instruction
+ case libc.SIGSEGV: reason = .Segmentation_Fault
+ }
+ ok = true
+ }
+
+ return
+}
diff --git a/core/testing/signal_handler_other.odin b/core/testing/signal_handler_other.odin
new file mode 100644
index 000000000..04981f5af
--- /dev/null
+++ b/core/testing/signal_handler_other.odin
@@ -0,0 +1,19 @@
+//+private
+//+build !windows !linux !darwin !freebsd !openbsd !netbsd !haiku
+package testing
+
+_setup_signal_handler :: proc() {
+ // Do nothing.
+}
+
+_setup_task_signal_handler :: proc(test_index: int) {
+ // Do nothing.
+}
+
+_should_stop_runner :: proc() -> bool {
+ return false
+}
+
+_should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) {
+ return 0, {}, false
+}
diff --git a/core/testing/testing.odin b/core/testing/testing.odin
index a8c5ffa48..92b4d391d 100644
--- a/core/testing/testing.odin
+++ b/core/testing/testing.odin
@@ -1,10 +1,11 @@
package testing
-import "core:fmt"
-import "core:io"
-import "core:time"
import "base:intrinsics"
+import "base:runtime"
+import pkg_log "core:log"
import "core:reflect"
+import "core:sync/chan"
+import "core:time"
_ :: reflect // alias reflect to nothing to force visibility for -vet
@@ -22,44 +23,55 @@ Internal_Test :: struct {
Internal_Cleanup :: struct {
procedure: proc(rawptr),
user_data: rawptr,
+ ctx: runtime.Context,
}
T :: struct {
error_count: int,
- w: io.Writer,
+ // If your test needs to perform random operations, it's advised to use
+ // this value to seed a local random number generator rather than relying
+ // on the non-thread-safe global one.
+ //
+ // This way, your results will be deterministic.
+ //
+ // This value is chosen at startup of the test runner, logged, and may be
+ // specified by the user. It is the same for all tests of a single run.
+ seed: u64,
+
+ channel: Update_Channel_Sender,
cleanups: [dynamic]Internal_Cleanup,
+ // This allocator is shared between the test runner and its threads for
+ // cloning log strings, so they can outlive the lifetime of individual
+ // tests during channel transmission.
+ _log_allocator: runtime.Allocator,
+
_fail_now: proc() -> !,
}
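
As a usage sketch for the new `seed` field (assuming the `core:math/rand` API of this era, with `rand.create` and an explicit `^rand.Rand` parameter; the test itself is hypothetical), a test can build a local generator so its results stay deterministic:

package my_tests

import "core:math/rand"
import "core:testing"

@(test)
deterministic_random :: proc(t: ^testing.T) {
	// Seed a test-local generator from the runner-provided seed instead of
	// relying on the non-thread-safe global state.
	r := rand.create(t.seed)
	value := rand.float64(&r)
	testing.expect(t, value >= 0 && value < 1)
}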
+@(deprecated="prefer `log.error`")
error :: proc(t: ^T, args: ..any, loc := #caller_location) {
- fmt.wprintf(t.w, "%v: ", loc)
- fmt.wprintln(t.w, ..args)
- t.error_count += 1
+ pkg_log.error(..args, location = loc)
}
+@(deprecated="prefer `log.errorf`")
errorf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
- fmt.wprintf(t.w, "%v: ", loc)
- fmt.wprintf(t.w, format, ..args)
- fmt.wprintln(t.w)
- t.error_count += 1
+ pkg_log.errorf(format, ..args, location = loc)
}
fail :: proc(t: ^T, loc := #caller_location) {
- error(t, "FAIL", loc=loc)
- t.error_count += 1
+ pkg_log.error("FAIL", location=loc)
}
fail_now :: proc(t: ^T, msg := "", loc := #caller_location) {
if msg != "" {
- error(t, "FAIL:", msg, loc=loc)
+ pkg_log.error("FAIL:", msg, location=loc)
} else {
- error(t, "FAIL", loc=loc)
+ pkg_log.error("FAIL", location=loc)
}
- t.error_count += 1
if t._fail_now != nil {
t._fail_now()
}
@@ -69,32 +81,34 @@ failed :: proc(t: ^T) -> bool {
return t.error_count != 0
}
+@(deprecated="prefer `log.info`")
log :: proc(t: ^T, args: ..any, loc := #caller_location) {
- fmt.wprintln(t.w, ..args)
+ pkg_log.info(..args, location = loc)
}
+@(deprecated="prefer `log.infof`")
logf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
- fmt.wprintf(t.w, format, ..args)
- fmt.wprintln(t.w)
+ pkg_log.infof(format, ..args, location = loc)
}
-// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete
-// cleanup procedures will be called in LIFO (last added, first called) order.
+// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete.
+// Cleanup procedures will be called in LIFO (last added, first called) order.
+// Each procedure will use a copy of the context at the time of registering.
cleanup :: proc(t: ^T, procedure: proc(rawptr), user_data: rawptr) {
- append(&t.cleanups, Internal_Cleanup{procedure, user_data})
+ append(&t.cleanups, Internal_Cleanup{procedure, user_data, context})
}
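
A brief sketch of the documented LIFO behavior (hypothetical test and buffer names); each callback runs with the context that was current when it was registered, so allocations made with that context can be freed safely:

package my_tests

import "core:testing"

@(test)
cleanup_order :: proc(t: ^testing.T) {
	buf := new([64]u8)
	// Registered first, so it runs last: frees the buffer.
	testing.cleanup(t, proc(raw: rawptr) {
		free((^[64]u8)(raw))
	}, buf)
	// Registered second, so it runs first, while the buffer is still alive.
	testing.cleanup(t, proc(raw: rawptr) {
		b := (^[64]u8)(raw)
		b[0] = 0
	}, buf)
}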
expect :: proc(t: ^T, ok: bool, msg: string = "", loc := #caller_location) -> bool {
if !ok {
- error(t, msg, loc=loc)
+ pkg_log.error(msg, location=loc)
}
return ok
}
expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_location) -> bool {
if !ok {
- errorf(t, format, ..args, loc=loc)
+ pkg_log.errorf(format, ..args, location=loc)
}
return ok
}
@@ -102,12 +116,15 @@ expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_loc
expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
if !ok {
- errorf(t, "expected %v, got %v", expected, value, loc=loc)
+ pkg_log.errorf("expected %v, got %v", expected, value, location=loc)
}
return ok
}
set_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) {
- _fail_timeout(t, duration, loc)
+ chan.send(t.channel, Event_Set_Fail_Timeout {
+ at_time = time.time_add(time.now(), duration),
+ location = loc,
+ })
}
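
`set_fail_timeout` now just sends an `Event_Set_Fail_Timeout` over the test's channel, but the call-side usage is unchanged; a minimal sketch (hypothetical test name):

package my_tests

import "core:testing"
import "core:time"

@(test)
might_hang :: proc(t: ^testing.T) {
	// Ask the runner to fail this test if it has not completed within 2 seconds.
	testing.set_fail_timeout(t, 2 * time.Second)

	// ... the actual work that could potentially stall ...
}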
diff --git a/core/text/i18n/qt_linguist.odin b/core/text/i18n/qt_linguist.odin
index 0e75df873..bdd3f5fd7 100644
--- a/core/text/i18n/qt_linguist.odin
+++ b/core/text/i18n/qt_linguist.odin
@@ -162,8 +162,6 @@ parse_qt_linguist_file :: proc(filename: string, options := DEFAULT_PARSE_OPTION
context.allocator = allocator
data, data_ok := os.read_entire_file(filename)
- defer delete(data)
-
if !data_ok { return {}, .File_Error }
return parse_qt_linguist_from_bytes(data, options, pluralizer, allocator)
diff --git a/core/thread/thread_pool.odin b/core/thread/thread_pool.odin
index fddcac89e..da5e116ff 100644
--- a/core/thread/thread_pool.odin
+++ b/core/thread/thread_pool.odin
@@ -44,6 +44,29 @@ Pool :: struct {
tasks_done: [dynamic]Task,
}
+Pool_Thread_Data :: struct {
+ pool: ^Pool,
+ task: Task,
+}
+
+@(private="file")
+pool_thread_runner :: proc(t: ^Thread) {
+ data := cast(^Pool_Thread_Data)t.data
+ pool := data.pool
+
+ for intrinsics.atomic_load(&pool.is_running) {
+ sync.wait(&pool.sem_available)
+
+ if task, ok := pool_pop_waiting(pool); ok {
+ data.task = task
+ pool_do_work(pool, task)
+ data.task = {}
+ }
+ }
+
+ sync.post(&pool.sem_available, 1)
+}
+
// Once initialized, the pool's memory address is not allowed to change until
// it is destroyed.
//
@@ -58,21 +81,11 @@ pool_init :: proc(pool: ^Pool, allocator: mem.Allocator, thread_count: int) {
pool.is_running = true
for _, i in pool.threads {
- t := create(proc(t: ^Thread) {
- pool := (^Pool)(t.data)
-
- for intrinsics.atomic_load(&pool.is_running) {
- sync.wait(&pool.sem_available)
-
- if task, ok := pool_pop_waiting(pool); ok {
- pool_do_work(pool, task)
- }
- }
-
- sync.post(&pool.sem_available, 1)
- })
+ t := create(pool_thread_runner)
+ data := new(Pool_Thread_Data)
+ data.pool = pool
t.user_index = i
- t.data = pool
+ t.data = data
pool.threads[i] = t
}
}
@@ -82,6 +95,8 @@ pool_destroy :: proc(pool: ^Pool) {
delete(pool.tasks_done)
for &t in pool.threads {
+ data := cast(^Pool_Thread_Data)t.data
+ free(data, pool.allocator)
destroy(t)
}
@@ -103,7 +118,7 @@ pool_join :: proc(pool: ^Pool) {
yield()
-started_count: int
+ started_count: int
for started_count < len(pool.threads) {
started_count = 0
for t in pool.threads {
@@ -138,6 +153,94 @@ pool_add_task :: proc(pool: ^Pool, allocator: mem.Allocator, procedure: Task_Pro
sync.post(&pool.sem_available, 1)
}
+// Forcibly stop a running task by its user index.
+//
+// This will terminate the underlying thread. Ideally, you should use some
+// means of communication to stop a task, as thread termination may leave
+// resources unclaimed.
+//
+// The thread will be restarted to accept new tasks.
+//
+// Returns true if the task was found and terminated.
+pool_stop_task :: proc(pool: ^Pool, user_index: int, exit_code: int = 1) -> bool {
+ sync.guard(&pool.mutex)
+
+ for t, i in pool.threads {
+ data := cast(^Pool_Thread_Data)t.data
+ if data.task.user_index == user_index && data.task.procedure != nil {
+ terminate(t, exit_code)
+
+ append(&pool.tasks_done, data.task)
+ intrinsics.atomic_add(&pool.num_done, 1)
+ intrinsics.atomic_sub(&pool.num_outstanding, 1)
+ intrinsics.atomic_sub(&pool.num_in_processing, 1)
+
+ destroy(t)
+
+ replacement := create(pool_thread_runner)
+ replacement.user_index = t.user_index
+ replacement.data = data
+ data.task = {}
+ pool.threads[i] = replacement
+
+ start(replacement)
+ return true
+ }
+ }
+
+ return false
+}
+
+// Forcibly stop all running tasks.
+//
+// The same notes from `pool_stop_task` apply here.
+pool_stop_all_tasks :: proc(pool: ^Pool, exit_code: int = 1) {
+ sync.guard(&pool.mutex)
+
+ for t, i in pool.threads {
+ data := cast(^Pool_Thread_Data)t.data
+ if data.task.procedure != nil {
+ terminate(t, exit_code)
+
+ append(&pool.tasks_done, data.task)
+ intrinsics.atomic_add(&pool.num_done, 1)
+ intrinsics.atomic_sub(&pool.num_outstanding, 1)
+ intrinsics.atomic_sub(&pool.num_in_processing, 1)
+
+ destroy(t)
+
+ replacement := create(pool_thread_runner)
+ replacement.user_index = t.user_index
+ replacement.data = data
+ data.task = {}
+ pool.threads[i] = replacement
+
+ start(replacement)
+ }
+ }
+}
+
+// Force the pool to stop all of its threads and put it into a state where
+// it will no longer run any more tasks.
+//
+// The pool must still be destroyed after this.
+pool_shutdown :: proc(pool: ^Pool, exit_code: int = 1) {
+ intrinsics.atomic_store(&pool.is_running, false)
+ sync.guard(&pool.mutex)
+
+ for t in pool.threads {
+ terminate(t, exit_code)
+
+ data := cast(^Pool_Thread_Data)t.data
+ if data.task.procedure != nil {
+ append(&pool.tasks_done, data.task)
+ intrinsics.atomic_add(&pool.num_done, 1)
+ intrinsics.atomic_sub(&pool.num_outstanding, 1)
+ intrinsics.atomic_sub(&pool.num_in_processing, 1)
+ }
+ }
+}
+
// Number of tasks waiting to be processed. Only informational, mostly for
// debugging. Don't rely on this value being consistent with other num_*
// values.
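
A minimal sketch of how the new stop and shutdown procedures can be driven from a supervising thread, mirroring how the test runner above uses them (the task procedure and user index here are hypothetical):

package pool_example

import "core:thread"

long_task :: proc(task: thread.Task) {
	for {
		// Stand-in for work that may need to be cancelled forcibly.
	}
}

main :: proc() {
	pool: thread.Pool
	thread.pool_init(&pool, context.allocator, 4)
	defer thread.pool_destroy(&pool)
	thread.pool_start(&pool)

	thread.pool_add_task(&pool, context.allocator, long_task, nil, 42)

	// Forcibly stop one running task by its user index; the worker thread is
	// terminated and replaced so the pool keeps accepting new tasks.
	thread.pool_stop_task(&pool, 42)

	// Put the pool into a state where it runs nothing further.
	thread.pool_shutdown(&pool)
}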
diff --git a/core/thread/thread_unix.odin b/core/thread/thread_unix.odin
index acc0e05cb..5291917da 100644
--- a/core/thread/thread_unix.odin
+++ b/core/thread/thread_unix.odin
@@ -2,11 +2,11 @@
// +private
package thread
-import "base:intrinsics"
import "core:sync"
import "core:sys/unix"
+import "core:time"
-CAS :: intrinsics.atomic_compare_exchange_strong
+CAS :: sync.atomic_compare_exchange_strong
// NOTE(tetra): Aligned here because of core/unix/pthread_linux.odin/pthread_t.
// Also see core/sys/darwin/mach_darwin.odin/semaphore_t.
@@ -32,11 +32,13 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
t.id = sync.current_thread_id()
- for (.Started not_in t.flags) {
- sync.wait(&t.cond, &t.mutex)
+ for (.Started not_in sync.atomic_load(&t.flags)) {
+ // HACK: use a timeout so that, if the condition is signalled at THIS comment's exact point
+ // (after checking flags, before starting the wait), the thread gets itself out of that deadlock after a millisecond.
+ sync.wait_with_timeout(&t.cond, &t.mutex, time.Millisecond)
}
- if .Joined in t.flags {
+ if .Joined in sync.atomic_load(&t.flags) {
return nil
}
@@ -60,11 +62,11 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
t.procedure(t)
}
- intrinsics.atomic_store(&t.flags, t.flags + { .Done })
+ sync.atomic_or(&t.flags, { .Done })
sync.unlock(&t.mutex)
- if .Self_Cleanup in t.flags {
+ if .Self_Cleanup in sync.atomic_load(&t.flags) {
t.unix_thread = {}
// NOTE(ftphikari): It doesn't matter which context 'free' received, right?
context = {}
@@ -122,13 +124,12 @@ _create :: proc(procedure: Thread_Proc, priority: Thread_Priority) -> ^Thread {
}
_start :: proc(t: ^Thread) {
- // sync.guard(&t.mutex)
- t.flags += { .Started }
+ sync.atomic_or(&t.flags, { .Started })
sync.signal(&t.cond)
}
_is_done :: proc(t: ^Thread) -> bool {
- return .Done in intrinsics.atomic_load(&t.flags)
+ return .Done in sync.atomic_load(&t.flags)
}
_join :: proc(t: ^Thread) {
@@ -139,7 +140,7 @@ _join :: proc(t: ^Thread) {
}
// Preserve other flags besides `.Joined`, like `.Started`.
- unjoined := intrinsics.atomic_load(&t.flags) - {.Joined}
+ unjoined := sync.atomic_load(&t.flags) - {.Joined}
joined := unjoined + {.Joined}
// Try to set `t.flags` from unjoined to joined. If it returns joined,
diff --git a/core/time/datetime/datetime.odin b/core/time/datetime/datetime.odin
index e15ced5a5..89fa2ce98 100644
--- a/core/time/datetime/datetime.odin
+++ b/core/time/datetime/datetime.odin
@@ -127,13 +127,13 @@ days_remaining :: proc "contextless" (date: Date) -> (days_remaining: i64, err:
return delta.days, .None
}
-last_day_of_month :: proc "contextless" (#any_int year: i64, #any_int month: i8) -> (day: i64, err: Error) {
+last_day_of_month :: proc "contextless" (#any_int year: i64, #any_int month: i8) -> (day: i8, err: Error) {
// Not using formula 2.27 from the book. This is far simpler and gives the same answer.
validate(Date{year, month, 1}) or_return
month_days := MONTH_DAYS
- day = i64(month_days[month])
+ day = month_days[month]
if month == 2 && is_leap_year(year) {
day += 1
}
diff --git a/core/time/iso8061.odin b/core/time/iso8601.odin
index 528e0b00a..528e0b00a 100644
--- a/core/time/iso8061.odin
+++ b/core/time/iso8601.odin
diff --git a/core/time/time.odin b/core/time/time.odin
index 4807af840..4575b36f7 100644
--- a/core/time/time.odin
+++ b/core/time/time.odin
@@ -389,6 +389,7 @@ is_leap_year :: proc "contextless" (year: int) -> (leap: bool) {
return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0)
}
+@(rodata)
days_before := [?]i32{
0,
31,
diff --git a/core/time/time_orca.odin b/core/time/time_orca.odin
new file mode 100644
index 000000000..d222c8247
--- /dev/null
+++ b/core/time/time_orca.odin
@@ -0,0 +1,24 @@
+//+private
+//+build orca
+package time
+
+_IS_SUPPORTED :: false
+
+_now :: proc "contextless" () -> Time {
+ return {}
+}
+
+_sleep :: proc "contextless" (d: Duration) {
+}
+
+_tick_now :: proc "contextless" () -> Tick {
+ // mul_div_u64 :: proc "contextless" (val, num, den: i64) -> i64 {
+ // q := val / den
+ // r := val % den
+ // return q * num + r * num / den
+ // }
+ return {}
+}
+
+_yield :: proc "contextless" () {
+}
diff --git a/core/unicode/tables.odin b/core/unicode/tables.odin
index f43827413..dfa5caaa2 100644
--- a/core/unicode/tables.odin
+++ b/core/unicode/tables.odin
@@ -12,6 +12,7 @@ package unicode
@(private) pLo :: pLl | pLu // a letter that is neither upper nor lower case.
@(private) pLmask :: pLo
+@(rodata)
char_properties := [MAX_LATIN1+1]u8{
0x00 = pC, // '\x00'
0x01 = pC, // '\x01'
@@ -272,6 +273,7 @@ char_properties := [MAX_LATIN1+1]u8{
}
+@(rodata)
alpha_ranges := [?]i32{
0x00d8, 0x00f6,
0x00f8, 0x01f5,
@@ -427,6 +429,7 @@ alpha_ranges := [?]i32{
0xffda, 0xffdc,
}
+@(rodata)
alpha_singlets := [?]i32{
0x00aa,
0x00b5,
@@ -462,6 +465,7 @@ alpha_singlets := [?]i32{
0xfe74,
}
+@(rodata)
space_ranges := [?]i32{
0x0009, 0x000d, // tab and newline
0x0020, 0x0020, // space
@@ -477,6 +481,7 @@ space_ranges := [?]i32{
0xfeff, 0xfeff,
}
+@(rodata)
unicode_spaces := [?]i32{
0x0009, // tab
0x000a, // LF
@@ -494,6 +499,7 @@ unicode_spaces := [?]i32{
0xfeff, // unknown
}
+@(rodata)
to_upper_ranges := [?]i32{
0x0061, 0x007a, 468, // a-z A-Z
0x00e0, 0x00f6, 468,
@@ -532,6 +538,7 @@ to_upper_ranges := [?]i32{
0xff41, 0xff5a, 468,
}
+@(rodata)
to_upper_singlets := [?]i32{
0x00ff, 621,
0x0101, 499,
@@ -875,6 +882,7 @@ to_upper_singlets := [?]i32{
0x1ff3, 509,
}
+@(rodata)
to_lower_ranges := [?]i32{
0x0041, 0x005a, 532, // A-Z a-z
0x00c0, 0x00d6, 532, // - -
@@ -914,6 +922,7 @@ to_lower_ranges := [?]i32{
0xff21, 0xff3a, 532, // - -
}
+@(rodata)
to_lower_singlets := [?]i32{
0x0100, 501,
0x0102, 501,
@@ -1250,6 +1259,7 @@ to_lower_singlets := [?]i32{
0x1ffc, 491,
}
+@(rodata)
to_title_singlets := [?]i32{
0x01c4, 501,
0x01c6, 499,
diff --git a/examples/all/all_main.odin b/examples/all/all_main.odin
index 1077df1ae..6c3972987 100644
--- a/examples/all/all_main.odin
+++ b/examples/all/all_main.odin
@@ -25,6 +25,7 @@ import rbtree "core:container/rbtree"
import topological_sort "core:container/topological_sort"
import crypto "core:crypto"
+import aes "core:crypto/aes"
import blake2b "core:crypto/blake2b"
import blake2s "core:crypto/blake2s"
import chacha20 "core:crypto/chacha20"
@@ -150,6 +151,7 @@ _ :: rbtree
_ :: topological_sort
_ :: crypto
_ :: crypto_hash
+_ :: aes
_ :: blake2b
_ :: blake2s
_ :: chacha20
diff --git a/src/bug_report.cpp b/src/bug_report.cpp
index 1f754ce7c..dab8c4391 100644
--- a/src/bug_report.cpp
+++ b/src/bug_report.cpp
@@ -910,6 +910,7 @@ gb_internal void report_os_info() {
{"23D60", {23, 3, 0}, "macOS", {"Sonoma", {14, 3, 1}}},
{"23E214", {23, 4, 0}, "macOS", {"Sonoma", {14, 4, 0}}},
{"23E224", {23, 4, 0}, "macOS", {"Sonoma", {14, 4, 1}}},
+ {"23F79", {23, 5, 0}, "macOS", {"Sonoma", {14, 5, 0}}},
};
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index d9454ba9b..dc11a5fd2 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -23,6 +23,7 @@ enum TargetOsKind : u16 {
TargetOs_wasi,
TargetOs_js,
+ TargetOs_orca,
TargetOs_freestanding,
@@ -90,6 +91,7 @@ gb_global String target_os_names[TargetOs_COUNT] = {
str_lit("wasi"),
str_lit("js"),
+ str_lit("orca"),
str_lit("freestanding"),
};
@@ -694,6 +696,12 @@ enum TimingsExportFormat : i32 {
TimingsExportCSV = 2,
};
+enum DependenciesExportFormat : i32 {
+ DependenciesExportUnspecified = 0,
+ DependenciesExportMake = 1,
+ DependenciesExportJson = 2,
+};
+
enum ErrorPosStyle {
ErrorPosStyle_Default, // path(line:column) msg
ErrorPosStyle_Unix, // path:line:column: msg
@@ -831,6 +839,8 @@ struct BuildContext {
bool show_timings;
TimingsExportFormat export_timings_format;
String export_timings_file;
+ DependenciesExportFormat export_dependencies_format;
+ String export_dependencies_file;
bool show_unused;
bool show_unused_with_location;
bool show_more_timings;
@@ -891,7 +901,6 @@ struct BuildContext {
u32 cmd_doc_flags;
Array<String> extra_packages;
- StringSet test_names;
bool test_all_packages;
gbAffinity affinity;
@@ -1030,6 +1039,13 @@ gb_global TargetMetrics target_netbsd_amd64 = {
str_lit("x86_64-unknown-netbsd-elf"),
};
+gb_global TargetMetrics target_netbsd_arm64 = {
+ TargetOs_netbsd,
+ TargetArch_arm64,
+ 8, 8, 16, 16,
+ str_lit("aarch64-unknown-netbsd-elf"),
+};
+
gb_global TargetMetrics target_haiku_amd64 = {
TargetOs_haiku,
TargetArch_amd64,
@@ -1067,6 +1083,14 @@ gb_global TargetMetrics target_wasi_wasm32 = {
};
+gb_global TargetMetrics target_orca_wasm32 = {
+ TargetOs_orca,
+ TargetArch_wasm32,
+ 4, 4, 8, 16,
+ str_lit("wasm32-wasi-js"),
+};
+
+
gb_global TargetMetrics target_freestanding_wasm64p32 = {
TargetOs_freestanding,
TargetArch_wasm64p32,
@@ -1113,6 +1137,14 @@ gb_global TargetMetrics target_freestanding_arm64 = {
str_lit("aarch64-none-elf"),
};
+gb_global TargetMetrics target_freestanding_arm32 = {
+ TargetOs_freestanding,
+ TargetArch_arm32,
+ 4, 4, 4, 8,
+ str_lit("arm-unknown-unknown-gnueabihf"),
+};
+
+
struct NamedTargetMetrics {
String name;
TargetMetrics *metrics;
@@ -1136,13 +1168,16 @@ gb_global NamedTargetMetrics named_targets[] = {
{ str_lit("freebsd_amd64"), &target_freebsd_amd64 },
{ str_lit("freebsd_arm64"), &target_freebsd_arm64 },
- { str_lit("openbsd_amd64"), &target_openbsd_amd64 },
{ str_lit("netbsd_amd64"), &target_netbsd_amd64 },
+ { str_lit("netbsd_arm64"), &target_netbsd_arm64 },
+
+ { str_lit("openbsd_amd64"), &target_openbsd_amd64 },
{ str_lit("haiku_amd64"), &target_haiku_amd64 },
{ str_lit("freestanding_wasm32"), &target_freestanding_wasm32 },
{ str_lit("wasi_wasm32"), &target_wasi_wasm32 },
{ str_lit("js_wasm32"), &target_js_wasm32 },
+ { str_lit("orca_wasm32"), &target_orca_wasm32 },
{ str_lit("freestanding_wasm64p32"), &target_freestanding_wasm64p32 },
{ str_lit("js_wasm64p32"), &target_js_wasm64p32 },
@@ -1152,6 +1187,7 @@ gb_global NamedTargetMetrics named_targets[] = {
{ str_lit("freestanding_amd64_win64"), &target_freestanding_amd64_win64 },
{ str_lit("freestanding_arm64"), &target_freestanding_arm64 },
+ { str_lit("freestanding_arm32"), &target_freestanding_arm32 },
};
gb_global NamedTargetMetrics *selected_target_metrics;
@@ -1898,7 +1934,11 @@ gb_internal void init_build_context(TargetMetrics *cross_target, Subtarget subta
#elif defined(GB_SYSTEM_OPENBSD)
metrics = &target_openbsd_amd64;
#elif defined(GB_SYSTEM_NETBSD)
- metrics = &target_netbsd_amd64;
+ #if defined(GB_CPU_ARM)
+ metrics = &target_netbsd_arm64;
+ #else
+ metrics = &target_netbsd_amd64;
+ #endif
#elif defined(GB_SYSTEM_HAIKU)
metrics = &target_haiku_amd64;
#elif defined(GB_CPU_ARM)
@@ -2004,6 +2044,9 @@ gb_internal void init_build_context(TargetMetrics *cross_target, Subtarget subta
bc->link_flags = str_lit("/machine:x86 ");
break;
}
+ } else if (bc->metrics.os == TargetOs_darwin) {
+ bc->link_flags = concatenate3_strings(permanent_allocator(),
+ str_lit("-target "), bc->metrics.target_triplet, str_lit(" "));
} else if (is_arch_wasm()) {
gbString link_flags = gb_string_make(heap_allocator(), " ");
// link_flags = gb_string_appendc(link_flags, "--export-all ");
@@ -2012,17 +2055,22 @@ gb_internal void init_build_context(TargetMetrics *cross_target, Subtarget subta
// if (bc->metrics.arch == TargetArch_wasm64) {
// link_flags = gb_string_appendc(link_flags, "-mwasm64 ");
// }
- if (bc->no_entry_point) {
+ if (bc->no_entry_point || bc->metrics.os == TargetOs_orca) {
link_flags = gb_string_appendc(link_flags, "--no-entry ");
}
-
+
bc->link_flags = make_string_c(link_flags);
-
+
// Disallow on wasm
bc->use_separate_modules = false;
} else {
- bc->link_flags = concatenate3_strings(permanent_allocator(),
- str_lit("-target "), bc->metrics.target_triplet, str_lit(" "));
+ // NOTE: for targets other than darwin, we don't specify a `-target` link flag.
+ // This is because we don't support cross-linking and clang is better at figuring
+ // out what the actual target for linking is;
+ // for example, on x86/alpine/musl it HAS to be `x86_64-alpine-linux-musl` to link correctly.
+ //
+ // Note that codegen will still target the triplet we specify, but the intricate details of
+ // a target shouldn't matter as much to codegen (if it does at all) as it does to linking.
}
// NOTE: needs to be done after adding the -target flag to the linker flags so the linker
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index d85e94db3..3aee804df 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -1079,7 +1079,7 @@ gb_internal bool check_builtin_simd_operation(CheckerContext *c, Operand *operan
return false;
}
-gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String const &original_string, bool err_on_not_found, LoadFileCache **cache_) {
+gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String const &original_string, bool err_on_not_found, LoadFileCache **cache_, LoadFileTier tier) {
ast_node(ce, CallExpr, call);
ast_node(bd, BasicDirective, ce->proc);
String builtin_name = bd->name.string;
@@ -1105,12 +1105,16 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
gbFileError file_error = gbFileError_None;
String data = {};
+ bool exists = false;
+ LoadFileTier cache_tier = LoadFileTier_Invalid;
LoadFileCache **cache_ptr = string_map_get(&c->info->load_file_cache, path);
LoadFileCache *cache = cache_ptr ? *cache_ptr : nullptr;
if (cache) {
file_error = cache->file_error;
data = cache->data;
+ exists = cache->exists;
+ cache_tier = cache->tier;
}
defer ({
if (cache == nullptr) {
@@ -1118,60 +1122,78 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
new_cache->path = path;
new_cache->data = data;
new_cache->file_error = file_error;
+ new_cache->exists = exists;
+ new_cache->tier = cache_tier;
string_map_init(&new_cache->hashes, 32);
string_map_set(&c->info->load_file_cache, path, new_cache);
if (cache_) *cache_ = new_cache;
} else {
cache->data = data;
cache->file_error = file_error;
+ cache->exists = exists;
+ cache->tier = cache_tier;
if (cache_) *cache_ = cache;
}
});
- TEMPORARY_ALLOCATOR_GUARD();
- char *c_str = alloc_cstring(temporary_allocator(), path);
+ if (tier > cache_tier) {
+ cache_tier = tier;
- gbFile f = {};
- if (cache == nullptr) {
+ TEMPORARY_ALLOCATOR_GUARD();
+ char *c_str = alloc_cstring(temporary_allocator(), path);
+
+ gbFile f = {};
file_error = gb_file_open(&f, c_str);
+ defer (gb_file_close(&f));
+
+ if (file_error == gbFileError_None) {
+ exists = true;
+
+ switch(tier) {
+ case LoadFileTier_Exists:
+ // Nothing to do.
+ break;
+ case LoadFileTier_Contents: {
+ isize file_size = cast(isize)gb_file_size(&f);
+ if (file_size > 0) {
+ u8 *ptr = cast(u8 *)gb_alloc(permanent_allocator(), file_size+1);
+ gb_file_read_at(&f, ptr, file_size, 0);
+ ptr[file_size] = '\0';
+ data.text = ptr;
+ data.len = file_size;
+ }
+ break;
+ }
+ default:
+ GB_PANIC("Unhandled LoadFileTier");
+ };
+ }
}
- defer (gb_file_close(&f));
switch (file_error) {
default:
case gbFileError_Invalid:
if (err_on_not_found) {
- error(ce->proc, "Failed to `#%.*s` file: %s; invalid file or cannot be found", LIT(builtin_name), c_str);
+ error(ce->proc, "Failed to `#%.*s` file: %.*s; invalid file or cannot be found", LIT(builtin_name), LIT(path));
}
call->state_flags |= StateFlag_DirectiveWasFalse;
return false;
case gbFileError_NotExists:
if (err_on_not_found) {
- error(ce->proc, "Failed to `#%.*s` file: %s; file cannot be found", LIT(builtin_name), c_str);
+ error(ce->proc, "Failed to `#%.*s` file: %.*s; file cannot be found", LIT(builtin_name), LIT(path));
}
call->state_flags |= StateFlag_DirectiveWasFalse;
return false;
case gbFileError_Permission:
if (err_on_not_found) {
- error(ce->proc, "Failed to `#%.*s` file: %s; file permissions problem", LIT(builtin_name), c_str);
+ error(ce->proc, "Failed to `#%.*s` file: %.*s; file permissions problem", LIT(builtin_name), LIT(path));
}
call->state_flags |= StateFlag_DirectiveWasFalse;
return false;
case gbFileError_None:
// Okay
break;
- }
-
- if (cache == nullptr) {
- isize file_size = cast(isize)gb_file_size(&f);
- if (file_size > 0) {
- u8 *ptr = cast(u8 *)gb_alloc(permanent_allocator(), file_size+1);
- gb_file_read_at(&f, ptr, file_size, 0);
- ptr[file_size] = '\0';
- data.text = ptr;
- data.len = file_size;
- }
- }
+ };
return true;
}
@@ -1263,7 +1285,7 @@ gb_internal LoadDirectiveResult check_load_directive(CheckerContext *c, Operand
operand->mode = Addressing_Constant;
LoadFileCache *cache = nullptr;
- if (cache_load_file_directive(c, call, o.value.value_string, err_on_not_found, &cache)) {
+ if (cache_load_file_directive(c, call, o.value.value_string, err_on_not_found, &cache, LoadFileTier_Contents)) {
operand->value = exact_value_string(cache->data);
return LoadDirective_Success;
}
@@ -1345,6 +1367,8 @@ gb_internal LoadDirectiveResult check_load_directory_directive(CheckerContext *c
map_set(&c->info->load_directory_map, call, new_cache);
} else {
cache->file_error = file_error;
+
+ map_set(&c->info->load_directory_map, call, cache);
}
});
@@ -1389,7 +1413,7 @@ gb_internal LoadDirectiveResult check_load_directory_directive(CheckerContext *c
for (FileInfo fi : list) {
LoadFileCache *cache = nullptr;
- if (cache_load_file_directive(c, call, fi.fullpath, err_on_not_found, &cache)) {
+ if (cache_load_file_directive(c, call, fi.fullpath, err_on_not_found, &cache, LoadFileTier_Contents)) {
array_add(&file_caches, cache);
} else {
result = LoadDirective_Error;
@@ -1403,6 +1427,65 @@ gb_internal LoadDirectiveResult check_load_directory_directive(CheckerContext *c
return result;
}
+gb_internal bool check_hash_kind(CheckerContext *c, Ast *call, String const &hash_kind, u8 const *data, isize data_size, u64 *hash_value) {
+ ast_node(ce, CallExpr, call);
+ ast_node(bd, BasicDirective, ce->proc);
+ String name = bd->name.string;
+ GB_ASSERT(name == "load_hash" || name == "hash");
+
+ String supported_hashes[] = {
+ str_lit("adler32"),
+ str_lit("crc32"),
+ str_lit("crc64"),
+ str_lit("fnv32"),
+ str_lit("fnv64"),
+ str_lit("fnv32a"),
+ str_lit("fnv64a"),
+ str_lit("murmur32"),
+ str_lit("murmur64"),
+ };
+
+ bool hash_found = false;
+ for (isize i = 0; i < gb_count_of(supported_hashes); i++) {
+ if (supported_hashes[i] == hash_kind) {
+ hash_found = true;
+ break;
+ }
+ }
+ if (!hash_found) {
+ ERROR_BLOCK();
+ error(ce->proc, "Invalid hash kind passed to `#%.*s`, got: %.*s", LIT(name), LIT(hash_kind));
+ error_line("\tAvailable hash kinds:\n");
+ for (isize i = 0; i < gb_count_of(supported_hashes); i++) {
+ error_line("\t%.*s\n", LIT(supported_hashes[i]));
+ }
+ return false;
+ }
+
+ if (hash_kind == "adler32") {
+ *hash_value = gb_adler32(data, data_size);
+ } else if (hash_kind == "crc32") {
+ *hash_value = gb_crc32(data, data_size);
+ } else if (hash_kind == "crc64") {
+ *hash_value = gb_crc64(data, data_size);
+ } else if (hash_kind == "fnv32") {
+ *hash_value = gb_fnv32(data, data_size);
+ } else if (hash_kind == "fnv64") {
+ *hash_value = gb_fnv64(data, data_size);
+ } else if (hash_kind == "fnv32a") {
+ *hash_value = fnv32a(data, data_size);
+ } else if (hash_kind == "fnv64a") {
+ *hash_value = fnv64a(data, data_size);
+ } else if (hash_kind == "murmur32") {
+ *hash_value = gb_murmur32(data, data_size);
+ } else if (hash_kind == "murmur64") {
+ *hash_value = gb_murmur64(data, data_size);
+ } else {
+ compiler_error("unhandled hash kind: %.*s", LIT(hash_kind));
+ }
+ return true;
+}
+
gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *operand, Ast *call, Type *type_hint) {
@@ -1429,6 +1512,30 @@ gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *o
operand->type = t_source_code_location;
operand->mode = Addressing_Value;
+ } else if (name == "exists") {
+ if (ce->args.count != 1) {
+ error(ce->close, "'#exists' expects 1 argument, got %td", ce->args.count);
+ return false;
+ }
+
+ Operand o = {};
+ check_expr(c, &o, ce->args[0]);
+ if (o.mode != Addressing_Constant || !is_type_string(o.type)) {
+ error(ce->args[0], "'#exists' expected a constant string argument");
+ return false;
+ }
+
+ operand->type = t_untyped_bool;
+ operand->mode = Addressing_Constant;
+
+ String original_string = o.value.value_string;
+ LoadFileCache *cache = nullptr;
+ if (cache_load_file_directive(c, call, original_string, /* err_on_not_found=*/ false, &cache, LoadFileTier_Exists)) {
+ operand->value = exact_value_bool(cache->exists);
+ } else {
+ operand->value = exact_value_bool(false);
+ }
+
} else if (name == "load") {
return check_load_directive(c, operand, call, type_hint, true) == LoadDirective_Success;
} else if (name == "load_directory") {
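
The new `#exists` directive takes a constant string path and resolves to a constant untyped boolean, so it composes with `when`; a hedged usage sketch (the file path is hypothetical):

package example

when #exists("assets/config.ini") {
	CONFIG :: #load("assets/config.ini", string)
} else {
	CONFIG :: ""
}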
@@ -1480,37 +1587,8 @@ gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *o
String original_string = o.value.value_string;
String hash_kind = o_hash.value.value_string;
- String supported_hashes[] = {
- str_lit("adler32"),
- str_lit("crc32"),
- str_lit("crc64"),
- str_lit("fnv32"),
- str_lit("fnv64"),
- str_lit("fnv32a"),
- str_lit("fnv64a"),
- str_lit("murmur32"),
- str_lit("murmur64"),
- };
-
- bool hash_found = false;
- for (isize i = 0; i < gb_count_of(supported_hashes); i++) {
- if (supported_hashes[i] == hash_kind) {
- hash_found = true;
- break;
- }
- }
- if (!hash_found) {
- ERROR_BLOCK();
- error(ce->proc, "Invalid hash kind passed to `#load_hash`, got: %.*s", LIT(hash_kind));
- error_line("\tAvailable hash kinds:\n");
- for (isize i = 0; i < gb_count_of(supported_hashes); i++) {
- error_line("\t%.*s\n", LIT(supported_hashes[i]));
- }
- return false;
- }
-
LoadFileCache *cache = nullptr;
- if (cache_load_file_directive(c, call, original_string, true, &cache)) {
+ if (cache_load_file_directive(c, call, original_string, true, &cache, LoadFileTier_Contents)) {
MUTEX_GUARD(&c->info->load_file_mutex);
// TODO(bill): make these procedures fast :P
u64 hash_value = 0;
@@ -1520,26 +1598,9 @@ gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *o
} else {
u8 *data = cache->data.text;
isize file_size = cache->data.len;
- if (hash_kind == "adler32") {
- hash_value = gb_adler32(data, file_size);
- } else if (hash_kind == "crc32") {
- hash_value = gb_crc32(data, file_size);
- } else if (hash_kind == "crc64") {
- hash_value = gb_crc64(data, file_size);
- } else if (hash_kind == "fnv32") {
- hash_value = gb_fnv32(data, file_size);
- } else if (hash_kind == "fnv64") {
- hash_value = gb_fnv64(data, file_size);
- } else if (hash_kind == "fnv32a") {
- hash_value = fnv32a(data, file_size);
- } else if (hash_kind == "fnv64a") {
- hash_value = fnv64a(data, file_size);
- } else if (hash_kind == "murmur32") {
- hash_value = gb_murmur32(data, file_size);
- } else if (hash_kind == "murmur64") {
- hash_value = gb_murmur64(data, file_size);
- } else {
- compiler_error("unhandled hash kind: %.*s", LIT(hash_kind));
+
+ if (!check_hash_kind(c, call, hash_kind, data, file_size, &hash_value)) {
+ return false;
}
string_map_set(&cache->hashes, hash_kind, hash_value);
}
@@ -1550,6 +1611,62 @@ gb_internal bool check_builtin_procedure_directive(CheckerContext *c, Operand *o
return true;
}
return false;
+ } else if (name == "hash") {
+ if (ce->args.count != 2) {
+ if (ce->args.count == 0) {
+ error(ce->close, "'#hash' expects 2 argument, got 0");
+ } else {
+ error(ce->args[0], "'#hash' expects 2 argument, got %td", ce->args.count);
+ }
+ return false;
+ }
+
+ Ast *arg0 = ce->args[0];
+ Ast *arg1 = ce->args[1];
+ Operand o = {};
+ check_expr(c, &o, arg0);
+ if (o.mode != Addressing_Constant) {
+ error(arg0, "'#hash' expected a constant string argument");
+ return false;
+ }
+
+ if (!is_type_string(o.type)) {
+ gbString str = type_to_string(o.type);
+ error(arg0, "'#hash' expected a constant string, got %s", str);
+ gb_string_free(str);
+ return false;
+ }
+
+ Operand o_hash = {};
+ check_expr(c, &o_hash, arg1);
+ if (o_hash.mode != Addressing_Constant) {
+ error(arg1, "'#hash' expected a constant string argument");
+ return false;
+ }
+
+ if (!is_type_string(o_hash.type)) {
+ gbString str = type_to_string(o_hash.type);
+ error(arg1, "'#hash' expected a constant string, got %s", str);
+ gb_string_free(str);
+ return false;
+ }
+ gbAllocator a = heap_allocator();
+
+ GB_ASSERT(o.value.kind == ExactValue_String);
+ GB_ASSERT(o_hash.value.kind == ExactValue_String);
+
+ String original_string = o.value.value_string;
+ String hash_kind = o_hash.value.value_string;
+
+ // TODO: Cache hash values based off of string constant and hash kind?
+ u64 hash_value = 0;
+ if (check_hash_kind(c, call, hash_kind, original_string.text, original_string.len, &hash_value)) {
+ operand->type = t_untyped_integer;
+ operand->mode = Addressing_Constant;
+ operand->value = exact_value_u64(hash_value);
+ return true;
+ }
+ return false;
} else if (name == "assert") {
if (ce->args.count != 1 && ce->args.count != 2) {
error(call, "'#assert' expects either 1 or 2 arguments, got %td", ce->args.count);
@@ -1726,6 +1843,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
case BuiltinProc_objc_register_class:
case BuiltinProc_atomic_type_is_lock_free:
case BuiltinProc_has_target_feature:
+ case BuiltinProc_procedure_of:
// NOTE(bill): The first arg may be a Type, this will be checked case by case
break;
@@ -2302,6 +2420,9 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
if (arg_count > max_count) {
error(call, "Too many 'swizzle' indices, %td > %td", arg_count, max_count);
return false;
+ } else if (arg_count < 2) {
+ error(call, "Not enough 'swizzle' indices, %td < 2", arg_count);
+ return false;
}
if (type->kind == Type_Array) {
@@ -5795,15 +5916,9 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
if (operand->mode != Addressing_Type) {
error(operand->expr, "Expected a record type for '%.*s'", LIT(builtin_name));
} else {
- Type *bt = base_type(operand->type);
- if (bt->kind == Type_Struct) {
- if (bt->Struct.polymorphic_params != nullptr) {
- operand->value = exact_value_i64(bt->Struct.polymorphic_params->Tuple.variables.count);
- }
- } else if (bt->kind == Type_Union) {
- if (bt->Union.polymorphic_params != nullptr) {
- operand->value = exact_value_i64(bt->Union.polymorphic_params->Tuple.variables.count);
- }
+ TypeTuple *tuple = get_record_polymorphic_params(operand->type);
+ if (tuple) {
+ operand->value = exact_value_i64(tuple->variables.count);
} else {
error(operand->expr, "Expected a record type for '%.*s'", LIT(builtin_name));
}
@@ -5835,20 +5950,11 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
Entity *param = nullptr;
i64 count = 0;
- Type *bt = base_type(operand->type);
- if (bt->kind == Type_Struct) {
- if (bt->Struct.polymorphic_params != nullptr) {
- count = bt->Struct.polymorphic_params->Tuple.variables.count;
- if (index < count) {
- param = bt->Struct.polymorphic_params->Tuple.variables[cast(isize)index];
- }
- }
- } else if (bt->kind == Type_Union) {
- if (bt->Union.polymorphic_params != nullptr) {
- count = bt->Union.polymorphic_params->Tuple.variables.count;
- if (index < count) {
- param = bt->Union.polymorphic_params->Tuple.variables[cast(isize)index];
- }
+ TypeTuple *tuple = get_record_polymorphic_params(operand->type);
+ if (tuple) {
+ count = tuple->variables.count;
+ if (index < count) {
+ param = tuple->variables[cast(isize)index];
}
} else {
error(operand->expr, "Expected a specialized polymorphic record type for '%.*s'", LIT(builtin_name));
@@ -6052,6 +6158,51 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
break;
}
+ case BuiltinProc_procedure_of:
+ {
+ Ast *call_expr = unparen_expr(ce->args[0]);
+ Operand op = {};
+ check_expr_base(c, &op, ce->args[0], nullptr);
+ if (op.mode != Addressing_Value && !(call_expr && call_expr->kind == Ast_CallExpr)) {
+ error(ce->args[0], "Expected a call expression for '%.*s'", LIT(builtin_name));
+ return false;
+ }
+
+ Ast *proc = call_expr->CallExpr.proc;
+ Entity *e = entity_of_node(proc);
+
+ if (e == nullptr) {
+ error(ce->args[0], "Invalid procedure value, expected a regular/specialized procedure");
+ return false;
+ }
+
+ TypeAndValue tav = proc->tav;
+
+
+ operand->type = e->type;
+ operand->mode = Addressing_Value;
+ operand->value = tav.value;
+ operand->builtin_id = BuiltinProc_Invalid;
+ operand->proc_group = nullptr;
+
+ if (tav.mode == Addressing_Builtin) {
+ operand->mode = tav.mode;
+ operand->builtin_id = cast(BuiltinProcId)e->Builtin.id;
+ break;
+ }
+
+ if (!is_type_proc(e->type)) {
+ gbString s = type_to_string(e->type);
+ error(ce->args[0], "Expected a procedure value, got '%s'", s);
+ gb_string_free(s);
+ return false;
+ }
+
+
+ ce->entity_procedure_of = e;
+ break;
+ }
+
case BuiltinProc_constant_utf16_cstring:
{
String value = {};
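
The new `procedure_of` builtin resolves the procedure entity behind a call expression without evaluating the call, which is particularly useful for binding a specific instantiation of a polymorphic procedure; a hedged sketch assuming it is exposed through `base:intrinsics` (the `add` procedure is hypothetical):

package example

import "base:intrinsics"

add :: proc(a, b: $T) -> T {
	return a + b
}

// Bind the concrete instantiation that the call `add(1, 2)` would use;
// the call itself is not executed.
add_int :: intrinsics.procedure_of(add(1, 2))

main :: proc() {
	assert(add_int(3, 4) == 7)
}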
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 1ec366ae7..02445cbc6 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -88,11 +88,17 @@ gb_internal Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *o
e->type = t_invalid;
return nullptr;
} else if (is_type_polymorphic(t)) {
- gbString str = type_to_string(t);
- defer (gb_string_free(str));
- error(e->token, "Invalid use of a polymorphic type '%s' in %.*s", str, LIT(context_name));
- e->type = t_invalid;
- return nullptr;
+ Entity *e = entity_of_node(operand->expr);
+ if (e == nullptr) {
+ return nullptr;
+ }
+ if (e->state.load() != EntityState_Resolved) {
+ gbString str = type_to_string(t);
+ defer (gb_string_free(str));
+ error(e->token, "Invalid use of a polymorphic type '%s' in %.*s", str, LIT(context_name));
+ e->type = t_invalid;
+ return nullptr;
+ }
} else if (is_type_empty_union(t)) {
gbString str = type_to_string(t);
defer (gb_string_free(str));
@@ -479,6 +485,9 @@ gb_internal void check_const_decl(CheckerContext *ctx, Entity *e, Ast *type_expr
entity = check_selector(ctx, &operand, init, e->type);
} else {
check_expr_or_type(ctx, &operand, init, e->type);
+ if (init->kind == Ast_CallExpr) {
+ entity = init->CallExpr.entity_procedure_of;
+ }
}
switch (operand.mode) {
@@ -526,6 +535,7 @@ gb_internal void check_const_decl(CheckerContext *ctx, Entity *e, Ast *type_expr
return;
}
+
if (entity != nullptr) {
if (e->type != nullptr) {
Operand x = {};
@@ -724,9 +734,10 @@ gb_internal Entity *init_entity_foreign_library(CheckerContext *ctx, Entity *e)
return nullptr;
}
-gb_internal String handle_link_name(CheckerContext *ctx, Token token, String link_name, String link_prefix) {
+gb_internal String handle_link_name(CheckerContext *ctx, Token token, String link_name, String link_prefix, String link_suffix) {
+ String original_link_name = link_name;
if (link_prefix.len > 0) {
- if (link_name.len > 0) {
+ if (original_link_name.len > 0) {
error(token, "'link_name' and 'link_prefix' cannot be used together");
} else {
isize len = link_prefix.len + token.string.len;
@@ -738,9 +749,28 @@ gb_internal String handle_link_name(CheckerContext *ctx, Token token, String lin
link_name = make_string(name, len);
}
}
+
+ if (link_suffix.len > 0) {
+ if (original_link_name.len > 0) {
+ error(token, "'link_name' and 'link_suffix' cannot be used together");
+ } else {
+ String new_name = token.string;
+ if (link_name != original_link_name) {
+ new_name = link_name;
+ }
+
+ isize len = new_name.len + link_suffix.len;
+ u8 *name = gb_alloc_array(permanent_allocator(), u8, len+1);
+ gb_memmove(name, &new_name[0], new_name.len);
+ gb_memmove(name+new_name.len, &link_suffix[0], link_suffix.len);
+ name[len] = 0;
+ link_name = make_string(name, len);
+ }
+ }
return link_name;
}
+
gb_internal void check_objc_methods(CheckerContext *ctx, Entity *e, AttributeContext const &ac) {
if (!(ac.objc_name.len || ac.objc_is_class_method || ac.objc_type)) {
return;
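// A minimal Odin-side sketch of the new link_suffix handling above; like link_prefix it is appended
// to the declared name when forming the foreign symbol and cannot be combined with link_name
// (the library and symbol names below are hypothetical):

foreign import mathlib "system:m_ext"

@(default_calling_convention="c", link_suffix="_v2")
foreign mathlib {
	// resolves to the foreign symbol "fast_sqrt_v2"
	fast_sqrt :: proc(x: f64) -> f64 ---
}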
@@ -862,7 +892,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
}
TypeProc *pt = &proc_type->Proc;
- AttributeContext ac = make_attribute_context(e->Procedure.link_prefix);
+ AttributeContext ac = make_attribute_context(e->Procedure.link_prefix, e->Procedure.link_suffix);
if (d != nullptr) {
check_decl_attributes(ctx, d->attributes, proc_decl_attribute, &ac);
@@ -1015,7 +1045,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
e->deprecated_message = ac.deprecated_message;
e->warning_message = ac.warning_message;
- ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix);
+ ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix, ac.link_suffix);
if (ac.has_disabled_proc) {
if (ac.disabled_proc) {
e->flags |= EntityFlag_Disabled;
@@ -1115,7 +1145,14 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
}
if (ac.link_name.len > 0) {
- e->Procedure.link_name = ac.link_name;
+ String ln = ac.link_name;
+ e->Procedure.link_name = ln;
+ if (ln == "memcpy" ||
+ ln == "memmove" ||
+ ln == "mem_copy" ||
+ ln == "mem_copy_non_overlapping") {
+ e->Procedure.is_memcpy_like = true;
+ }
}
if (ac.deferred_procedure.entity != nullptr) {
@@ -1223,7 +1260,7 @@ gb_internal void check_global_variable_decl(CheckerContext *ctx, Entity *&e, Ast
}
e->flags |= EntityFlag_Visited;
- AttributeContext ac = make_attribute_context(e->Variable.link_prefix);
+ AttributeContext ac = make_attribute_context(e->Variable.link_prefix, e->Variable.link_suffix);
ac.init_expr_list_count = init_expr != nullptr ? 1 : 0;
DeclInfo *decl = decl_info_of_entity(e);
@@ -1244,7 +1281,10 @@ gb_internal void check_global_variable_decl(CheckerContext *ctx, Entity *&e, Ast
if (ac.is_static) {
error(e->token, "@(static) is not supported for global variables, nor required");
}
- ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix);
+ if (ac.rodata) {
+ e->Variable.is_rodata = true;
+ }
+ ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix, ac.link_suffix);
if (is_arch_wasm() && e->Variable.thread_local_model.len != 0) {
e->Variable.thread_local_model.len = 0;
@@ -1330,6 +1370,9 @@ gb_internal void check_global_variable_decl(CheckerContext *ctx, Entity *&e, Ast
Operand o = {};
check_expr_with_type_hint(ctx, &o, init_expr, e->type);
check_init_variable(ctx, e, &o, str_lit("variable declaration"));
+ if (e->Variable.is_rodata && o.mode != Addressing_Constant) {
+ error(o.expr, "Variables declared with @(rodata) must have constant initialization");
+ }
check_rtti_type_disallowed(e->token, e->type, "A variable declaration is using a type, %s, which has been disallowed");
}
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index 8672941c1..359b30276 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -125,6 +125,8 @@ gb_internal Entity *find_polymorphic_record_entity(GenTypesData *found_gen_types
gb_internal bool complete_soa_type(Checker *checker, Type *t, bool wait_to_finish);
+gb_internal bool check_is_castable_to(CheckerContext *c, Operand *operand, Type *y);
+
enum LoadDirectiveResult {
LoadDirective_Success = 0,
LoadDirective_Error = 1,
@@ -279,8 +281,20 @@ gb_internal void error_operand_not_expression(Operand *o) {
gb_internal void error_operand_no_value(Operand *o) {
if (o->mode == Addressing_NoValue) {
- gbString err = expr_to_string(o->expr);
Ast *x = unparen_expr(o->expr);
+
+ if (x->kind == Ast_CallExpr) {
+ Ast *p = unparen_expr(x->CallExpr.proc);
+ if (p->kind == Ast_BasicDirective) {
+ String tag = p->BasicDirective.name.string;
+ if (tag == "panic" ||
+ tag == "assert") {
+ return;
+ }
+ }
+ }
+
+ gbString err = expr_to_string(o->expr);
if (x->kind == Ast_CallExpr) {
error(o->expr, "'%s' call does not return a value and cannot be used as a value", err);
} else {
@@ -564,6 +578,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
d->defer_use_checked = false;
Entity *entity = alloc_entity_procedure(nullptr, token, final_proc_type, tags);
+ entity->state.store(EntityState_Resolved);
entity->identifier = ident;
add_entity_and_decl_info(&nctx, ident, entity, d);
@@ -2252,6 +2267,17 @@ gb_internal bool check_representable_as_constant(CheckerContext *c, ExactValue i
gb_internal bool check_integer_exceed_suggestion(CheckerContext *c, Operand *o, Type *type, i64 max_bit_size=0) {
if (is_type_integer(type) && o->value.kind == ExactValue_Integer) {
gbString b = type_to_string(type);
+ defer (gb_string_free(b));
+
+ if (is_type_enum(o->type)) {
+ if (check_is_castable_to(c, o, type)) {
+ gbString ot = type_to_string(o->type);
+ error_line("\tSuggestion: Try casting the '%s' expression to '%s'", ot, b);
+ gb_string_free(ot);
+ }
+ return true;
+ }
+
i64 sz = type_size_of(type);
i64 bit_size = 8*sz;
@@ -2301,7 +2327,6 @@ gb_internal bool check_integer_exceed_suggestion(CheckerContext *c, Operand *o,
}
}
- gb_string_free(b);
return true;
}
@@ -2525,7 +2550,7 @@ gb_internal void check_unary_expr(CheckerContext *c, Operand *o, Token op, Ast *
error_line("\tSuggestion: Did you want to pass the iterable value to the for statement by pointer to get addressable semantics?\n");
}
- if (is_type_map(parent_type)) {
+ if (parent_type != nullptr && is_type_map(parent_type)) {
error_line("\t Prefer doing 'for key, &%.*s in ...'\n", LIT(e->token.string));
} else {
error_line("\t Prefer doing 'for &%.*s in ...'\n", LIT(e->token.string));
@@ -3326,6 +3351,9 @@ gb_internal void check_cast(CheckerContext *c, Operand *x, Type *type) {
if (is_type_untyped(x->type)) {
Type *final_type = type;
if (is_const_expr && !is_type_constant_type(type)) {
+ if (is_type_union(type)) {
+ convert_to_typed(c, x, type);
+ }
final_type = default_type(x->type);
}
update_untyped_expr_type(c, x->expr, final_type, true);
@@ -3536,6 +3564,9 @@ gb_internal void check_binary_matrix(CheckerContext *c, Token const &op, Operand
x->mode = Addressing_Value;
if (are_types_identical(xt, yt)) {
+ if (are_types_identical(x->type, y->type)) {
+ return;
+ }
if (!is_type_named(x->type) && is_type_named(y->type)) {
// prefer the named type
x->type = y->type;
@@ -4274,7 +4305,8 @@ gb_internal void convert_to_typed(CheckerContext *c, Operand *operand, Type *tar
} else {
switch (operand->type->Basic.kind) {
case Basic_UntypedBool:
- if (!is_type_boolean(target_type)) {
+ if (!is_type_boolean(target_type) &&
+ !is_type_integer(target_type)) {
operand->mode = Addressing_Invalid;
convert_untyped_error(c, operand, target_type);
return;
@@ -7319,14 +7351,9 @@ gb_internal CallArgumentError check_polymorphic_record_type(CheckerContext *c, O
gbString s = gb_string_make_reserve(heap_allocator(), e->token.string.len+3);
s = gb_string_append_fmt(s, "%.*s(", LIT(e->token.string));
- Type *params = nullptr;
- switch (bt->kind) {
- case Type_Struct: params = bt->Struct.polymorphic_params; break;
- case Type_Union: params = bt->Union.polymorphic_params; break;
- }
-
- if (params != nullptr) for_array(i, params->Tuple.variables) {
- Entity *v = params->Tuple.variables[i];
+ TypeTuple *tuple = get_record_polymorphic_params(e->type);
+ if (tuple != nullptr) for_array(i, tuple->variables) {
+ Entity *v = tuple->variables[i];
String name = v->token.string;
if (i > 0) {
s = gb_string_append_fmt(s, ", ");
@@ -7408,13 +7435,15 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
String name = bd->name.string;
if (
name == "location" ||
+ name == "exists" ||
name == "assert" ||
name == "panic" ||
name == "defined" ||
name == "config" ||
name == "load" ||
name == "load_directory" ||
- name == "load_hash"
+ name == "load_hash" ||
+ name == "hash"
) {
operand->mode = Addressing_Builtin;
operand->builtin_id = BuiltinProc_DIRECTIVE;
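// A hedged Odin-side sketch of the new #exists directive registered above, assuming it evaluates at
// compile time to a boolean constant saying whether the given file path exists (path is hypothetical):

when #exists("assets/config.ini") {
	CONFIG :: #load("assets/config.ini", string)
} else {
	CONFIG :: ""
}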
@@ -7647,7 +7676,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
if (decl->proc_lit) {
ast_node(pl, ProcLit, decl->proc_lit);
if (pl->inlining == ProcInlining_no_inline) {
- error(call, "'#force_inline' cannot be applied to a procedure that has be marked as '#force_no_inline'");
+ error(call, "'#force_inline' cannot be applied to a procedure that has been marked as '#force_no_inline'");
}
}
}
@@ -8328,6 +8357,7 @@ gb_internal ExprKind check_basic_directive_expr(CheckerContext *c, Operand *o, A
name == "assert" ||
name == "defined" ||
name == "config" ||
+ name == "exists" ||
name == "load" ||
name == "load_hash" ||
name == "load_directory" ||
@@ -8851,6 +8881,10 @@ gb_internal void check_compound_literal_field_values(CheckerContext *c, Slice<As
case Type_Array:
ft = bt->Array.elem;
break;
+ case Type_BitField:
+ is_constant = false;
+ ft = bt->BitField.fields[index]->type;
+ break;
default:
GB_PANIC("invalid type: %s", type_to_string(ft));
break;
@@ -8877,6 +8911,9 @@ gb_internal void check_compound_literal_field_values(CheckerContext *c, Slice<As
case Type_Array:
nested_ft = bt->Array.elem;
break;
+ case Type_BitField:
+ nested_ft = bt->BitField.fields[index]->type;
+ break;
default:
GB_PANIC("invalid type %s", type_to_string(nested_ft));
break;
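// A minimal Odin sketch of what the BitField cases above accept: named field values (including nested
// compound literals) inside a bit_field compound literal (the type below is hypothetical):

Header :: bit_field u16 {
	version: u8  | 4,
	length:  u16 | 12,
}

h := Header{version = 1, length = 300}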
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index 866cdb5a1..f2e3b0242 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -501,6 +501,9 @@ gb_internal Type *check_assignment_variable(CheckerContext *ctx, Operand *lhs, O
return nullptr;
case Addressing_Variable:
+ if (e && e->kind == Entity_Variable && e->Variable.is_rodata) {
+ error(lhs->expr, "Assignment to variable '%.*s' marked as @(rodata) is not allowed", LIT(e->token.string));
+ }
break;
case Addressing_MapIndex: {
@@ -1252,8 +1255,6 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
error_line("\t%.*s\n", LIT(f->token.string));
}
}
- error_line("\n");
-
error_line("\tSuggestion: Was '#partial switch' wanted?\n");
}
}
@@ -2020,7 +2021,7 @@ gb_internal void check_value_decl_stmt(CheckerContext *ctx, Ast *node, u32 mod_f
// TODO NOTE(bill): This technically checks things multiple times
- AttributeContext ac = make_attribute_context(ctx->foreign_context.link_prefix);
+ AttributeContext ac = make_attribute_context(ctx->foreign_context.link_prefix, ctx->foreign_context.link_suffix);
check_decl_attributes(ctx, vd->attributes, var_decl_attribute, &ac);
for (isize i = 0; i < entity_count; i++) {
@@ -2037,7 +2038,7 @@ gb_internal void check_value_decl_stmt(CheckerContext *ctx, Ast *node, u32 mod_f
e->type = init_type;
e->state = EntityState_Resolved;
}
- ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix);
+ ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix, ac.link_suffix);
if (ac.link_name.len > 0) {
e->Variable.link_name = ac.link_name;
@@ -2055,6 +2056,13 @@ gb_internal void check_value_decl_stmt(CheckerContext *ctx, Ast *node, u32 mod_f
}
}
}
+ if (ac.rodata) {
+ if (ac.is_static) {
+ e->Variable.is_rodata = true;
+ } else {
+ error(e->token, "Only global or @(static) variables can have @(rodata) applied");
+ }
+ }
if (ac.thread_local_model != "") {
String name = e->token.string;
if (name == "_") {
@@ -2216,8 +2224,16 @@ gb_internal void check_expr_stmt(CheckerContext *ctx, Ast *node) {
}
if (do_require) {
gbString expr_str = expr_to_string(ce->proc);
+ defer (gb_string_free(expr_str));
+ if (builtin_id) {
+ String real_name = builtin_procs[builtin_id].name;
+ if (real_name != make_string(cast(u8 const *)expr_str, gb_string_length(expr_str))) {
+ error(node, "'%s' ('%.*s.%.*s') requires that its results must be handled", expr_str,
+ LIT(builtin_proc_pkg_name[builtin_procs[builtin_id].pkg]), LIT(real_name));
+ return;
+ }
+ }
error(node, "'%s' requires that its results must be handled", expr_str);
- gb_string_free(expr_str);
}
return;
} else if (expr && expr->kind == Ast_SelectorCallExpr) {
@@ -2493,6 +2509,10 @@ gb_internal void check_return_stmt(CheckerContext *ctx, Ast *node) {
unsafe_return_error(o, "the address of an indexed variable", f->type);
}
}
+ } else if (o.mode == Addressing_Constant && is_type_slice(o.type)) {
+ ERROR_BLOCK();
+ unsafe_return_error(o, "a compound literal of a slice");
+ error_line("\tNote: A constant slice value will use the memory of the current stack frame\n");
}
}
diff --git a/src/check_type.cpp b/src/check_type.cpp
index 7ed657bee..c56c8a739 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -564,19 +564,7 @@ gb_internal bool check_record_poly_operand_specialization(CheckerContext *ctx, T
gb_internal Entity *find_polymorphic_record_entity(GenTypesData *found_gen_types, isize param_count, Array<Operand> const &ordered_operands) {
for (Entity *e : found_gen_types->types) {
Type *t = base_type(e->type);
- TypeTuple *tuple = nullptr;
- switch (t->kind) {
- case Type_Struct:
- if (t->Struct.polymorphic_params) {
- tuple = &t->Struct.polymorphic_params->Tuple;
- }
- break;
- case Type_Union:
- if (t->Union.polymorphic_params) {
- tuple = &t->Union.polymorphic_params->Tuple;
- }
- break;
- }
+ TypeTuple *tuple = get_record_polymorphic_params(t);
GB_ASSERT_MSG(tuple != nullptr, "%s :: %s", type_to_string(e->type), type_to_string(t));
GB_ASSERT(param_count == tuple->variables.count);
@@ -663,6 +651,8 @@ gb_internal void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *
&struct_type->Struct.is_polymorphic,
node, poly_operands
);
+ wait_signal_set(&struct_type->Struct.polymorphic_wait_signal);
+
struct_type->Struct.is_poly_specialized = check_record_poly_operand_specialization(ctx, struct_type, poly_operands, &struct_type->Struct.is_polymorphic);
if (original_type_for_poly) {
GB_ASSERT(named_type != nullptr);
@@ -712,6 +702,8 @@ gb_internal void check_union_type(CheckerContext *ctx, Type *union_type, Ast *no
&union_type->Union.is_polymorphic,
node, poly_operands
);
+ wait_signal_set(&union_type->Union.polymorphic_wait_signal);
+
union_type->Union.is_poly_specialized = check_record_poly_operand_specialization(ctx, union_type, poly_operands, &union_type->Union.is_polymorphic);
if (original_type_for_poly) {
GB_ASSERT(named_type != nullptr);
@@ -784,7 +776,7 @@ gb_internal void check_union_type(CheckerContext *ctx, Type *union_type, Ast *no
}
}
if (variants.count < 2) {
- error(ut->align, "A union with #no_nil must have at least 2 variants");
+ error(node, "A union with #no_nil must have at least 2 variants");
}
break;
}
@@ -1457,8 +1449,8 @@ gb_internal bool check_type_specialization_to(CheckerContext *ctx, Type *special
s->Struct.polymorphic_params != nullptr &&
t->Struct.polymorphic_params != nullptr) {
- TypeTuple *s_tuple = &s->Struct.polymorphic_params->Tuple;
- TypeTuple *t_tuple = &t->Struct.polymorphic_params->Tuple;
+ TypeTuple *s_tuple = get_record_polymorphic_params(s);
+ TypeTuple *t_tuple = get_record_polymorphic_params(t);
GB_ASSERT(t_tuple->variables.count == s_tuple->variables.count);
for_array(i, s_tuple->variables) {
Entity *s_e = s_tuple->variables[i];
@@ -1510,8 +1502,8 @@ gb_internal bool check_type_specialization_to(CheckerContext *ctx, Type *special
s->Union.polymorphic_params != nullptr &&
t->Union.polymorphic_params != nullptr) {
- TypeTuple *s_tuple = &s->Union.polymorphic_params->Tuple;
- TypeTuple *t_tuple = &t->Union.polymorphic_params->Tuple;
+ TypeTuple *s_tuple = get_record_polymorphic_params(s);
+ TypeTuple *t_tuple = get_record_polymorphic_params(t);
GB_ASSERT(t_tuple->variables.count == s_tuple->variables.count);
for_array(i, s_tuple->variables) {
Entity *s_e = s_tuple->variables[i];
diff --git a/src/checker.cpp b/src/checker.cpp
index 9d44c34dc..852fb89bb 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -3,6 +3,8 @@
#include "entity.cpp"
#include "types.cpp"
+String get_final_microarchitecture();
+
gb_internal void check_expr(CheckerContext *c, Operand *operand, Ast *expression);
gb_internal void check_expr_or_type(CheckerContext *c, Operand *operand, Ast *expression, Type *type_hint=nullptr);
gb_internal void add_comparison_procedures_for_fields(CheckerContext *c, Type *t);
@@ -1016,6 +1018,7 @@ gb_internal void init_universal(void) {
{"NetBSD", TargetOs_netbsd},
{"WASI", TargetOs_wasi},
{"JS", TargetOs_js},
+ {"Orca", TargetOs_orca},
{"Freestanding", TargetOs_freestanding},
};
@@ -1039,6 +1042,8 @@ gb_internal void init_universal(void) {
add_global_enum_constant(fields, "ODIN_ARCH", bc->metrics.arch);
add_global_string_constant("ODIN_ARCH_STRING", target_arch_names[bc->metrics.arch]);
}
+
+ add_global_string_constant("ODIN_MICROARCH_STRING", get_final_microarchitecture());
{
GlobalEnumValue values[BuildMode_COUNT] = {
@@ -1130,6 +1135,17 @@ gb_internal void init_universal(void) {
add_global_constant("ODIN_COMPILE_TIMESTAMP", t_untyped_integer, exact_value_i64(odin_compile_timestamp()));
{
+ String version = {};
+
+ #ifdef GIT_SHA
+ version.text = cast(u8 *)GIT_SHA;
+ version.len = gb_strlen(GIT_SHA);
+ #endif
+
+ add_global_string_constant("ODIN_VERSION_HASH", version);
+ }
+
+ {
bool f16_supported = lb_use_new_pass_system();
if (is_arch_wasm()) {
f16_supported = false;
@@ -1166,6 +1182,18 @@ gb_internal void init_universal(void) {
add_global_constant("ODIN_SANITIZER_FLAGS", named_type, exact_value_u64(bc->sanitizer_flags));
}
+ {
+ GlobalEnumValue values[5] = {
+ {"None", -1},
+ {"Minimal", 0},
+ {"Size", 1},
+ {"Speed", 2},
+ {"Aggressive", 3},
+ };
+
+ auto fields = add_global_enum_type(str_lit("Odin_Optimization_Mode"), values, gb_count_of(values));
+ add_global_enum_constant(fields, "ODIN_OPTIMIZATION_MODE", bc->optimization_level);
+ }
// Builtin Procedures
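// A minimal Odin sketch of the new global constants declared above (printed values are illustrative):

package example

import "core:fmt"

main :: proc() {
	fmt.println(ODIN_MICROARCH_STRING)   // microarchitecture the compiler targeted, e.g. "x86-64-v2"
	fmt.println(ODIN_VERSION_HASH)       // git commit hash of the compiler build; empty if not embedded
	fmt.println(ODIN_OPTIMIZATION_MODE)  // Odin_Optimization_Mode: .None, .Minimal, .Size, .Speed, or .Aggressive
}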
@@ -1283,6 +1311,7 @@ gb_internal void init_checker_info(CheckerInfo *i) {
mpsc_init(&i->definition_queue, a); //); // 1<<20);
mpsc_init(&i->required_global_variable_queue, a); // 1<<10);
mpsc_init(&i->required_foreign_imports_through_force_queue, a); // 1<<10);
+ mpsc_init(&i->foreign_imports_to_check_fullpaths, a); // 1<<10);
mpsc_init(&i->intrinsics_entry_point_usage, a); // 1<<10); // just waste some memory here, even if it probably never used
string_map_init(&i->load_directory_cache);
@@ -1307,6 +1336,7 @@ gb_internal void destroy_checker_info(CheckerInfo *i) {
mpsc_destroy(&i->definition_queue);
mpsc_destroy(&i->required_global_variable_queue);
mpsc_destroy(&i->required_foreign_imports_through_force_queue);
+ mpsc_destroy(&i->foreign_imports_to_check_fullpaths);
map_destroy(&i->objc_msgSend_types);
string_map_destroy(&i->load_file_cache);
@@ -1449,6 +1479,10 @@ gb_internal Entity *entity_of_node(Ast *expr) {
case_ast_node(cc, CaseClause, expr);
return cc->implicit_entity;
case_end;
+
+ case_ast_node(ce, CallExpr, expr);
+ return ce->entity_procedure_of;
+ case_end;
}
return nullptr;
}
@@ -3125,6 +3159,18 @@ gb_internal DECL_ATTRIBUTE_PROC(foreign_block_decl_attribute) {
error(elem, "Expected a string value for '%.*s'", LIT(name));
}
return true;
+ } else if (name == "link_suffix") {
+ if (ev.kind == ExactValue_String) {
+ String link_suffix = ev.value_string;
+ if (!is_foreign_name_valid(link_suffix)) {
+ error(elem, "Invalid link suffix: '%.*s'", LIT(link_suffix));
+ } else {
+ c->foreign_context.link_suffix = link_suffix;
+ }
+ } else {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
} else if (name == "private") {
EntityVisiblityKind kind = EntityVisiblity_PrivateToPackage;
if (ev.kind == ExactValue_Invalid) {
@@ -3419,6 +3465,18 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
error(elem, "Expected a string value for '%.*s'", LIT(name));
}
return true;
+ } else if (name == "link_suffix") {
+ ExactValue ev = check_decl_attribute_value(c, value);
+
+ if (ev.kind == ExactValue_String) {
+ ac->link_suffix = ev.value_string;
+ if (!is_foreign_name_valid(ac->link_suffix)) {
+ error(elem, "Invalid link suffix: %.*s", LIT(ac->link_suffix));
+ }
+ } else {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
} else if (name == "deprecated") {
ExactValue ev = check_decl_attribute_value(c, value);
@@ -3601,6 +3659,12 @@ gb_internal DECL_ATTRIBUTE_PROC(var_decl_attribute) {
}
ac->is_static = true;
return true;
+ } else if (name == "rodata") {
+ if (value != nullptr) {
+ error(elem, "'rodata' does not have any parameters");
+ }
+ ac->rodata = true;
+ return true;
} else if (name == "thread_local") {
ExactValue ev = check_decl_attribute_value(c, value);
if (ac->init_expr_list_count > 0) {
@@ -3700,6 +3764,17 @@ gb_internal DECL_ATTRIBUTE_PROC(var_decl_attribute) {
error(elem, "Expected a string value for '%.*s'", LIT(name));
}
return true;
+ } else if (name == "link_suffix") {
+ ExactValue ev = check_decl_attribute_value(c, value);
+ if (ev.kind == ExactValue_String) {
+ ac->link_suffix = ev.value_string;
+ if (!is_foreign_name_valid(ac->link_suffix)) {
+ error(elem, "Invalid link suffix: %.*s", LIT(ac->link_suffix));
+ }
+ } else {
+ error(elem, "Expected a string value for '%.*s'", LIT(name));
+ }
+ return true;
} else if (name == "link_section") {
ExactValue ev = check_decl_attribute_value(c, value);
if (ev.kind == ExactValue_String) {
@@ -3731,6 +3806,7 @@ gb_internal DECL_ATTRIBUTE_PROC(const_decl_attribute) {
name == "linkage" ||
name == "link_name" ||
name == "link_prefix" ||
+ name == "link_suffix" ||
false) {
error(elem, "@(%.*s) is not supported for compile time constant value declarations", LIT(name));
return true;
@@ -3773,8 +3849,10 @@ gb_internal void check_decl_attributes(CheckerContext *c, Array<Ast *> const &at
if (attributes.count == 0) return;
String original_link_prefix = {};
+ String original_link_suffix = {};
if (ac) {
original_link_prefix = ac->link_prefix;
+ original_link_suffix = ac->link_suffix;
}
StringSet set = {};
@@ -3849,6 +3927,12 @@ gb_internal void check_decl_attributes(CheckerContext *c, Array<Ast *> const &at
ac->link_prefix.len = 0;
}
}
+ if (ac->link_suffix.text == original_link_suffix.text) {
+ if (ac->link_name.len > 0) {
+ ac->link_suffix.text = nullptr;
+ ac->link_suffix.len = 0;
+ }
+ }
}
}
@@ -4143,6 +4227,7 @@ gb_internal void check_collect_value_decl(CheckerContext *c, Ast *decl) {
e->Variable.foreign_library_ident = fl;
e->Variable.link_prefix = c->foreign_context.link_prefix;
+ e->Variable.link_suffix = c->foreign_context.link_suffix;
}
Ast *init_expr = value;
@@ -4217,6 +4302,7 @@ gb_internal void check_collect_value_decl(CheckerContext *c, Ast *decl) {
}
}
e->Procedure.link_prefix = c->foreign_context.link_prefix;
+ e->Procedure.link_suffix = c->foreign_context.link_suffix;
GB_ASSERT(cc != ProcCC_Invalid);
pl->type->ProcType.calling_convention = cc;
@@ -4874,6 +4960,83 @@ gb_internal DECL_ATTRIBUTE_PROC(foreign_import_decl_attribute) {
return false;
}
+gb_internal void check_foreign_import_fullpaths(Checker *c) {
+ CheckerContext ctx = make_checker_context(c);
+
+ UntypedExprInfoMap untyped = {};
+ defer (map_destroy(&untyped));
+
+ for (Entity *e = nullptr; mpsc_dequeue(&c->info.foreign_imports_to_check_fullpaths, &e); /**/) {
+ GB_ASSERT(e != nullptr);
+ GB_ASSERT(e->kind == Entity_LibraryName);
+ Ast *decl = e->LibraryName.decl;
+ ast_node(fl, ForeignImportDecl, decl);
+
+ AstFile *f = decl->file();
+
+ reset_checker_context(&ctx, f, &untyped);
+ ctx.collect_delayed_decls = false;
+
+ GB_ASSERT(ctx.scope == e->scope);
+
+ if (fl->fullpaths.count == 0) {
+ String base_dir = dir_from_path(decl->file()->fullpath);
+
+ auto fullpaths = array_make<String>(permanent_allocator(), 0, fl->filepaths.count);
+
+ for (Ast *fp_node : fl->filepaths) {
+ Operand op = {};
+ check_expr(&ctx, &op, fp_node);
+ if (op.mode != Addressing_Constant && op.value.kind != ExactValue_String) {
+ gbString s = expr_to_string(op.expr);
+ error(fp_node, "Expected a constant string value, got '%s'", s);
+ gb_string_free(s);
+ continue;
+ }
+ if (!is_type_string(op.type)) {
+ gbString s = type_to_string(op.type);
+ error(fp_node, "Expected a constant string value, got value of type '%s'", s);
+ gb_string_free(s);
+ continue;
+ }
+
+ String file_str = op.value.value_string;
+ file_str = string_trim_whitespace(file_str);
+
+ String fullpath = file_str;
+ if (allow_check_foreign_filepath()) {
+ String foreign_path = {};
+ bool ok = determine_path_from_string(nullptr, decl, base_dir, file_str, &foreign_path, /*use error not syntax_error*/true);
+ if (ok) {
+ fullpath = foreign_path;
+ }
+ }
+ array_add(&fullpaths, fullpath);
+ }
+ fl->fullpaths = slice_from_array(fullpaths);
+ }
+
+ for (String const &path : fl->fullpaths) {
+ String ext = path_extension(path);
+ if (str_eq_ignore_case(ext, ".c") ||
+ str_eq_ignore_case(ext, ".cpp") ||
+ str_eq_ignore_case(ext, ".cxx") ||
+ str_eq_ignore_case(ext, ".h") ||
+ str_eq_ignore_case(ext, ".hpp") ||
+ str_eq_ignore_case(ext, ".hxx") ||
+ false
+ ) {
+ error(fl->token, "With 'foreign import', you cannot import a %.*s file/directory, you must precompile the library and link against that", LIT(ext));
+ break;
+ }
+ }
+
+ add_untyped_expressions(ctx.info, &untyped);
+
+ e->LibraryName.paths = fl->fullpaths;
+ }
+}
+
gb_internal void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
if (decl->state_flags & StateFlag_BeenHandled) return;
decl->state_flags |= StateFlag_BeenHandled;
@@ -4883,59 +5046,26 @@ gb_internal void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
Scope *parent_scope = ctx->scope;
GB_ASSERT(parent_scope->flags&ScopeFlag_File);
- GB_ASSERT(fl->fullpaths.count > 0);
- String fullpath = fl->fullpaths[0];
- String library_name = path_to_entity_name(fl->library_name.string, fullpath);
- if (is_blank_ident(library_name)) {
- error(fl->token, "File name, %.*s, cannot be as a library name as it is not a valid identifier", LIT(fl->library_name.string));
- return;
+ String library_name = fl->library_name.string;
+ if (library_name.len == 0 && fl->fullpaths.count != 0) {
+ String fullpath = fl->fullpaths[0];
+ library_name = path_to_entity_name(fl->library_name.string, fullpath);
}
-
- for (String const &path : fl->fullpaths) {
- String ext = path_extension(path);
- if (str_eq_ignore_case(ext, ".c") ||
- str_eq_ignore_case(ext, ".cpp") ||
- str_eq_ignore_case(ext, ".cxx") ||
- str_eq_ignore_case(ext, ".h") ||
- str_eq_ignore_case(ext, ".hpp") ||
- str_eq_ignore_case(ext, ".hxx") ||
- false
- ) {
- error(fl->token, "With 'foreign import', you cannot import a %.*s file directory, you must precompile the library and link against that", LIT(ext));
- break;
- }
+ if (library_name.len == 0 || is_blank_ident(library_name)) {
+ error(fl->token, "File name, '%.*s', cannot be as a library name as it is not a valid identifier", LIT(library_name));
+ return;
}
- // if (fl->collection_name != "system") {
- // char *c_str = gb_alloc_array(heap_allocator(), char, fullpath.len+1);
- // defer (gb_free(heap_allocator(), c_str));
- // gb_memmove(c_str, fullpath.text, fullpath.len);
- // c_str[fullpath.len] = '\0';
-
- // gbFile f = {};
- // gbFileError file_err = gb_file_open(&f, c_str);
- // defer (gb_file_close(&f));
-
- // switch (file_err) {
- // case gbFileError_Invalid:
- // error(decl, "Invalid file or cannot be found ('%.*s')", LIT(fullpath));
- // return;
- // case gbFileError_NotExists:
- // error(decl, "File cannot be found ('%.*s')", LIT(fullpath));
- // return;
- // }
- // }
-
GB_ASSERT(fl->library_name.pos.line != 0);
fl->library_name.string = library_name;
Entity *e = alloc_entity_library_name(parent_scope, fl->library_name, t_invalid,
fl->fullpaths, library_name);
+ e->LibraryName.decl = decl;
add_entity_flags_from_file(ctx, e, parent_scope);
add_entity(ctx, parent_scope, nullptr, e);
-
AttributeContext ac = {};
check_decl_attributes(ctx, fl->attributes, foreign_import_decl_attribute, &ac);
if (ac.require_declaration) {
@@ -4950,12 +5080,8 @@ gb_internal void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
e->LibraryName.extra_linker_flags = extra_linker_flags;
}
- if (has_asm_extension(fullpath)) {
- if (build_context.metrics.arch != TargetArch_amd64 && build_context.metrics.os != TargetOs_darwin) {
- error(decl, "Assembly files are not yet supported on this platform: %.*s_%.*s",
- LIT(target_os_names[build_context.metrics.os]), LIT(target_arch_names[build_context.metrics.arch]));
- }
- }
+ mpsc_enqueue(&ctx->info->foreign_imports_to_check_fullpaths, e);
+
}
// Returns true if a new package is present
@@ -5763,35 +5889,6 @@ gb_internal void remove_neighbouring_duplicate_entires_from_sorted_array(Array<E
gb_internal void check_test_procedures(Checker *c) {
array_sort(c->info.testing_procedures, init_procedures_cmp);
remove_neighbouring_duplicate_entires_from_sorted_array(&c->info.testing_procedures);
-
- if (build_context.test_names.entries.count == 0) {
- return;
- }
-
- AstPackage *pkg = c->info.init_package;
- Scope *s = pkg->scope;
-
- for (String const &name : build_context.test_names) {
- Entity *e = scope_lookup(s, name);
- if (e == nullptr) {
- Token tok = {};
- if (pkg->files.count != 0) {
- tok = pkg->files[0]->tokens[0];
- }
- error(tok, "Unable to find the test '%.*s' in 'package %.*s' ", LIT(name), LIT(pkg->name));
- }
- }
-
- for (isize i = 0; i < c->info.testing_procedures.count; /**/) {
- Entity *e = c->info.testing_procedures[i];
- String name = e->token.string;
- if (!string_set_exists(&build_context.test_names, name)) {
- array_ordered_remove(&c->info.testing_procedures, i);
- } else {
- i += 1;
- }
- }
-
}
@@ -6317,6 +6414,9 @@ gb_internal void check_parsed_files(Checker *c) {
TIME_SECTION("check procedure bodies");
check_procedure_bodies(c);
+ TIME_SECTION("check foreign import fullpaths");
+ check_foreign_import_fullpaths(c);
+
TIME_SECTION("add entities from procedure bodies");
check_merge_queues_into_arrays(c);
diff --git a/src/checker.hpp b/src/checker.hpp
index 2ade9312e..492a64fb6 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -51,6 +51,12 @@ enum StmtFlag {
enum BuiltinProcPkg {
BuiltinProcPkg_builtin,
BuiltinProcPkg_intrinsics,
+ BuiltinProcPkg_COUNT
+};
+
+String builtin_proc_pkg_name[BuiltinProcPkg_COUNT] = {
+ str_lit("builtin"),
+ str_lit("intrinsics"),
};
struct BuiltinProc {
@@ -112,6 +118,7 @@ enum InstrumentationFlag : i32 {
struct AttributeContext {
String link_name;
String link_prefix;
+ String link_suffix;
String link_section;
String linkage;
isize init_expr_list_count;
@@ -132,6 +139,7 @@ struct AttributeContext {
bool entry_point_only : 1;
bool instrumentation_enter : 1;
bool instrumentation_exit : 1;
+ bool rodata : 1;
u32 optimization_mode; // ProcedureOptimizationMode
i64 foreign_import_priority_index;
String extra_linker_flags;
@@ -146,9 +154,10 @@ struct AttributeContext {
String enable_target_feature; // will be enabled for the procedure only
};
-gb_internal gb_inline AttributeContext make_attribute_context(String link_prefix) {
+gb_internal gb_inline AttributeContext make_attribute_context(String link_prefix, String link_suffix) {
AttributeContext ac = {};
ac.link_prefix = link_prefix;
+ ac.link_suffix = link_suffix;
return ac;
}
@@ -302,6 +311,7 @@ struct ForeignContext {
Ast * curr_library;
ProcCallingConvention default_cc;
String link_prefix;
+ String link_suffix;
EntityVisiblityKind visibility_kind;
};
@@ -333,7 +343,16 @@ struct ObjcMsgData {
ObjcMsgKind kind;
Type *proc_type;
};
+
+enum LoadFileTier {
+ LoadFileTier_Invalid,
+ LoadFileTier_Exists,
+ LoadFileTier_Contents,
+};
+
struct LoadFileCache {
+ LoadFileTier tier;
+ bool exists;
String path;
gbFileError file_error;
String data;
@@ -414,6 +433,7 @@ struct CheckerInfo {
MPSCQueue<Entity *> entity_queue;
MPSCQueue<Entity *> required_global_variable_queue;
MPSCQueue<Entity *> required_foreign_imports_through_force_queue;
+ MPSCQueue<Entity *> foreign_imports_to_check_fullpaths;
MPSCQueue<Ast *> intrinsics_entry_point_usage;
@@ -434,6 +454,8 @@ struct CheckerInfo {
BlockingMutex load_directory_mutex;
StringMap<LoadDirectoryCache *> load_directory_cache;
PtrMap<Ast *, LoadDirectoryCache *> load_directory_map; // Key: Ast_CallExpr *
+
+
};
struct CheckerContext {
diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp
index 5f98bb7b3..35acad42f 100644
--- a/src/checker_builtin_procs.hpp
+++ b/src/checker_builtin_procs.hpp
@@ -299,6 +299,8 @@ BuiltinProc__type_simple_boolean_end,
BuiltinProc__type_end,
+ BuiltinProc_procedure_of,
+
BuiltinProc___entry_point,
BuiltinProc_objc_send,
@@ -614,6 +616,8 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT(""), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+ {STR_LIT("procedure_of"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+
{STR_LIT("__entry_point"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
{STR_LIT("objc_send"), 3, true, Expr_Expr, BuiltinProcPkg_intrinsics, false, true},
diff --git a/src/entity.cpp b/src/entity.cpp
index 8a7417006..8f55c1faf 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -223,12 +223,14 @@ struct Entity {
Ast * foreign_library_ident;
String link_name;
String link_prefix;
+ String link_suffix;
String link_section;
CommentGroup *docs;
CommentGroup *comment;
bool is_foreign;
bool is_export;
bool is_global;
+ bool is_rodata;
} Variable;
struct {
Type * type_parameter_specialization;
@@ -243,6 +245,7 @@ struct Entity {
Ast * foreign_library_ident;
String link_name;
String link_prefix;
+ String link_suffix;
DeferredProcedure deferred_procedure;
struct GenProcsData *gen_procs;
@@ -253,6 +256,7 @@ struct Entity {
bool generated_from_polymorphic : 1;
bool entry_point_only : 1;
bool has_instrumentation : 1;
+ bool is_memcpy_like : 1;
} Procedure;
struct {
Array<Entity *> entities;
@@ -266,6 +270,7 @@ struct Entity {
Scope *scope;
} ImportName;
struct {
+ Ast *decl;
Slice<String> paths;
String name;
i64 priority_index;
diff --git a/src/error.cpp b/src/error.cpp
index da444e998..03d96219b 100644
--- a/src/error.cpp
+++ b/src/error.cpp
@@ -390,8 +390,6 @@ gb_internal void error_va(TokenPos const &pos, TokenPos end, char const *fmt, va
error_out_empty();
} else {
error_out_pos(pos);
- }
- if (has_ansi_terminal_colours()) {
error_out_coloured("Error: ", TerminalStyle_Normal, TerminalColour_Red);
}
error_out_va(fmt, va);
@@ -407,29 +405,31 @@ gb_internal void warning_va(TokenPos const &pos, TokenPos end, char const *fmt,
error_va(pos, end, fmt, va);
return;
}
+ if (global_ignore_warnings()) {
+ return;
+ }
+
global_error_collector.warning_count.fetch_add(1);
mutex_lock(&global_error_collector.mutex);
push_error_value(pos, ErrorValue_Warning);
- if (!global_ignore_warnings()) {
- if (pos.line == 0) {
+ if (pos.line == 0) {
+ error_out_empty();
+ error_out_coloured("Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
+ error_out_va(fmt, va);
+ error_out("\n");
+ } else {
+ // global_error_collector.prev = pos;
+ if (json_errors()) {
error_out_empty();
- error_out_coloured("Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
- error_out_va(fmt, va);
- error_out("\n");
} else {
- // global_error_collector.prev = pos;
- if (json_errors()) {
- error_out_empty();
- } else {
- error_out_pos(pos);
- }
+ error_out_pos(pos);
error_out_coloured("Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
- error_out_va(fmt, va);
- error_out("\n");
- show_error_on_line(pos, end);
}
+ error_out_va(fmt, va);
+ error_out("\n");
+ show_error_on_line(pos, end);
}
try_pop_error_value();
mutex_unlock(&global_error_collector.mutex);
@@ -516,7 +516,7 @@ gb_internal void syntax_error_with_verbose_va(TokenPos const &pos, TokenPos end,
if (pos.line == 0) {
error_out_empty();
- error_out_coloured("Syntax_Error: ", TerminalStyle_Normal, TerminalColour_Red);
+ error_out_coloured("Syntax Error: ", TerminalStyle_Normal, TerminalColour_Red);
error_out_va(fmt, va);
error_out("\n");
} else {
@@ -527,7 +527,7 @@ gb_internal void syntax_error_with_verbose_va(TokenPos const &pos, TokenPos end,
error_out_pos(pos);
}
if (has_ansi_terminal_colours()) {
- error_out_coloured("Syntax_Error: ", TerminalStyle_Normal, TerminalColour_Red);
+ error_out_coloured("Syntax Error: ", TerminalStyle_Normal, TerminalColour_Red);
}
error_out_va(fmt, va);
error_out("\n");
@@ -544,30 +544,31 @@ gb_internal void syntax_warning_va(TokenPos const &pos, TokenPos end, char const
syntax_error_va(pos, end, fmt, va);
return;
}
+ if (global_ignore_warnings()) {
+ return;
+ }
mutex_lock(&global_error_collector.mutex);
global_error_collector.warning_count++;
push_error_value(pos, ErrorValue_Warning);
- if (!global_ignore_warnings()) {
- if (pos.line == 0) {
+ if (pos.line == 0) {
+ error_out_empty();
+ error_out_coloured("Syntax Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
+ error_out_va(fmt, va);
+ error_out("\n");
+ } else {
+ // global_error_collector.prev = pos;
+ if (json_errors()) {
error_out_empty();
- error_out_coloured("Syntax Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
- error_out_va(fmt, va);
- error_out("\n");
} else {
- // global_error_collector.prev = pos;
- if (json_errors()) {
- error_out_empty();
- } else {
- error_out_pos(pos);
- }
- error_out_coloured("Syntax Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
- error_out_va(fmt, va);
- error_out("\n");
- // show_error_on_line(pos, end);
+ error_out_pos(pos);
}
+ error_out_coloured("Syntax Warning: ", TerminalStyle_Normal, TerminalColour_Yellow);
+ error_out_va(fmt, va);
+ error_out("\n");
+ // show_error_on_line(pos, end);
}
try_pop_error_value();
@@ -838,4 +839,4 @@ gb_internal void print_all_errors(void) {
gb_file_write(f, res, gb_string_length(res));
errors_already_printed = true;
-}
\ No newline at end of file
+}
diff --git a/src/gb/gb.h b/src/gb/gb.h
index 17d5e97d1..22a30a04b 100644
--- a/src/gb/gb.h
+++ b/src/gb/gb.h
@@ -256,6 +256,7 @@ extern "C" {
#if defined(GB_SYSTEM_NETBSD)
#include <stdio.h>
+ #include <lwp.h>
#define lseek64 lseek
#endif
@@ -3027,6 +3028,8 @@ gb_inline u32 gb_thread_current_id(void) {
thread_id = find_thread(NULL);
#elif defined(GB_SYSTEM_FREEBSD)
thread_id = pthread_getthreadid_np();
+#elif defined(GB_SYSTEM_NETBSD)
+ thread_id = (u32)_lwp_self();
#else
#error Unsupported architecture for gb_thread_current_id()
#endif
diff --git a/src/linker.cpp b/src/linker.cpp
index c41f10593..25c54a6ab 100644
--- a/src/linker.cpp
+++ b/src/linker.cpp
@@ -13,6 +13,7 @@ struct LinkerData {
};
gb_internal i32 system_exec_command_line_app(char const *name, char const *fmt, ...);
+gb_internal bool system_exec_command_line_app_output(char const *command, gbString *output);
#if defined(GB_SYSTEM_OSX)
gb_internal void linker_enable_system_library_linking(LinkerData *ld) {
@@ -69,15 +70,40 @@ gb_internal i32 linker_stage(LinkerData *gen) {
if (is_arch_wasm()) {
timings_start_section(timings, str_lit("wasm-ld"));
+ gbString extra_orca_flags = gb_string_make(temporary_allocator(), "");
+
+ gbString inputs = gb_string_make(temporary_allocator(), "");
+ inputs = gb_string_append_fmt(inputs, "\"%.*s.o\"", LIT(output_filename));
+
+ if (build_context.metrics.os == TargetOs_orca) {
+ gbString orca_sdk_path = gb_string_make(temporary_allocator(), "");
+ if (!system_exec_command_line_app_output("orca sdk-path", &orca_sdk_path)) {
+ gb_printf_err("executing `orca sdk-path` failed, make sure Orca is installed and added to your path\n");
+ return 1;
+ }
+ if (gb_string_length(orca_sdk_path) == 0) {
+ gb_printf_err("executing `orca sdk-path` did not produce output\n");
+ return 1;
+ }
+ inputs = gb_string_append_fmt(inputs, " \"%s/orca-libc/lib/crt1.o\" \"%s/orca-libc/lib/libc.o\"", orca_sdk_path, orca_sdk_path);
+
+ extra_orca_flags = gb_string_append_fmt(extra_orca_flags, " -L \"%s/bin\" -lorca_wasm --export-dynamic", orca_sdk_path);
+ }
+
+
#if defined(GB_SYSTEM_WINDOWS)
result = system_exec_command_line_app("wasm-ld",
- "\"%.*s\\bin\\wasm-ld\" \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
+ "\"%.*s\\bin\\wasm-ld\" %s -o \"%.*s\" %.*s %.*s %s",
LIT(build_context.ODIN_ROOT),
- LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
+ inputs, LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags),
+ extra_orca_flags);
#else
result = system_exec_command_line_app("wasm-ld",
- "wasm-ld \"%.*s.o\" -o \"%.*s\" %.*s %.*s",
- LIT(output_filename), LIT(output_filename), LIT(build_context.link_flags), LIT(build_context.extra_linker_flags));
+ "wasm-ld %s -o \"%.*s\" %.*s %.*s %s",
+ inputs, LIT(output_filename),
+ LIT(build_context.link_flags),
+ LIT(build_context.extra_linker_flags),
+ extra_orca_flags);
#endif
return result;
}
diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp
index 85a16d321..1f7a39447 100644
--- a/src/llvm_abi.cpp
+++ b/src/llvm_abi.cpp
@@ -900,7 +900,15 @@ namespace lbAbiAmd64SysV {
}
switch (LLVMGetTypeKind(t)) {
- case LLVMIntegerTypeKind:
+ case LLVMIntegerTypeKind: {
+ i64 s = t_size;
+ while (s > 0) {
+ unify(cls, ix + off/8, RegClass_Int);
+ off += 8;
+ s -= 8;
+ }
+ break;
+ }
case LLVMPointerTypeKind:
case LLVMHalfTypeKind:
unify(cls, ix + off/8, RegClass_Int);
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 03c17a8bb..04c4ce244 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -9,6 +9,11 @@
#endif
+#ifndef LLVM_IGNORE_VERIFICATION
+#define LLVM_IGNORE_VERIFICATION 0
+#endif
+
+
#include "llvm_backend.hpp"
#include "llvm_abi.cpp"
#include "llvm_backend_opt.cpp"
@@ -1125,6 +1130,53 @@ gb_internal void lb_finalize_objc_names(lbProcedure *p) {
lb_end_procedure_body(p);
}
+gb_internal void lb_verify_function(lbModule *m, lbProcedure *p, bool dump_ll=false) {
+ if (LLVM_IGNORE_VERIFICATION) {
+ return;
+ }
+
+ if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
+ char *llvm_error = nullptr;
+
+ gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %.*s\n", LIT(p->name));
+ LLVMDumpValue(p->value);
+ gb_printf_err("\n");
+ if (dump_ll) {
+ gb_printf_err("\n\n\n");
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ }
+ }
+ LLVMVerifyFunction(p->value, LLVMPrintMessageAction);
+ exit_with_errors();
+ }
+}
+
+gb_internal WORKER_TASK_PROC(lb_llvm_module_verification_worker_proc) {
+ char *llvm_error = nullptr;
+ defer (LLVMDisposeMessage(llvm_error));
+ lbModule *m = cast(lbModule *)data;
+
+ if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) {
+ gb_printf_err("LLVM Error:\n%s\n", llvm_error);
+ if (build_context.keep_temp_files) {
+ TIME_SECTION("LLVM Print Module to File");
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ exit_with_errors();
+ return false;
+ }
+ }
+ exit_with_errors();
+ return 1;
+ }
+ return 0;
+}
+
+
+
gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProcedure *objc_names, Array<lbGlobalVariable> &global_variables) { // Startup Runtime
Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_Odin);
@@ -1160,6 +1212,10 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc
if (is_type_untyped_nil(init.type)) {
LLVMSetInitializer(var.var.value, LLVMConstNull(global_type));
var.is_initialized = true;
+
+ if (e->Variable.is_rodata) {
+ LLVMSetGlobalConstant(var.var.value, true);
+ }
continue;
}
GB_PANIC("Invalid init value, got %s", expr_to_string(init_expr));
@@ -1174,6 +1230,10 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc
}
LLVMSetInitializer(var.var.value, init.value);
var.is_initialized = true;
+
+ if (e->Variable.is_rodata) {
+ LLVMSetGlobalConstant(var.var.value, true);
+ }
continue;
}
} else {
@@ -1206,8 +1266,9 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc
var.is_initialized = true;
}
+
+
}
-
CheckerInfo *info = main_module->gen->info;
for (Entity *e : info->init_procedures) {
@@ -1218,13 +1279,7 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc
lb_end_procedure_body(p);
- if (!main_module->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
- }
-
+ lb_verify_function(main_module, p);
return p;
}
@@ -1247,31 +1302,21 @@ gb_internal lbProcedure *lb_create_cleanup_runtime(lbModule *main_module) { // C
lb_end_procedure_body(p);
- if (!main_module->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
- }
-
+ lb_verify_function(main_module, p);
return p;
}
gb_internal WORKER_TASK_PROC(lb_generate_procedures_and_types_per_module) {
lbModule *m = cast(lbModule *)data;
- for (Entity *e : m->global_procedures_and_types_to_create) {
- if (e->kind == Entity_TypeName) {
- (void)lb_get_entity_name(m, e);
- lb_type(m, e->type);
- }
+ for (Entity *e : m->global_types_to_create) {
+ (void)lb_get_entity_name(m, e);
+ (void)lb_type(m, e->type);
}
- for (Entity *e : m->global_procedures_and_types_to_create) {
- if (e->kind == Entity_Procedure) {
- (void)lb_get_entity_name(m, e);
- array_add(&m->procedures_to_generate, lb_create_procedure(m, e));
- }
+ for (Entity *e : m->global_procedures_to_create) {
+ (void)lb_get_entity_name(m, e);
+ array_add(&m->procedures_to_generate, lb_create_procedure(m, e));
}
return 0;
}
@@ -1325,16 +1370,24 @@ gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, Checker
m = lb_module_of_entity(gen, e);
}
- array_add(&m->global_procedures_and_types_to_create, e);
+ if (e->kind == Entity_Procedure) {
+ array_add(&m->global_procedures_to_create, e);
+ } else if (e->kind == Entity_TypeName) {
+ array_add(&m->global_types_to_create, e);
+ }
}
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (do_threading) {
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
thread_pool_add_task(lb_generate_procedures_and_types_per_module, m);
- } else {
+ }
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
lb_generate_procedures_and_types_per_module(m);
}
+
}
thread_pool_wait();
@@ -2365,16 +2418,19 @@ gb_internal WORKER_TASK_PROC(lb_generate_procedures_worker_proc) {
}
gb_internal void lb_generate_procedures(lbGenerator *gen, bool do_threading) {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (do_threading) {
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
thread_pool_add_task(lb_generate_procedures_worker_proc, m);
- } else {
+ }
+
+ thread_pool_wait();
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
lb_generate_procedures_worker_proc(m);
}
}
-
- thread_pool_wait();
}
gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc) {
@@ -2388,17 +2444,20 @@ gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc
}
gb_internal void lb_generate_missing_procedures(lbGenerator *gen, bool do_threading) {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- // NOTE(bill): procedures may be added during generation
- if (do_threading) {
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ // NOTE(bill): procedures may be added during generation
thread_pool_add_task(lb_generate_missing_procedures_to_check_worker_proc, m);
- } else {
+ }
+ thread_pool_wait();
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ // NOTE(bill): procedures may be added during generation
lb_generate_missing_procedures_to_check_worker_proc(m);
}
}
-
- thread_pool_wait();
}
gb_internal void lb_debug_info_complete_types_and_finalize(lbGenerator *gen) {
@@ -2411,32 +2470,45 @@ gb_internal void lb_debug_info_complete_types_and_finalize(lbGenerator *gen) {
}
gb_internal void lb_llvm_function_passes(lbGenerator *gen, bool do_threading) {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (do_threading) {
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
thread_pool_add_task(lb_llvm_function_pass_per_module, m);
- } else {
+ }
+ thread_pool_wait();
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
lb_llvm_function_pass_per_module(m);
}
}
- thread_pool_wait();
}
gb_internal void lb_llvm_module_passes(lbGenerator *gen, bool do_threading) {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData);
- wd->m = m;
- wd->target_machine = m->target_machine;
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData);
+ wd->m = m;
+ wd->target_machine = m->target_machine;
- if (do_threading) {
- thread_pool_add_task(lb_llvm_module_pass_worker_proc, wd);
- } else {
+ if (do_threading) {
+ thread_pool_add_task(lb_llvm_module_pass_worker_proc, wd);
+ } else {
+ lb_llvm_module_pass_worker_proc(wd);
+ }
+ }
+ thread_pool_wait();
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
+ auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData);
+ wd->m = m;
+ wd->target_machine = m->target_machine;
lb_llvm_module_pass_worker_proc(wd);
}
}
- thread_pool_wait();
}
gb_internal String lb_filepath_ll_for_module(lbModule *m) {
@@ -2514,40 +2586,27 @@ gb_internal String lb_filepath_obj_for_module(lbModule *m) {
return concatenate_strings(permanent_allocator(), path, ext);
}
-gb_internal WORKER_TASK_PROC(lb_llvm_module_verification_worker_proc) {
- char *llvm_error = nullptr;
- defer (LLVMDisposeMessage(llvm_error));
- lbModule *m = cast(lbModule *)data;
- if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) {
- gb_printf_err("LLVM Error:\n%s\n", llvm_error);
- if (build_context.keep_temp_files) {
- TIME_SECTION("LLVM Print Module to File");
- String filepath_ll = lb_filepath_ll_for_module(m);
- if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- exit_with_errors();
- return false;
- }
- }
- exit_with_errors();
- return 1;
- }
- return 0;
-}
-
gb_internal bool lb_llvm_module_verification(lbGenerator *gen, bool do_threading) {
- for (auto const &entry : gen->modules) {
- lbModule *m = entry.value;
- if (do_threading) {
+ if (LLVM_IGNORE_VERIFICATION) {
+ return true;
+ }
+
+ if (do_threading) {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
thread_pool_add_task(lb_llvm_module_verification_worker_proc, m);
- } else {
+ }
+ thread_pool_wait();
+
+ } else {
+ for (auto const &entry : gen->modules) {
+ lbModule *m = entry.value;
if (lb_llvm_module_verification_worker_proc(m)) {
return false;
}
}
}
- thread_pool_wait();
return true;
}
@@ -2768,12 +2827,7 @@ gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *star
}
- if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
- }
+ lb_verify_function(m, p);
lb_run_function_pass_manager(default_function_pass_manager, p, lbFunctionPassManager_default);
return p;
@@ -2794,28 +2848,11 @@ gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p) {
lb_end_procedure(p);
// Add Flags
- if (p->body != nullptr) {
- if (p->name == "memcpy" || p->name == "memmove" ||
- p->name == "runtime.mem_copy" || p->name == "mem_copy_non_overlapping" ||
- string_starts_with(p->name, str_lit("llvm.memcpy")) ||
- string_starts_with(p->name, str_lit("llvm.memmove"))) {
- p->flags |= lbProcedureFlag_WithoutMemcpyPass;
- }
+ if (p->entity && p->entity->kind == Entity_Procedure && p->entity->Procedure.is_memcpy_like) {
+ p->flags |= lbProcedureFlag_WithoutMemcpyPass;
}
- if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- char *llvm_error = nullptr;
-
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %.*s\n", LIT(p->name));
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- String filepath_ll = lb_filepath_ll_for_module(m);
- if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- }
- LLVMVerifyFunction(p->value, LLVMPrintMessageAction);
- exit_with_errors();
- }
+ lb_verify_function(m, p, true);
}
@@ -3210,14 +3247,21 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
lbValue init = lb_const_value(m, tav.type, v);
LLVMSetInitializer(g.value, init.value);
var.is_initialized = true;
+ if (e->kind == Entity_Variable && e->Variable.is_rodata) {
+ LLVMSetGlobalConstant(g.value, true);
+ }
}
}
}
if (!var.is_initialized && is_type_untyped_nil(tav.type)) {
var.is_initialized = true;
+ if (e->kind == Entity_Variable && e->Variable.is_rodata) {
+ LLVMSetGlobalConstant(g.value, true);
+ }
}
+ } else if (e->kind == Entity_Variable && e->Variable.is_rodata) {
+ LLVMSetGlobalConstant(g.value, true);
}
-
array_add(&global_variables, var);
lb_add_entity(m, e, g);
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index 9f7bc8843..447e93d42 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -181,7 +181,8 @@ struct lbModule {
std::atomic<u32> nested_type_name_guid;
Array<lbProcedure *> procedures_to_generate;
- Array<Entity *> global_procedures_and_types_to_create;
+ Array<Entity *> global_procedures_to_create;
+ Array<Entity *> global_types_to_create;
lbProcedure *curr_procedure;
diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp
index 2654a1d28..f1ace5f06 100644
--- a/src/llvm_backend_debug.cpp
+++ b/src/llvm_backend_debug.cpp
@@ -46,6 +46,15 @@ gb_internal LLVMMetadataRef lb_debug_end_location_from_ast(lbProcedure *p, Ast *
return lb_debug_location_from_token_pos(p, ast_end_token(node).pos);
}
+gb_internal void lb_debug_file_line(lbModule *m, Ast *node, LLVMMetadataRef *file, unsigned *line) {
+ if (*file == nullptr) {
+ if (node) {
+ *file = lb_get_llvm_metadata(m, node->file());
+ *line = cast(unsigned)ast_token(node).pos.line;
+ }
+ }
+}
+
gb_internal LLVMMetadataRef lb_debug_type_internal_proc(lbModule *m, Type *type) {
i64 size = type_size_of(type); // Check size
gb_unused(size);
@@ -117,6 +126,8 @@ gb_internal LLVMMetadataRef lb_debug_basic_struct(lbModule *m, String const &nam
gb_internal LLVMMetadataRef lb_debug_struct(lbModule *m, Type *type, Type *bt, String name, LLVMMetadataRef scope, LLVMMetadataRef file, unsigned line) {
GB_ASSERT(bt->kind == Type_Struct);
+ lb_debug_file_line(m, bt->Struct.node, &file, &line);
+
unsigned tag = DW_TAG_structure_type;
if (is_type_raw_union(bt)) {
tag = DW_TAG_union_type;
@@ -336,6 +347,8 @@ gb_internal LLVMMetadataRef lb_debug_union(lbModule *m, Type *type, String name,
Type *bt = base_type(type);
GB_ASSERT(bt->kind == Type_Union);
+ lb_debug_file_line(m, bt->Union.node, &file, &line);
+
u64 size_in_bits = 8*type_size_of(bt);
u32 align_in_bits = 8*cast(u32)type_align_of(bt);
@@ -415,6 +428,8 @@ gb_internal LLVMMetadataRef lb_debug_bitset(lbModule *m, Type *type, String name
Type *bt = base_type(type);
GB_ASSERT(bt->kind == Type_BitSet);
+ lb_debug_file_line(m, bt->BitSet.node, &file, &line);
+
u64 size_in_bits = 8*type_size_of(bt);
u32 align_in_bits = 8*cast(u32)type_align_of(bt);
@@ -494,6 +509,8 @@ gb_internal LLVMMetadataRef lb_debug_enum(lbModule *m, Type *type, String name,
Type *bt = base_type(type);
GB_ASSERT(bt->kind == Type_Enum);
+ lb_debug_file_line(m, bt->Enum.node, &file, &line);
+
u64 size_in_bits = 8*type_size_of(bt);
u32 align_in_bits = 8*cast(u32)type_align_of(bt);
@@ -609,50 +626,50 @@ gb_internal LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
case Basic_complex32:
{
LLVMMetadataRef elements[2] = {};
- elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f16, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f16, 4);
+ elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f16, 0*16);
+ elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f16, 1*16);
return lb_debug_basic_struct(m, str_lit("complex32"), 64, 32, elements, gb_count_of(elements));
}
case Basic_complex64:
{
LLVMMetadataRef elements[2] = {};
- elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f32, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f32, 4);
+ elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f32, 0*32);
+ elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f32, 2*32);
return lb_debug_basic_struct(m, str_lit("complex64"), 64, 32, elements, gb_count_of(elements));
}
case Basic_complex128:
{
LLVMMetadataRef elements[2] = {};
- elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f64, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f64, 8);
+ elements[0] = lb_debug_struct_field(m, str_lit("real"), t_f64, 0*64);
+ elements[1] = lb_debug_struct_field(m, str_lit("imag"), t_f64, 1*64);
return lb_debug_basic_struct(m, str_lit("complex128"), 128, 64, elements, gb_count_of(elements));
}
case Basic_quaternion64:
{
LLVMMetadataRef elements[4] = {};
- elements[0] = lb_debug_struct_field(m, str_lit("imag"), t_f16, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f16, 4);
- elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f16, 8);
- elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f16, 12);
+ elements[0] = lb_debug_struct_field(m, str_lit("imag"), t_f16, 0*16);
+ elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f16, 1*16);
+ elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f16, 2*16);
+ elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f16, 3*16);
return lb_debug_basic_struct(m, str_lit("quaternion64"), 128, 32, elements, gb_count_of(elements));
}
case Basic_quaternion128:
{
LLVMMetadataRef elements[4] = {};
- elements[0] = lb_debug_struct_field(m, str_lit("imag"), t_f32, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f32, 4);
- elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f32, 8);
- elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f32, 12);
+ elements[0] = lb_debug_struct_field(m, str_lit("imag"), t_f32, 0*32);
+ elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f32, 1*32);
+ elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f32, 2*32);
+ elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f32, 3*32);
return lb_debug_basic_struct(m, str_lit("quaternion128"), 128, 32, elements, gb_count_of(elements));
}
case Basic_quaternion256:
{
LLVMMetadataRef elements[4] = {};
- elements[0] = lb_debug_struct_field(m, str_lit("imag"), t_f64, 0);
- elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f64, 8);
- elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f64, 16);
- elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f64, 24);
+ elements[0] = lb_debug_struct_field(m, str_lit("imag"), t_f64, 0*64);
+ elements[1] = lb_debug_struct_field(m, str_lit("jmag"), t_f64, 1*64);
+ elements[2] = lb_debug_struct_field(m, str_lit("kmag"), t_f64, 2*64);
+ elements[3] = lb_debug_struct_field(m, str_lit("real"), t_f64, 3*64);
return lb_debug_basic_struct(m, str_lit("quaternion256"), 256, 32, elements, gb_count_of(elements));
}
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index c12489598..a23f8cfbe 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -504,6 +504,10 @@ gb_internal bool lb_is_matrix_simdable(Type *t) {
if ((mt->Matrix.row_count & 1) ^ (mt->Matrix.column_count & 1)) {
return false;
}
+ if (mt->Matrix.is_row_major) {
+ // TODO(bill): make #row_major matrices work with SIMD
+ return false;
+ }
if (elem->kind == Type_Basic) {
switch (elem->Basic.kind) {
@@ -1869,13 +1873,40 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) {
lbValue res_i128 = lb_emit_runtime_call(p, call, args);
return lb_emit_conv(p, res_i128, t);
}
+ i64 sz = type_size_of(src);
lbValue res = {};
res.type = t;
if (is_type_unsigned(dst)) {
- res.value = LLVMBuildFPToUI(p->builder, value.value, lb_type(m, t), "");
+ switch (sz) {
+ case 2:
+ case 4:
+ res.value = LLVMBuildFPToUI(p->builder, value.value, lb_type(m, t_u32), "");
+ res.value = LLVMBuildIntCast2(p->builder, res.value, lb_type(m, t), false, "");
+ break;
+ case 8:
+ res.value = LLVMBuildFPToUI(p->builder, value.value, lb_type(m, t_u64), "");
+ res.value = LLVMBuildIntCast2(p->builder, res.value, lb_type(m, t), false, "");
+ break;
+ default:
+ GB_PANIC("Unhandled float type");
+ break;
+ }
} else {
- res.value = LLVMBuildFPToSI(p->builder, value.value, lb_type(m, t), "");
+ switch (sz) {
+ case 2:
+ case 4:
+ res.value = LLVMBuildFPToSI(p->builder, value.value, lb_type(m, t_i32), "");
+ res.value = LLVMBuildIntCast2(p->builder, res.value, lb_type(m, t), true, "");
+ break;
+ case 8:
+ res.value = LLVMBuildFPToSI(p->builder, value.value, lb_type(m, t_i64), "");
+ res.value = LLVMBuildIntCast2(p->builder, res.value, lb_type(m, t), true, "");
+ break;
+ default:
+ GB_PANIC("Unhandled float type");
+ break;
+ }
}
return res;
}
@@ -4533,10 +4564,26 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
if (lb_is_nested_possibly_constant(type, sel, elem)) {
continue;
}
- lbValue dst = lb_emit_deep_field_gep(p, comp_lit_ptr, sel);
field_expr = lb_build_expr(p, elem);
field_expr = lb_emit_conv(p, field_expr, sel.entity->type);
- lb_emit_store(p, dst, field_expr);
+ if (sel.is_bit_field) {
+ Selection sub_sel = trim_selection(sel);
+ lbValue trimmed_dst = lb_emit_deep_field_gep(p, comp_lit_ptr, sub_sel);
+ Type *bf = base_type(type_deref(trimmed_dst.type));
+ if (is_type_pointer(bf)) {
+ trimmed_dst = lb_emit_load(p, trimmed_dst);
+ bf = base_type(type_deref(trimmed_dst.type));
+ }
+ GB_ASSERT(bf->kind == Type_BitField);
+
+ isize idx = sel.index[sel.index.count-1];
+ lbAddr dst = lb_addr_bit_field(trimmed_dst, bf->BitField.fields[idx]->type, bf->BitField.bit_offsets[idx], bf->BitField.bit_sizes[idx]);
+ lb_addr_store(p, dst, field_expr);
+
+ } else {
+ lbValue dst = lb_emit_deep_field_gep(p, comp_lit_ptr, sel);
+ lb_emit_store(p, dst, field_expr);
+ }
continue;
}
diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp
index e8183027f..03d0f8b32 100644
--- a/src/llvm_backend_general.cpp
+++ b/src/llvm_backend_general.cpp
@@ -78,7 +78,8 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) {
array_init(&m->procedures_to_generate, a, 0, c->info.all_procedures.count);
map_init(&m->procedure_values, c->info.all_procedures.count*2);
}
- array_init(&m->global_procedures_and_types_to_create, a, 0, 1024);
+ array_init(&m->global_procedures_to_create, a, 0, 1024);
+ array_init(&m->global_types_to_create, a, 0, 1024);
array_init(&m->missing_procedures_to_check, a, 0, 16);
map_init(&m->debug_values);
@@ -1383,8 +1384,6 @@ gb_internal lbValue lb_addr_load(lbProcedure *p, lbAddr const &addr) {
LLVMTypeRef vector_type = nullptr;
if (lb_try_vector_cast(p->module, addr.addr, &vector_type)) {
- LLVMSetAlignment(res.addr.value, cast(unsigned)lb_alignof(vector_type));
-
LLVMValueRef vp = LLVMBuildPointerCast(p->builder, addr.addr.value, LLVMPointerType(vector_type, 0), "");
LLVMValueRef v = LLVMBuildLoad2(p->builder, vector_type, vp, "");
LLVMValueRef scalars[4] = {};
@@ -1394,6 +1393,8 @@ gb_internal lbValue lb_addr_load(lbProcedure *p, lbAddr const &addr) {
LLVMValueRef mask = LLVMConstVector(scalars, addr.swizzle.count);
LLVMValueRef sv = llvm_basic_shuffle(p, v, mask);
+ LLVMSetAlignment(res.addr.value, cast(unsigned)lb_alignof(LLVMTypeOf(sv)));
+
LLVMValueRef dst = LLVMBuildPointerCast(p->builder, ptr.value, LLVMPointerType(LLVMTypeOf(sv), 0), "");
LLVMBuildStore(p->builder, sv, dst);
} else {
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 3b9b1be05..87f75fb1d 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -710,13 +710,12 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) {
lb_set_debug_position_to_procedure_begin(p);
if (p->debug_info != nullptr) {
if (p->context_stack.count != 0) {
+ lbBlock *prev_block = p->curr_block;
p->curr_block = p->decl_block;
lb_add_debug_context_variable(p, lb_find_or_generate_context_ptr(p));
+ p->curr_block = prev_block;
}
-
}
-
- lb_start_block(p, p->entry_block);
}
gb_internal void lb_end_procedure_body(lbProcedure *p) {
@@ -1097,15 +1096,7 @@ gb_internal lbValue lb_emit_call(lbProcedure *p, lbValue value, Array<lbValue> c
ptr = lb_address_from_load_or_generate_local(p, x);
}
} else {
- if (LLVMIsConstant(x.value)) {
- // NOTE(bill): if the value is already constant, then just it as a global variable
- // and pass it by pointer
- lbAddr addr = lb_add_global_generated(p->module, original_type, x);
- lb_make_global_private_const(addr);
- ptr = addr.addr;
- } else {
- ptr = lb_copy_value_to_ptr(p, x, original_type, 16);
- }
+ ptr = lb_copy_value_to_ptr(p, x, original_type, 16);
}
array_add(&processed_args, ptr);
}
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index b18db4e45..9f28e45e0 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -1850,7 +1850,9 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) {
LLVMSetInitializer(global, LLVMConstNull(lb_type(p->module, e->type)));
if (value.value != nullptr) {
LLVMSetInitializer(global, value.value);
- } else {
+ }
+ if (e->Variable.is_rodata) {
+ LLVMSetGlobalConstant(global, true);
}
if (e->Variable.thread_local_model != "") {
LLVMSetThreadLocal(global, true);
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index f7674a8bc..94153e233 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -1365,6 +1365,8 @@ gb_internal lbValue lb_emit_deep_field_gep(lbProcedure *p, lbValue e, Selection
} else {
e = lb_emit_ptr_offset(p, lb_emit_load(p, arr), index);
}
+ e.type = alloc_type_multi_pointer_to_pointer(e.type);
+
} else if (is_type_quaternion(type)) {
e = lb_emit_struct_ep(p, e, index);
} else if (is_type_raw_union(type)) {
diff --git a/src/main.cpp b/src/main.cpp
index 4df6f97d5..70def5802 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -155,6 +155,38 @@ gb_internal i32 system_exec_command_line_app(char const *name, char const *fmt,
return exit_code;
}
+#if defined(GB_SYSTEM_WINDOWS)
+#define popen _popen
+#define pclose _pclose
+#endif
+
+gb_internal bool system_exec_command_line_app_output(char const *command, gbString *output) {
+ GB_ASSERT(output);
+
+ u8 buffer[256];
+ FILE *stream;
+ stream = popen(command, "r");
+ if (!stream) {
+ return false;
+ }
+ defer (pclose(stream));
+
+ while (!feof(stream)) {
+ size_t n = fread(buffer, 1, 255, stream);
+ *output = gb_string_append_length(*output, buffer, n);
+
+ if (ferror(stream)) {
+ return false;
+ }
+ }
+
+ if (build_context.show_system_calls) {
+ gb_printf_err("[SYSTEM CALL OUTPUT] %s -> %s\n", command, *output);
+ }
+
+ return true;
+}
+
gb_internal Array<String> setup_args(int argc, char const **argv) {
gbAllocator a = heap_allocator();
@@ -234,6 +266,8 @@ enum BuildFlagKind {
BuildFlag_ShowMoreTimings,
BuildFlag_ExportTimings,
BuildFlag_ExportTimingsFile,
+ BuildFlag_ExportDependencies,
+ BuildFlag_ExportDependenciesFile,
BuildFlag_ShowSystemCalls,
BuildFlag_ThreadCount,
BuildFlag_KeepTempFiles,
@@ -276,8 +310,6 @@ enum BuildFlagKind {
BuildFlag_RelocMode,
BuildFlag_DisableRedZone,
- BuildFlag_TestName,
-
BuildFlag_DisallowDo,
BuildFlag_DefaultToNilAllocator,
BuildFlag_DefaultToPanicAllocator,
@@ -320,7 +352,6 @@ enum BuildFlagKind {
BuildFlag_Subsystem,
#endif
-
BuildFlag_COUNT,
};
@@ -427,6 +458,8 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_ShowMoreTimings, str_lit("show-more-timings"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ExportTimings, str_lit("export-timings"), BuildFlagParam_String, Command__does_check);
add_flag(&build_flags, BuildFlag_ExportTimingsFile, str_lit("export-timings-file"), BuildFlagParam_String, Command__does_check);
+ add_flag(&build_flags, BuildFlag_ExportDependencies, str_lit("export-dependencies"), BuildFlagParam_String, Command__does_build);
+ add_flag(&build_flags, BuildFlag_ExportDependenciesFile, str_lit("export-dependencies-file"), BuildFlagParam_String, Command__does_build);
add_flag(&build_flags, BuildFlag_ShowUnused, str_lit("show-unused"), BuildFlagParam_None, Command_check);
add_flag(&build_flags, BuildFlag_ShowUnusedWithLocation, str_lit("show-unused-with-location"), BuildFlagParam_None, Command_check);
add_flag(&build_flags, BuildFlag_ShowSystemCalls, str_lit("show-system-calls"), BuildFlagParam_None, Command_all);
@@ -471,8 +504,6 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_RelocMode, str_lit("reloc-mode"), BuildFlagParam_String, Command__does_build);
add_flag(&build_flags, BuildFlag_DisableRedZone, str_lit("disable-red-zone"), BuildFlagParam_None, Command__does_build);
- add_flag(&build_flags, BuildFlag_TestName, str_lit("test-name"), BuildFlagParam_String, Command_test);
-
add_flag(&build_flags, BuildFlag_DisallowDo, str_lit("disallow-do"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_DefaultToNilAllocator, str_lit("default-to-nil-allocator"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_DefaultToPanicAllocator, str_lit("default-to-panic-allocator"),BuildFlagParam_None, Command__does_check);
@@ -753,6 +784,36 @@ gb_internal bool parse_build_flags(Array<String> args) {
break;
}
+ case BuildFlag_ExportDependencies: {
+ GB_ASSERT(value.kind == ExactValue_String);
+
+ if (value.value_string == "make") {
+ build_context.export_dependencies_format = DependenciesExportMake;
+ } else if (value.value_string == "json") {
+ build_context.export_dependencies_format = DependenciesExportJson;
+ } else {
+ gb_printf_err("Invalid export format for -export-dependencies:<string>, got %.*s\n", LIT(value.value_string));
+ gb_printf_err("Valid export formats:\n");
+ gb_printf_err("\tmake\n");
+ gb_printf_err("\tjson\n");
+ bad_flags = true;
+ }
+
+ break;
+ }
+ case BuildFlag_ExportDependenciesFile: {
+ GB_ASSERT(value.kind == ExactValue_String);
+
+ String export_path = string_trim_whitespace(value.value_string);
+ if (is_build_flag_path_valid(export_path)) {
+ build_context.export_dependencies_file = path_to_full_path(heap_allocator(), export_path);
+ } else {
+ gb_printf_err("Invalid -export-dependencies path, got %.*s\n", LIT(export_path));
+ bad_flags = true;
+ }
+
+ break;
+ }
case BuildFlag_ShowSystemCalls: {
GB_ASSERT(value.kind == ExactValue_Invalid);
build_context.show_system_calls = true;
@@ -1119,21 +1180,6 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_DisableRedZone:
build_context.disable_red_zone = true;
break;
- case BuildFlag_TestName: {
- GB_ASSERT(value.kind == ExactValue_String);
- {
- String name = value.value_string;
- if (!string_is_valid_identifier(name)) {
- gb_printf_err("Test name '%.*s' must be a valid identifier\n", LIT(name));
- bad_flags = true;
- break;
- }
- string_set_add(&build_context.test_names, name);
-
- // NOTE(bill): Allow for multiple -test-name
- continue;
- }
- }
case BuildFlag_DisallowDo:
build_context.disallow_do = true;
break;
@@ -1635,6 +1681,74 @@ gb_internal void show_timings(Checker *c, Timings *t) {
}
}
+gb_internal void export_dependencies(Parser *p) {
+ GB_ASSERT(build_context.export_dependencies_format != DependenciesExportUnspecified);
+
+ if (build_context.export_dependencies_file.len <= 0) {
+ gb_printf_err("No dependency file specified with `-export-dependencies-file`\n");
+ exit_with_errors();
+ return;
+ }
+
+ gbFile f = {};
+ char * fileName = (char *)build_context.export_dependencies_file.text;
+ gbFileError err = gb_file_open_mode(&f, gbFileMode_Write, fileName);
+ if (err != gbFileError_None) {
+ gb_printf_err("Failed to export dependencies to: %s\n", fileName);
+ exit_with_errors();
+ return;
+ }
+ defer (gb_file_close(&f));
+
+ if (build_context.export_dependencies_format == DependenciesExportMake) {
+ String exe_name = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_Output]);
+ defer (gb_free(heap_allocator(), exe_name.text));
+
+ gb_fprintf(&f, "%.*s:", LIT(exe_name));
+
+ isize current_line_length = exe_name.len + 1;
+
+ for(AstPackage *pkg : p->packages) {
+ for(AstFile *file : pkg->files) {
+ /* Arbitrary line break value. Maybe make this better? */
+ if (current_line_length >= 80-2) {
+ gb_file_write(&f, " \\\n ", 4);
+ current_line_length = 1;
+ }
+
+ gb_file_write(&f, " ", 1);
+ current_line_length++;
+
+ for (isize k = 0; k < file->fullpath.len; k++) {
+ char part = file->fullpath.text[k];
+ if (part == ' ') {
+ gb_file_write(&f, "\\", 1);
+ current_line_length++;
+ }
+ gb_file_write(&f, &part, 1);
+ current_line_length++;
+ }
+ }
+ }
+
+ gb_fprintf(&f, "\n");
+ } else if (build_context.export_dependencies_format == DependenciesExportJson) {
+ gb_fprintf(&f, "{\n");
+
+ gb_fprintf(&f, "\t\"source_files\": [\n");
+
+ for(AstPackage *pkg : p->packages) {
+ for(AstFile *file : pkg->files) {
+ gb_fprintf(&f, "\t\t\"%.*s\",\n", LIT(file->fullpath));
+ }
+ }
+
+ gb_fprintf(&f, "\t],\n");
+
+ gb_fprintf(&f, "}\n");
+ }
+}
+
gb_internal void remove_temp_files(lbGenerator *gen) {
if (build_context.keep_temp_files) return;
@@ -1790,6 +1904,18 @@ gb_internal void print_show_help(String const arg0, String const &command) {
print_usage_line(2, "Example: -export-timings-file:timings.json");
print_usage_line(0, "");
+ print_usage_line(1, "-export-dependencies:<format>");
+ print_usage_line(2, "Exports dependencies to one of a few formats. Requires `-export-dependencies-file`.");
+ print_usage_line(2, "Available options:");
+ print_usage_line(3, "-export-dependencies:make Exports in Makefile format");
+ print_usage_line(3, "-export-dependencies:json Exports in JSON format");
+ print_usage_line(0, "");
+
+ print_usage_line(1, "-export-dependencies-file:<filename>");
+ print_usage_line(2, "Specifies the filename for `-export-dependencies`.");
+ print_usage_line(2, "Example: -export-dependencies-file:dependencies.d");
+ print_usage_line(0, "");
+
print_usage_line(1, "-thread-count:<integer>");
print_usage_line(2, "Overrides the number of threads the compiler will use to compile with.");
print_usage_line(2, "Example: -thread-count:2");
@@ -1962,10 +2088,6 @@ gb_internal void print_show_help(String const arg0, String const &command) {
}
if (test_only) {
- print_usage_line(1, "-test-name:<string>");
- print_usage_line(2, "Runs specific test only by name.");
- print_usage_line(0, "");
-
print_usage_line(1, "-all-packages");
print_usage_line(2, "Tests all packages imported into the given initial package.");
print_usage_line(0, "");
@@ -2489,7 +2611,6 @@ int main(int arg_count, char const **arg_ptr) {
TIME_SECTION("init args");
map_init(&build_context.defined_values);
build_context.extra_packages.allocator = heap_allocator();
- string_set_init(&build_context.test_names);
Array<String> args = setup_args(arg_count, arg_ptr);
@@ -2657,6 +2778,10 @@ int main(int arg_count, char const **arg_ptr) {
gb_printf_err("Expected either a directory or a .odin file, got '%.*s'\n", LIT(init_filename));
return 1;
}
+ if (!gb_file_exists(cast(const char*)init_filename.text)) {
+ gb_printf_err("The file '%.*s' was not found.\n", LIT(init_filename));
+ return 1;
+ }
}
}
}
@@ -2881,6 +3006,10 @@ int main(int arg_count, char const **arg_ptr) {
if (build_context.show_timings) {
show_timings(checker, &global_timings);
}
+
+ if (build_context.export_dependencies_format != DependenciesExportUnspecified) {
+ export_dependencies(parser);
+ }
return result;
}
break;
@@ -2903,6 +3032,10 @@ int main(int arg_count, char const **arg_ptr) {
if (build_context.show_timings) {
show_timings(checker, &global_timings);
}
+
+ if (build_context.export_dependencies_format != DependenciesExportUnspecified) {
+ export_dependencies(parser);
+ }
return result;
}
break;
@@ -2916,6 +3049,10 @@ int main(int arg_count, char const **arg_ptr) {
show_timings(checker, &global_timings);
}
+ if (build_context.export_dependencies_format != DependenciesExportUnspecified) {
+ export_dependencies(parser);
+ }
+
if (run_output) {
String exe_name = path_to_string(heap_allocator(), build_context.build_paths[BuildPath_Output]);
defer (gb_free(heap_allocator(), exe_name.text));
diff --git a/src/parser.cpp b/src/parser.cpp
index 5aa11b5d0..0cd96f5b5 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -555,7 +555,7 @@ gb_internal Ast *ast_unary_expr(AstFile *f, Token op, Ast *expr) {
syntax_error_with_verbose(expr, "'or_return' within an unary expression not wrapped in parentheses (...)");
break;
case Ast_OrBranchExpr:
- syntax_error_with_verbose(expr, "'or_%.*s' within an unary expression not wrapped in parentheses (...)", LIT(expr->OrBranchExpr.token.string));
+ syntax_error_with_verbose(expr, "'%.*s' within an unary expression not wrapped in parentheses (...)", LIT(expr->OrBranchExpr.token.string));
break;
}
@@ -583,7 +583,7 @@ gb_internal Ast *ast_binary_expr(AstFile *f, Token op, Ast *left, Ast *right) {
syntax_error_with_verbose(left, "'or_return' within a binary expression not wrapped in parentheses (...)");
break;
case Ast_OrBranchExpr:
- syntax_error_with_verbose(left, "'or_%.*s' within a binary expression not wrapped in parentheses (...)", LIT(left->OrBranchExpr.token.string));
+ syntax_error_with_verbose(left, "'%.*s' within a binary expression not wrapped in parentheses (...)", LIT(left->OrBranchExpr.token.string));
break;
}
if (right) switch (right->kind) {
@@ -591,7 +591,7 @@ gb_internal Ast *ast_binary_expr(AstFile *f, Token op, Ast *left, Ast *right) {
syntax_error_with_verbose(right, "'or_return' within a binary expression not wrapped in parentheses (...)");
break;
case Ast_OrBranchExpr:
- syntax_error_with_verbose(right, "'or_%.*s' within a binary expression not wrapped in parentheses (...)", LIT(right->OrBranchExpr.token.string));
+ syntax_error_with_verbose(right, "'%.*s' within a binary expression not wrapped in parentheses (...)", LIT(right->OrBranchExpr.token.string));
break;
}
@@ -1284,14 +1284,16 @@ gb_internal Ast *ast_import_decl(AstFile *f, Token token, Token relpath, Token i
return result;
}
-gb_internal Ast *ast_foreign_import_decl(AstFile *f, Token token, Array<Token> filepaths, Token library_name,
- CommentGroup *docs, CommentGroup *comment) {
+gb_internal Ast *ast_foreign_import_decl(AstFile *f, Token token, Array<Ast *> filepaths, Token library_name,
+ bool multiple_filepaths,
+ CommentGroup *docs, CommentGroup *comment) {
Ast *result = alloc_ast_node(f, Ast_ForeignImportDecl);
result->ForeignImportDecl.token = token;
result->ForeignImportDecl.filepaths = slice_from_array(filepaths);
result->ForeignImportDecl.library_name = library_name;
result->ForeignImportDecl.docs = docs;
result->ForeignImportDecl.comment = comment;
+ result->ForeignImportDecl.multiple_filepaths = multiple_filepaths;
result->ForeignImportDecl.attributes.allocator = ast_allocator(f);
return result;
@@ -2089,6 +2091,9 @@ gb_internal bool ast_on_same_line(Token const &x, Ast *yp) {
gb_internal Ast *parse_force_inlining_operand(AstFile *f, Token token) {
Ast *expr = parse_unary_expr(f, false);
Ast *e = strip_or_return_expr(expr);
+ if (e == nullptr) {
+ return expr;
+ }
if (e->kind != Ast_ProcLit && e->kind != Ast_CallExpr) {
syntax_error(expr, "%.*s must be followed by a procedure literal or call, got %.*s", LIT(token.string), LIT(ast_strings[expr->kind]));
return ast_bad_expr(f, token, f->curr_token);
@@ -3100,7 +3105,7 @@ gb_internal void parse_check_or_return(Ast *operand, char const *msg) {
syntax_error_with_verbose(operand, "'or_return' use within %s is not wrapped in parentheses (...)", msg);
break;
case Ast_OrBranchExpr:
- syntax_error_with_verbose(operand, "'or_%.*s' use within %s is not wrapped in parentheses (...)", msg, LIT(operand->OrBranchExpr.token.string));
+ syntax_error_with_verbose(operand, "'%.*s' use within %s is not wrapped in parentheses (...)", msg, LIT(operand->OrBranchExpr.token.string));
break;
}
}
@@ -3745,8 +3750,10 @@ gb_internal Ast *parse_simple_stmt(AstFile *f, u32 flags) {
case Ast_TypeSwitchStmt:
stmt->TypeSwitchStmt.partial = true;
break;
+ default:
+ syntax_error(partial_token, "Incorrect use of directive, use '%.*s: #partial switch'", LIT(ast_token(name).string));
+ break;
}
- syntax_error(partial_token, "Incorrect use of directive, use '#partial %.*s: switch'", LIT(ast_token(name).string));
} else if (is_reverse) {
switch (stmt->kind) {
case Ast_RangeStmt:
@@ -4882,14 +4889,17 @@ gb_internal Ast *parse_foreign_decl(AstFile *f) {
if (is_blank_ident(lib_name)) {
syntax_error(lib_name, "Illegal foreign import name: '_'");
}
- Array<Token> filepaths = {};
+ bool multiple_filepaths = false;
+
+ Array<Ast *> filepaths = {};
if (allow_token(f, Token_OpenBrace)) {
+ multiple_filepaths = true;
array_init(&filepaths, ast_allocator(f));
while (f->curr_token.kind != Token_CloseBrace &&
f->curr_token.kind != Token_EOF) {
- Token path = expect_token(f, Token_String);
+ Ast *path = parse_expr(f, false);
array_add(&filepaths, path);
if (!allow_field_separator(f)) {
@@ -4898,9 +4908,10 @@ gb_internal Ast *parse_foreign_decl(AstFile *f) {
}
expect_closing_brace_of_field_list(f);
} else {
- filepaths = array_make<Token>(ast_allocator(f), 0, 1);
+ filepaths = array_make<Ast *>(ast_allocator(f), 0, 1);
Token path = expect_token(f, Token_String);
- array_add(&filepaths, path);
+ Ast *lit = ast_basic_lit(f, path);
+ array_add(&filepaths, lit);
}
Ast *s = nullptr;
@@ -4909,9 +4920,9 @@ gb_internal Ast *parse_foreign_decl(AstFile *f) {
s = ast_bad_decl(f, lib_name, f->curr_token);
} else if (f->curr_proc != nullptr) {
syntax_error(lib_name, "You cannot use foreign import within a procedure. This must be done at the file scope");
- s = ast_bad_decl(f, lib_name, filepaths[0]);
+ s = ast_bad_decl(f, lib_name, ast_token(filepaths[0]));
} else {
- s = ast_foreign_import_decl(f, token, filepaths, lib_name, docs, f->line_comment);
+ s = ast_foreign_import_decl(f, token, filepaths, lib_name, multiple_filepaths, docs, f->line_comment);
}
expect_semicolon(f);
return s;
@@ -5170,7 +5181,7 @@ gb_internal Ast *parse_stmt(AstFile *f) {
} else if (tag == "unroll") {
return parse_unrolled_for_loop(f, name);
} else if (tag == "reverse") {
- Ast *for_stmt = parse_for_stmt(f);
+ Ast *for_stmt = parse_stmt(f);
if (for_stmt->kind == Ast_RangeStmt) {
if (for_stmt->RangeStmt.reverse) {
syntax_error(token, "#reverse already applied to a 'for in' statement");
@@ -5648,9 +5659,19 @@ gb_internal bool is_package_name_reserved(String const &name) {
}
-gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node, String base_dir, String const &original_string, String *path) {
+gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node, String base_dir, String const &original_string, String *path, bool use_check_errors=false) {
GB_ASSERT(path != nullptr);
+ void (*do_error)(Ast *, char const *, ...);
+ void (*do_warning)(Token const &, char const *, ...);
+
+ do_error = &syntax_error;
+ do_warning = &syntax_warning;
+ if (use_check_errors) {
+ do_error = &error;
+ do_error = &warning;
+ }
+
// NOTE(bill): if file_mutex == nullptr, this means that the code is used within the semantics stage
String collection_name = {};
@@ -5677,7 +5698,7 @@ gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node
String file_str = {};
if (colon_pos == 0) {
- syntax_error(node, "Expected a collection name");
+ do_error(node, "Expected a collection name");
return false;
}
@@ -5692,11 +5713,11 @@ gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node
if (has_windows_drive) {
String sub_file_path = substring(file_str, 3, file_str.len);
if (!is_import_path_valid(sub_file_path)) {
- syntax_error(node, "Invalid import path: '%.*s'", LIT(file_str));
+ do_error(node, "Invalid import path: '%.*s'", LIT(file_str));
return false;
}
} else if (!is_import_path_valid(file_str)) {
- syntax_error(node, "Invalid import path: '%.*s'", LIT(file_str));
+ do_error(node, "Invalid import path: '%.*s'", LIT(file_str));
return false;
}
@@ -5718,16 +5739,16 @@ gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node
}
if (replace_with_base) {
if (ast_file_vet_deprecated(node->file())) {
- syntax_error(node, "import \"core:%.*s\" has been deprecated in favour of \"base:%.*s\"", LIT(file_str), LIT(file_str));
+ do_error(node, "import \"core:%.*s\" has been deprecated in favour of \"base:%.*s\"", LIT(file_str), LIT(file_str));
} else {
- syntax_warning(ast_token(node), "import \"core:%.*s\" has been deprecated in favour of \"base:%.*s\"", LIT(file_str), LIT(file_str));
+ do_warning(ast_token(node), "import \"core:%.*s\" has been deprecated in favour of \"base:%.*s\"", LIT(file_str), LIT(file_str));
}
}
}
if (collection_name == "system") {
if (node->kind != Ast_ForeignImportDecl) {
- syntax_error(node, "The library collection 'system' is restrict for 'foreign_library'");
+ do_error(node, "The library collection 'system' is restrict for 'foreign import'");
return false;
} else {
*path = file_str;
@@ -5735,7 +5756,7 @@ gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node
}
} else if (!find_library_collection_path(collection_name, &base_dir)) {
// NOTE(bill): It's a naughty name
- syntax_error(node, "Unknown library collection: '%.*s'", LIT(collection_name));
+ do_error(node, "Unknown library collection: '%.*s'", LIT(collection_name));
return false;
}
} else {
@@ -5759,7 +5780,7 @@ gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node
if (collection_name == "core" || collection_name == "base") {
return true;
} else {
- syntax_error(node, "The package '%.*s' must be imported with the 'base' library collection: 'base:%.*s'", LIT(file_str), LIT(file_str));
+ do_error(node, "The package '%.*s' must be imported with the 'base' library collection: 'base:%.*s'", LIT(file_str), LIT(file_str));
return false;
}
}
@@ -5844,30 +5865,29 @@ gb_internal void parse_setup_file_decls(Parser *p, AstFile *f, String const &bas
} else if (node->kind == Ast_ForeignImportDecl) {
ast_node(fl, ForeignImportDecl, node);
- auto fullpaths = array_make<String>(permanent_allocator(), 0, fl->filepaths.count);
-
- for (Token const &fp : fl->filepaths) {
- String file_str = string_trim_whitespace(string_value_from_token(f, fp));
+ if (fl->filepaths.count == 0) {
+ syntax_error(decls[i], "No foreign paths found");
+ decls[i] = ast_bad_decl(f, ast_token(fl->filepaths[0]), ast_end_token(fl->filepaths[fl->filepaths.count-1]));
+ goto end;
+ } else if (!fl->multiple_filepaths &&
+ fl->filepaths.count == 1) {
+ Ast *fp = fl->filepaths[0];
+ GB_ASSERT(fp->kind == Ast_BasicLit);
+ Token fp_token = fp->BasicLit.token;
+ String file_str = string_trim_whitespace(string_value_from_token(f, fp_token));
String fullpath = file_str;
if (allow_check_foreign_filepath()) {
String foreign_path = {};
bool ok = determine_path_from_string(&p->file_decl_mutex, node, base_dir, file_str, &foreign_path);
if (!ok) {
- decls[i] = ast_bad_decl(f, fp, fl->filepaths[fl->filepaths.count-1]);
+ decls[i] = ast_bad_decl(f, fp_token, fp_token);
goto end;
}
fullpath = foreign_path;
}
- array_add(&fullpaths, fullpath);
+ fl->fullpaths = slice_make<String>(permanent_allocator(), 1);
+ fl->fullpaths[0] = fullpath;
}
- if (fullpaths.count == 0) {
- syntax_error(decls[i], "No foreign paths found");
- decls[i] = ast_bad_decl(f, fl->filepaths[0], fl->filepaths[fl->filepaths.count-1]);
- goto end;
- }
-
- fl->fullpaths = slice_from_array(fullpaths);
-
} else if (node->kind == Ast_WhenStmt) {
ast_node(ws, WhenStmt, node);
diff --git a/src/parser.hpp b/src/parser.hpp
index 5820275c8..02f2af28d 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -458,6 +458,7 @@ AST_KIND(_ExprBegin, "", bool) \
bool optional_ok_one; \
bool was_selector; \
AstSplitArgs *split_args; \
+ Entity *entity_procedure_of; \
}) \
AST_KIND(FieldValue, "field value", struct { Token eq; Ast *field, *value; }) \
AST_KIND(EnumFieldValue, "enum field value", struct { \
@@ -631,7 +632,8 @@ AST_KIND(_DeclBegin, "", bool) \
}) \
AST_KIND(ForeignImportDecl, "foreign import declaration", struct { \
Token token; \
- Slice<Token> filepaths; \
+ Slice<Ast *> filepaths; \
+ bool multiple_filepaths; \
Token library_name; \
String collection_name; \
Slice<String> fullpaths; \
diff --git a/src/parser_pos.cpp b/src/parser_pos.cpp
index b2e12999b..1ffd3a82f 100644
--- a/src/parser_pos.cpp
+++ b/src/parser_pos.cpp
@@ -278,7 +278,7 @@ Token ast_end_token(Ast *node) {
case Ast_ImportDecl: return node->ImportDecl.relpath;
case Ast_ForeignImportDecl:
if (node->ForeignImportDecl.filepaths.count > 0) {
- return node->ForeignImportDecl.filepaths[node->ForeignImportDecl.filepaths.count-1];
+ return ast_end_token(node->ForeignImportDecl.filepaths[node->ForeignImportDecl.filepaths.count-1]);
}
if (node->ForeignImportDecl.library_name.kind != Token_Invalid) {
return node->ForeignImportDecl.library_name;
diff --git a/src/threading.cpp b/src/threading.cpp
index 48c58e8f4..717dcb874 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -494,6 +494,8 @@ gb_internal u32 thread_current_id(void) {
thread_id = find_thread(NULL);
#elif defined(GB_SYSTEM_FREEBSD)
thread_id = pthread_getthreadid_np();
+#elif defined(GB_SYSTEM_NETBSD)
+ thread_id = (u32)_lwp_self();
#else
#error Unsupported architecture for thread_current_id()
#endif
diff --git a/src/types.cpp b/src/types.cpp
index e568d2af2..97e8267a3 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -140,6 +140,7 @@ struct TypeStruct {
i64 custom_field_align;
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
+ Wait_Signal polymorphic_wait_signal;
Type * soa_elem;
i32 soa_count;
@@ -167,6 +168,7 @@ struct TypeUnion {
i64 custom_align;
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
+ Wait_Signal polymorphic_wait_signal;
i16 tag_size;
bool is_polymorphic;
@@ -457,6 +459,15 @@ gb_internal Selection sub_selection(Selection const &sel, isize offset) {
return res;
}
+gb_internal Selection trim_selection(Selection const &sel) {
+ Selection res = {};
+ res.index.data = sel.index.data;
+ res.index.count = gb_max(sel.index.count - 1, 0);
+ res.index.capacity = res.index.count;
+ return res;
+}
+
+
gb_global Type basic_types[] = {
{Type_Basic, {Basic_Invalid, 0, 0, STR_LIT("invalid type")}},
@@ -1084,6 +1095,7 @@ gb_internal Type *alloc_type_struct() {
gb_internal Type *alloc_type_struct_complete() {
Type *t = alloc_type(Type_Struct);
wait_signal_set(&t->Struct.fields_wait_signal);
+ wait_signal_set(&t->Struct.polymorphic_wait_signal);
return t;
}
@@ -1482,10 +1494,10 @@ gb_internal i64 matrix_align_of(Type *t, struct TypePath *tp) {
i64 total_expected_size = row_count*t->Matrix.column_count*elem_size;
// i64 min_alignment = prev_pow2(elem_align * row_count);
i64 min_alignment = prev_pow2(total_expected_size);
- while ((total_expected_size % min_alignment) != 0) {
+ while (total_expected_size != 0 && (total_expected_size % min_alignment) != 0) {
min_alignment >>= 1;
}
- GB_ASSERT(min_alignment >= elem_align);
+ min_alignment = gb_max(min_alignment, elem_align);
i64 align = gb_min(min_alignment, build_context.max_simd_align);
return align;
@@ -2127,15 +2139,18 @@ gb_internal bool is_type_polymorphic_record_unspecialized(Type *t) {
return false;
}
+
gb_internal TypeTuple *get_record_polymorphic_params(Type *t) {
t = base_type(t);
switch (t->kind) {
case Type_Struct:
+ wait_signal_until_available(&t->Struct.polymorphic_wait_signal);
if (t->Struct.polymorphic_params) {
return &t->Struct.polymorphic_params->Tuple;
}
break;
case Type_Union:
+ wait_signal_until_available(&t->Union.polymorphic_wait_signal);
if (t->Union.polymorphic_params) {
return &t->Union.polymorphic_params->Tuple;
}
diff --git a/tests/benchmark/all.odin b/tests/benchmark/all.odin
new file mode 100644
index 000000000..d1b7662e2
--- /dev/null
+++ b/tests/benchmark/all.odin
@@ -0,0 +1,4 @@
+package benchmarks
+
+@(require) import "crypto"
+@(require) import "hash"
diff --git a/tests/benchmark/crypto/benchmark_crypto.odin b/tests/benchmark/crypto/benchmark_crypto.odin
new file mode 100644
index 000000000..e90216ad6
--- /dev/null
+++ b/tests/benchmark/crypto/benchmark_crypto.odin
@@ -0,0 +1,356 @@
+package benchmark_core_crypto
+
+import "base:runtime"
+import "core:encoding/hex"
+import "core:fmt"
+import "core:log"
+import "core:strings"
+import "core:testing"
+import "core:time"
+
+import "core:crypto/aes"
+import "core:crypto/chacha20"
+import "core:crypto/chacha20poly1305"
+import "core:crypto/ed25519"
+import "core:crypto/poly1305"
+import "core:crypto/x25519"
+
+// Cryptographic primitive benchmarks.
+
+@(test)
+benchmark_crypto :: proc(t: ^testing.T) {
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+
+ str: strings.Builder
+ strings.builder_init(&str, context.allocator)
+ defer {
+ log.info(strings.to_string(str))
+ strings.builder_destroy(&str)
+ }
+
+ {
+ name := "ChaCha20 64 bytes"
+ options := &time.Benchmark_Options {
+ rounds = 1_000,
+ bytes = 64,
+ setup = _setup_sized_buf,
+ bench = _benchmark_chacha20,
+ teardown = _teardown_sized_buf,
+ }
+
+ err := time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "ChaCha20 1024 bytes"
+ options.bytes = 1024
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "ChaCha20 65536 bytes"
+ options.bytes = 65536
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "Poly1305 64 zero bytes"
+ options := &time.Benchmark_Options {
+ rounds = 1_000,
+ bytes = 64,
+ setup = _setup_sized_buf,
+ bench = _benchmark_poly1305,
+ teardown = _teardown_sized_buf,
+ }
+
+ err := time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "Poly1305 1024 zero bytes"
+ options.bytes = 1024
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "chacha20poly1305 64 bytes"
+ options := &time.Benchmark_Options {
+ rounds = 1_000,
+ bytes = 64,
+ setup = _setup_sized_buf,
+ bench = _benchmark_chacha20poly1305,
+ teardown = _teardown_sized_buf,
+ }
+
+ err := time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "chacha20poly1305 1024 bytes"
+ options.bytes = 1024
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "chacha20poly1305 65536 bytes"
+ options.bytes = 65536
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "AES256-GCM 64 bytes"
+ options := &time.Benchmark_Options {
+ rounds = 1_000,
+ bytes = 64,
+ setup = _setup_sized_buf,
+ bench = _benchmark_aes256_gcm,
+ teardown = _teardown_sized_buf,
+ }
+
+ key := [aes.KEY_SIZE_256]byte {
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ }
+ ctx: aes.Context_GCM
+ aes.init_gcm(&ctx, key[:])
+
+ context.user_ptr = &ctx
+
+ err := time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "AES256-GCM 1024 bytes"
+ options.bytes = 1024
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+
+ name = "AES256-GCM 65536 bytes"
+ options.bytes = 65536
+ err = time.benchmark(options, context.allocator)
+ testing.expect(t, err == nil, name)
+ benchmark_print(&str, name, options)
+ }
+ {
+ iters :: 10000
+
+ priv_str := "cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe"
+ priv_bytes, _ := hex.decode(transmute([]byte)(priv_str), context.temp_allocator)
+ priv_key: ed25519.Private_Key
+ start := time.now()
+ for i := 0; i < iters; i = i + 1 {
+ ok := ed25519.private_key_set_bytes(&priv_key, priv_bytes)
+ assert(ok, "private key should deserialize")
+ }
+ elapsed := time.since(start)
+ fmt.sbprintfln(&str,
+ "ed25519.private_key_set_bytes: ~%f us/op",
+ time.duration_microseconds(elapsed) / iters,
+ )
+
+ pub_bytes := priv_key._pub_key._b[:] // "I know what I am doing"
+ pub_key: ed25519.Public_Key
+ start = time.now()
+ for i := 0; i < iters; i = i + 1 {
+ ok := ed25519.public_key_set_bytes(&pub_key, pub_bytes[:])
+ assert(ok, "public key should deserialize")
+ }
+ elapsed = time.since(start)
+ fmt.sbprintfln(&str,
+ "ed25519.public_key_set_bytes: ~%f us/op",
+ time.duration_microseconds(elapsed) / iters,
+ )
+
+ msg := "Got a job for you, 621."
+ sig_bytes: [ed25519.SIGNATURE_SIZE]byte
+ msg_bytes := transmute([]byte)(msg)
+ start = time.now()
+ for i := 0; i < iters; i = i + 1 {
+ ed25519.sign(&priv_key, msg_bytes, sig_bytes[:])
+ }
+ elapsed = time.since(start)
+ fmt.sbprintfln(&str,
+ "ed25519.sign: ~%f us/op",
+ time.duration_microseconds(elapsed) / iters,
+ )
+
+ start = time.now()
+ for i := 0; i < iters; i = i + 1 {
+ ok := ed25519.verify(&pub_key, msg_bytes, sig_bytes[:])
+ assert(ok, "signature should validate")
+ }
+ elapsed = time.since(start)
+ fmt.sbprintfln(&str,
+ "ed25519.verify: ~%f us/op",
+ time.duration_microseconds(elapsed) / iters,
+ )
+ }
+ {
+ point_str := "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ scalar_str := "cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe"
+
+ point, _ := hex.decode(transmute([]byte)(point_str), context.temp_allocator)
+ scalar, _ := hex.decode(transmute([]byte)(scalar_str), context.temp_allocator)
+ out: [x25519.POINT_SIZE]byte = ---
+
+ iters :: 10000
+ start := time.now()
+ for i := 0; i < iters; i = i + 1 {
+ x25519.scalarmult(out[:], scalar[:], point[:])
+ }
+ elapsed := time.since(start)
+
+ fmt.sbprintfln(&str,
+ "x25519.scalarmult: ~%f us/op",
+ time.duration_microseconds(elapsed) / iters,
+ )
+ }
+}
+
+@(private)
+_setup_sized_buf :: proc(
+ options: ^time.Benchmark_Options,
+ allocator := context.allocator,
+) -> (
+ err: time.Benchmark_Error,
+) {
+ assert(options != nil)
+
+ options.input = make([]u8, options.bytes, allocator)
+ return nil if len(options.input) == options.bytes else .Allocation_Error
+}
+
+@(private)
+_teardown_sized_buf :: proc(
+ options: ^time.Benchmark_Options,
+ allocator := context.allocator,
+) -> (
+ err: time.Benchmark_Error,
+) {
+ assert(options != nil)
+
+ delete(options.input)
+ return nil
+}
+
+@(private)
+_benchmark_chacha20 :: proc(
+ options: ^time.Benchmark_Options,
+ allocator := context.allocator,
+) -> (
+ err: time.Benchmark_Error,
+) {
+ buf := options.input
+ key := [chacha20.KEY_SIZE]byte {
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ }
+ nonce := [chacha20.NONCE_SIZE]byte {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ }
+
+ ctx: chacha20.Context = ---
+ chacha20.init(&ctx, key[:], nonce[:])
+
+ for _ in 0 ..= options.rounds {
+ chacha20.xor_bytes(&ctx, buf, buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ return nil
+}
+
+@(private)
+_benchmark_poly1305 :: proc(
+ options: ^time.Benchmark_Options,
+ allocator := context.allocator,
+) -> (
+ err: time.Benchmark_Error,
+) {
+ buf := options.input
+ key := [poly1305.KEY_SIZE]byte {
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ }
+
+ tag: [poly1305.TAG_SIZE]byte = ---
+ for _ in 0 ..= options.rounds {
+ poly1305.sum(tag[:], buf, key[:])
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ //options.hash = u128(h)
+ return nil
+}
+
+@(private)
+_benchmark_chacha20poly1305 :: proc(
+ options: ^time.Benchmark_Options,
+ allocator := context.allocator,
+) -> (
+ err: time.Benchmark_Error,
+) {
+ buf := options.input
+ key := [chacha20.KEY_SIZE]byte {
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+ }
+ nonce := [chacha20.NONCE_SIZE]byte {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ }
+
+ tag: [chacha20poly1305.TAG_SIZE]byte = ---
+
+ for _ in 0 ..= options.rounds {
+ chacha20poly1305.encrypt(buf, tag[:], key[:], nonce[:], nil, buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ return nil
+}
+
+_benchmark_aes256_gcm :: proc(
+ options: ^time.Benchmark_Options,
+ allocator := context.allocator,
+) -> (
+ err: time.Benchmark_Error,
+) {
+ buf := options.input
+ nonce: [aes.GCM_NONCE_SIZE]byte
+ tag: [aes.GCM_TAG_SIZE]byte = ---
+
+ ctx := transmute(^aes.Context_GCM)context.user_ptr
+
+ for _ in 0 ..= options.rounds {
+ aes.seal_gcm(ctx, buf, tag[:], nonce[:], nil, buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ return nil
+}
+
+@(private)
+benchmark_print :: proc(str: ^strings.Builder, name: string, options: ^time.Benchmark_Options, loc := #caller_location) {
+ fmt.sbprintfln(str, "[%v] %v rounds, %v bytes processed in %v ns\n\t\t%5.3f rounds/s, %5.3f MiB/s\n",
+ name,
+ options.rounds,
+ options.processed,
+ time.duration_nanoseconds(options.duration),
+ options.rounds_per_second,
+ options.megabytes_per_second,
+ )
+}
diff --git a/tests/benchmark/hash/benchmark_hash.odin b/tests/benchmark/hash/benchmark_hash.odin
new file mode 100644
index 000000000..84eb827e7
--- /dev/null
+++ b/tests/benchmark/hash/benchmark_hash.odin
@@ -0,0 +1,218 @@
+package benchmark_core_hash
+
+import "core:fmt"
+import "core:hash/xxhash"
+import "base:intrinsics"
+import "core:strings"
+import "core:testing"
+import "core:time"
+
+@(test)
+benchmark_hash :: proc(t: ^testing.T) {
+ str: strings.Builder
+ strings.builder_init(&str, context.allocator)
+ defer {
+ fmt.println(strings.to_string(str))
+ strings.builder_destroy(&str)
+ }
+
+ {
+ name := "XXH32 100 zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 100,
+ setup = setup_xxhash,
+ bench = benchmark_xxh32,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x85f6413c)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH32 1 MiB zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 1_048_576,
+ setup = setup_xxhash,
+ bench = benchmark_xxh32,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x9430f97f)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH64 100 zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 100,
+ setup = setup_xxhash,
+ bench = benchmark_xxh64,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x17bb1103c92c502f)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH64 1 MiB zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 1_048_576,
+ setup = setup_xxhash,
+ bench = benchmark_xxh64,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x87d2a1b6e1163ef1)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH3_64 100 zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 100,
+ setup = setup_xxhash,
+ bench = benchmark_xxh3_64,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x801fedc74ccd608c)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH3_64 1 MiB zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 1_048_576,
+ setup = setup_xxhash,
+ bench = benchmark_xxh3_64,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x918780b90550bf34)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH3_128 100 zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 100,
+ setup = setup_xxhash,
+ bench = benchmark_xxh3_128,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0x6ba30a4e9dffe1ff801fedc74ccd608c)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+ {
+ name := "XXH3_128 1 MiB zero bytes"
+ options := &time.Benchmark_Options{
+ rounds = 1_000,
+ bytes = 1_048_576,
+ setup = setup_xxhash,
+ bench = benchmark_xxh3_128,
+ teardown = teardown_xxhash,
+ }
+ err := time.benchmark(options, context.allocator)
+ testing.expectf(t, err == nil, "%s failed with err %v", name, err)
+ hash := u128(0xb6ef17a3448492b6918780b90550bf34)
+ testing.expectf(t, options.hash == hash, "%v hash expected to be %v, got %v", name, hash, options.hash)
+ benchmark_print(&str, name, options)
+ }
+}
+
+// Benchmarks
+
+setup_xxhash :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
+ assert(options != nil)
+
+ options.input = make([]u8, options.bytes, allocator)
+ return nil if len(options.input) == options.bytes else .Allocation_Error
+}
+
+teardown_xxhash :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
+ assert(options != nil)
+
+ delete(options.input)
+ return nil
+}
+
+benchmark_xxh32 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
+ buf := options.input
+
+ h: u32
+ for _ in 0..=options.rounds {
+ h = xxhash.XXH32(buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ options.hash = u128(h)
+ return nil
+}
+
+benchmark_xxh64 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
+ buf := options.input
+
+ h: u64
+ for _ in 0..=options.rounds {
+ h = xxhash.XXH64(buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ options.hash = u128(h)
+ return nil
+}
+
+benchmark_xxh3_64 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
+ buf := options.input
+
+ h: u64
+ for _ in 0..=options.rounds {
+ h = xxhash.XXH3_64(buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ options.hash = u128(h)
+ return nil
+}
+
+benchmark_xxh3_128 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
+ buf := options.input
+
+ h: u128
+ for _ in 0..=options.rounds {
+ h = xxhash.XXH3_128(buf)
+ }
+ options.count = options.rounds
+ options.processed = options.rounds * options.bytes
+ options.hash = h
+ return nil
+}
+
+benchmark_print :: proc(str: ^strings.Builder, name: string, options: ^time.Benchmark_Options, loc := #caller_location) {
+ fmt.sbprintfln(str, "[%v] %v rounds, %v bytes processed in %v ns\n\t\t%5.3f rounds/s, %5.3f MiB/s\n",
+ name,
+ options.rounds,
+ options.processed,
+ time.duration_nanoseconds(options.duration),
+ options.rounds_per_second,
+ options.megabytes_per_second,
+ )
+} \ No newline at end of file
diff --git a/tests/common/common.odin b/tests/common/common.odin
deleted file mode 100644
index 021fb21c5..000000000
--- a/tests/common/common.odin
+++ /dev/null
@@ -1,81 +0,0 @@
-// Boilerplate for tests
-package common
-
-import "core:testing"
-import "core:fmt"
-import "core:os"
-import "core:strings"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
- errorf :: testing.errorf
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v:%s] FAIL %v\n", loc, loc.procedure, message)
- return
- }
- }
- errorf :: proc(t: ^testing.T, message: string, args: ..any, loc := #caller_location) {
- TEST_fail += 1
- fmt.printf("[%v:%s] Error %v\n", loc, loc.procedure, fmt.tprintf(message, ..args))
- return
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-report :: proc(t: ^testing.T) {
- if TEST_fail > 0 {
- if TEST_fail > 1 {
- fmt.printf("%v/%v tests successful, %v tests failed.\n", TEST_count - TEST_fail, TEST_count, TEST_fail)
- } else {
- fmt.printf("%v/%v tests successful, 1 test failed.\n", TEST_count - TEST_fail, TEST_count)
- }
- os.exit(1)
- } else {
- fmt.printf("%v/%v tests successful.\n", TEST_count, TEST_count)
- }
-}
-
-// Returns absolute path to `sub_path` where `sub_path` is within the "tests/" sub-directory of the Odin project root
-// and we're being run from the Odin project root or from a sub-directory of "tests/"
-// e.g. get_data_path("assets/blah") will return "/Odin_root/tests/assets/blah" if run within "/Odin_root",
-// "/Odin_root/tests" or "/Odin_root/tests/subdir" etc
-get_data_path :: proc(t: ^testing.T, sub_path: string) -> (data_path: string) {
-
- cwd := os.get_current_directory()
- defer delete(cwd)
-
- when ODIN_OS == .Windows {
- norm, was_allocation := strings.replace_all(cwd, "\\", "/")
- if !was_allocation {
- norm = strings.clone(norm)
- }
- defer delete(norm)
- } else {
- norm := cwd
- }
-
- last_index := strings.last_index(norm, "/tests/")
- if last_index == -1 {
- len := len(norm)
- if len >= 6 && norm[len-6:] == "/tests" {
- data_path = fmt.tprintf("%s/%s", norm, sub_path)
- } else {
- data_path = fmt.tprintf("%s/tests/%s", norm, sub_path)
- }
- } else {
- data_path = fmt.tprintf("%s/tests/%s", norm[:last_index], sub_path)
- }
-
- return data_path
-}
diff --git a/tests/core/.gitignore b/tests/core/.gitignore
index cd013e188..2a4e21679 100644
--- a/tests/core/.gitignore
+++ b/tests/core/.gitignore
@@ -1,3 +1,4 @@
+*.bmp
*.zip
*.png
math_big_test_library.* \ No newline at end of file
diff --git a/tests/core/Makefile b/tests/core/Makefile
deleted file mode 100644
index 9026ed3d9..000000000
--- a/tests/core/Makefile
+++ /dev/null
@@ -1,106 +0,0 @@
-ODIN=../../odin
-PYTHON=$(shell which python3)
-COMMON=-vet -strict-style
-COLLECTION=-collection:tests=..
-
-all: all_bsd \
- net_test
-
-all_bsd: c_libc_test \
- compress_test \
- container_test \
- crypto_test \
- download_test_assets \
- encoding_test \
- filepath_test \
- fmt_test \
- hash_test \
- i18n_test \
- image_test \
- linalg_glsl_math_test \
- match_test \
- math_test \
- noise_test \
- os_exit_test \
- reflect_test \
- slice_test \
- strings_test \
- thread_test \
- runtime_test \
- time_test \
- fmt_test
-
-download_test_assets:
- $(PYTHON) download_assets.py
-
-image_test:
- $(ODIN) run image $(COMMON) -out:test_core_image
-
-compress_test:
- $(ODIN) run compress $(COMMON) -out:test_core_compress
-
-container_test:
- $(ODIN) run container $(COMMON) $(COLLECTION) -out:test_core_container
-
-strings_test:
- $(ODIN) run strings $(COMMON) -out:test_core_strings
-
-hash_test:
- $(ODIN) run hash $(COMMON) -o:speed -no-bounds-check -out:test_hash
-
-crypto_test:
- $(ODIN) run crypto $(COMMON) $(COLLECTION) -o:speed -no-bounds-check -out:test_crypto
-
-noise_test:
- $(ODIN) run math/noise $(COMMON) -out:test_noise
-
-encoding_test:
- $(ODIN) run encoding/hxa $(COMMON) $(COLLECTION) -out:test_hxa
- $(ODIN) run encoding/json $(COMMON) -out:test_json
- $(ODIN) run encoding/varint $(COMMON) -out:test_varint
- $(ODIN) run encoding/xml $(COMMON) -out:test_xml
- $(ODIN) run encoding/cbor $(COMMON) -out:test_cbor
- $(ODIN) run encoding/hex $(COMMON) -out:test_hex
- $(ODIN) run encoding/base64 $(COMMON) -out:test_base64
-
-math_test:
- $(ODIN) run math $(COMMON) $(COLLECTION) -out:test_core_math
-
-linalg_glsl_math_test:
- $(ODIN) run math/linalg/glsl $(COMMON) $(COLLECTION) -out:test_linalg_glsl_math
-
-filepath_test:
- $(ODIN) run path/filepath $(COMMON) $(COLLECTION) -out:test_core_filepath
-
-reflect_test:
- $(ODIN) run reflect $(COMMON) $(COLLECTION) -out:test_core_reflect
-
-slice_test:
- $(ODIN) run slice $(COMMON) -out:test_core_slice
-
-os_exit_test:
- $(ODIN) run os/test_core_os_exit.odin -file -out:test_core_os_exit && exit 1 || exit 0
-
-i18n_test:
- $(ODIN) run text/i18n $(COMMON) -out:test_core_i18n
-
-match_test:
- $(ODIN) run text/match $(COMMON) -out:test_core_match
-
-c_libc_test:
- $(ODIN) run c/libc $(COMMON) -out:test_core_libc
-
-net_test:
- $(ODIN) run net $(COMMON) -out:test_core_net
-
-fmt_test:
- $(ODIN) run fmt $(COMMON) -out:test_core_fmt
-
-thread_test:
- $(ODIN) run thread $(COMMON) -out:test_core_thread
-
-runtime_test:
- $(ODIN) run runtime $(COMMON) -out:test_core_runtime
-
-time_test:
- $(ODIN) run time $(COMMON) -out:test_core_time
diff --git a/tests/core/assets/XML/attribute-whitespace.xml b/tests/core/assets/XML/attribute-whitespace.xml
new file mode 100644
index 000000000..6381225d5
--- /dev/null
+++ b/tests/core/assets/XML/attribute-whitespace.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE foozle>
+<foozle>Barzle</foozle>
+<부끄러운:barzle>
+ <name foo:bar=" birmese
+ kittens have
+ fur ">Indeed!</name>
+</부끄러운:barzle>
\ No newline at end of file
diff --git a/tests/core/build.bat b/tests/core/build.bat
deleted file mode 100644
index 7871e52e2..000000000
--- a/tests/core/build.bat
+++ /dev/null
@@ -1,110 +0,0 @@
-@echo off
-set COMMON=-no-bounds-check -vet -strict-style
-set COLLECTION=-collection:tests=..
-set PATH_TO_ODIN==..\..\odin
-python3 download_assets.py
-echo ---
-echo Running core:compress tests
-echo ---
-%PATH_TO_ODIN% run compress %COMMON% -out:test_core_compress.exe || exit /b
-
-echo ---
-echo Running core:container tests
-echo ---
-%PATH_TO_ODIN% run container %COMMON% %COLLECTION% -out:test_core_container.exe || exit /b
-
-echo ---
-echo Running core:crypto tests
-echo ---
-%PATH_TO_ODIN% run crypto %COMMON% %COLLECTION% -out:test_crypto.exe || exit /b
-
-echo ---
-echo Running core:encoding tests
-echo ---
-rem %PATH_TO_ODIN% run encoding/hxa %COMMON% %COLLECTION% -out:test_hxa.exe || exit /b
-%PATH_TO_ODIN% run encoding/json %COMMON% -out:test_json.exe || exit /b
-%PATH_TO_ODIN% run encoding/varint %COMMON% -out:test_varint.exe || exit /b
-%PATH_TO_ODIN% run encoding/xml %COMMON% -out:test_xml.exe || exit /b
-%PATH_TO_ODIN% test encoding/cbor %COMMON% -out:test_cbor.exe || exit /b
-%PATH_TO_ODIN% run encoding/hex %COMMON% -out:test_hex.exe || exit /b
-%PATH_TO_ODIN% run encoding/base64 %COMMON% -out:test_base64.exe || exit /b
-
-echo ---
-echo Running core:fmt tests
-echo ---
-%PATH_TO_ODIN% run fmt %COMMON% %COLLECTION% -out:test_core_fmt.exe || exit /b
-
-echo ---
-echo Running core:hash tests
-echo ---
-%PATH_TO_ODIN% run hash %COMMON% -o:size -out:test_core_hash.exe || exit /b
-
-echo ---
-echo Running core:image tests
-echo ---
-%PATH_TO_ODIN% run image %COMMON% -out:test_core_image.exe || exit /b
-
-echo ---
-echo Running core:math tests
-echo ---
-%PATH_TO_ODIN% run math %COMMON% %COLLECTION% -out:test_core_math.exe || exit /b
-
-echo ---
-echo Running core:math/linalg/glsl tests
-echo ---
-%PATH_TO_ODIN% run math/linalg/glsl %COMMON% %COLLECTION% -out:test_linalg_glsl.exe || exit /b
-
-echo ---
-echo Running core:math/noise tests
-echo ---
-%PATH_TO_ODIN% run math/noise %COMMON% -out:test_noise.exe || exit /b
-
-echo ---
-echo Running core:net
-echo ---
-%PATH_TO_ODIN% run net %COMMON% -out:test_core_net.exe || exit /b
-
-echo ---
-echo Running core:odin tests
-echo ---
-%PATH_TO_ODIN% run odin %COMMON% -o:size -out:test_core_odin.exe || exit /b
-
-echo ---
-echo Running core:path/filepath tests
-echo ---
-%PATH_TO_ODIN% run path/filepath %COMMON% %COLLECTION% -out:test_core_filepath.exe || exit /b
-
-echo ---
-echo Running core:reflect tests
-echo ---
-%PATH_TO_ODIN% run reflect %COMMON% %COLLECTION% -out:test_core_reflect.exe || exit /b
-
-echo ---
-echo Running core:runtime tests
-echo ---
-%PATH_TO_ODIN% run runtime %COMMON% %COLLECTION% -out:test_core_runtime.exe || exit /b
-
-echo ---
-echo Running core:slice tests
-echo ---
-%PATH_TO_ODIN% run slice %COMMON% -out:test_core_slice.exe || exit /b
-
-echo ---
-echo Running core:strings tests
-echo ---
-%PATH_TO_ODIN% run strings %COMMON% -out:test_core_strings.exe || exit /b
-
-echo ---
-echo Running core:text/i18n tests
-echo ---
-%PATH_TO_ODIN% run text\i18n %COMMON% -out:test_core_i18n.exe || exit /b
-
-echo ---
-echo Running core:thread tests
-echo ---
-%PATH_TO_ODIN% run thread %COMMON% %COLLECTION% -out:test_core_thread.exe || exit /b
-
-echo ---
-echo Running core:time tests
-echo ---
-%PATH_TO_ODIN% run time %COMMON% %COLLECTION% -out:test_core_time.exe || exit /b
\ No newline at end of file
diff --git a/tests/core/c/libc/test_core_libc.odin b/tests/core/c/libc/test_core_libc.odin
deleted file mode 100644
index 9b5014dee..000000000
--- a/tests/core/c/libc/test_core_libc.odin
+++ /dev/null
@@ -1,36 +0,0 @@
-package test_core_libc
-
-import "core:fmt"
-import "core:os"
-import "core:testing"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_libc_complex(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
diff --git a/tests/core/c/libc/test_core_libc_complex_pow.odin b/tests/core/c/libc/test_core_libc_complex_pow.odin
index 90928794c..cd50c8f6a 100644
--- a/tests/core/c/libc/test_core_libc_complex_pow.odin
+++ b/tests/core/c/libc/test_core_libc_complex_pow.odin
@@ -1,8 +1,8 @@
package test_core_libc
import "core:testing"
-import "core:fmt"
import "core:c/libc"
+import "core:log"
reldiff :: proc(lhs, rhs: $T) -> f64 {
if lhs == rhs {
@@ -14,7 +14,7 @@ reldiff :: proc(lhs, rhs: $T) -> f64 {
return out
}
-isclose :: proc(lhs, rhs: $T, rtol:f64 = 1e-12, atol:f64 = 1e-12) -> bool {
+isclose :: proc(t: ^testing.T, lhs, rhs: $T, rtol:f64 = 1e-12, atol:f64 = 1e-12) -> bool {
adiff := f64(abs(lhs - rhs))
if adiff < atol {
return true
@@ -23,7 +23,7 @@ isclose :: proc(lhs, rhs: $T, rtol:f64 = 1e-12, atol:f64 = 1e-12) -> bool {
if rdiff < rtol {
return true
}
- fmt.printf("not close -- lhs:%v rhs:%v -- adiff:%e rdiff:%e\n",lhs, rhs, adiff, rdiff)
+ log.infof("not close -- lhs:%v rhs:%v -- adiff:%e rdiff:%e\n",lhs, rhs, adiff, rdiff)
return false
}
@@ -44,7 +44,6 @@ test_libc_complex :: proc(t: ^testing.T) {
test_libc_pow_binding(t, libc.complex_float, f32, libc_powf, 1e-12, 1e-5)
}
-@test
test_libc_pow_binding :: proc(t: ^testing.T, $LIBC_COMPLEX:typeid, $F:typeid, pow: proc(LIBC_COMPLEX, LIBC_COMPLEX) -> LIBC_COMPLEX,
rtol: f64, atol: f64) {
// Tests that c/libc/pow(f) functions have two arguments and that the function works as expected for simple inputs
@@ -56,8 +55,8 @@ test_libc_pow_binding :: proc(t: ^testing.T, $LIBC_COMPLEX:typeid, $F:typeid, po
for n in -4..=4 {
complex_power := LIBC_COMPLEX(complex(F(n), F(0.)))
result := pow(complex_base, complex_power)
- expect(t, isclose(expected_real, F(real(result)), rtol, atol), fmt.tprintf("ftype:%T, n:%v reldiff(%v, re(%v)) is greater than specified rtol:%e", F{}, n, expected_real, result, rtol))
- expect(t, isclose(expected_imag, F(imag(result)), rtol, atol), fmt.tprintf("ftype:%T, n:%v reldiff(%v, im(%v)) is greater than specified rtol:%e", F{}, n, expected_imag, result, rtol))
+ testing.expectf(t, isclose(t, expected_real, F(real(result)), rtol, atol), "ftype:%T, n:%v reldiff(%v, re(%v)) is greater than specified rtol:%e", F{}, n, expected_real, result, rtol)
+ testing.expectf(t, isclose(t, expected_imag, F(imag(result)), rtol, atol), "ftype:%T, n:%v reldiff(%v, im(%v)) is greater than specified rtol:%e", F{}, n, expected_imag, result, rtol)
expected_real *= 2
}
}
@@ -83,8 +82,8 @@ test_libc_pow_binding :: proc(t: ^testing.T, $LIBC_COMPLEX:typeid, $F:typeid, po
expected_real = 0.
expected_imag = -value
}
- expect(t, isclose(expected_real, F(real(result)), rtol, atol), fmt.tprintf("ftype:%T, n:%v reldiff(%v, re(%v)) is greater than specified rtol:%e", F{}, n, expected_real, result, rtol))
- expect(t, isclose(expected_imag, F(imag(result)), rtol, atol), fmt.tprintf("ftype:%T, n:%v reldiff(%v, im(%v)) is greater than specified rtol:%e", F{}, n, expected_imag, result, rtol))
+ testing.expectf(t, isclose(t, expected_real, F(real(result)), rtol, atol), "ftype:%T, n:%v reldiff(%v, re(%v)) is greater than specified rtol:%e", F{}, n, expected_real, result, rtol)
+ testing.expectf(t, isclose(t, expected_imag, F(imag(result)), rtol, atol), "ftype:%T, n:%v reldiff(%v, im(%v)) is greater than specified rtol:%e", F{}, n, expected_imag, result, rtol)
value *= 2
}
}
diff --git a/tests/core/compress/test_core_compress.odin b/tests/core/compress/test_core_compress.odin
index ac7555e9a..4ab63ae67 100644
--- a/tests/core/compress/test_core_compress.odin
+++ b/tests/core/compress/test_core_compress.odin
@@ -15,47 +15,7 @@ import "core:testing"
import "core:compress/zlib"
import "core:compress/gzip"
import "core:compress/shoco"
-
import "core:bytes"
-import "core:fmt"
-
-import "core:mem"
-import "core:os"
-import "core:io"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- w := io.to_writer(os.stream_from_handle(os.stdout))
- t := testing.T{w=w}
- zlib_test(&t)
- gzip_test(&t)
- shoco_test(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
@test
zlib_test :: proc(t: ^testing.T) {
@@ -80,26 +40,14 @@ zlib_test :: proc(t: ^testing.T) {
}
buf: bytes.Buffer
+ err := zlib.inflate(ODIN_DEMO, &buf)
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
-
- err := zlib.inflate(ODIN_DEMO, &buf)
-
- expect(t, err == nil, "ZLIB failed to decompress ODIN_DEMO")
+ testing.expect(t, err == nil, "ZLIB failed to decompress ODIN_DEMO")
s := bytes.buffer_to_string(&buf)
- expect(t, s[68] == 240 && s[69] == 159 && s[70] == 152, "ZLIB result should've contained 😃 at position 68.")
-
- expect(t, len(s) == 438, "ZLIB result has an unexpected length.")
-
+ testing.expect(t, s[68] == 240 && s[69] == 159 && s[70] == 152, "ZLIB result should've contained 😃 at position 68.")
+ testing.expect(t, len(s) == 438, "ZLIB result has an unexpected length.")
bytes.buffer_destroy(&buf)
-
- for _, v in track.allocation_map {
- error := fmt.tprintf("ZLIB test leaked %v bytes", v.size)
- expect(t, false, error)
- }
}
@test
@@ -117,24 +65,12 @@ gzip_test :: proc(t: ^testing.T) {
}
buf: bytes.Buffer
+ err := gzip.load(TEST, &buf)
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
-
- err := gzip.load(TEST, &buf) // , 438);
-
- expect(t, err == nil, "GZIP failed to decompress TEST")
- s := bytes.buffer_to_string(&buf)
-
- expect(t, s == "payload", "GZIP result wasn't 'payload'")
+ testing.expect(t, err == nil, "GZIP failed to decompress TEST")
+ testing.expect(t, bytes.buffer_to_string(&buf) == "payload", "GZIP result wasn't 'payload'")
bytes.buffer_destroy(&buf)
-
- for _, v in track.allocation_map {
- error := fmt.tprintf("GZIP test leaked %v bytes", v.size)
- expect(t, false, error)
- }
}
@test
@@ -168,31 +104,26 @@ shoco_test :: proc(t: ^testing.T) {
defer delete(buffer)
size, err := shoco.decompress(v.compressed, buffer[:])
- msg := fmt.tprintf("Expected `decompress` to return `nil`, got %v", err)
- expect(t, err == nil, msg)
+ testing.expectf(t, err == nil, "Expected `decompress` to return `nil`, got %v", err)
- msg = fmt.tprintf("Decompressed %v bytes into %v. Expected to decompress into %v bytes.", len(v.compressed), size, expected_raw)
- expect(t, size == expected_raw, msg)
- expect(t, string(buffer[:size]) == string(v.raw), "Decompressed contents don't match.")
+ testing.expectf(t, size == expected_raw, "Decompressed %v bytes into %v. Expected to decompress into %v bytes", len(v.compressed), size, expected_raw)
+ testing.expect(t, string(buffer[:size]) == string(v.raw), "Decompressed contents don't match")
size, err = shoco.compress(string(v.raw), buffer[:])
- expect(t, err == nil, "Expected `compress` to return `nil`.")
+ testing.expect(t, err == nil, "Expected `compress` to return `nil`.")
- msg = fmt.tprintf("Compressed %v bytes into %v. Expected to compress into %v bytes.", expected_raw, size, expected_compressed)
- expect(t, size == expected_compressed, msg)
+ testing.expectf(t, size == expected_compressed, "Compressed %v bytes into %v. Expected to compress into %v bytes", expected_raw, size, expected_compressed)
size, err = shoco.decompress(v.compressed, buffer[:expected_raw - 10])
- msg = fmt.tprintf("Decompressing into too small a buffer returned %v, expected `.Output_Too_Short`", err)
- expect(t, err == .Output_Too_Short, msg)
+ testing.expectf(t, err == .Output_Too_Short, "Decompressing into too small a buffer returned %v, expected `.Output_Too_Short`", err)
size, err = shoco.compress(string(v.raw), buffer[:expected_compressed - 10])
- msg = fmt.tprintf("Compressing into too small a buffer returned %v, expected `.Output_Too_Short`", err)
- expect(t, err == .Output_Too_Short, msg)
+ testing.expectf(t, err == .Output_Too_Short, "Compressing into too small a buffer returned %v, expected `.Output_Too_Short`", err)
size, err = shoco.decompress(v.compressed[:v.short_pack], buffer[:])
- expect(t, err == .Stream_Too_Short, "Expected `decompress` to return `Stream_Too_Short` because there was no more data after selecting a pack.")
+ testing.expectf(t, err == .Stream_Too_Short, "Insufficient data after pack returned %v, expected `.Stream_Too_Short`", err)
size, err = shoco.decompress(v.compressed[:v.short_sentinel], buffer[:])
- expect(t, err == .Stream_Too_Short, "Expected `decompress` to return `Stream_Too_Short` because there was no more data after non-ASCII sentinel.")
+ testing.expectf(t, err == .Stream_Too_Short, "No more data after non-ASCII sentinel returned %v, expected `.Stream_Too_Short`", err)
}
-}
\ No newline at end of file
+}
diff --git a/tests/core/container/test_core_avl.odin b/tests/core/container/test_core_avl.odin
index 2244ab7f6..99dbba8b2 100644
--- a/tests/core/container/test_core_avl.odin
+++ b/tests/core/container/test_core_avl.odin
@@ -4,53 +4,54 @@ import "core:container/avl"
import "core:math/rand"
import "core:slice"
import "core:testing"
-import "core:fmt"
-import tc "tests:common"
+import "core:log"
@(test)
test_avl :: proc(t: ^testing.T) {
- tc.log(t, fmt.tprintf("Testing avl, using random seed %v, add -define:RANDOM_SEED=%v to reuse it.", random_seed, random_seed))
+ log.infof("Testing avl using random seed %v.", t.seed)
// Initialization.
tree: avl.Tree(int)
avl.init(&tree, slice.cmp_proc(int))
- tc.expect(t, avl.len(&tree) == 0, "empty: len should be 0")
- tc.expect(t, avl.first(&tree) == nil, "empty: first should be nil")
- tc.expect(t, avl.last(&tree) == nil, "empty: last should be nil")
+ testing.expect(t, avl.len(&tree) == 0, "empty: len should be 0")
+ testing.expect(t, avl.first(&tree) == nil, "empty: first should be nil")
+ testing.expect(t, avl.last(&tree) == nil, "empty: last should be nil")
iter := avl.iterator(&tree, avl.Direction.Forward)
- tc.expect(t, avl.iterator_get(&iter) == nil, "empty/iterator: first node should be nil")
+ testing.expect(t, avl.iterator_get(&iter) == nil, "empty/iterator: first node should be nil")
r: rand.Rand
- rand.init(&r, random_seed)
+ rand.init(&r, t.seed)
// Test insertion.
NR_INSERTS :: 32 + 1 // Ensure at least 1 collision.
inserted_map := make(map[int]^avl.Node(int))
+ defer delete(inserted_map)
for i := 0; i < NR_INSERTS; i += 1 {
v := int(rand.uint32(&r) & 0x1f)
existing_node, in_map := inserted_map[v]
n, ok, _ := avl.find_or_insert(&tree, v)
- tc.expect(t, in_map != ok, "insert: ok should match inverse of map lookup")
+ testing.expect(t, in_map != ok, "insert: ok should match inverse of map lookup")
if ok {
inserted_map[v] = n
} else {
- tc.expect(t, existing_node == n, "insert: expecting existing node")
+ testing.expect(t, existing_node == n, "insert: expecting existing node")
}
}
nrEntries := len(inserted_map)
- tc.expect(t, avl.len(&tree) == nrEntries, "insert: len after")
+ testing.expect(t, avl.len(&tree) == nrEntries, "insert: len after")
validate_avl(t, &tree)
// Ensure that all entries can be found.
for k, v in inserted_map {
- tc.expect(t, v == avl.find(&tree, k), "Find(): Node")
- tc.expect(t, k == v.value, "Find(): Node value")
+ testing.expect(t, v == avl.find(&tree, k), "Find(): Node")
+ testing.expect(t, k == v.value, "Find(): Node value")
}
// Test the forward/backward iterators.
inserted_values: [dynamic]int
+ defer delete(inserted_values)
for k in inserted_map {
append(&inserted_values, k)
}
@@ -60,38 +61,38 @@ test_avl :: proc(t: ^testing.T) {
visited: int
for node in avl.iterator_next(&iter) {
v, idx := node.value, visited
- tc.expect(t, inserted_values[idx] == v, "iterator/forward: value")
- tc.expect(t, node == avl.iterator_get(&iter), "iterator/forward: get")
+ testing.expect(t, inserted_values[idx] == v, "iterator/forward: value")
+ testing.expect(t, node == avl.iterator_get(&iter), "iterator/forward: get")
visited += 1
}
- tc.expect(t, visited == nrEntries, "iterator/forward: visited")
+ testing.expect(t, visited == nrEntries, "iterator/forward: visited")
slice.reverse(inserted_values[:])
iter = avl.iterator(&tree, avl.Direction.Backward)
visited = 0
for node in avl.iterator_next(&iter) {
v, idx := node.value, visited
- tc.expect(t, inserted_values[idx] == v, "iterator/backward: value")
+ testing.expect(t, inserted_values[idx] == v, "iterator/backward: value")
visited += 1
}
- tc.expect(t, visited == nrEntries, "iterator/backward: visited")
+ testing.expect(t, visited == nrEntries, "iterator/backward: visited")
// Test removal.
rand.shuffle(inserted_values[:], &r)
for v, i in inserted_values {
node := avl.find(&tree, v)
- tc.expect(t, node != nil, "remove: find (pre)")
+ testing.expect(t, node != nil, "remove: find (pre)")
ok := avl.remove(&tree, v)
- tc.expect(t, ok, "remove: succeeds")
- tc.expect(t, nrEntries - (i + 1) == avl.len(&tree), "remove: len (post)")
+ testing.expect(t, ok, "remove: succeeds")
+ testing.expect(t, nrEntries - (i + 1) == avl.len(&tree), "remove: len (post)")
validate_avl(t, &tree)
- tc.expect(t, nil == avl.find(&tree, v), "remove: find (post")
+ testing.expect(t, nil == avl.find(&tree, v), "remove: find (post)")
}
- tc.expect(t, avl.len(&tree) == 0, "remove: len should be 0")
- tc.expect(t, avl.first(&tree) == nil, "remove: first should be nil")
- tc.expect(t, avl.last(&tree) == nil, "remove: last should be nil")
+ testing.expect(t, avl.len(&tree) == 0, "remove: len should be 0")
+ testing.expect(t, avl.first(&tree) == nil, "remove: first should be nil")
+ testing.expect(t, avl.last(&tree) == nil, "remove: last should be nil")
// Refill the tree.
for v in inserted_values {
@@ -104,25 +105,25 @@ test_avl :: proc(t: ^testing.T) {
v := node.value
ok := avl.iterator_remove(&iter)
- tc.expect(t, ok, "iterator/remove: success")
+ testing.expect(t, ok, "iterator/remove: success")
ok = avl.iterator_remove(&iter)
- tc.expect(t, !ok, "iterator/remove: redundant removes should fail")
+ testing.expect(t, !ok, "iterator/remove: redundant removes should fail")
- tc.expect(t, avl.find(&tree, v) == nil, "iterator/remove: node should be gone")
- tc.expect(t, avl.iterator_get(&iter) == nil, "iterator/remove: get should return nil")
+ testing.expect(t, avl.find(&tree, v) == nil, "iterator/remove: node should be gone")
+ testing.expect(t, avl.iterator_get(&iter) == nil, "iterator/remove: get should return nil")
// Ensure that iterator_next still works.
node, ok = avl.iterator_next(&iter)
- tc.expect(t, ok == (avl.len(&tree) > 0), "iterator/remove: next should return false")
- tc.expect(t, node == avl.first(&tree), "iterator/remove: next should return first")
+ testing.expect(t, ok == (avl.len(&tree) > 0), "iterator/remove: next should return false")
+ testing.expect(t, node == avl.first(&tree), "iterator/remove: next should return first")
validate_avl(t, &tree)
}
- tc.expect(t, avl.len(&tree) == nrEntries - 1, "iterator/remove: len should drop by 1")
+ testing.expect(t, avl.len(&tree) == nrEntries - 1, "iterator/remove: len should drop by 1")
avl.destroy(&tree)
- tc.expect(t, avl.len(&tree) == 0, "destroy: len should be 0")
+ testing.expect(t, avl.len(&tree) == 0, "destroy: len should be 0")
}
@(private)
@@ -141,10 +142,10 @@ tree_check_invariants :: proc(
}
// Validate the parent pointer.
- tc.expect(t, parent == node._parent, "invalid parent pointer")
+ testing.expect(t, parent == node._parent, "invalid parent pointer")
// Validate that the balance factor is -1, 0, 1.
- tc.expect(
+ testing.expect(
t,
node._balance == -1 || node._balance == 0 || node._balance == 1,
"invalid balance factor",
@@ -155,7 +156,7 @@ tree_check_invariants :: proc(
r_height := tree_check_invariants(t, tree, node._right, node)
// Validate the AVL invariant and the balance factor.
- tc.expect(t, int(node._balance) == r_height - l_height, "AVL balance factor invariant violated")
+ testing.expect(t, int(node._balance) == r_height - l_height, "AVL balance factor invariant violated")
if l_height > r_height {
return l_height + 1
}
diff --git a/tests/core/container/test_core_container.odin b/tests/core/container/test_core_container.odin
deleted file mode 100644
index 7dd4a3628..000000000
--- a/tests/core/container/test_core_container.odin
+++ /dev/null
@@ -1,26 +0,0 @@
-package test_core_container
-
-import "core:fmt"
-import "core:testing"
-
-import tc "tests:common"
-
-expect_equal :: proc(t: ^testing.T, the_slice, expected: []int, loc := #caller_location) {
- _eq :: proc(a, b: []int) -> bool {
- if len(a) != len(b) do return false
- for a, i in a {
- if b[i] != a do return false
- }
- return true
- }
- tc.expect(t, _eq(the_slice, expected), fmt.tprintf("Expected %v, got %v\n", the_slice, expected), loc)
-}
-
-main :: proc() {
- t := testing.T{}
-
- test_avl(&t)
- test_rbtree(&t)
- test_small_array(&t)
- tc.report(&t)
-}
diff --git a/tests/core/container/test_core_rbtree.odin b/tests/core/container/test_core_rbtree.odin
index 89742b1d0..8def8edb6 100644
--- a/tests/core/container/test_core_rbtree.odin
+++ b/tests/core/container/test_core_rbtree.odin
@@ -3,14 +3,10 @@ package test_core_container
import rb "core:container/rbtree"
import "core:math/rand"
import "core:testing"
-import "core:fmt"
import "base:intrinsics"
import "core:mem"
import "core:slice"
-import tc "tests:common"
-
-RANDOM_SEED :: #config(RANDOM_SEED, 0)
-random_seed := u64(intrinsics.read_cycle_counter()) when RANDOM_SEED == 0 else u64(RANDOM_SEED)
+import "core:log"
test_rbtree_integer :: proc(t: ^testing.T, $Key: typeid, $Value: typeid) {
track: mem.Tracking_Allocator
@@ -19,17 +15,17 @@ test_rbtree_integer :: proc(t: ^testing.T, $Key: typeid, $Value: typeid) {
context.allocator = mem.tracking_allocator(&track)
r: rand.Rand
- rand.init(&r, random_seed)
+ rand.init(&r, t.seed)
- tc.log(t, fmt.tprintf("Testing Red-Black Tree($Key=%v,$Value=%v), using random seed %v, add -define:RANDOM_SEED=%v to reuse it.", type_info_of(Key), type_info_of(Value), random_seed, random_seed))
+ log.infof("Testing Red-Black Tree($Key=%v,$Value=%v) using random seed %v.", type_info_of(Key), type_info_of(Value), t.seed)
tree: rb.Tree(Key, Value)
rb.init(&tree)
- tc.expect(t, rb.len(&tree) == 0, "empty: len should be 0")
- tc.expect(t, rb.first(&tree) == nil, "empty: first should be nil")
- tc.expect(t, rb.last(&tree) == nil, "empty: last should be nil")
+ testing.expect(t, rb.len(&tree) == 0, "empty: len should be 0")
+ testing.expect(t, rb.first(&tree) == nil, "empty: first should be nil")
+ testing.expect(t, rb.last(&tree) == nil, "empty: last should be nil")
iter := rb.iterator(&tree, .Forward)
- tc.expect(t, rb.iterator_get(&iter) == nil, "empty/iterator: first node should be nil")
+ testing.expect(t, rb.iterator_get(&iter) == nil, "empty/iterator: first node should be nil")
// Test insertion.
NR_INSERTS :: 32 + 1 // Ensure at least 1 collision.
@@ -45,27 +41,27 @@ test_rbtree_integer :: proc(t: ^testing.T, $Key: typeid, $Value: typeid) {
existing_node, in_map := inserted_map[k]
n, inserted, _ := rb.find_or_insert(&tree, k, v)
- tc.expect(t, in_map != inserted, "insert: inserted should match inverse of map lookup")
+ testing.expect(t, in_map != inserted, "insert: inserted should match inverse of map lookup")
if inserted {
inserted_map[k] = n
} else {
- tc.expect(t, existing_node == n, "insert: expecting existing node")
+ testing.expect(t, existing_node == n, "insert: expecting existing node")
}
}
entry_count := len(inserted_map)
- tc.expect(t, rb.len(&tree) == entry_count, "insert: len after")
+ testing.expect(t, rb.len(&tree) == entry_count, "insert: len after")
validate_rbtree(t, &tree)
first := rb.first(&tree)
last := rb.last(&tree)
- tc.expect(t, first != nil && first.key == min_key, fmt.tprintf("insert: first should be present with key %v", min_key))
- tc.expect(t, last != nil && last.key == max_key, fmt.tprintf("insert: last should be present with key %v", max_key))
+ testing.expectf(t, first != nil && first.key == min_key, "insert: first should be present with key %v", min_key)
+ testing.expectf(t, last != nil && last.key == max_key, "insert: last should be present with key %v", max_key)
// Ensure that all entries can be found.
for k, v in inserted_map {
- tc.expect(t, v == rb.find(&tree, k), "Find(): Node")
- tc.expect(t, k == v.key, "Find(): Node key")
+ testing.expect(t, v == rb.find(&tree, k), "Find(): Node")
+ testing.expect(t, k == v.key, "Find(): Node key")
}
// Test the forward/backward iterators.
@@ -79,21 +75,21 @@ test_rbtree_integer :: proc(t: ^testing.T, $Key: typeid, $Value: typeid) {
visited: int
for node in rb.iterator_next(&iter) {
k, idx := node.key, visited
- tc.expect(t, inserted_keys[idx] == k, "iterator/forward: key")
- tc.expect(t, node == rb.iterator_get(&iter), "iterator/forward: get")
+ testing.expect(t, inserted_keys[idx] == k, "iterator/forward: key")
+ testing.expect(t, node == rb.iterator_get(&iter), "iterator/forward: get")
visited += 1
}
- tc.expect(t, visited == entry_count, "iterator/forward: visited")
+ testing.expect(t, visited == entry_count, "iterator/forward: visited")
slice.reverse(inserted_keys[:])
iter = rb.iterator(&tree, rb.Direction.Backward)
visited = 0
for node in rb.iterator_next(&iter) {
k, idx := node.key, visited
- tc.expect(t, inserted_keys[idx] == k, "iterator/backward: key")
+ testing.expect(t, inserted_keys[idx] == k, "iterator/backward: key")
visited += 1
}
- tc.expect(t, visited == entry_count, "iterator/backward: visited")
+ testing.expect(t, visited == entry_count, "iterator/backward: visited")
// Test removal (and on_remove callback)
rand.shuffle(inserted_keys[:], &r)
@@ -104,19 +100,19 @@ test_rbtree_integer :: proc(t: ^testing.T, $Key: typeid, $Value: typeid) {
}
for k, i in inserted_keys {
node := rb.find(&tree, k)
- tc.expect(t, node != nil, "remove: find (pre)")
+ testing.expect(t, node != nil, "remove: find (pre)")
ok := rb.remove(&tree, k)
- tc.expect(t, ok, "remove: succeeds")
- tc.expect(t, entry_count - (i + 1) == rb.len(&tree), "remove: len (post)")
+ testing.expect(t, ok, "remove: succeeds")
+ testing.expect(t, entry_count - (i + 1) == rb.len(&tree), "remove: len (post)")
validate_rbtree(t, &tree)
- tc.expect(t, nil == rb.find(&tree, k), "remove: find (post")
+ testing.expect(t, nil == rb.find(&tree, k), "remove: find (post)")
}
- tc.expect(t, rb.len(&tree) == 0, "remove: len should be 0")
- tc.expect(t, callback_count == 0, fmt.tprintf("remove: on_remove should've been called %v times, it was %v", entry_count, callback_count))
- tc.expect(t, rb.first(&tree) == nil, "remove: first should be nil")
- tc.expect(t, rb.last(&tree) == nil, "remove: last should be nil")
+ testing.expect(t, rb.len(&tree) == 0, "remove: len should be 0")
+ testing.expectf(t, callback_count == 0, "remove: on_remove should've been called %v times, it was %v", entry_count, callback_count)
+ testing.expect(t, rb.first(&tree) == nil, "remove: first should be nil")
+ testing.expect(t, rb.last(&tree) == nil, "remove: last should be nil")
// Refill the tree.
for k in inserted_keys {
@@ -130,32 +126,32 @@ test_rbtree_integer :: proc(t: ^testing.T, $Key: typeid, $Value: typeid) {
k := node.key
ok := rb.iterator_remove(&iter)
- tc.expect(t, ok, "iterator/remove: success")
+ testing.expect(t, ok, "iterator/remove: success")
ok = rb.iterator_remove(&iter)
- tc.expect(t, !ok, "iterator/remove: redundant removes should fail")
+ testing.expect(t, !ok, "iterator/remove: redundant removes should fail")
- tc.expect(t, rb.find(&tree, k) == nil, "iterator/remove: node should be gone")
- tc.expect(t, rb.iterator_get(&iter) == nil, "iterator/remove: get should return nil")
+ testing.expect(t, rb.find(&tree, k) == nil, "iterator/remove: node should be gone")
+ testing.expect(t, rb.iterator_get(&iter) == nil, "iterator/remove: get should return nil")
// Ensure that iterator_next still works.
node, ok = rb.iterator_next(&iter)
- tc.expect(t, ok == (rb.len(&tree) > 0), "iterator/remove: next should return false")
- tc.expect(t, node == rb.first(&tree), "iterator/remove: next should return first")
+ testing.expect(t, ok == (rb.len(&tree) > 0), "iterator/remove: next should return false")
+ testing.expect(t, node == rb.first(&tree), "iterator/remove: next should return first")
validate_rbtree(t, &tree)
}
- tc.expect(t, rb.len(&tree) == entry_count - 1, "iterator/remove: len should drop by 1")
+ testing.expect(t, rb.len(&tree) == entry_count - 1, "iterator/remove: len should drop by 1")
rb.destroy(&tree)
- tc.expect(t, rb.len(&tree) == 0, "destroy: len should be 0")
- tc.expect(t, callback_count == 0, fmt.tprintf("remove: on_remove should've been called %v times, it was %v", entry_count, callback_count))
+ testing.expect(t, rb.len(&tree) == 0, "destroy: len should be 0")
+ testing.expectf(t, callback_count == 0, "remove: on_remove should've been called %v times, it was %v", entry_count, callback_count)
// print_tree_node(tree._root)
delete(inserted_map)
delete(inserted_keys)
- tc.expect(t, len(track.allocation_map) == 0, fmt.tprintf("Expected 0 leaks, have %v", len(track.allocation_map)))
- tc.expect(t, len(track.bad_free_array) == 0, fmt.tprintf("Expected 0 bad frees, have %v", len(track.bad_free_array)))
+ testing.expectf(t, len(track.allocation_map) == 0, "Expected 0 leaks, have %v", len(track.allocation_map))
+ testing.expectf(t, len(track.bad_free_array) == 0, "Expected 0 bad frees, have %v", len(track.bad_free_array))
return
}
@@ -194,7 +190,7 @@ validate_rbtree :: proc(t: ^testing.T, tree: ^$T/rb.Tree($Key, $Value)) {
}
verify_rbtree_propery_1 :: proc(t: ^testing.T, n: ^$N/rb.Node($Key, $Value)) {
- tc.expect(t, rb.node_color(n) == .Black || rb.node_color(n) == .Red, "Property #1: Each node is either red or black.")
+ testing.expect(t, rb.node_color(n) == .Black || rb.node_color(n) == .Red, "Property #1: Each node is either red or black.")
if n == nil {
return
}
@@ -203,14 +199,14 @@ verify_rbtree_propery_1 :: proc(t: ^testing.T, n: ^$N/rb.Node($Key, $Value)) {
}
verify_rbtree_propery_2 :: proc(t: ^testing.T, root: ^$N/rb.Node($Key, $Value)) {
- tc.expect(t, rb.node_color(root) == .Black, "Property #2: Root node should be black.")
+ testing.expect(t, rb.node_color(root) == .Black, "Property #2: Root node should be black.")
}
verify_rbtree_propery_4 :: proc(t: ^testing.T, n: ^$N/rb.Node($Key, $Value)) {
if rb.node_color(n) == .Red {
// A red node's left, right and parent should be black
all_black := rb.node_color(n._left) == .Black && rb.node_color(n._right) == .Black && rb.node_color(n._parent) == .Black
- tc.expect(t, all_black, "Property #3: Red node's children + parent must be black.")
+ testing.expect(t, all_black, "Property #3: Red node's children + parent must be black.")
}
if n == nil {
return
@@ -233,7 +229,7 @@ verify_rbtree_propery_5_helper :: proc(t: ^testing.T, n: ^$N/rb.Node($Key, $Valu
if path_black_count^ == -1 {
path_black_count^ = black_count
} else {
- tc.expect(t, black_count == path_black_count^, "Property #5: Paths from a node to its leaves contain same black count.")
+ testing.expect(t, black_count == path_black_count^, "Property #5: Paths from a node to its leaves contain same black count.")
}
return
}
@@ -241,4 +237,4 @@ verify_rbtree_propery_5_helper :: proc(t: ^testing.T, n: ^$N/rb.Node($Key, $Valu
verify_rbtree_propery_5_helper(t, n._right, black_count, path_black_count)
}
// Properties 4 and 5 together guarantee that no path in the tree is more than about twice as long as any other path,
-// which guarantees that it has O(log n) height.
\ No newline at end of file
+// which guarantees that it has O(log n) height.
diff --git a/tests/core/container/test_core_small_array.odin b/tests/core/container/test_core_small_array.odin
index 78998de16..580df793e 100644
--- a/tests/core/container/test_core_small_array.odin
+++ b/tests/core/container/test_core_small_array.odin
@@ -3,44 +3,47 @@ package test_core_container
import "core:testing"
import "core:container/small_array"
-import tc "tests:common"
-
-@(test)
-test_small_array :: proc(t: ^testing.T) {
- tc.log(t, "Testing small_array")
-
- test_small_array_removes(t)
- test_small_array_inject_at(t)
-}
-
@(test)
test_small_array_removes :: proc(t: ^testing.T) {
- array: small_array.Small_Array(10, int)
- small_array.append(&array, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
-
- small_array.ordered_remove(&array, 0)
- expect_equal(t, small_array.slice(&array), []int { 1, 2, 3, 4, 5, 6, 7, 8, 9 })
- small_array.ordered_remove(&array, 5)
- expect_equal(t, small_array.slice(&array), []int { 1, 2, 3, 4, 5, 7, 8, 9 })
- small_array.ordered_remove(&array, 6)
- expect_equal(t, small_array.slice(&array), []int { 1, 2, 3, 4, 5, 7, 9 })
- small_array.unordered_remove(&array, 0)
- expect_equal(t, small_array.slice(&array), []int { 9, 2, 3, 4, 5, 7 })
- small_array.unordered_remove(&array, 2)
- expect_equal(t, small_array.slice(&array), []int { 9, 2, 7, 4, 5 })
- small_array.unordered_remove(&array, 4)
- expect_equal(t, small_array.slice(&array), []int { 9, 2, 7, 4 })
+ array: small_array.Small_Array(10, int)
+ small_array.append(&array, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+
+ small_array.ordered_remove(&array, 0)
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 1, 2, 3, 4, 5, 6, 7, 8, 9 }))
+ small_array.ordered_remove(&array, 5)
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 1, 2, 3, 4, 5, 7, 8, 9 }))
+ small_array.ordered_remove(&array, 6)
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 1, 2, 3, 4, 5, 7, 9 }))
+ small_array.unordered_remove(&array, 0)
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 9, 2, 3, 4, 5, 7 }))
+ small_array.unordered_remove(&array, 2)
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 9, 2, 7, 4, 5 }))
+ small_array.unordered_remove(&array, 4)
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 9, 2, 7, 4 }))
}
@(test)
test_small_array_inject_at :: proc(t: ^testing.T) {
- array: small_array.Small_Array(13, int)
- small_array.append(&array, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+ array: small_array.Small_Array(13, int)
+ small_array.append(&array, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+
+ testing.expect(t, small_array.inject_at(&array, 0, 0), "Expected to be able to inject into small array")
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }))
+ testing.expect(t, small_array.inject_at(&array, 0, 5), "Expected to be able to inject into small array")
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 0, 0, 1, 2, 3, 0, 4, 5, 6, 7, 8, 9 }))
+ testing.expect(t, small_array.inject_at(&array, 0, small_array.len(array)), "Expected to be able to inject into small array")
+ testing.expect(t, slice_equal(small_array.slice(&array), []int { 0, 0, 1, 2, 3, 0, 4, 5, 6, 7, 8, 9, 0 }))
+}
- tc.expect(t, small_array.inject_at(&array, 0, 0), "Expected to be able to inject into small array")
- expect_equal(t, small_array.slice(&array), []int { 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 })
- tc.expect(t, small_array.inject_at(&array, 0, 5), "Expected to be able to inject into small array")
- expect_equal(t, small_array.slice(&array), []int { 0, 0, 1, 2, 3, 0, 4, 5, 6, 7, 8, 9 })
- tc.expect(t, small_array.inject_at(&array, 0, small_array.len(array)), "Expected to be able to inject into small array")
- expect_equal(t, small_array.slice(&array), []int { 0, 0, 1, 2, 3, 0, 4, 5, 6, 7, 8, 9, 0 })
+slice_equal :: proc(a, b: []int) -> bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for a, i in a {
+ if b[i] != a {
+ return false
+ }
+ }
+ return true
}
diff --git a/tests/core/crypto/test_core_crypto.odin b/tests/core/crypto/test_core_crypto.odin
index 95db3f292..f3f76646b 100644
--- a/tests/core/crypto/test_core_crypto.odin
+++ b/tests/core/crypto/test_core_crypto.odin
@@ -13,40 +13,20 @@ package test_core_crypto
*/
import "core:encoding/hex"
-import "core:fmt"
import "core:mem"
import "core:testing"
+import "base:runtime"
+import "core:log"
import "core:crypto"
import "core:crypto/chacha20"
import "core:crypto/chacha20poly1305"
-import tc "tests:common"
-
-main :: proc() {
- t := testing.T{}
-
- test_rand_bytes(&t)
-
- test_hash(&t)
- test_mac(&t)
- test_kdf(&t) // After hash/mac tests because those should pass first.
- test_ecc25519(&t)
-
- test_chacha20(&t)
- test_chacha20poly1305(&t)
- test_sha3_variants(&t)
-
- bench_crypto(&t)
-
- tc.report(&t)
-}
-
_PLAINTEXT_SUNSCREEN_STR := "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it."
@(test)
test_chacha20 :: proc(t: ^testing.T) {
- tc.log(t, "Testing (X)ChaCha20")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
// Test cases taken from RFC 8439, and draft-irtf-cfrg-xchacha-03
plaintext := transmute([]byte)(_PLAINTEXT_SUNSCREEN_STR)
@@ -89,14 +69,12 @@ test_chacha20 :: proc(t: ^testing.T) {
chacha20.xor_bytes(&ctx, derived_ciphertext[:], plaintext[:])
derived_ciphertext_str := string(hex.encode(derived_ciphertext[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_ciphertext_str == ciphertext_str,
- fmt.tprintf(
- "Expected %s for xor_bytes(plaintext_str), but got %s instead",
- ciphertext_str,
- derived_ciphertext_str,
- ),
+ "Expected %s for xor_bytes(plaintext_str), but got %s instead",
+ ciphertext_str,
+ derived_ciphertext_str,
)
xkey := [chacha20.KEY_SIZE]byte {
@@ -136,21 +114,17 @@ test_chacha20 :: proc(t: ^testing.T) {
chacha20.xor_bytes(&ctx, derived_ciphertext[:], plaintext[:])
derived_ciphertext_str = string(hex.encode(derived_ciphertext[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_ciphertext_str == xciphertext_str,
- fmt.tprintf(
- "Expected %s for xor_bytes(plaintext_str), but got %s instead",
- xciphertext_str,
- derived_ciphertext_str,
- ),
+ "Expected %s for xor_bytes(plaintext_str), but got %s instead",
+ xciphertext_str,
+ derived_ciphertext_str,
)
}
@(test)
test_chacha20poly1305 :: proc(t: ^testing.T) {
- tc.log(t, "Testing chacha20poly1205")
-
plaintext := transmute([]byte)(_PLAINTEXT_SUNSCREEN_STR)
aad := [12]byte {
@@ -208,25 +182,21 @@ test_chacha20poly1305 :: proc(t: ^testing.T) {
)
derived_ciphertext_str := string(hex.encode(derived_ciphertext[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_ciphertext_str == ciphertext_str,
- fmt.tprintf(
- "Expected ciphertext %s for encrypt(aad, plaintext), but got %s instead",
- ciphertext_str,
- derived_ciphertext_str,
- ),
+ "Expected ciphertext %s for encrypt(aad, plaintext), but got %s instead",
+ ciphertext_str,
+ derived_ciphertext_str,
)
derived_tag_str := string(hex.encode(derived_tag[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_tag_str == tag_str,
- fmt.tprintf(
- "Expected tag %s for encrypt(aad, plaintext), but got %s instead",
- tag_str,
- derived_tag_str,
- ),
+ "Expected tag %s for encrypt(aad, plaintext), but got %s instead",
+ tag_str,
+ derived_tag_str,
)
derived_plaintext: [114]byte
@@ -239,15 +209,13 @@ test_chacha20poly1305 :: proc(t: ^testing.T) {
ciphertext[:],
)
derived_plaintext_str := string(derived_plaintext[:])
- tc.expect(t, ok, "Expected true for decrypt(tag, aad, ciphertext)")
- tc.expect(
+ testing.expect(t, ok, "Expected true for decrypt(tag, aad, ciphertext)")
+ testing.expectf(
t,
derived_plaintext_str == _PLAINTEXT_SUNSCREEN_STR,
- fmt.tprintf(
- "Expected plaintext %s for decrypt(tag, aad, ciphertext), but got %s instead",
- _PLAINTEXT_SUNSCREEN_STR,
- derived_plaintext_str,
- ),
+ "Expected plaintext %s for decrypt(tag, aad, ciphertext), but got %s instead",
+ _PLAINTEXT_SUNSCREEN_STR,
+ derived_plaintext_str,
)
derived_ciphertext[0] ~= 0xa5
@@ -259,7 +227,7 @@ test_chacha20poly1305 :: proc(t: ^testing.T) {
aad[:],
derived_ciphertext[:],
)
- tc.expect(t, !ok, "Expected false for decrypt(tag, aad, corrupted_ciphertext)")
+ testing.expect(t, !ok, "Expected false for decrypt(tag, aad, corrupted_ciphertext)")
aad[0] ~= 0xa5
ok = chacha20poly1305.decrypt(
@@ -270,15 +238,13 @@ test_chacha20poly1305 :: proc(t: ^testing.T) {
aad[:],
ciphertext[:],
)
- tc.expect(t, !ok, "Expected false for decrypt(tag, corrupted_aad, ciphertext)")
+ testing.expect(t, !ok, "Expected false for decrypt(tag, corrupted_aad, ciphertext)")
}
@(test)
test_rand_bytes :: proc(t: ^testing.T) {
- tc.log(t, "Testing rand_bytes")
-
if !crypto.HAS_RAND_BYTES {
- tc.log(t, "rand_bytes not supported - skipping")
+ log.info("rand_bytes not supported - skipping")
return
}
@@ -306,10 +272,5 @@ test_rand_bytes :: proc(t: ^testing.T) {
break
}
}
-
- tc.expect(
- t,
- seems_ok,
- "Expected to randomize the head and tail of the buffer within a handful of attempts",
- )
+ testing.expect(t, seems_ok, "Expected to randomize the head and tail of the buffer within a handful of attempts")
}
diff --git a/tests/core/crypto/test_core_crypto_aes.odin b/tests/core/crypto/test_core_crypto_aes.odin
new file mode 100644
index 000000000..4d4c06bdc
--- /dev/null
+++ b/tests/core/crypto/test_core_crypto_aes.odin
@@ -0,0 +1,446 @@
+package test_core_crypto
+
+import "base:runtime"
+import "core:encoding/hex"
+import "core:log"
+import "core:testing"
+
+import "core:crypto/aes"
+import "core:crypto/sha2"
+
+@(test)
+test_aes :: proc(t: ^testing.T) {
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+
+ log.info("Testing AES")
+
+ impls := make([dynamic]aes.Implementation, 0, 2)
+ defer delete(impls)
+ append(&impls, aes.Implementation.Portable)
+ if aes.is_hardware_accelerated() {
+ append(&impls, aes.Implementation.Hardware)
+ }
+
+ for impl in impls {
+ test_aes_ecb(t, impl)
+ test_aes_ctr(t, impl)
+ test_aes_gcm(t, impl)
+ }
+}
+
+test_aes_ecb :: proc(t: ^testing.T, impl: aes.Implementation) {
+ log.infof("Testing AES-ECB/%v", impl)
+
+ test_vectors := []struct {
+ key: string,
+ plaintext: string,
+ ciphertext: string,
+ } {
+ // http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
+ {
+ "2b7e151628aed2a6abf7158809cf4f3c",
+ "6bc1bee22e409f96e93d7e117393172a",
+ "3ad77bb40d7a3660a89ecaf32466ef97",
+ },
+ {
+ "2b7e151628aed2a6abf7158809cf4f3c",
+ "ae2d8a571e03ac9c9eb76fac45af8e51",
+ "f5d3d58503b9699de785895a96fdbaaf",
+ },
+ {
+ "2b7e151628aed2a6abf7158809cf4f3c",
+ "30c81c46a35ce411e5fbc1191a0a52ef",
+ "43b1cd7f598ece23881b00e3ed030688",
+ },
+ {
+ "2b7e151628aed2a6abf7158809cf4f3c",
+ "f69f2445df4f9b17ad2b417be66c3710",
+ "7b0c785e27e8ad3f8223207104725dd4",
+ },
+ {
+ "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
+ "6bc1bee22e409f96e93d7e117393172a",
+ "bd334f1d6e45f25ff712a214571fa5cc",
+ },
+ {
+ "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
+ "ae2d8a571e03ac9c9eb76fac45af8e51",
+ "974104846d0ad3ad7734ecb3ecee4eef",
+ },
+ {
+ "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
+ "30c81c46a35ce411e5fbc1191a0a52ef",
+ "ef7afd2270e2e60adce0ba2face6444e",
+ },
+ {
+ "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
+ "f69f2445df4f9b17ad2b417be66c3710",
+ "9a4b41ba738d6c72fb16691603c18e0e",
+ },
+ {
+ "603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4",
+ "6bc1bee22e409f96e93d7e117393172a",
+ "f3eed1bdb5d2a03c064b5a7e3db181f8",
+ },
+ {
+ "603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4",
+ "ae2d8a571e03ac9c9eb76fac45af8e51",
+ "591ccb10d410ed26dc5ba74a31362870",
+ },
+ {
+ "603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4",
+ "30c81c46a35ce411e5fbc1191a0a52ef",
+ "b6ed21b99ca6f4f9f153e7b1beafed1d",
+ },
+ {
+ "603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4",
+ "f69f2445df4f9b17ad2b417be66c3710",
+ "23304b7a39f9f3ff067d8d8f9e24ecc7",
+ },
+ }
+ for v, _ in test_vectors {
+ key, _ := hex.decode(transmute([]byte)(v.key), context.temp_allocator)
+ plaintext, _ := hex.decode(transmute([]byte)(v.plaintext), context.temp_allocator)
+ ciphertext, _ := hex.decode(transmute([]byte)(v.ciphertext), context.temp_allocator)
+
+ ctx: aes.Context_ECB
+ dst: [aes.BLOCK_SIZE]byte
+ aes.init_ecb(&ctx, key, impl)
+
+ aes.encrypt_ecb(&ctx, dst[:], plaintext)
+ dst_str := string(hex.encode(dst[:], context.temp_allocator))
+ testing.expectf(
+ t,
+ dst_str == v.ciphertext,
+ "AES-ECB/%v: Expected: %s for encrypt(%s, %s), but got %s instead",
+ impl,
+ v.ciphertext,
+ v.key,
+ v.plaintext,
+ dst_str,
+ )
+
+ aes.decrypt_ecb(&ctx, dst[:], ciphertext)
+ dst_str = string(hex.encode(dst[:], context.temp_allocator))
+ testing.expectf(
+ t,
+ dst_str == v.plaintext,
+ "AES-ECB/%v: Expected: %s for decrypt(%s, %s), but got %s instead",
+ impl,
+ v.plaintext,
+ v.key,
+ v.ciphertext,
+ dst_str,
+ )
+ }
+}
+
+test_aes_ctr :: proc(t: ^testing.T, impl: aes.Implementation) {
+ log.infof("Testing AES-CTR/%v", impl)
+
+ test_vectors := []struct {
+ key: string,
+ iv: string,
+ plaintext: string,
+ ciphertext: string,
+ } {
+ // http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
+ {
+ "2b7e151628aed2a6abf7158809cf4f3c",
+ "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
+ "6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e5130c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710",
+ "874d6191b620e3261bef6864990db6ce9806f66b7970fdff8617187bb9fffdff5ae4df3edbd5d35e5b4f09020db03eab1e031dda2fbe03d1792170a0f3009cee",
+ },
+ {
+ "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b",
+ "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
+ "6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e5130c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710",
+ "1abc932417521ca24f2b0459fe7e6e0b090339ec0aa6faefd5ccc2c6f4ce8e941e36b26bd1ebc670d1bd1d665620abf74f78a7f6d29809585a97daec58c6b050",
+ },
+ {
+ "603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4",
+ "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
+ "6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e5130c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710",
+ "601ec313775789a5b7a7f504bbf3d228f443e3ca4d62b59aca84e990cacaf5c52b0930daa23de94ce87017ba2d84988ddfc9c58db67aada613c2dd08457941a6",
+ },
+ }
+ for v, _ in test_vectors {
+ key, _ := hex.decode(transmute([]byte)(v.key), context.temp_allocator)
+ iv, _ := hex.decode(transmute([]byte)(v.iv), context.temp_allocator)
+ plaintext, _ := hex.decode(transmute([]byte)(v.plaintext), context.temp_allocator)
+ ciphertext, _ := hex.decode(transmute([]byte)(v.ciphertext), context.temp_allocator)
+
+ dst := make([]byte, len(ciphertext), context.temp_allocator)
+
+ ctx: aes.Context_CTR
+ aes.init_ctr(&ctx, key, iv, impl)
+
+ aes.xor_bytes_ctr(&ctx, dst, plaintext)
+
+ dst_str := string(hex.encode(dst[:], context.temp_allocator))
+ testing.expectf(
+ t,
+ dst_str == v.ciphertext,
+ "AES-CTR/%v: Expected: %s for encrypt(%s, %s, %s), but got %s instead",
+ impl,
+ v.ciphertext,
+ v.key,
+ v.iv,
+ v.plaintext,
+ dst_str,
+ )
+ }
+
+ // Incrementally read 1, 2, 3, ..., 2048 bytes of keystream, and
+ // compare the SHA-512/256 digest with a known value. Results
+ // and testcase taken from a known good implementation.
+
+ tmp := make([]byte, 2048, context.temp_allocator)
+
+ ctx: aes.Context_CTR
+ key: [aes.KEY_SIZE_256]byte
+ nonce: [aes.CTR_IV_SIZE]byte
+ aes.init_ctr(&ctx, key[:], nonce[:])
+
+ h_ctx: sha2.Context_512
+ sha2.init_512_256(&h_ctx)
+
+ for i := 1; i < 2048; i = i + 1 {
+ aes.keystream_bytes_ctr(&ctx, tmp[:i])
+ sha2.update(&h_ctx, tmp[:i])
+ }
+
+ digest: [32]byte
+ sha2.final(&h_ctx, digest[:])
+ digest_str := string(hex.encode(digest[:], context.temp_allocator))
+
+ expected_digest_str := "d4445343afeb9d1237f95b10d00358aed4c1d7d57c9fe480cd0afb5e2ffd448c"
+ testing.expectf(
+ t,
+ expected_digest_str == digest_str,
+ "AES-CTR/%v: Expected %s for keystream digest, but got %s instead",
+ impl,
+ expected_digest_str,
+ digest_str,
+ )
+}
+
+test_aes_gcm :: proc(t: ^testing.T, impl: aes.Implementation) {
+ log.infof("Testing AES-GCM/%v", impl)
+
+ // NIST did a reorg of their site, so the source of the test vectors
+ // is only available from an archive. The commented out tests are
+ // for non-96-bit IVs which our implementation does not support.
+ //
+ // https://csrc.nist.rip/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
+ test_vectors := []struct {
+ key: string,
+ iv: string,
+ aad: string,
+ plaintext: string,
+ ciphertext: string,
+ tag: string,
+ } {
+ {
+ "00000000000000000000000000000000",
+ "000000000000000000000000",
+ "",
+ "",
+ "",
+ "58e2fccefa7e3061367f1d57a4e7455a",
+ },
+ {
+ "00000000000000000000000000000000",
+ "000000000000000000000000",
+ "",
+ "00000000000000000000000000000000",
+ "0388dace60b6a392f328c2b971b2fe78",
+ "ab6e47d42cec13bdf53a67b21257bddf",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308",
+ "cafebabefacedbaddecaf888",
+ "",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255",
+ "42831ec2217774244b7221b784d0d49ce3aa212f2c02a4e035c17e2329aca12e21d514b25466931c7d8f6a5aac84aa051ba30b396a0aac973d58e091473f5985",
+ "4d5c2af327cd64a62cf35abd2ba6fab4",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308",
+ "cafebabefacedbaddecaf888",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "42831ec2217774244b7221b784d0d49ce3aa212f2c02a4e035c17e2329aca12e21d514b25466931c7d8f6a5aac84aa051ba30b396a0aac973d58e091",
+ "5bc94fbc3221a5db94fae95ae7121a47",
+ },
+ /*
+ {
+ "feffe9928665731c6d6a8f9467308308",
+ "cafebabefacedbad",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "61353b4c2806934a777ff51fa22a4755699b2a714fcdc6f83766e5f97b6c742373806900e49f24b22b097544d4896b424989b5e1ebac0f07c23f4598",
+ "3612d2e79e3b0785561be14aaca2fccb",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308",
+ "9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "8ce24998625615b603a033aca13fb894be9112a5c3a211a8ba262a3cca7e2ca701e4a9a4fba43c90ccdcb281d48c7c6fd62875d2aca417034c34aee5",
+ "619cc5aefffe0bfa462af43c1699d050",
+ },
+ */
+ {
+ "000000000000000000000000000000000000000000000000",
+ "000000000000000000000000",
+ "",
+ "",
+ "",
+ "cd33b28ac773f74ba00ed1f312572435",
+ },
+ {
+ "000000000000000000000000000000000000000000000000",
+ "000000000000000000000000",
+ "",
+ "00000000000000000000000000000000",
+ "98e7247c07f0fe411c267e4384b0f600",
+ "2ff58d80033927ab8ef4d4587514f0fb",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c",
+ "cafebabefacedbaddecaf888",
+ "",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255",
+ "3980ca0b3c00e841eb06fac4872a2757859e1ceaa6efd984628593b40ca1e19c7d773d00c144c525ac619d18c84a3f4718e2448b2fe324d9ccda2710acade256",
+ "9924a7c8587336bfb118024db8674a14",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c",
+ "cafebabefacedbaddecaf888",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "3980ca0b3c00e841eb06fac4872a2757859e1ceaa6efd984628593b40ca1e19c7d773d00c144c525ac619d18c84a3f4718e2448b2fe324d9ccda2710",
+ "2519498e80f1478f37ba55bd6d27618c",
+ },
+ /*
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c",
+ "cafebabefacedbad",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "0f10f599ae14a154ed24b36e25324db8c566632ef2bbb34f8347280fc4507057fddc29df9a471f75c66541d4d4dad1c9e93a19a58e8b473fa0f062f7",
+ "65dcc57fcf623a24094fcca40d3533f8",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c",
+ "9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "d27e88681ce3243c4830165a8fdcf9ff1de9a1d8e6b447ef6ef7b79828666e4581e79012af34ddd9e2f037589b292db3e67c036745fa22e7e9b7373b",
+ "dcf566ff291c25bbb8568fc3d376a6d9",
+ },
+ */
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "000000000000000000000000",
+ "",
+ "",
+ "",
+ "530f8afbc74536b9a963b4f1c4cb738b",
+ },
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "000000000000000000000000",
+ "",
+ "00000000000000000000000000000000",
+ "cea7403d4d606b6e074ec5d3baf39d18",
+ "d0d1c8a799996bf0265b98b5d48ab919",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+ "cafebabefacedbaddecaf888",
+ "",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255",
+ "522dc1f099567d07f47f37a32a84427d643a8cdcbfe5c0c97598a2bd2555d1aa8cb08e48590dbb3da7b08b1056828838c5f61e6393ba7a0abcc9f662898015ad",
+ "b094dac5d93471bdec1a502270e3cc6c",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+ "cafebabefacedbaddecaf888",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "522dc1f099567d07f47f37a32a84427d643a8cdcbfe5c0c97598a2bd2555d1aa8cb08e48590dbb3da7b08b1056828838c5f61e6393ba7a0abcc9f662",
+ "76fc6ece0f4e1768cddf8853bb2d551b",
+ },
+ /*
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+ "cafebabefacedbad",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "c3762df1ca787d32ae47c13bf19844cbaf1ae14d0b976afac52ff7d79bba9de0feb582d33934a4f0954cc2363bc73f7862ac430e64abe499f47c9b1f",
+ "3a337dbf46a792c45e454913fe2ea8f2",
+ },
+ {
+ "feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
+ "9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b",
+ "feedfacedeadbeeffeedfacedeadbeefabaddad2",
+ "d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
+ "5a8def2f0c9e53f1f75d7853659e2a20eeb2b22aafde6419a058ab4f6f746bf40fc0c3b780f244452da3ebf1c5d82cdea2418997200ef82e44ae7e3f",
+ "a44a8266ee1c8eb0c8b5d4cf5ae9f19a",
+ },
+ */
+ }
+ for v, _ in test_vectors {
+ key, _ := hex.decode(transmute([]byte)(v.key), context.temp_allocator)
+ iv, _ := hex.decode(transmute([]byte)(v.iv), context.temp_allocator)
+ aad, _ := hex.decode(transmute([]byte)(v.aad), context.temp_allocator)
+ plaintext, _ := hex.decode(transmute([]byte)(v.plaintext), context.temp_allocator)
+ ciphertext, _ := hex.decode(transmute([]byte)(v.ciphertext), context.temp_allocator)
+ tag, _ := hex.decode(transmute([]byte)(v.tag), context.temp_allocator)
+
+ tag_ := make([]byte, len(tag), context.temp_allocator)
+ dst := make([]byte, len(ciphertext), context.temp_allocator)
+
+ ctx: aes.Context_GCM
+ aes.init_gcm(&ctx, key, impl)
+
+ aes.seal_gcm(&ctx, dst, tag_, iv, aad, plaintext)
+ dst_str := string(hex.encode(dst[:], context.temp_allocator))
+ tag_str := string(hex.encode(tag_[:], context.temp_allocator))
+
+ testing.expectf(
+ t,
+ dst_str == v.ciphertext && tag_str == v.tag,
+ "AES-GCM/%v: Expected: (%s, %s) for seal(%s, %s, %s, %s), but got (%s, %s) instead",
+ impl,
+ v.ciphertext,
+ v.tag,
+ v.key,
+ v.iv,
+ v.aad,
+ v.plaintext,
+ dst_str,
+ tag_str,
+ )
+
+ ok := aes.open_gcm(&ctx, dst, iv, aad, ciphertext, tag)
+ dst_str = string(hex.encode(dst[:], context.temp_allocator))
+
+ testing.expectf(
+ t,
+ ok && dst_str == v.plaintext,
+ "AES-GCM/%v: Expected: (%s, true) for open(%s, %s, %s, %s, %s), but got (%s, %s) instead",
+ impl,
+ v.plaintext,
+ v.key,
+ v.iv,
+ v.aad,
+ v.ciphertext,
+ v.tag,
+ dst_str,
+ ok,
+ )
+ }
+}
diff --git a/tests/core/crypto/test_core_crypto_ecc25519.odin b/tests/core/crypto/test_core_crypto_ecc25519.odin
index 5ea008f90..baf4a1a38 100644
--- a/tests/core/crypto/test_core_crypto_ecc25519.odin
+++ b/tests/core/crypto/test_core_crypto_ecc25519.odin
@@ -1,8 +1,6 @@
package test_core_crypto
-import "base:runtime"
import "core:encoding/hex"
-import "core:fmt"
import "core:testing"
import field "core:crypto/_fiat/field_curve25519"
@@ -10,25 +8,8 @@ import "core:crypto/ed25519"
import "core:crypto/ristretto255"
import "core:crypto/x25519"
-import tc "tests:common"
-
-@(test)
-test_ecc25519 :: proc(t: ^testing.T) {
- runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
-
- tc.log(t, "Testing curve25519 ECC")
-
- test_sqrt_ratio_m1(t)
- test_ristretto255(t)
-
- test_ed25519(t)
- test_x25519(t)
-}
-
@(test)
test_sqrt_ratio_m1 :: proc(t: ^testing.T) {
- tc.log(t, "Testing sqrt_ratio_m1")
-
test_vectors := []struct {
u: string,
v: string,
@@ -90,25 +71,21 @@ test_sqrt_ratio_m1 :: proc(t: ^testing.T) {
field.fe_relax_cast(&vee),
)
- tc.expect(
+ testing.expectf(
t,
(was_square == 1) == v.was_square && field.fe_equal_bytes(&r, r_) == 1,
- fmt.tprintf(
- "Expected (%v, %s) for SQRT_RATIO_M1(%s, %s), got %s",
- v.was_square,
- v.r,
- v.u,
- v.v,
- fe_str(&r),
- ),
+ "Expected (%v, %s) for SQRT_RATIO_M1(%s, %s), got %s",
+ v.was_square,
+ v.r,
+ v.u,
+ v.v,
+ fe_str(&r),
)
}
}
@(test)
test_ristretto255 :: proc(t: ^testing.T) {
- tc.log(t, "Testing ristretto255")
-
ge_gen: ristretto255.Group_Element
ristretto255.ge_generator(&ge_gen)
@@ -158,7 +135,7 @@ test_ristretto255 :: proc(t: ^testing.T) {
ge: ristretto255.Group_Element
ok := ristretto255.ge_set_bytes(&ge, b)
- tc.expect(t, !ok, fmt.tprintf("Expected false for %s", x))
+ testing.expectf(t, !ok, "Expected false for %s", x)
}
generator_multiples := []string {
@@ -185,22 +162,20 @@ test_ristretto255 :: proc(t: ^testing.T) {
ge := &ges[i]
ok := ristretto255.ge_set_bytes(ge, b)
- tc.expect(t, ok, fmt.tprintf("Expected true for %s", x))
+ testing.expectf(t, ok, "Expected true for %s", x)
x_check := ge_str(ge)
- tc.expect(
+ testing.expectf(
t,
x == x_check,
- fmt.tprintf(
- "Expected %s (round-trip) but got %s instead",
- x,
- x_check,
- ),
+ "Expected %s (round-trip) but got %s instead",
+ x,
+ x_check,
)
if i == 1 {
- tc.expect(
+ testing.expect(
t,
ristretto255.ge_equal(ge, &ge_gen) == 1,
"Expected element 1 to be the generator",
@@ -217,41 +192,35 @@ test_ristretto255 :: proc(t: ^testing.T) {
ristretto255.ge_scalarmult_generator(&ge_check, &sc)
x_check := ge_str(&ge_check)
- tc.expect(
+ testing.expectf(
t,
x_check == generator_multiples[i],
- fmt.tprintf(
- "Expected %s for G * %d (specialized), got %s",
- generator_multiples[i],
- i,
- x_check,
- ),
+ "Expected %s for G * %d (specialized), got %s",
+ generator_multiples[i],
+ i,
+ x_check,
)
ristretto255.ge_scalarmult(&ge_check, &ges[1], &sc)
x_check = ge_str(&ge_check)
- tc.expect(
+ testing.expectf(
t,
x_check == generator_multiples[i],
- fmt.tprintf(
- "Expected %s for G * %d (generic), got %s (slow compare)",
- generator_multiples[i],
- i,
- x_check,
- ),
+ "Expected %s for G * %d (generic), got %s (slow compare)",
+ generator_multiples[i],
+ i,
+ x_check,
)
ristretto255.ge_scalarmult_vartime(&ge_check, &ges[1], &sc)
x_check = ge_str(&ge_check)
- tc.expect(
+ testing.expectf(
t,
x_check == generator_multiples[i],
- fmt.tprintf(
- "Expected %s for G * %d (generic vartime), got %s (slow compare)",
- generator_multiples[i],
- i,
- x_check,
- ),
+ "Expected %s for G * %d (generic vartime), got %s (slow compare)",
+ generator_multiples[i],
+ i,
+ x_check,
)
switch i {
@@ -261,28 +230,24 @@ test_ristretto255 :: proc(t: ^testing.T) {
ristretto255.ge_add(&ge_check, ge_prev, &ge_gen)
x_check = ge_str(&ge_check)
- tc.expect(
+ testing.expectf(
t,
x_check == generator_multiples[i],
- fmt.tprintf(
- "Expected %s for ges[%d] + ges[%d], got %s (slow compare)",
- generator_multiples[i],
- i-1,
- 1,
- x_check,
- ),
+ "Expected %s for ges[%d] + ges[%d], got %s (slow compare)",
+ generator_multiples[i],
+ i-1,
+ 1,
+ x_check,
)
- tc.expect(
+ testing.expectf(
t,
ristretto255.ge_equal(&ges[i], &ge_check) == 1,
- fmt.tprintf(
- "Expected %s for ges[%d] + ges[%d], got %s (fast compare)",
- generator_multiples[i],
- i-1,
- 1,
- x_check,
- ),
+ "Expected %s for ges[%d] + ges[%d], got %s (fast compare)",
+ generator_multiples[i],
+ i-1,
+ 1,
+ x_check,
)
}
}
@@ -344,22 +309,18 @@ test_ristretto255 :: proc(t: ^testing.T) {
ristretto255.ge_set_wide_bytes(&ge, in_bytes)
ge_check := ge_str(&ge)
- tc.expect(
+ testing.expectf(
t,
ge_check == v.output,
- fmt.tprintf(
- "Expected %s for %s, got %s",
- v.output,
- ge_check,
- ),
+ "Expected %s for %s, got %s",
+ v.output,
+ ge_check,
)
}
}
@(test)
test_ed25519 :: proc(t: ^testing.T) {
- tc.log(t, "Testing ed25519")
-
test_vectors_rfc := []struct {
priv_key: string,
pub_key: string,
@@ -401,87 +362,73 @@ test_ed25519 :: proc(t: ^testing.T) {
priv_key: ed25519.Private_Key
ok := ed25519.private_key_set_bytes(&priv_key, priv_bytes)
- tc.expect(
+ testing.expectf(
t,
ok,
- fmt.tprintf(
- "Expected %s to be a valid private key",
- v.priv_key,
- ),
+ "Expected %s to be a valid private key",
+ v.priv_key,
)
key_bytes: [32]byte
ed25519.private_key_bytes(&priv_key, key_bytes[:])
- tc.expect(
+ testing.expectf(
t,
ok,
- fmt.tprintf(
- "Expected private key %s round-trip, got %s",
- v.priv_key,
- string(hex.encode(key_bytes[:], context.temp_allocator)),
- ),
+ "Expected private key %s round-trip, got %s",
+ v.priv_key,
+ string(hex.encode(key_bytes[:], context.temp_allocator)),
)
pub_key: ed25519.Public_Key
ok = ed25519.public_key_set_bytes(&pub_key, pub_bytes)
- tc.expect(
+ testing.expectf(
t,
ok,
- fmt.tprintf(
- "Expected %s to be a valid public key (priv->pub: %s)",
- v.pub_key,
- string(hex.encode(priv_key._pub_key._b[:], context.temp_allocator)),
- ),
+ "Expected %s to be a valid public key (priv->pub: %s)",
+ v.pub_key,
+ string(hex.encode(priv_key._pub_key._b[:], context.temp_allocator)),
)
ed25519.public_key_bytes(&pub_key, key_bytes[:])
- tc.expect(
+ testing.expectf(
t,
ok,
- fmt.tprintf(
- "Expected public key %s round-trip, got %s",
- v.pub_key,
- string(hex.encode(key_bytes[:], context.temp_allocator)),
- ),
+ "Expected public key %s round-trip, got %s",
+ v.pub_key,
+ string(hex.encode(key_bytes[:], context.temp_allocator)),
)
sig: [ed25519.SIGNATURE_SIZE]byte
ed25519.sign(&priv_key, msg_bytes, sig[:])
x := string(hex.encode(sig[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
x == v.sig,
- fmt.tprintf(
- "Expected %s for sign(%s, %s), got %s",
- v.sig,
- v.priv_key,
- v.msg,
- x,
- ),
+ "Expected %s for sign(%s, %s), got %s",
+ v.sig,
+ v.priv_key,
+ v.msg,
+ x,
)
ok = ed25519.verify(&pub_key, msg_bytes, sig_bytes)
- tc.expect(
+ testing.expectf(
t,
ok,
- fmt.tprintf(
- "Expected true for verify(%s, %s, %s)",
- v.pub_key,
- v.msg,
- v.sig,
- ),
+ "Expected true for verify(%s, %s, %s)",
+ v.pub_key,
+ v.msg,
+ v.sig,
)
ok = ed25519.verify(&priv_key._pub_key, msg_bytes, sig_bytes)
- tc.expect(
+ testing.expectf(
t,
ok,
- fmt.tprintf(
- "Expected true for verify(pub(%s), %s %s)",
- v.priv_key,
- v.msg,
- v.sig,
- ),
+ "Expected true for verify(pub(%s), %s %s)",
+ v.priv_key,
+ v.msg,
+ v.sig,
)
// Corrupt the message and make sure verification fails.
@@ -493,15 +440,13 @@ test_ed25519 :: proc(t: ^testing.T) {
msg_bytes[0] = msg_bytes[0] ~ 69
}
ok = ed25519.verify(&pub_key, msg_bytes, sig_bytes)
- tc.expect(
+ testing.expectf(
t,
ok == false,
- fmt.tprintf(
- "Expected false for verify(%s, %s (corrupted), %s)",
- v.pub_key,
- v.msg,
- v.sig,
- ),
+ "Expected false for verify(%s, %s (corrupted), %s)",
+ v.pub_key,
+ v.msg,
+ v.sig,
)
}
@@ -634,15 +579,13 @@ test_ed25519 :: proc(t: ^testing.T) {
pub_key: ed25519.Public_Key
ok := ed25519.public_key_set_bytes(&pub_key, pub_bytes)
- tc.expect(
+ testing.expectf(
t,
ok == v.pub_key_ok,
- fmt.tprintf(
- "speccheck/%d: Expected %s to be a (in)valid public key, got %v",
- i,
- v.pub_key,
- ok,
- ),
+ "speccheck/%d: Expected %s to be a (in)valid public key, got %v",
+ i,
+ v.pub_key,
+ ok,
)
// If A is rejected for being non-canonical, skip signature check.
@@ -651,17 +594,15 @@ test_ed25519 :: proc(t: ^testing.T) {
}
ok = ed25519.verify(&pub_key, msg_bytes, sig_bytes)
- tc.expect(
+ testing.expectf(
t,
ok == v.sig_ok,
- fmt.tprintf(
- "speccheck/%d Expected %v for verify(%s, %s, %s)",
- i,
- v.sig_ok,
- v.pub_key,
- v.msg,
- v.sig,
- ),
+ "speccheck/%d Expected %v for verify(%s, %s, %s)",
+ i,
+ v.sig_ok,
+ v.pub_key,
+ v.msg,
+ v.sig,
)
// If the signature is accepted, skip the relaxed signature check.
@@ -670,25 +611,21 @@ test_ed25519 :: proc(t: ^testing.T) {
}
ok = ed25519.verify(&pub_key, msg_bytes, sig_bytes, true)
- tc.expect(
+ testing.expectf(
t,
ok == v.sig_ok_relaxed,
- fmt.tprintf(
- "speccheck/%d Expected %v for verify(%s, %s, %s, true)",
- i,
- v.sig_ok_relaxed,
- v.pub_key,
- v.msg,
- v.sig,
- ),
+ "speccheck/%d Expected %v for verify(%s, %s, %s, true)",
+ i,
+ v.sig_ok_relaxed,
+ v.pub_key,
+ v.msg,
+ v.sig,
)
}
}
@(test)
test_x25519 :: proc(t: ^testing.T) {
- tc.log(t, "Testing X25519")
-
// Local copy of this so that the base point doesn't need to be exported.
_BASE_POINT: [32]byte = {
9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -720,17 +657,15 @@ test_x25519 :: proc(t: ^testing.T) {
x25519.scalarmult(derived_point[:], scalar[:], point[:])
derived_point_str := string(hex.encode(derived_point[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_point_str == v.product,
- fmt.tprintf(
- "Expected %s for %s * %s, but got %s instead",
- v.product,
- v.scalar,
- v.point,
- derived_point_str,
- ),
- )
+ "Expected %s for %s * %s, but got %s instead",
+ v.product,
+ v.scalar,
+ v.point,
+ derived_point_str,
+ )
// Abuse the test vectors to sanity-check the scalar-basepoint multiply.
p1, p2: [x25519.POINT_SIZE]byte
@@ -738,15 +673,13 @@ test_x25519 :: proc(t: ^testing.T) {
x25519.scalarmult(p2[:], scalar[:], _BASE_POINT[:])
p1_str := string(hex.encode(p1[:], context.temp_allocator))
p2_str := string(hex.encode(p2[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
p1_str == p2_str,
- fmt.tprintf(
- "Expected %s for %s * basepoint, but got %s instead",
- p2_str,
- v.scalar,
- p1_str,
- ),
+ "Expected %s for %s * basepoint, but got %s instead",
+ p2_str,
+ v.scalar,
+ p1_str,
)
}
}
@@ -763,4 +696,4 @@ fe_str :: proc(fe: ^field.Tight_Field_Element) -> string {
b: [32]byte
field.fe_to_bytes(&b, fe)
return string(hex.encode(b[:], context.temp_allocator))
-}
+}
\ No newline at end of file
diff --git a/tests/core/crypto/test_core_crypto_hash.odin b/tests/core/crypto/test_core_crypto_hash.odin
index c4e8e8dd7..9a9d0cc76 100644
--- a/tests/core/crypto/test_core_crypto_hash.odin
+++ b/tests/core/crypto/test_core_crypto_hash.odin
@@ -3,23 +3,17 @@ package test_core_crypto
import "base:runtime"
import "core:bytes"
import "core:encoding/hex"
-import "core:fmt"
import "core:strings"
import "core:testing"
-
import "core:crypto/hash"
-import tc "tests:common"
-
@(test)
test_hash :: proc(t: ^testing.T) {
runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
- tc.log(t, "Testing Hashes")
-
// TODO:
// - Stick the test vectors in a JSON file or something.
- data_1_000_000_a := strings.repeat("a", 1_000_000)
+ data_1_000_000_a := strings.repeat("a", 1_000_000, context.temp_allocator)
digest: [hash.MAX_DIGEST_SIZE]byte
test_vectors := []struct{
@@ -496,16 +490,14 @@ test_hash :: proc(t: ^testing.T) {
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.hash,
- fmt.tprintf(
- "%s/incremental: Expected: %s for input of %s, but got %s instead",
- algo_name,
- v.hash,
- v.str,
- dst_str,
- ),
+ "%s/incremental: Expected: %s for input of %s, but got %s instead",
+ algo_name,
+ v.hash,
+ v.str,
+ dst_str,
)
}
@@ -521,25 +513,21 @@ test_hash :: proc(t: ^testing.T) {
// still correct.
digest_sz := hash.DIGEST_SIZES[algo]
block_sz := hash.BLOCK_SIZES[algo]
- tc.expect(
+ testing.expectf(
t,
digest_sz <= hash.MAX_DIGEST_SIZE,
- fmt.tprintf(
- "%s: Digest size %d exceeds max %d",
- algo_name,
- digest_sz,
- hash.MAX_DIGEST_SIZE,
- ),
+ "%s: Digest size %d exceeds max %d",
+ algo_name,
+ digest_sz,
+ hash.MAX_DIGEST_SIZE,
)
- tc.expect(
+ testing.expectf(
t,
block_sz <= hash.MAX_BLOCK_SIZE,
- fmt.tprintf(
- "%s: Block size %d exceeds max %d",
- algo_name,
- block_sz,
- hash.MAX_BLOCK_SIZE,
- ),
+ "%s: Block size %d exceeds max %d",
+ algo_name,
+ block_sz,
+ hash.MAX_BLOCK_SIZE,
)
// Exercise most of the happy-path for the high level interface.
@@ -553,15 +541,13 @@ test_hash :: proc(t: ^testing.T) {
a_str := string(hex.encode(digest_a, context.temp_allocator))
b_str := string(hex.encode(digest_b, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
a_str == b_str,
- fmt.tprintf(
- "%s/cmp: Expected: %s (hash_stream) == %s (hash_bytes)",
- algo_name,
- a_str,
- b_str,
- ),
+ "%s/cmp: Expected: %s (hash_stream) == %s (hash_bytes)",
+ algo_name,
+ a_str,
+ b_str,
)
// Exercise the rolling digest functionality, which also covers
@@ -571,25 +557,21 @@ test_hash :: proc(t: ^testing.T) {
api_algo := hash.algorithm(&ctx)
api_digest_size := hash.digest_size(&ctx)
- tc.expect(
+ testing.expectf(
t,
algo == api_algo,
- fmt.tprintf(
- "%s/algorithm: Expected: %v but got %v instead",
- algo_name,
- algo,
- api_algo,
- ),
+ "%s/algorithm: Expected: %v but got %v instead",
+ algo_name,
+ algo,
+ api_algo,
)
- tc.expect(
+ testing.expectf(
t,
hash.DIGEST_SIZES[algo] == api_digest_size,
- fmt.tprintf(
- "%s/digest_size: Expected: %d but got %d instead",
- algo_name,
- hash.DIGEST_SIZES[algo],
- api_digest_size,
- ),
+ "%s/digest_size: Expected: %d but got %d instead",
+ algo_name,
+ hash.DIGEST_SIZES[algo],
+ api_digest_size,
)
hash.update(&ctx, digest_a)
@@ -604,16 +586,14 @@ test_hash :: proc(t: ^testing.T) {
b_str = string(hex.encode(digest_b, context.temp_allocator))
c_str := string(hex.encode(digest_c, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
a_str == b_str && b_str == c_str,
- fmt.tprintf(
- "%s/rolling: Expected: %s (first) == %s (second) == %s (third)",
- algo_name,
- a_str,
- b_str,
- c_str,
- ),
+ "%s/rolling: Expected: %s (first) == %s (second) == %s (third)",
+ algo_name,
+ a_str,
+ b_str,
+ c_str,
)
}
-}
+}
\ No newline at end of file
diff --git a/tests/core/crypto/test_core_crypto_kdf.odin b/tests/core/crypto/test_core_crypto_kdf.odin
index 73177d8be..247529e65 100644
--- a/tests/core/crypto/test_core_crypto_kdf.odin
+++ b/tests/core/crypto/test_core_crypto_kdf.odin
@@ -2,28 +2,14 @@ package test_core_crypto
import "base:runtime"
import "core:encoding/hex"
-import "core:fmt"
import "core:testing"
-
import "core:crypto/hash"
import "core:crypto/hkdf"
import "core:crypto/pbkdf2"
-import tc "tests:common"
-
-@(test)
-test_kdf :: proc(t: ^testing.T) {
- runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
-
- tc.log(t, "Testing KDFs")
-
- test_hkdf(t)
- test_pbkdf2(t)
-}
-
@(test)
test_hkdf :: proc(t: ^testing.T) {
- tc.log(t, "Testing HKDF")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
tmp: [128]byte // Good enough.
@@ -70,25 +56,23 @@ test_hkdf :: proc(t: ^testing.T) {
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.okm,
- fmt.tprintf(
- "HKDF-%s: Expected: %s for input of (%s, %s, %s), but got %s instead",
- algo_name,
- v.okm,
- v.ikm,
- v.salt,
- v.info,
- dst_str,
- ),
+ "HKDF-%s: Expected: %s for input of (%s, %s, %s), but got %s instead",
+ algo_name,
+ v.okm,
+ v.ikm,
+ v.salt,
+ v.info,
+ dst_str,
)
}
}
@(test)
test_pbkdf2 :: proc(t: ^testing.T) {
- tc.log(t, "Testing PBKDF2")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
tmp: [64]byte // 512-bits is enough for every output for now.
@@ -174,18 +158,16 @@ test_pbkdf2 :: proc(t: ^testing.T) {
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.dk,
- fmt.tprintf(
- "HMAC-%s: Expected: %s for input of (%s, %s, %d), but got %s instead",
- algo_name,
- v.dk,
- v.password,
- v.salt,
- v.iterations,
- dst_str,
- ),
+ "HMAC-%s: Expected: %s for input of (%s, %s, %d), but got %s instead",
+ algo_name,
+ v.dk,
+ v.password,
+ v.salt,
+ v.iterations,
+ dst_str,
)
}
}
diff --git a/tests/core/crypto/test_core_crypto_mac.odin b/tests/core/crypto/test_core_crypto_mac.odin
index f2eeacb19..ed95ba0ad 100644
--- a/tests/core/crypto/test_core_crypto_mac.odin
+++ b/tests/core/crypto/test_core_crypto_mac.odin
@@ -2,30 +2,17 @@ package test_core_crypto
import "base:runtime"
import "core:encoding/hex"
-import "core:fmt"
import "core:mem"
import "core:testing"
-
import "core:crypto/hash"
import "core:crypto/hmac"
import "core:crypto/poly1305"
import "core:crypto/siphash"
-import tc "tests:common"
-
@(test)
-test_mac :: proc(t: ^testing.T) {
+test_hmac :: proc(t: ^testing.T) {
runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
- tc.log(t, "Testing MACs")
-
- test_hmac(t)
- test_poly1305(t)
- test_siphash_2_4(t)
-}
-
-@(test)
-test_hmac :: proc(t: ^testing.T) {
// Test cases pulled out of RFC 6234, note that HMAC is a generic
// construct so as long as the underlying hash is correct and all
// the code paths are covered the implementation is "fine", so
@@ -86,40 +73,36 @@ test_hmac :: proc(t: ^testing.T) {
msg_str := string(hex.encode(msg, context.temp_allocator))
dst_str := string(hex.encode(dst[:tag_len], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == expected_str,
- fmt.tprintf(
- "%s/incremental: Expected: %s for input of %s - %s, but got %s instead",
- algo_name,
- tags_sha256[i],
- key_str,
- msg_str,
- dst_str,
- ),
+ "%s/incremental: Expected: %s for input of %s - %s, but got %s instead",
+ algo_name,
+ tags_sha256[i],
+ key_str,
+ msg_str,
+ dst_str,
)
hmac.sum(algo, dst, msg, key)
oneshot_str := string(hex.encode(dst[:tag_len], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
oneshot_str == expected_str,
- fmt.tprintf(
- "%s/oneshot: Expected: %s for input of %s - %s, but got %s instead",
- algo_name,
- tags_sha256[i],
- key_str,
- msg_str,
- oneshot_str,
- ),
+ "%s/oneshot: Expected: %s for input of %s - %s, but got %s instead",
+ algo_name,
+ tags_sha256[i],
+ key_str,
+ msg_str,
+ oneshot_str,
)
}
}
@(test)
test_poly1305 :: proc(t: ^testing.T) {
- tc.log(t, "Testing poly1305")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
// Test cases taken from poly1305-donna.
key := [poly1305.KEY_SIZE]byte {
@@ -157,16 +140,17 @@ test_poly1305 :: proc(t: ^testing.T) {
// Verify - oneshot + compare
ok := poly1305.verify(tag[:], msg[:], key[:])
- tc.expect(t, ok, "oneshot verify call failed")
+ testing.expect(t, ok, "oneshot verify call failed")
// Sum - oneshot
derived_tag: [poly1305.TAG_SIZE]byte
poly1305.sum(derived_tag[:], msg[:], key[:])
derived_tag_str := string(hex.encode(derived_tag[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_tag_str == tag_str,
- fmt.tprintf("Expected %s for sum(msg, key), but got %s instead", tag_str, derived_tag_str),
+ "Expected %s for sum(msg, key), but got %s instead",
+ tag_str, derived_tag_str,
)
// Incremental
@@ -182,21 +166,16 @@ test_poly1305 :: proc(t: ^testing.T) {
}
poly1305.final(&ctx, derived_tag[:])
derived_tag_str = string(hex.encode(derived_tag[:], context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
derived_tag_str == tag_str,
- fmt.tprintf(
- "Expected %s for init/update/final - incremental, but got %s instead",
- tag_str,
- derived_tag_str,
- ),
+ "Expected %s for init/update/final - incremental, but got %s instead",
+ tag_str, derived_tag_str,
)
}
@(test)
test_siphash_2_4 :: proc(t: ^testing.T) {
- tc.log(t, "Testing SipHash-2-4")
-
// Test vectors from
// https://github.com/veorq/SipHash/blob/master/vectors.h
test_vectors := [?]u64 {
@@ -225,6 +204,7 @@ test_siphash_2_4 :: proc(t: ^testing.T) {
for i in 0 ..< len(test_vectors) {
data := make([]byte, i)
+ defer delete(data)
for j in 0 ..< i {
data[j] = byte(j)
}
@@ -232,15 +212,13 @@ test_siphash_2_4 :: proc(t: ^testing.T) {
vector := test_vectors[i]
computed := siphash.sum_2_4(data[:], key[:])
- tc.expect(
+ testing.expectf(
t,
computed == vector,
- fmt.tprintf(
- "Expected: 0x%x for input of %v, but got 0x%x instead",
- vector,
- data,
- computed,
- ),
+ "Expected: 0x%x for input of %v, but got 0x%x instead",
+ vector,
+ data,
+ computed,
)
}
}
diff --git a/tests/core/crypto/test_core_crypto_sha3_variants.odin b/tests/core/crypto/test_core_crypto_sha3_variants.odin
index 8e44996bc..c11868e72 100644
--- a/tests/core/crypto/test_core_crypto_sha3_variants.odin
+++ b/tests/core/crypto/test_core_crypto_sha3_variants.odin
@@ -2,30 +2,14 @@ package test_core_crypto
import "base:runtime"
import "core:encoding/hex"
-import "core:fmt"
import "core:testing"
-
import "core:crypto/kmac"
import "core:crypto/shake"
import "core:crypto/tuplehash"
-import tc "tests:common"
-
-@(test)
-test_sha3_variants :: proc(t: ^testing.T) {
- runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
-
- tc.log(t, "Testing SHA3 derived functions")
-
- test_shake(t)
- test_cshake(t)
- test_tuplehash(t)
- test_kmac(t)
-}
-
@(test)
test_shake :: proc(t: ^testing.T) {
- tc.log(t, "Testing SHAKE")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
test_vectors := []struct {
sec_strength: int,
@@ -67,23 +51,21 @@ test_shake :: proc(t: ^testing.T) {
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.output,
- fmt.tprintf(
- "SHAKE%d: Expected: %s for input of %s, but got %s instead",
- v.sec_strength,
- v.output,
- v.str,
- dst_str,
- ),
+ "SHAKE%d: Expected: %s for input of %s, but got %s instead",
+ v.sec_strength,
+ v.output,
+ v.str,
+ dst_str,
)
}
}
@(test)
test_cshake :: proc(t: ^testing.T) {
- tc.log(t, "Testing cSHAKE")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
test_vectors := []struct {
sec_strength: int,
@@ -135,29 +117,27 @@ test_cshake :: proc(t: ^testing.T) {
shake.init_cshake_256(&ctx, domainsep)
}
- data, _ := hex.decode(transmute([]byte)(v.str))
+ data, _ := hex.decode(transmute([]byte)(v.str), context.temp_allocator)
shake.write(&ctx, data)
shake.read(&ctx, dst)
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.output,
- fmt.tprintf(
- "cSHAKE%d: Expected: %s for input of %s, but got %s instead",
- v.sec_strength,
- v.output,
- v.str,
- dst_str,
- ),
+ "cSHAKE%d: Expected: %s for input of %s, but got %s instead",
+ v.sec_strength,
+ v.output,
+ v.str,
+ dst_str,
)
}
}
@(test)
test_tuplehash :: proc(t: ^testing.T) {
- tc.log(t, "Testing TupleHash(XOF)")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
test_vectors := []struct {
sec_strength: int,
@@ -317,7 +297,7 @@ test_tuplehash :: proc(t: ^testing.T) {
}
for e in v.tuple {
- data, _ := hex.decode(transmute([]byte)(e))
+ data, _ := hex.decode(transmute([]byte)(e), context.temp_allocator)
tuplehash.write_element(&ctx, data)
}
@@ -332,24 +312,22 @@ test_tuplehash :: proc(t: ^testing.T) {
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.output,
- fmt.tprintf(
- "TupleHash%s%d: Expected: %s for input of %v, but got %s instead",
- suffix,
- v.sec_strength,
- v.output,
- v.tuple,
- dst_str,
- ),
+ "TupleHash%s%d: Expected: %s for input of %v, but got %s instead",
+ suffix,
+ v.sec_strength,
+ v.output,
+ v.tuple,
+ dst_str,
)
}
}
@(test)
test_kmac :: proc(t:^testing.T) {
- tc.log(t, "Testing KMAC")
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
test_vectors := []struct {
sec_strength: int,
@@ -410,7 +388,7 @@ test_kmac :: proc(t:^testing.T) {
for v in test_vectors {
dst := make([]byte, len(v.output) / 2, context.temp_allocator)
- key, _ := hex.decode(transmute([]byte)(v.key))
+ key, _ := hex.decode(transmute([]byte)(v.key), context.temp_allocator)
domainsep := transmute([]byte)(v.domainsep)
ctx: kmac.Context
@@ -421,24 +399,22 @@ test_kmac :: proc(t:^testing.T) {
kmac.init_256(&ctx, key, domainsep)
}
- data, _ := hex.decode(transmute([]byte)(v.msg))
+ data, _ := hex.decode(transmute([]byte)(v.msg), context.temp_allocator)
kmac.update(&ctx, data)
kmac.final(&ctx, dst)
dst_str := string(hex.encode(dst, context.temp_allocator))
- tc.expect(
+ testing.expectf(
t,
dst_str == v.output,
- fmt.tprintf(
- "KMAC%d: Expected: %s for input of (%s, %s, %s), but got %s instead",
- v.sec_strength,
- v.output,
- v.key,
- v.domainsep,
- v.msg,
- dst_str,
- ),
+ "KMAC%d: Expected: %s for input of (%s, %s, %s), but got %s instead",
+ v.sec_strength,
+ v.output,
+ v.key,
+ v.domainsep,
+ v.msg,
+ dst_str,
)
}
-}
+}
\ No newline at end of file
diff --git a/tests/core/crypto/test_crypto_benchmark.odin b/tests/core/crypto/test_crypto_benchmark.odin
deleted file mode 100644
index cc69cb16d..000000000
--- a/tests/core/crypto/test_crypto_benchmark.odin
+++ /dev/null
@@ -1,301 +0,0 @@
-package test_core_crypto
-
-import "base:runtime"
-import "core:encoding/hex"
-import "core:fmt"
-import "core:testing"
-import "core:time"
-
-import "core:crypto/chacha20"
-import "core:crypto/chacha20poly1305"
-import "core:crypto/ed25519"
-import "core:crypto/poly1305"
-import "core:crypto/x25519"
-
-import tc "tests:common"
-
-// Cryptographic primitive benchmarks.
-
-@(test)
-bench_crypto :: proc(t: ^testing.T) {
- runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
-
- fmt.println("Starting benchmarks:")
-
- bench_chacha20(t)
- bench_poly1305(t)
- bench_chacha20poly1305(t)
- bench_ed25519(t)
- bench_x25519(t)
-}
-
-_setup_sized_buf :: proc(
- options: ^time.Benchmark_Options,
- allocator := context.allocator,
-) -> (
- err: time.Benchmark_Error,
-) {
- assert(options != nil)
-
- options.input = make([]u8, options.bytes, allocator)
- return nil if len(options.input) == options.bytes else .Allocation_Error
-}
-
-_teardown_sized_buf :: proc(
- options: ^time.Benchmark_Options,
- allocator := context.allocator,
-) -> (
- err: time.Benchmark_Error,
-) {
- assert(options != nil)
-
- delete(options.input)
- return nil
-}
-
-_benchmark_chacha20 :: proc(
- options: ^time.Benchmark_Options,
- allocator := context.allocator,
-) -> (
- err: time.Benchmark_Error,
-) {
- buf := options.input
- key := [chacha20.KEY_SIZE]byte {
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- }
- nonce := [chacha20.NONCE_SIZE]byte {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- }
-
- ctx: chacha20.Context = ---
- chacha20.init(&ctx, key[:], nonce[:])
-
- for _ in 0 ..= options.rounds {
- chacha20.xor_bytes(&ctx, buf, buf)
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- return nil
-}
-
-_benchmark_poly1305 :: proc(
- options: ^time.Benchmark_Options,
- allocator := context.allocator,
-) -> (
- err: time.Benchmark_Error,
-) {
- buf := options.input
- key := [poly1305.KEY_SIZE]byte {
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- }
-
- tag: [poly1305.TAG_SIZE]byte = ---
- for _ in 0 ..= options.rounds {
- poly1305.sum(tag[:], buf, key[:])
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- //options.hash = u128(h)
- return nil
-}
-
-_benchmark_chacha20poly1305 :: proc(
- options: ^time.Benchmark_Options,
- allocator := context.allocator,
-) -> (
- err: time.Benchmark_Error,
-) {
- buf := options.input
- key := [chacha20.KEY_SIZE]byte {
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
- }
- nonce := [chacha20.NONCE_SIZE]byte {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- }
-
- tag: [chacha20poly1305.TAG_SIZE]byte = ---
-
- for _ in 0 ..= options.rounds {
- chacha20poly1305.encrypt(buf, tag[:], key[:], nonce[:], nil, buf)
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- return nil
-}
-
-benchmark_print :: proc(name: string, options: ^time.Benchmark_Options) {
- fmt.printf(
- "\t[%v] %v rounds, %v bytes processed in %v ns\n\t\t%5.3f rounds/s, %5.3f MiB/s\n",
- name,
- options.rounds,
- options.processed,
- time.duration_nanoseconds(options.duration),
- options.rounds_per_second,
- options.megabytes_per_second,
- )
-}
-
-bench_chacha20 :: proc(t: ^testing.T) {
- name := "ChaCha20 64 bytes"
- options := &time.Benchmark_Options {
- rounds = 1_000,
- bytes = 64,
- setup = _setup_sized_buf,
- bench = _benchmark_chacha20,
- teardown = _teardown_sized_buf,
- }
-
- err := time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-
- name = "ChaCha20 1024 bytes"
- options.bytes = 1024
- err = time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-
- name = "ChaCha20 65536 bytes"
- options.bytes = 65536
- err = time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-}
-
-bench_poly1305 :: proc(t: ^testing.T) {
- name := "Poly1305 64 zero bytes"
- options := &time.Benchmark_Options {
- rounds = 1_000,
- bytes = 64,
- setup = _setup_sized_buf,
- bench = _benchmark_poly1305,
- teardown = _teardown_sized_buf,
- }
-
- err := time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-
- name = "Poly1305 1024 zero bytes"
- options.bytes = 1024
- err = time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-}
-
-bench_chacha20poly1305 :: proc(t: ^testing.T) {
- name := "chacha20poly1305 64 bytes"
- options := &time.Benchmark_Options {
- rounds = 1_000,
- bytes = 64,
- setup = _setup_sized_buf,
- bench = _benchmark_chacha20poly1305,
- teardown = _teardown_sized_buf,
- }
-
- err := time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-
- name = "chacha20poly1305 1024 bytes"
- options.bytes = 1024
- err = time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-
- name = "chacha20poly1305 65536 bytes"
- options.bytes = 65536
- err = time.benchmark(options, context.allocator)
- tc.expect(t, err == nil, name)
- benchmark_print(name, options)
-}
-
-bench_ed25519 :: proc(t: ^testing.T) {
- iters :: 10000
-
- priv_str := "cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe"
- priv_bytes, _ := hex.decode(transmute([]byte)(priv_str), context.temp_allocator)
- priv_key: ed25519.Private_Key
- start := time.now()
- for i := 0; i < iters; i = i + 1 {
- ok := ed25519.private_key_set_bytes(&priv_key, priv_bytes)
- assert(ok, "private key should deserialize")
- }
- elapsed := time.since(start)
- tc.log(
- t,
- fmt.tprintf(
- "ed25519.private_key_set_bytes: ~%f us/op",
- time.duration_microseconds(elapsed) / iters,
- ),
- )
-
- pub_bytes := priv_key._pub_key._b[:] // "I know what I am doing"
- pub_key: ed25519.Public_Key
- start = time.now()
- for i := 0; i < iters; i = i + 1 {
- ok := ed25519.public_key_set_bytes(&pub_key, pub_bytes[:])
- assert(ok, "public key should deserialize")
- }
- elapsed = time.since(start)
- tc.log(
- t,
- fmt.tprintf(
- "ed25519.public_key_set_bytes: ~%f us/op",
- time.duration_microseconds(elapsed) / iters,
- ),
- )
-
- msg := "Got a job for you, 621."
- sig_bytes: [ed25519.SIGNATURE_SIZE]byte
- msg_bytes := transmute([]byte)(msg)
- start = time.now()
- for i := 0; i < iters; i = i + 1 {
- ed25519.sign(&priv_key, msg_bytes, sig_bytes[:])
- }
- elapsed = time.since(start)
- tc.log(t, fmt.tprintf("ed25519.sign: ~%f us/op", time.duration_microseconds(elapsed) / iters))
-
- start = time.now()
- for i := 0; i < iters; i = i + 1 {
- ok := ed25519.verify(&pub_key, msg_bytes, sig_bytes[:])
- assert(ok, "signature should validate")
- }
- elapsed = time.since(start)
- tc.log(
- t,
- fmt.tprintf("ed25519.verify: ~%f us/op", time.duration_microseconds(elapsed) / iters),
- )
-}
-
-bench_x25519 :: proc(t: ^testing.T) {
- point_str := "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
- scalar_str := "cafebabecafebabecafebabecafebabecafebabecafebabecafebabecafebabe"
-
- point, _ := hex.decode(transmute([]byte)(point_str), context.temp_allocator)
- scalar, _ := hex.decode(transmute([]byte)(scalar_str), context.temp_allocator)
- out: [x25519.POINT_SIZE]byte = ---
-
- iters :: 10000
- start := time.now()
- for i := 0; i < iters; i = i + 1 {
- x25519.scalarmult(out[:], scalar[:], point[:])
- }
- elapsed := time.since(start)
-
- tc.log(
- t,
- fmt.tprintf("x25519.scalarmult: ~%f us/op", time.duration_microseconds(elapsed) / iters),
- )
-}
diff --git a/tests/core/download_assets.py b/tests/core/download_assets.py
index 7874b7e91..fc4a71cdc 100644
--- a/tests/core/download_assets.py
+++ b/tests/core/download_assets.py
@@ -7,8 +7,8 @@ import zipfile
import hashlib
import hmac
-TEST_SUITES = ['PNG', 'XML']
-DOWNLOAD_BASE_PATH = "assets/{}"
+TEST_SUITES = ['PNG', 'XML', 'BMP']
+DOWNLOAD_BASE_PATH = sys.argv[1] + "/{}"
ASSETS_BASE_URL = "https://raw.githubusercontent.com/odin-lang/test-assets/master/{}/{}"
HMAC_KEY = "https://odin-lang.org"
HMAC_HASH = hashlib.sha3_512
@@ -192,6 +192,94 @@ HMAC_DIGESTS = {
'z06n2c08.png': "94268c1998de1f4304d24219e31175def7375cc26e2bbfc7d1ac20465a42fae49bcc8ff7626873138b537588e8bce21b6d5e1373efaade1f83cae455334074aa",
'z09n2c08.png': "3cbb1bb58d78ecc9dd5568a8e9093ba020b63449ef3ab102f98fac4220fc9619feaa873336a25f3c1ad99cfb3e5d32bcfe52d966bc8640d1d5ba4e061741743e",
+ 'ba-bm.bmp': "2f76d46b1b9bea62e08e7fc5306452a495616cb7af7a0cbb79237ed457b083418d5859c9e6cfd0d9fbf1fe24495319b6f206135f36f2bd19330de01a8eaf20c8",
+ 'badbitcount.bmp': "2d37e22aa2e659416c950815841e5a402f2e9c21eb677390fc026eefaeb5be64345a7ef0fac2965a2cae8abe78c1e12086a7d93d8e62cc8659b35168c82f6d5f",
+ 'badbitssize.bmp': "f59cc30827bcb56f7e946dcffcaab22a5e197f2e3884cf80a2e596f5653f5203b3927674d9d5190486239964e65228f4e3f359cdd2f7d061b09846f5f26bfaa9",
+ 'baddens1.bmp': "aa84bebc41b3d50329269da9ee61fd7e1518ffd0e8f733af6872323bc46ace6ed1c9931a65a367d97b8b2cb2aa772ccd94fd3def0a79fd1c0baf185d669c386f",
+ 'baddens2.bmp': "5c254a8cde716fae77ebf20294a404383fd6afc705d783c5418762e7c4138aa621625bc6d08a8946ee3f1e8c40c767681a39806735bb3b3026fee5eb91d8fadc",
+ 'badfilesize.bmp': "9019b6853a91f69bd246f9b28da47007aec871c0e46fea7cd6ab5c30460a6938a1b09da8fa7ba8895650e37ce14a79d4183e9f2401eb510f60455410e2266eb5",
+ 'badheadersize.bmp': "90412d7c3bff7336d5e0c7ae899d8a53b82235072034f00783fb2403479447cd2959644f7ec70ae0988f99cc49b63356c8710b808ddd2280e19dca484f34074e",
+ 'badpalettesize.bmp': "d914a89f7b78fcdd6ab4433c176355755687b65c3cfc23db57de9d04447c440fa31d993db184940c1dc09b37e8e044324d8237877d3d1b1ad5657c4929d8435a",
+ 'badplanes.bmp': "46f583d4a43ef0c9964765b9d8820369955f0568a4eae0bf215434f508e8e03457bd759b73c344c2f88de7f33fc5379517ce3cf5b2e5a16ebc20c05df73aa723",
+ 'badrle.bmp': "a64e1551fd60159ff469ce25e1f5b4575dc462684f4ff66c7ea69b2990c7c9d2547b72237020e2d001f69dfd31f1ac45e0a9630d0ddd11c77584881f3e25609e",
+ 'badrle4.bmp': "2bd22418010b1ac3eac50932ed06e578411ac2741bfa50a9edd1b360686efa28c74df8b14d92e05b711eeb88a5e826256c6a5cf5a0176a29369fb92b336efb93",
+ 'badrle4bis.bmp': "d7a24ab095e1ca5e888dd1bcb732b19bb1983f787c64c1eb5a273da0f58c4b8cd137197df9ac47572a74c3026aab5af1f08551a2121af37b8941cffa71df1951",
+ 'badrle4ter.bmp': "825cc5361378d44524205b117825f95228c4d093d39ac2fc2ab755be743df78784529f2019418deca31059f3e46889a66658e7424b4f896668ee4cfa281574bc",
+ 'badrlebis.bmp': "f41acfd4f989302bb5ec42a2e759a56f71a5ecac5a814842e32542742ca015464f8579ebeec0e7e9cea45e2aafe51456cfe18b48b509bc3704f992bcc9d321af",
+ 'badrleter.bmp': "a8f3e0b0668fc4f43353028d5fca87d6cac6ff0c917c4e7a61c624918360ff598ec9eaa32f5c6a070da9bf6e90c58426f5a901fdab9dfb0a4fdca0c72ba67de4",
+ 'badwidth.bmp': "68f192a55b8de66f8e13fe316647262a5e4641365eb77d4987c84ab1eae35b7cba20827277cd569583543819de70ec75f383367f72cd229e48743ad1e45bfa9e",
+ 'pal1.bmp': "0194c9b501ac7e043fab78746e6f142e0c880917d0fd6dbb7215765b8fc1ce4403ad85146c555665ba6e37d3b47edad5e687b9260e7a61a27d8a059bc81bb525",
+ 'pal1bg.bmp': "3aafc29122bd6e97d88d740be1f61cb9febe8373d19ae6d731f4af776c868dd489260287bf0cf1c960f9d9afcbc7448e83e45435d3e42e913823c0f5c2a80d9f",
+ 'pal1huffmsb.bmp': "4e122f602c3556f4f5ab45f9e13a617d8210d81f587d08cbd6c2110dc6231573aec92a6344aeb4734c00d3dcf380130f53a887002756811d8edd6bc5aabbafc0",
+ 'pal1p1.bmp': "33e2b2b1c1bed43ba64888d7739eb830c7789857352513de08b6e35718ac0e421afcdae0e7bab97c25d1ad972eb4f09e2c6556c416d4d7367c545330c4123df0",
+ 'pal1wb.bmp': "bc583ad4eaae40f5d2e3a6280aeb3c62ee11b2cf05ba7c8386f9578587e29b66819293992bdcd31c2750c21cd9bf97daa603ce1051fbfdd40fadbc1860156853",
+ 'pal2.bmp': "7b560ba972cf58ca1ed01910fa4f630ca74e657d46d134e2ac0df733eb5773d0a1788e745d5240efa18f182bd8dce22c7ac7cee6f99ddc946a27b65297762764",
+ 'pal2color.bmp': "b868a8aaa22fac3aa86bbd5270eb5ffee06959461be8177880829d838be0391d9617d11d73fab1643520a92364dc333c25c0510bb2628c8fb945719518d2675f",
+ 'pal4.bmp': "53a39fdb86630c828d9003a1e95dbd59c47524c4ec044d8ce72e1b643166b4c2b6ec06ab5191cb25d17be2fcb18bd7a9e0b7ec169722e6d89b725609a15b1df1",
+ 'pal4gs.bmp': "ab4c2078943afdf19bcc02b1ebbe5a69cfa93d1152f7882db6176c39b917191a2760fbb2127e5207b0bfb3dafd711593a6aed61d312807605913062aa1ce9c2f",
+ 'pal4rle.bmp': "c86c86280b75a252ccf484e4bba2df45d3747dc1e4879795e925613959a0c451e2fc4890532e8aef9911e38e45e7d6a8baf29d57e573d26c20923a5823700443",
+ 'pal4rlecut.bmp': "f38d485dbb8e67bdeaefba181f9a05556a986ed3f834edca723c088e813764bb2b42240d4fbb938a1775370b79b9ea2f14277ffe9c7247c1e0e77766fec27189",
+ 'pal4rletrns.bmp': "b81e7fed38854d201a3199ce50ca05e92ca287c860797142857ac20b4a2f28952b058e21687c0fae60712f5784cd2c950ce70148ba1316efe31d4f3fc4006817",
+ 'pal8-0.bmp': "f39a4f1827c52bb620d975f8c72f5e95f90ac6c65ae0a6776ff1ad95808c090de17cbd182188a85157396fd9649ea4b5d84bb7c9175ab49ce2845da214c16bff",
+ 'pal8.bmp': "be27e55a866cbb655fdd917435cd6a5b62c20ae0d6ef7c1533c5a01dd9a893f058cc4ba2d902ab9315380009808e06b7f180116c9b790587cf62aa770c7a4a07",
+ 'pal8badindex.bmp': "bd5fc036985ae705182915a560dee2e5dfb3bd8b50932337b9085e190259c66e6bae5fbc813a261d352a60dcb0755798bdc251d6c2a0b638a7e337ba58811811",
+ 'pal8gs.bmp': "228f944b3e45359f62a2839d4e7b94d7f3a1074ad9e25661fdb9e8fff4c15581c85a7bb0ac75c92b95c7537ececc9d80b835cfe55bc7560a513118224a9ed36f",
+ 'pal8nonsquare.bmp': "b8adc9b03880975b232849ea1e8f87705937929d743df3d35420902b32557065354ab71d0d8176646bf0ad72c583c884cfcd1511017260ffca8c41d5a358a3eb",
+ 'pal8offs.bmp': "c92f1e1835d753fd8484be5198b2b8a2660d5e54117f6c4fc6d2ebc8d1def72a8d09cd820b1b8dcee15740b47151d62b8b7aca0b843e696252e28226b51361cf",
+ 'pal8os2-hs.bmp': "477c04048787eb412f192e7fe47ae96f14d7995391e78b10cc4c365f8c762f60c54cad7ef9d1705a78bd490a578fb346ee0a383c3a3fdf790558a12589eb04eb",
+ 'pal8os2-sz.bmp': "fd0eeb733be9b39f492d0f67dd28fc67207149e41691c206d4de4c693b5dea9458b88699a781383e7050a3b343259659aae64fec0616c98f3f8555cbf5c9e46c",
+ 'pal8os2.bmp': "cdab3ed7bc9f38d89117332a21418b3c916a99a8d8fb6b7ce456d54288c96152af12c0380293b04e96594a7867b83be5c99913d224c9750c7d38295924e0735a",
+ 'pal8os2sp.bmp': "f6e595a6db992ab7d1f79442d31f39f648061e7de13e51b07933283df065ce405c0208e6101ac916e4eb0613e412116f008510021a2d17543aa7f0a32349c96f",
+ 'pal8os2v2-16.bmp': "f52877d434218aa6b772a7aa0aaba4c2ae6ce35ecfa6876943bb350fdf9554f1f763a8d5bb054362fb8f9848eb71ce14a371f4a76da4b9475cdcee4f286109a4",
+ 'pal8os2v2-40sz.bmp': "9481691ada527df1f529316d44b5857c6a840c5dafa7e9795c9cb92dac02c6cc35739d3f6ce33d4ab6ff6bcd6b949741e89dc8c42cf52ad4546ff58cd3b5b66a",
+ 'pal8os2v2-sz.bmp': "99cd2836f90591cd27b0c8696ecff1e7a1debcef284bbe5d21e68759270c1bfe1f32ee8f576c49f3e64d8f4e4d9096574f3c8c79bfdae0545689da18364de3e7",
+ 'pal8os2v2.bmp': "7859b265956c7d369db7a0a357ce09bcda74e98954de88f454cae5e7cb021222146687a7770ce0cc2c58f1439c7c21c45c0c27464944e73913e1c88afc836c8a",
+ 'pal8oversizepal.bmp': "e778864e0669a33fce27c0ccd5b6460b572a5db01975e8d56acec8a9447e1c58d6051ad3516cfa96a39f4eb7f2576154188ea62ec187bcf4ae323883499383c0",
+ 'pal8rle.bmp': "88942a1cd2e36d1e0f0e2748a888034057919c7ec0f8d9b2664deb1daf1a6e45ed3e722dff5d810f413d6fc182e700a16d6563dd25f67dc6d135d751cd736dea",
+ 'pal8rlecut.bmp': "cda9fa274cde590aeaca81516e0465684cfae84e934eb983301801e978e6e2e9c590d22af992d9912e51bb9c2761945276bdbe0b6c47f3a021514414e1f3f455",
+ 'pal8rletrns.bmp': "0b2d5618dc9c81caa72c070070a4245dd9cd3de5d344b76ce9c15d0eeb72e2675efc264201f8709dfcffd234df09e76d6f328f16f2ad873ba846f870cadfa486",
+ 'pal8topdown.bmp': "d470a2b7556fa88eac820427cb900f59a121732cdb4a7f3530ed457798139c946a884a34ab79d822feb84c2ca6f4d9a65f6e792994eafc3a189948b9e4543546",
+ 'pal8v4.bmp': "0382610e32c49d5091a096cb48a54ebbf44d9ba1def96e2f30826fd3ddf249f5aed70ca5b74c484b6cdc3924f4d4bfed2f5194ad0bcf1d99bfaa3a619e299d86",
+ 'pal8v5.bmp': "50fadaa93aac2a377b565c4dc852fd4602538863b913cb43155f5ad7cf79928127ca28b33e5a3b0230076ea4a6e339e3bf57f019333f42c4e9f003a8f2376325",
+ 'pal8w124.bmp': "e54a901b9badda655cad828d226b381831aea7e36aec8729147e9e95a9f2b21a9d74d93756e908e812902a01197f1379fe7e35498dbafed02e27c853a24097b7",
+ 'pal8w125.bmp': "d7a04d45ef5b3830da071ca598f1e2a413c46834968b2db7518985cf8d8c7380842145899e133e71355b6b7d040ee9e97adec1e928ce4739282e0533058467c0",
+ 'pal8w126.bmp': "4b93845a98797679423c669c541a248b4cdfee80736f01cec29d8b40584bf55a27835c80656a2bf5c7ad3ed211c1f7d3c7d5831a6726904b39f10043a76b658d",
+ 'reallybig.bmp': "babbf0335bac63fd2e95a89210c61ae6bbaaeeab5f07974034e76b4dc2a5c755f77501e3d056479357445aac442e2807d7170ec44067bab8fd35742f0e7b8440",
+ 'rgb16-231.bmp': "611a87cb5d29f16ef71971714a3b0e3863a6df51fff16ce4d4df8ee028442f9ce03669fb5d7a6a838a12a75d8a887b56b5a2e44a3ad62f4ef3fc2e238c33f6a1",
+ 'rgb16-3103.bmp': "7fdff66f4d94341da522b4e40586b3b8c327be9778e461bca1600e938bfbaa872b484192b35cd84d9430ca20aa922ec0301567a74fb777c808400819db90b09d",
+ 'rgb16-565.bmp': "777883f64b9ae80d77bf16c6d062082b7a4702f8260c183680afee6ec26e48681bcca75f0f81c470be1ac8fcb55620b3af5ce31d9e114b582dfd82300a3d6654",
+ 'rgb16-565pal.bmp': "57e9dcf159415b3574a1b343a484391b0859ab2f480e22157f2a84bc188fde141a48826f960c6f30b1d1f17ef6503ec3afc883a2f25ff09dd50c437244f9ae7f",
+ 'rgb16-880.bmp': "8d61183623002da4f7a0a66b42aa58a120e3a91578bb0c4a5e2c5ba7d08b875d43a22f2b5b3a449d3caf4cc303cb05111dd1d5169953f288493b7ea3c2423d24",
+ 'rgb16.bmp': "1c0fe56661d4998edd76efedda520a441171d42ae4dad95b350e3b61deda984c3a3255392481fe1903e5e751357da3f35164935e323377f015774280036ba39e",
+ 'rgb16bfdef.bmp': "ed55d086e27ba472076df418be0046b740944958afeb84d05aa2bbe578dec27ced122ffefb6d549e1d07e05eb608979b3ac9b1bd809f8237cf0984ffdaa24716",
+ 'rgb16faketrns.bmp': "9cd4a8e05fe125649e360715851ef912e78a56d30e0ea1b1cfb6eaafd386437d45de9c1e1a845dd8d63ff5a414832355b8ae0e2a96d72a42a7205e8a2742d37c",
+ 'rgb24.bmp': "4f0ce2978bbfea948798b2fdcc4bdbe8983a6c94d1b7326f39daa6059368e08ebf239260984b64eeb0948f7c8089a523e74b7fa6b0437f9205d8af8891340389",
+ 'rgb24largepal.bmp': "b377aee1594c5d9fc806a70bc62ee83cf8d1852b4a2b18fd3e9409a31aa3b5a4cf5e3b4af2cbdebcef2b5861b7985a248239684a72072437c50151adc524e9df",
+ 'rgb24pal.bmp': "f40bb6e01f6ecb3d55aa992bf1d1e2988ea5eb11e3e58a0c59a4fea2448de26f231f45e9f378b7ee1bdd529ec57a1de38ea536e397f5e1ac6998921e066ab427",
+ 'rgb24png.bmp': "c60890bbd79c12472205665959eb6e2dd2103671571f80117b9e963f897cffca103181374a4797f53c7768af01a705e830a0de4dd8fab7241d24c17bee4a4dbe",
+ 'rgb24rle24.bmp': "ea0ff3f512dd04176d14b43dfbee73ac7f1913aa1b77587e187e271781c7dacec880cec73850c4127ea9f8dd885f069e281f159bb5232e93cbb2d1ee9cb50438",
+ 'rgb32-111110.bmp': "732884e300d4edbcf31556a038947beefc0c5e749131a66d2d7aa1d4ec8c8eba07833133038a03bbe4c4fa61a805a5df1f797b5853339ee6a8140478f5f70a76",
+ 'rgb32-7187.bmp': "4c55aab2e4ecf63dc30b04e5685c5d9fba7354ca0e1327d7c4b15d6da10ac66ca1cea6f0303f9c5f046da3bcd2566275384e0e7bb14fcc5196ec39ca90fac194",
+ 'rgb32-xbgr.bmp': "1e9f240eaec6ac2833f8c719f1fb53cc7551809936620e871ccacfab26402e1afc6503b9f707e4ec25f15529e3ce6433c7f999d5714af31dfb856eb67e772f64",
+ 'rgb32.bmp': "32033dbf9462e5321b1182ba739624ed535aa4d33b775ffeeaf09d2d4cb663e4c3505e8c05489d940f754dde4b50a2e0b0688b21d06755e717e6e511b0947525",
+ 'rgb32bf.bmp': "7243c215827a9b4a1d7d52d67fb04ffb43b0b176518fbdab43d413e2a0c18883b106797f1acd85ba68d494ec939b0caab8789564670d058caf0e1175ce7983fb",
+ 'rgb32bfdef.bmp': "a81433babb67ce714285346a77bfccd19cf6203ac1d8245288855aff20cf38146a783f4a7eac221db63d1ee31345da1329e945b432f0e7bcf279ea88ff5bb302",
+ 'rgb32fakealpha.bmp': "abecaf1b5bfad322de7aec897efe7aa6525f2a77a0af86cc0a0a366ed1650da703cf4b7b117a7ba34f21d03a8a0045e5821248cdefa00b0c78e01d434b55e746",
+ 'rgb32h52.bmp': "707d734393c83384bc75720330075ec9ffefc69167343259ebf95f9393948364a40f33712619f962e7056483b73334584570962c16da212cd5291f764b3f2cd1",
+ 'rgba16-1924.bmp': "3e41a5d8d951bac580c780370ca21e0783de8154f4081106bc58d1185bb2815fc5b7f08f2a1c75cd205fc52c888e9d07c91856651603a2d756c9cfc392585598",
+ 'rgba16-4444.bmp': "a6662186b23bd434a7e019d2a71cd95f53a47b64a1afea4c27ae1120685d041a9ff98800a43a9d8f332682670585bdb2fa77ff77b6def65139fe725323f91561",
+ 'rgba16-5551.bmp': "a7d9f8ae7f8349cd6df651ab9d814411939fa2a235506ddfdd0df5a8f8abcf75552c32481ea035ff29e683bdcd34da68eb23730857e0716b79af51d69a60757b",
+ 'rgba32-1.bmp': "3958d18d2a7f32ada69cb11e0b4397821225a5c40acc7b6d36ff28ee4565e150cc508971278a2ddf8948aaff86f66ec6a0c24513db44962d81b79c4239b3e612",
+ 'rgba32-1010102.bmp': "59a28db5563caf954d31b20a1d1cc59366fcfd258b7ba2949f7281978460a3d94bedcc314c089243dd7463bb18d36a9473355158a7d903912cb25b98eab6b068",
+ 'rgba32-2.bmp': "9b7e5965ff9888f011540936ab6b3022edf9f6c5d7e541d6882cb81820cf1d68326d65695a6f0d01999ac10a496a504440906aa45e413da593405563c54c1a05",
+ 'rgba32-61754.bmp': "784ae0a32b19fa925e0c86dbff3bd38d80904d0fa7dc3b03e9d4f707d42b1604c1f54229e901ccc249cab8c2976d58b1e16980157d9bf3dbc4e035e2b2fd1779",
+ 'rgba32-81284.bmp': "fcfca645017c0d15d44b08598a90d238d063763fd06db665d9a8e36ef5099ce0bf4d440e615c6d6b1bf99f38230d4848318bfa1e6d9bfdd6dfd521cc633ba110",
+ 'rgba32abf.bmp': "2883d676966d298d235196f102e044e52ef18f3cb5bb0dd84738c679f0a1901181483ca2df1cccf6e4b3b4e98be39e81de69c9a58f0d70bc3ebb0fcea80daa0c",
+ 'rgba32h56.bmp': "507d0caf29ccb011c83c0c069c21105ea1d58e06b92204f9c612f26102123a7680eae53fef023c701952d903e11b61f8aa07618c381ea08f6808c523f5a84546",
+ 'rgba64.bmp': "d01f14f649c1c33e3809508cc6f089dd2ab0a538baf833a91042f2e54eca3f8e409908e15fa8763b059d7fa022cf5c074d9f5720eed5293a4c922e131c2eae68",
+ 'rletopdown.bmp': "37500893aad0b40656aa80fd5c7c5f9b35d033018b8070d8b1d7baeb34c90f90462288b13295204b90aa3e5c9be797d22a328e3714ab259334e879a09a3de175",
+ 'shortfile.bmp': "be3ffade7999304f00f9b7d152b5b27811ad1166d0fd43004392467a28f44b6a4ec02a23c0296bacd4f02f8041cd824b9ca6c9fc31fed27e36e572113bb47d73",
+
'unicode.xml': "e0cdc94f07fdbb15eea811ed2ae6dcf494a83d197dafe6580c740270feb0d8f5f7146d4a7d4c2d2ea25f8bd9678bc986123484b39399819a6b7262687959d1ae",
}
@@ -233,6 +321,7 @@ def try_download_and_unpack_zip(suite):
hmac_digest = hmac.new(HMAC_KEY.encode(), file_data, HMAC_HASH).hexdigest()
print("{} *{}".format(hmac_digest, file.filename))
+
if not hmac.compare_digest(hmac_digest, HMAC_DIGESTS[file.filename]):
print("FAIL! Expected: {}".format(HMAC_DIGESTS[file.filename]))
return 4
diff --git a/tests/core/encoding/base64/base64.odin b/tests/core/encoding/base64/base64.odin
index e48eea020..6679c8ce2 100644
--- a/tests/core/encoding/base64/base64.odin
+++ b/tests/core/encoding/base64/base64.odin
@@ -1,61 +1,38 @@
package test_encoding_base64
import "base:intrinsics"
-
import "core:encoding/base64"
-import "core:fmt"
-import "core:os"
-import "core:reflect"
import "core:testing"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect_value :: testing.expect_value
-
-} else {
- expect_value :: proc(t: ^testing.T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
- TEST_count += 1
- ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
- if !ok {
- TEST_fail += 1
- fmt.printf("[%v] expected %v, got %v\n", loc, expected, value)
- }
- return ok
- }
+Test :: struct {
+ vector: string,
+ base64: string,
}
-main :: proc() {
- t := testing.T{}
-
- test_encoding(&t)
- test_decoding(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
+tests :: []Test{
+ {"", ""},
+ {"f", "Zg=="},
+ {"fo", "Zm8="},
+ {"foo", "Zm9v"},
+ {"foob", "Zm9vYg=="},
+ {"fooba", "Zm9vYmE="},
+ {"foobar", "Zm9vYmFy"},
}
@(test)
test_encoding :: proc(t: ^testing.T) {
- expect_value(t, base64.encode(transmute([]byte)string("")), "")
- expect_value(t, base64.encode(transmute([]byte)string("f")), "Zg==")
- expect_value(t, base64.encode(transmute([]byte)string("fo")), "Zm8=")
- expect_value(t, base64.encode(transmute([]byte)string("foo")), "Zm9v")
- expect_value(t, base64.encode(transmute([]byte)string("foob")), "Zm9vYg==")
- expect_value(t, base64.encode(transmute([]byte)string("fooba")), "Zm9vYmE=")
- expect_value(t, base64.encode(transmute([]byte)string("foobar")), "Zm9vYmFy")
+ for test in tests {
+ v := base64.encode(transmute([]byte)test.vector)
+ defer delete(v)
+ testing.expect_value(t, v, test.base64)
+ }
}
@(test)
test_decoding :: proc(t: ^testing.T) {
- expect_value(t, string(base64.decode("")), "")
- expect_value(t, string(base64.decode("Zg==")), "f")
- expect_value(t, string(base64.decode("Zm8=")), "fo")
- expect_value(t, string(base64.decode("Zm9v")), "foo")
- expect_value(t, string(base64.decode("Zm9vYg==")), "foob")
- expect_value(t, string(base64.decode("Zm9vYmE=")), "fooba")
- expect_value(t, string(base64.decode("Zm9vYmFy")), "foobar")
+ for test in tests {
+ v := string(base64.decode(test.base64))
+ defer delete(v)
+ testing.expect_value(t, v, test.vector)
+ }
}
diff --git a/tests/core/encoding/cbor/test_core_cbor.odin b/tests/core/encoding/cbor/test_core_cbor.odin
index 72244e1d3..d069ef05b 100644
--- a/tests/core/encoding/cbor/test_core_cbor.odin
+++ b/tests/core/encoding/cbor/test_core_cbor.odin
@@ -1,105 +1,15 @@
package test_encoding_cbor
import "base:intrinsics"
-
import "core:bytes"
import "core:encoding/cbor"
import "core:fmt"
import "core:io"
import "core:math/big"
-import "core:mem"
-import "core:os"
import "core:reflect"
import "core:testing"
import "core:time"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- expect_value :: testing.expect_value
- errorf :: testing.errorf
- log :: testing.log
-
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
-
- expect_value :: proc(t: ^testing.T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
- TEST_count += 1
- ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
- if !ok {
- TEST_fail += 1
- fmt.printf("[%v] expected %v, got %v\n", loc, expected, value)
- }
- return ok
- }
-
- errorf :: proc(t: ^testing.T, fmts: string, args: ..any, loc := #caller_location) {
- TEST_fail += 1
- fmt.printf("[%v] ERROR: ", loc)
- fmt.printf(fmts, ..args)
- fmt.println()
- }
-
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- test_marshalling(&t)
-
- test_marshalling_maybe(&t)
- test_marshalling_nil_maybe(&t)
-
- test_marshalling_union(&t)
-
- test_lying_length_array(&t)
-
- test_decode_unsigned(&t)
- test_encode_unsigned(&t)
-
- test_decode_negative(&t)
- test_encode_negative(&t)
-
- test_decode_simples(&t)
- test_encode_simples(&t)
-
- test_decode_floats(&t)
- test_encode_floats(&t)
-
- test_decode_bytes(&t)
- test_encode_bytes(&t)
-
- test_decode_strings(&t)
- test_encode_strings(&t)
-
- test_decode_lists(&t)
- test_encode_lists(&t)
-
- test_decode_maps(&t)
- test_encode_maps(&t)
-
- test_decode_tags(&t)
- test_encode_tags(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
Foo :: struct {
str: string,
cstr: cstring,
@@ -143,14 +53,6 @@ FooBars :: bit_set[FooBar; u16]
@(test)
test_marshalling :: proc(t: ^testing.T) {
- tracker: mem.Tracking_Allocator
- mem.tracking_allocator_init(&tracker, context.allocator)
- context.allocator = mem.tracking_allocator(&tracker)
- context.temp_allocator = context.allocator
- defer mem.tracking_allocator_destroy(&tracker)
-
- ev :: expect_value
-
{
nice := "16 is a nice number"
now := time.Time{_nsec = 1701117968 * 1e9}
@@ -205,18 +107,18 @@ test_marshalling :: proc(t: ^testing.T) {
}
data, err := cbor.marshal(f, cbor.ENCODE_FULLY_DETERMINISTIC)
- ev(t, err, nil)
+ testing.expect_value(t, err, nil)
defer delete(data)
decoded, derr := cbor.decode(string(data))
- ev(t, derr, nil)
+ testing.expect_value(t, derr, nil)
defer cbor.destroy(decoded)
diagnosis, eerr := cbor.to_diagnostic_format(decoded)
- ev(t, eerr, nil)
+ testing.expect_value(t, eerr, nil)
defer delete(diagnosis)
- ev(t, diagnosis, `{
+ testing.expect_value(t, diagnosis, `{
"base64": 34("MTYgaXMgYSBuaWNlIG51bWJlcg=="),
"biggest": 2(h'f951a9fd3c158afdff08ab8e0'),
"biggie": 18446744073709551615,
@@ -285,7 +187,7 @@ test_marshalling :: proc(t: ^testing.T) {
backf: Foo
uerr := cbor.unmarshal(string(data), &backf)
- ev(t, uerr, nil)
+ testing.expect_value(t, uerr, nil)
defer {
delete(backf.str)
delete(backf.cstr)
@@ -304,104 +206,102 @@ test_marshalling :: proc(t: ^testing.T) {
big.destroy(&backf.smallest)
}
- ev(t, backf.str, f.str)
- ev(t, backf.cstr, f.cstr)
+ testing.expect_value(t, backf.str, f.str)
+ testing.expect_value(t, backf.cstr, f.cstr)
#partial switch v in backf.value {
case ^cbor.Map:
for entry, i in v {
fm := f.value.(^cbor.Map)
- ev(t, entry.key, fm[i].key)
+ testing.expect_value(t, entry.key, fm[i].key)
if str, is_str := entry.value.(^cbor.Text); is_str {
- ev(t, str^, fm[i].value.(^cbor.Text)^)
+ testing.expect_value(t, str^, fm[i].value.(^cbor.Text)^)
} else {
- ev(t, entry.value, fm[i].value)
+ testing.expect_value(t, entry.value, fm[i].value)
}
}
- case: errorf(t, "wrong type %v", v)
+ case: testing.expectf(t, false, "wrong type %v", v)
}
- ev(t, backf.neg, f.neg)
- ev(t, backf.iamint, f.iamint)
- ev(t, backf.base64, f.base64)
- ev(t, backf.renamed, f.renamed)
- ev(t, backf.now, f.now)
- ev(t, backf.nowie, f.nowie)
- for e, i in f.child.dyn { ev(t, backf.child.dyn[i], e) }
- for key, value in f.child.mappy { ev(t, backf.child.mappy[key], value) }
- ev(t, backf.child.my_integers, f.child.my_integers)
- ev(t, len(backf.my_bytes), 0)
- ev(t, len(backf.my_bytes), len(f.my_bytes))
- ev(t, backf.ennie, f.ennie)
- ev(t, backf.ennieb, f.ennieb)
- ev(t, backf.quat, f.quat)
- ev(t, backf.comp, f.comp)
- ev(t, backf.important, f.important)
- ev(t, backf.no, nil)
- ev(t, backf.nos, nil)
- ev(t, backf.yes, f.yes)
- ev(t, backf.biggie, f.biggie)
- ev(t, backf.smallie, f.smallie)
- ev(t, backf.onetwenty, f.onetwenty)
- ev(t, backf.small_onetwenty, f.small_onetwenty)
- ev(t, backf.ignore_this, nil)
+ testing.expect_value(t, backf.neg, f.neg)
+ testing.expect_value(t, backf.iamint, f.iamint)
+ testing.expect_value(t, backf.base64, f.base64)
+ testing.expect_value(t, backf.renamed, f.renamed)
+ testing.expect_value(t, backf.now, f.now)
+ testing.expect_value(t, backf.nowie, f.nowie)
+ for e, i in f.child.dyn { testing.expect_value(t, backf.child.dyn[i], e) }
+ for key, value in f.child.mappy { testing.expect_value(t, backf.child.mappy[key], value) }
+ testing.expect_value(t, backf.child.my_integers, f.child.my_integers)
+ testing.expect_value(t, len(backf.my_bytes), 0)
+ testing.expect_value(t, len(backf.my_bytes), len(f.my_bytes))
+ testing.expect_value(t, backf.ennie, f.ennie)
+ testing.expect_value(t, backf.ennieb, f.ennieb)
+ testing.expect_value(t, backf.quat, f.quat)
+ testing.expect_value(t, backf.comp, f.comp)
+ testing.expect_value(t, backf.important, f.important)
+ testing.expect_value(t, backf.no, nil)
+ testing.expect_value(t, backf.nos, nil)
+ testing.expect_value(t, backf.yes, f.yes)
+ testing.expect_value(t, backf.biggie, f.biggie)
+ testing.expect_value(t, backf.smallie, f.smallie)
+ testing.expect_value(t, backf.onetwenty, f.onetwenty)
+ testing.expect_value(t, backf.small_onetwenty, f.small_onetwenty)
+ testing.expect_value(t, backf.ignore_this, nil)
s_equals, s_err := big.equals(&backf.smallest, &f.smallest)
- ev(t, s_err, nil)
+ testing.expect_value(t, s_err, nil)
if !s_equals {
- errorf(t, "smallest: %v does not equal %v", big.itoa(&backf.smallest), big.itoa(&f.smallest))
+ testing.expectf(t, false, "smallest: %v does not equal %v", big.itoa(&backf.smallest), big.itoa(&f.smallest))
}
b_equals, b_err := big.equals(&backf.biggest, &f.biggest)
- ev(t, b_err, nil)
+ testing.expect_value(t, b_err, nil)
if !b_equals {
- errorf(t, "biggest: %v does not equal %v", big.itoa(&backf.biggest), big.itoa(&f.biggest))
+ testing.expectf(t, false, "biggest: %v does not equal %v", big.itoa(&backf.biggest), big.itoa(&f.biggest))
}
}
-
- for _, leak in tracker.allocation_map {
- errorf(t, "%v leaked %m\n", leak.location, leak.size)
- }
-
- for bad_free in tracker.bad_free_array {
- errorf(t, "%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
- }
}
@(test)
test_marshalling_maybe :: proc(t: ^testing.T) {
maybe_test: Maybe(int) = 1
data, err := cbor.marshal(maybe_test)
- expect_value(t, err, nil)
+ defer delete(data)
+ testing.expect_value(t, err, nil)
val, derr := cbor.decode(string(data))
- expect_value(t, derr, nil)
+ testing.expect_value(t, derr, nil)
- expect_value(t, cbor.to_diagnostic_format(val), "1")
+ diag := cbor.to_diagnostic_format(val)
+ testing.expect_value(t, diag, "1")
+ delete(diag)
maybe_dest: Maybe(int)
uerr := cbor.unmarshal(string(data), &maybe_dest)
- expect_value(t, uerr, nil)
- expect_value(t, maybe_dest, 1)
+ testing.expect_value(t, uerr, nil)
+ testing.expect_value(t, maybe_dest, 1)
}
@(test)
test_marshalling_nil_maybe :: proc(t: ^testing.T) {
maybe_test: Maybe(int)
data, err := cbor.marshal(maybe_test)
- expect_value(t, err, nil)
+ defer delete(data)
+ testing.expect_value(t, err, nil)
val, derr := cbor.decode(string(data))
- expect_value(t, derr, nil)
+ testing.expect_value(t, derr, nil)
- expect_value(t, cbor.to_diagnostic_format(val), "nil")
+ diag := cbor.to_diagnostic_format(val)
+ testing.expect_value(t, diag, "nil")
+ delete(diag)
maybe_dest: Maybe(int)
uerr := cbor.unmarshal(string(data), &maybe_dest)
- expect_value(t, uerr, nil)
- expect_value(t, maybe_dest, nil)
+ testing.expect_value(t, uerr, nil)
+ testing.expect_value(t, maybe_dest, nil)
}
@(test)
@@ -427,17 +327,24 @@ test_marshalling_union :: proc(t: ^testing.T) {
{
test: My_Union = My_Distinct("Hello, World!")
data, err := cbor.marshal(test)
- expect_value(t, err, nil)
+ defer delete(data)
+ testing.expect_value(t, err, nil)
val, derr := cbor.decode(string(data))
- expect_value(t, derr, nil)
+ defer cbor.destroy(val)
+ testing.expect_value(t, derr, nil)
- expect_value(t, cbor.to_diagnostic_format(val, -1), `1010(["My_Distinct", "Hello, World!"])`)
+ diag := cbor.to_diagnostic_format(val, -1)
+ defer delete(diag)
+ testing.expect_value(t, diag, `1010(["My_Distinct", "Hello, World!"])`)
dest: My_Union
uerr := cbor.unmarshal(string(data), &dest)
- expect_value(t, uerr, nil)
- expect_value(t, dest, My_Distinct("Hello, World!"))
+ testing.expect_value(t, uerr, nil)
+ testing.expect_value(t, dest, My_Distinct("Hello, World!"))
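+		// The backing string for the distinct variant is allocated by cbor.unmarshal; free it so the test stays leak-free.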
+ if str, ok := dest.(My_Distinct); ok {
+ delete(string(str))
+ }
}
My_Union_No_Nil :: union #no_nil {
@@ -450,17 +357,21 @@ test_marshalling_union :: proc(t: ^testing.T) {
{
test: My_Union_No_Nil = My_Struct{.Two}
data, err := cbor.marshal(test)
- expect_value(t, err, nil)
+ defer delete(data)
+ testing.expect_value(t, err, nil)
val, derr := cbor.decode(string(data))
- expect_value(t, derr, nil)
+ defer cbor.destroy(val)
+ testing.expect_value(t, derr, nil)
- expect_value(t, cbor.to_diagnostic_format(val, -1), `1010(["My_Struct", {"my_enum": 1}])`)
+ diag := cbor.to_diagnostic_format(val, -1)
+ defer delete(diag)
+ testing.expect_value(t, diag, `1010(["My_Struct", {"my_enum": 1}])`)
dest: My_Union_No_Nil
uerr := cbor.unmarshal(string(data), &dest)
- expect_value(t, uerr, nil)
- expect_value(t, dest, My_Struct{.Two})
+ testing.expect_value(t, uerr, nil)
+ testing.expect_value(t, dest, My_Struct{.Two})
}
}
@@ -469,7 +380,7 @@ test_lying_length_array :: proc(t: ^testing.T) {
// Input says this is an array of length max(u64), this should not allocate that amount.
input := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
_, err := cbor.decode(string(input))
- expect_value(t, err, io.Error.Unexpected_EOF) // .Out_Of_Memory would be bad.
+ testing.expect_value(t, err, io.Error.Unexpected_EOF) // .Out_Of_Memory would be bad.
}
@(test)
@@ -691,65 +602,73 @@ test_encode_lists :: proc(t: ^testing.T) {
expect_streamed_encoding(t, "\x9f\xff", &cbor.Array{})
{
- bytes.buffer_reset(&buf)
+ buf: bytes.Buffer
+ bytes.buffer_init_allocator(&buf, 0, 0)
+ defer bytes.buffer_destroy(&buf)
+ stream := bytes.buffer_to_stream(&buf)
+ encoder := cbor.Encoder{cbor.ENCODE_FULLY_DETERMINISTIC, stream, {}}
err: cbor.Encode_Error
err = cbor.encode_stream_begin(stream, .Array)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
{
err = cbor.encode_stream_array_item(encoder, u8(1))
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
err = cbor.encode_stream_array_item(encoder, &cbor.Array{u8(2), u8(3)})
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
err = cbor.encode_stream_begin(stream, .Array)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
{
err = cbor.encode_stream_array_item(encoder, u8(4))
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
err = cbor.encode_stream_array_item(encoder, u8(5))
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
}
err = cbor.encode_stream_end(stream)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
}
err = cbor.encode_stream_end(stream)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
- expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)string("\x9f\x01\x82\x02\x03\x9f\x04\x05\xff\xff")))
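+		// 0x9f opens an indefinite-length array and 0xff is its break marker, so the bytes read as [_ 1, [2, 3], [_ 4, 5]].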
+ testing.expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)string("\x9f\x01\x82\x02\x03\x9f\x04\x05\xff\xff")))
}
{
- bytes.buffer_reset(&buf)
+ buf: bytes.Buffer
+ bytes.buffer_init_allocator(&buf, 0, 0)
+ defer bytes.buffer_destroy(&buf)
+ stream := bytes.buffer_to_stream(&buf)
+ encoder := cbor.Encoder{cbor.ENCODE_FULLY_DETERMINISTIC, stream, {}}
err: cbor.Encode_Error
err = cbor._encode_u8(stream, 2, .Array)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
a := "a"
err = cbor.encode(encoder, &a)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
{
err = cbor.encode_stream_begin(stream, .Map)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
b := "b"
c := "c"
err = cbor.encode_stream_map_entry(encoder, &b, &c)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
err = cbor.encode_stream_end(stream)
- expect_value(t, err, nil)
+ testing.expect_value(t, err, nil)
}
- expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)string("\x82\x61\x61\xbf\x61\x62\x61\x63\xff")))
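+		// 0x82 is a two-element array, 0x61 XX a one-byte text string, and 0xbf/0xff delimit an indefinite-length map: ["a", {"b": "c"}].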
+ testing.expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)string("\x82\x61\x61\xbf\x61\x62\x61\x63\xff")))
}
}
@@ -807,30 +726,30 @@ expect_decoding :: proc(t: ^testing.T, encoded: string, decoded: string, type: t
res, err := cbor.decode(encoded)
defer cbor.destroy(res)
- expect_value(t, reflect.union_variant_typeid(res), type, loc)
- expect_value(t, err, nil, loc)
+ testing.expect_value(t, reflect.union_variant_typeid(res), type, loc)
+ testing.expect_value(t, err, nil, loc)
str := cbor.to_diagnostic_format(res, padding=-1)
defer delete(str)
- expect_value(t, str, decoded, loc)
+ testing.expect_value(t, str, decoded, loc)
}
expect_tag :: proc(t: ^testing.T, encoded: string, nr: cbor.Tag_Number, value_decoded: string, loc := #caller_location) {
res, err := cbor.decode(encoded)
defer cbor.destroy(res)
- expect_value(t, err, nil, loc)
+ testing.expect_value(t, err, nil, loc)
if tag, is_tag := res.(^cbor.Tag); is_tag {
- expect_value(t, tag.number, nr, loc)
+ testing.expect_value(t, tag.number, nr, loc)
str := cbor.to_diagnostic_format(tag, padding=-1)
defer delete(str)
- expect_value(t, str, value_decoded, loc)
+ testing.expect_value(t, str, value_decoded, loc)
} else {
- errorf(t, "Value %#v is not a tag", res, loc)
+ testing.expectf(t, false, "Value %#v is not a tag", res, loc)
}
}
@@ -838,35 +757,39 @@ expect_float :: proc(t: ^testing.T, encoded: string, expected: $T, loc := #calle
res, err := cbor.decode(encoded)
defer cbor.destroy(res)
- expect_value(t, reflect.union_variant_typeid(res), typeid_of(T), loc)
- expect_value(t, err, nil, loc)
+ testing.expect_value(t, reflect.union_variant_typeid(res), typeid_of(T), loc)
+ testing.expect_value(t, err, nil, loc)
#partial switch r in res {
case f16:
- when T == f16 { expect_value(t, res, expected, loc) } else { unreachable() }
+ when T == f16 { testing.expect_value(t, res, expected, loc) } else { unreachable() }
case f32:
- when T == f32 { expect_value(t, res, expected, loc) } else { unreachable() }
+ when T == f32 { testing.expect_value(t, res, expected, loc) } else { unreachable() }
case f64:
- when T == f64 { expect_value(t, res, expected, loc) } else { unreachable() }
+ when T == f64 { testing.expect_value(t, res, expected, loc) } else { unreachable() }
case:
unreachable()
}
}
-buf: bytes.Buffer
-stream := bytes.buffer_to_stream(&buf)
-encoder := cbor.Encoder{cbor.ENCODE_FULLY_DETERMINISTIC, stream, {}}
-
expect_encoding :: proc(t: ^testing.T, val: cbor.Value, encoded: string, loc := #caller_location) {
- bytes.buffer_reset(&buf)
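+	// Build a fresh buffer, stream, and encoder per call instead of sharing package-level state between tests.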
+ buf: bytes.Buffer
+ bytes.buffer_init_allocator(&buf, 0, 0)
+ defer bytes.buffer_destroy(&buf)
+ stream := bytes.buffer_to_stream(&buf)
+ encoder := cbor.Encoder{cbor.ENCODE_FULLY_DETERMINISTIC, stream, {}}
- err := cbor.encode(encoder, val)
- expect_value(t, err, nil, loc)
- expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)encoded), loc)
+ err := cbor.encode(encoder, val, loc)
+ testing.expect_value(t, err, nil, loc)
+ testing.expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)encoded), loc)
}
expect_streamed_encoding :: proc(t: ^testing.T, encoded: string, values: ..cbor.Value, loc := #caller_location) {
- bytes.buffer_reset(&buf)
+ buf: bytes.Buffer
+ bytes.buffer_init_allocator(&buf, 0, 0)
+ defer bytes.buffer_destroy(&buf)
+ stream := bytes.buffer_to_stream(&buf)
+ encoder := cbor.Encoder{cbor.ENCODE_FULLY_DETERMINISTIC, stream, {}}
for value, i in values {
err: cbor.Encode_Error
@@ -891,15 +814,15 @@ expect_streamed_encoding :: proc(t: ^testing.T, encoded: string, values: ..cbor.
if err2 != nil { break }
}
case:
- errorf(t, "%v does not support streamed encoding", reflect.union_variant_typeid(value))
+ testing.expectf(t, false, "%v does not support streamed encoding", reflect.union_variant_typeid(value))
}
- expect_value(t, err, nil, loc)
- expect_value(t, err2, nil, loc)
+ testing.expect_value(t, err, nil, loc)
+ testing.expect_value(t, err2, nil, loc)
}
err := cbor.encode_stream_end(stream)
- expect_value(t, err, nil, loc)
+ testing.expect_value(t, err, nil, loc)
- expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)encoded), loc)
+ testing.expect_value(t, fmt.tprint(bytes.buffer_to_bytes(&buf)), fmt.tprint(transmute([]byte)encoded), loc)
}
diff --git a/tests/core/encoding/hex/test_core_hex.odin b/tests/core/encoding/hex/test_core_hex.odin
index d928cd28e..6a00c9705 100644
--- a/tests/core/encoding/hex/test_core_hex.odin
+++ b/tests/core/encoding/hex/test_core_hex.odin
@@ -2,42 +2,6 @@ package test_core_hex
import "core:encoding/hex"
import "core:testing"
-import "core:fmt"
-import "core:os"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- hex_encode(&t)
- hex_decode(&t)
- hex_decode_sequence(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
CASES :: [][2]string{
{"11", "3131"},
@@ -49,10 +13,14 @@ CASES :: [][2]string{
hex_encode :: proc(t: ^testing.T) {
for test in CASES {
encoded := string(hex.encode(transmute([]byte)test[0]))
- expect(
+ defer delete(encoded)
+ testing.expectf(
t,
encoded == test[1],
- fmt.tprintf("encode: %q -> %q (should be: %q)", test[0], encoded, test[1]),
+ "encode: %q -> %q (should be: %q)",
+ test[0],
+ encoded,
+ test[1],
)
}
}
@@ -61,11 +29,20 @@ hex_encode :: proc(t: ^testing.T) {
hex_decode :: proc(t: ^testing.T) {
for test in CASES {
decoded, ok := hex.decode(transmute([]byte)test[1])
- expect(t, ok, fmt.tprintf("decode: %q not ok", test[1]))
- expect(
+ defer delete(decoded)
+ testing.expectf(
+ t,
+ ok,
+ "decode: %q not ok",
+ test[1],
+ )
+ testing.expectf(
t,
string(decoded) == test[0],
- fmt.tprintf("decode: %q -> %q (should be: %q)", test[1], string(decoded), test[0]),
+ "decode: %q -> %q (should be: %q)",
+ test[1],
+ string(decoded),
+ test[0],
)
}
}
@@ -73,20 +50,37 @@ hex_decode :: proc(t: ^testing.T) {
@(test)
hex_decode_sequence :: proc(t: ^testing.T) {
b, ok := hex.decode_sequence("0x23")
- expect(t, ok, "decode_sequence: 0x23 not ok")
- expect(t, b == '#', fmt.tprintf("decode_sequence: 0x23 -> %c (should be: %c)", b, '#'))
+ testing.expect(t, ok, "decode_sequence: 0x23 not ok")
+ testing.expectf(
+ t,
+ b == '#',
+ "decode_sequence: 0x23 -> %c (should be: %c)",
+ b,
+ '#',
+ )
b, ok = hex.decode_sequence("0X3F")
- expect(t, ok, "decode_sequence: 0X3F not ok")
- expect(t, b == '?', fmt.tprintf("decode_sequence: 0X3F -> %c (should be: %c)", b, '?'))
+ testing.expect(t, ok, "decode_sequence: 0X3F not ok")
+ testing.expectf(
+ t,
+ b == '?',
+ "decode_sequence: 0X3F -> %c (should be: %c)",
+ b,
+ '?',
+ )
b, ok = hex.decode_sequence("2a")
- expect(t, ok, "decode_sequence: 2a not ok")
- expect(t, b == '*', fmt.tprintf("decode_sequence: 2a -> %c (should be: %c)", b, '*'))
+ testing.expect(t, ok, "decode_sequence: 2a not ok")
+ testing.expectf(t,
+ b == '*',
+ "decode_sequence: 2a -> %c (should be: %c)",
+ b,
+ '*',
+ )
_, ok = hex.decode_sequence("1")
- expect(t, !ok, "decode_sequence: 1 should be too short")
+ testing.expect(t, !ok, "decode_sequence: 1 should be too short")
_, ok = hex.decode_sequence("123")
- expect(t, !ok, "decode_sequence: 123 should be too long")
-}
+ testing.expect(t, !ok, "decode_sequence: 123 should be too long")
+} \ No newline at end of file
diff --git a/tests/core/encoding/hxa/test_core_hxa.odin b/tests/core/encoding/hxa/test_core_hxa.odin
index 40c3c2e23..31d40c8b3 100644
--- a/tests/core/encoding/hxa/test_core_hxa.odin
+++ b/tests/core/encoding/hxa/test_core_hxa.odin
@@ -6,127 +6,99 @@ package test_core_hxa
import "core:encoding/hxa"
import "core:fmt"
import "core:testing"
-import tc "tests:common"
-TEAPOT_PATH :: "core/assets/HXA/teapot.hxa"
+TEAPOT_PATH :: ODIN_ROOT + "tests/core/assets/HXA/teapot.hxa"
-main :: proc() {
- t := testing.T{}
-
- test_read(&t)
- test_write(&t)
-
- tc.report(&t)
-}
+import "core:os"
@test
test_read :: proc(t: ^testing.T) {
- filename := tc.get_data_path(t, TEAPOT_PATH)
- defer delete(filename)
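+	// hxa.read leaves ownership of `data` with the caller; backing and allocator are set below so
+	// hxa.file_destroy can release it along with the parsed nodes (inferred from the fields assigned here).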
+	data, data_ok := os.read_entire_file(TEAPOT_PATH)
+	testing.expectf(t, data_ok, "unable to read %v", TEAPOT_PATH)
+	file, err := hxa.read(data)
+	file.backing = data
+	file.allocator = context.allocator
+	defer hxa.file_destroy(file)
- file, err := hxa.read_from_file(filename)
e :: hxa.Read_Error.None
- tc.expect(t, err == e, fmt.tprintf("%v: read_from_file(%v) -> %v != %v", #procedure, filename, err, e))
- defer hxa.file_destroy(file)
+	testing.expectf(t, err == e, "hxa.read(%v) -> %v != %v", TEAPOT_PATH, err, e)
/* Header */
- tc.expect(t, file.magic_number == 0x417848, fmt.tprintf("%v: file.magic_number %v != %v",
- #procedure, file.magic_number, 0x417848))
- tc.expect(t, file.version == 1, fmt.tprintf("%v: file.version %v != %v",
- #procedure, file.version, 1))
- tc.expect(t, file.internal_node_count == 1, fmt.tprintf("%v: file.internal_node_count %v != %v",
- #procedure, file.internal_node_count, 1))
+ testing.expectf(t, file.magic_number == 0x417848, "file.magic_number %v != %v", file.magic_number, 0x417848)
+ testing.expectf(t, file.version == 1, "file.version %v != %v", file.version, 1)
+ testing.expectf(t, file.internal_node_count == 1, "file.internal_node_count %v != %v", file.internal_node_count, 1)
/* Nodes (only one) */
- tc.expect(t, len(file.nodes) == 1, fmt.tprintf("%v: len(file.nodes) %v != %v", #procedure, len(file.nodes), 1))
+ testing.expectf(t, len(file.nodes) == 1, "len(file.nodes) %v != %v", len(file.nodes), 1)
m := &file.nodes[0].meta_data
- tc.expect(t, len(m^) == 38, fmt.tprintf("%v: len(m^) %v != %v", #procedure, len(m^), 38))
+ testing.expectf(t, len(m^) == 38, "len(m^) %v != %v", len(m^), 38)
{
e :: "Texture resolution"
- tc.expect(t, m[0].name == e, fmt.tprintf("%v: m[0].name %v != %v", #procedure, m[0].name, e))
+ testing.expectf(t, m[0].name == e, "m[0].name %v != %v", m[0].name, e)
m_v, m_v_ok := m[0].value.([]i64le)
- tc.expect(t, m_v_ok, fmt.tprintf("%v: m_v_ok %v != %v", #procedure, m_v_ok, true))
- tc.expect(t, len(m_v) == 1, fmt.tprintf("%v: len(m_v) %v != %v", #procedure, len(m_v), 1))
- tc.expect(t, m_v[0] == 1024, fmt.tprintf("%v: m_v[0] %v != %v", #procedure, len(m_v), 1024))
+ testing.expectf(t, m_v_ok, "m_v_ok %v != %v", m_v_ok, true)
+ testing.expectf(t, len(m_v) == 1, "len(m_v) %v != %v", len(m_v), 1)
+		testing.expectf(t, m_v[0] == 1024, "m_v[0] %v != %v", m_v[0], 1024)
}
{
e :: "Validate"
- tc.expect(t, m[37].name == e, fmt.tprintf("%v: m[37].name %v != %v", #procedure, m[37].name, e))
+ testing.expectf(t, m[37].name == e, "m[37].name %v != %v", m[37].name, e)
m_v, m_v_ok := m[37].value.([]i64le)
- tc.expect(t, m_v_ok, fmt.tprintf("%v: m_v_ok %v != %v", #procedure, m_v_ok, true))
- tc.expect(t, len(m_v) == 1, fmt.tprintf("%v: len(m_v) %v != %v", #procedure, len(m_v), 1))
- tc.expect(t, m_v[0] == -2054847231, fmt.tprintf("%v: m_v[0] %v != %v", #procedure, len(m_v), -2054847231))
+ testing.expectf(t, m_v_ok, "m_v_ok %v != %v", m_v_ok, true)
+ testing.expectf(t, len(m_v) == 1, "len(m_v) %v != %v", len(m_v), 1)
+		testing.expectf(t, m_v[0] == -2054847231, "m_v[0] %v != %v", m_v[0], -2054847231)
}
/* Node content */
v, v_ok := file.nodes[0].content.(hxa.Node_Geometry)
- tc.expect(t, v_ok, fmt.tprintf("%v: v_ok %v != %v", #procedure, v_ok, true))
+ testing.expectf(t, v_ok, "v_ok %v != %v", v_ok, true)
- tc.expect(t, v.vertex_count == 530, fmt.tprintf("%v: v.vertex_count %v != %v", #procedure, v.vertex_count, 530))
- tc.expect(t, v.edge_corner_count == 2026, fmt.tprintf("%v: v.edge_corner_count %v != %v",
- #procedure, v.edge_corner_count, 2026))
- tc.expect(t, v.face_count == 517, fmt.tprintf("%v: v.face_count %v != %v", #procedure, v.face_count, 517))
+ testing.expectf(t, v.vertex_count == 530, "v.vertex_count %v != %v", v.vertex_count, 530)
+ testing.expectf(t, v.edge_corner_count == 2026, "v.edge_corner_count %v != %v", v.edge_corner_count, 2026)
+ testing.expectf(t, v.face_count == 517, "v.face_count %v != %v", v.face_count, 517)
/* Vertex stack */
- tc.expect(t, len(v.vertex_stack) == 1, fmt.tprintf("%v: len(v.vertex_stack) %v != %v",
- #procedure, len(v.vertex_stack), 1))
+ testing.expectf(t, len(v.vertex_stack) == 1, "len(v.vertex_stack) %v != %v", len(v.vertex_stack), 1)
{
e := "vertex"
- tc.expect(t, v.vertex_stack[0].name == e, fmt.tprintf("%v: v.vertex_stack[0].name %v != %v",
- #procedure, v.vertex_stack[0].name, e))
+ testing.expectf(t, v.vertex_stack[0].name == e, "v.vertex_stack[0].name %v != %v", v.vertex_stack[0].name, e)
}
- tc.expect(t, v.vertex_stack[0].components == 3, fmt.tprintf("%v: v.vertex_stack[0].components %v != %v",
- #procedure, v.vertex_stack[0].components, 3))
+ testing.expectf(t, v.vertex_stack[0].components == 3, "v.vertex_stack[0].components %v != %v", v.vertex_stack[0].components, 3)
/* Vertex stack data */
vs_d, vs_d_ok := v.vertex_stack[0].data.([]f64le)
- tc.expect(t, vs_d_ok, fmt.tprintf("%v: vs_d_ok %v != %v", #procedure, vs_d_ok, true))
- tc.expect(t, len(vs_d) == 1590, fmt.tprintf("%v: len(vs_d) %v != %v", #procedure, len(vs_d), 1590))
-
- tc.expect(t, vs_d[0] == 4.06266, fmt.tprintf("%v: vs_d[0] %v (%h) != %v (%h)",
- #procedure, vs_d[0], vs_d[0], 4.06266, 4.06266))
- tc.expect(t, vs_d[1] == 2.83457, fmt.tprintf("%v: vs_d[1] %v (%h) != %v (%h)",
- #procedure, vs_d[1], vs_d[1], 2.83457, 2.83457))
- tc.expect(t, vs_d[2] == 0hbfbc5da6a4441787, fmt.tprintf("%v: vs_d[2] %v (%h) != %v (%h)",
- #procedure, vs_d[2], vs_d[2],
- 0hbfbc5da6a4441787, 0hbfbc5da6a4441787))
- tc.expect(t, vs_d[3] == 0h4010074fb549f948, fmt.tprintf("%v: vs_d[3] %v (%h) != %v (%h)",
- #procedure, vs_d[3], vs_d[3],
- 0h4010074fb549f948, 0h4010074fb549f948))
- tc.expect(t, vs_d[1587] == 0h400befa82e87d2c7, fmt.tprintf("%v: vs_d[1587] %v (%h) != %v (%h)",
- #procedure, vs_d[1587], vs_d[1587],
- 0h400befa82e87d2c7, 0h400befa82e87d2c7))
- tc.expect(t, vs_d[1588] == 2.83457, fmt.tprintf("%v: vs_d[1588] %v (%h) != %v (%h)",
- #procedure, vs_d[1588], vs_d[1588], 2.83457, 2.83457))
- tc.expect(t, vs_d[1589] == -1.56121, fmt.tprintf("%v: vs_d[1589] %v (%h) != %v (%h)",
- #procedure, vs_d[1589], vs_d[1589], -1.56121, -1.56121))
+ testing.expectf(t, vs_d_ok, "vs_d_ok %v != %v", vs_d_ok, true)
+ testing.expectf(t, len(vs_d) == 1590, "len(vs_d) %v != %v", len(vs_d), 1590)
+ testing.expectf(t, vs_d[0] == 4.06266, "vs_d[0] %v (%h) != %v (%h)", vs_d[0], vs_d[0], 4.06266, 4.06266)
+ testing.expectf(t, vs_d[1] == 2.83457, "vs_d[1] %v (%h) != %v (%h)", vs_d[1], vs_d[1], 2.83457, 2.83457)
+ testing.expectf(t, vs_d[2] == 0hbfbc5da6a4441787, "vs_d[2] %v (%h) != %v (%h)", vs_d[2], vs_d[2], 0hbfbc5da6a4441787, 0hbfbc5da6a4441787)
+ testing.expectf(t, vs_d[3] == 0h4010074fb549f948, "vs_d[3] %v (%h) != %v (%h)", vs_d[3], vs_d[3], 0h4010074fb549f948, 0h4010074fb549f948)
+ testing.expectf(t, vs_d[1587] == 0h400befa82e87d2c7, "vs_d[1587] %v (%h) != %v (%h)", vs_d[1587], vs_d[1587], 0h400befa82e87d2c7, 0h400befa82e87d2c7)
+ testing.expectf(t, vs_d[1588] == 2.83457, "vs_d[1588] %v (%h) != %v (%h)", vs_d[1588], vs_d[1588], 2.83457, 2.83457)
+ testing.expectf(t, vs_d[1589] == -1.56121, "vs_d[1589] %v (%h) != %v (%h)", vs_d[1589], vs_d[1589], -1.56121, -1.56121)
/* Corner stack */
- tc.expect(t, len(v.corner_stack) == 1,
- fmt.tprintf("%v: len(v.corner_stack) %v != %v", #procedure, len(v.corner_stack), 1))
+ testing.expectf(t, len(v.corner_stack) == 1, "len(v.corner_stack) %v != %v", len(v.corner_stack), 1)
{
e := "reference"
- tc.expect(t, v.corner_stack[0].name == e, fmt.tprintf("%v: v.corner_stack[0].name %v != %v",
- #procedure, v.corner_stack[0].name, e))
+ testing.expectf(t, v.corner_stack[0].name == e, "v.corner_stack[0].name %v != %v", v.corner_stack[0].name, e)
}
- tc.expect(t, v.corner_stack[0].components == 1, fmt.tprintf("%v: v.corner_stack[0].components %v != %v",
- #procedure, v.corner_stack[0].components, 1))
+ testing.expectf(t, v.corner_stack[0].components == 1, "v.corner_stack[0].components %v != %v", v.corner_stack[0].components, 1)
/* Corner stack data */
cs_d, cs_d_ok := v.corner_stack[0].data.([]i32le)
- tc.expect(t, cs_d_ok, fmt.tprintf("%v: cs_d_ok %v != %v", #procedure, cs_d_ok, true))
- tc.expect(t, len(cs_d) == 2026, fmt.tprintf("%v: len(cs_d) %v != %v", #procedure, len(cs_d), 2026))
- tc.expect(t, cs_d[0] == 6, fmt.tprintf("%v: cs_d[0] %v != %v", #procedure, cs_d[0], 6))
- tc.expect(t, cs_d[2025] == -32, fmt.tprintf("%v: cs_d[2025] %v != %v", #procedure, cs_d[2025], -32))
+ testing.expectf(t, cs_d_ok, "cs_d_ok %v != %v", cs_d_ok, true)
+ testing.expectf(t, len(cs_d) == 2026, "len(cs_d) %v != %v", len(cs_d), 2026)
+ testing.expectf(t, cs_d[0] == 6, "cs_d[0] %v != %v", cs_d[0], 6)
+ testing.expectf(t, cs_d[2025] == -32, "cs_d[2025] %v != %v", cs_d[2025], -32)
/* Edge and face stacks (empty) */
- tc.expect(t, len(v.edge_stack) == 0, fmt.tprintf("%v: len(v.edge_stack) %v != %v",
- #procedure, len(v.edge_stack), 0))
- tc.expect(t, len(v.face_stack) == 0, fmt.tprintf("%v: len(v.face_stack) %v != %v",
- #procedure, len(v.face_stack), 0))
+ testing.expectf(t, len(v.edge_stack) == 0, "len(v.edge_stack) %v != %v", len(v.edge_stack), 0)
+ testing.expectf(t, len(v.face_stack) == 0, "len(v.face_stack) %v != %v", len(v.face_stack), 0)
}
@test
@@ -154,72 +126,72 @@ test_write :: proc(t: ^testing.T) {
n, write_err := hxa.write(buf, w_file)
write_e :: hxa.Write_Error.None
- tc.expect(t, write_err == write_e, fmt.tprintf("%v: write_err %v != %v", #procedure, write_err, write_e))
- tc.expect(t, n == required_size, fmt.tprintf("%v: n %v != %v", #procedure, n, required_size))
+	testing.expectf(t, write_err == write_e, "write_err %v != %v", write_err, write_e)
+	testing.expectf(t, n == required_size, "n %v != %v", n, required_size)
file, read_err := hxa.read(buf)
read_e :: hxa.Read_Error.None
- tc.expect(t, read_err == read_e, fmt.tprintf("%v: read_err %v != %v", #procedure, read_err, read_e))
+	testing.expectf(t, read_err == read_e, "read_err %v != %v", read_err, read_e)
defer hxa.file_destroy(file)
- tc.expect(t, file.magic_number == 0x417848, fmt.tprintf("%v: file.magic_number %v != %v",
- #procedure, file.magic_number, 0x417848))
- tc.expect(t, file.version == 3, fmt.tprintf("%v: file.version %v != %v", #procedure, file.version, 3))
- tc.expect(t, file.internal_node_count == 1, fmt.tprintf("%v: file.internal_node_count %v != %v",
- #procedure, file.internal_node_count, 1))
+	testing.expectf(t, file.magic_number == 0x417848, "file.magic_number %v != %v", file.magic_number, 0x417848)
+	testing.expectf(t, file.version == 3, "file.version %v != %v", file.version, 3)
+	testing.expectf(t, file.internal_node_count == 1, "file.internal_node_count %v != %v", file.internal_node_count, 1)
- tc.expect(t, len(file.nodes) == len(w_file.nodes), fmt.tprintf("%v: len(file.nodes) %v != %v",
- #procedure, len(file.nodes), len(w_file.nodes)))
+	testing.expectf(t, len(file.nodes) == len(w_file.nodes), "len(file.nodes) %v != %v", len(file.nodes), len(w_file.nodes))
m := &file.nodes[0].meta_data
w_m := &w_file.nodes[0].meta_data
- tc.expect(t, len(m^) == len(w_m^), fmt.tprintf("%v: len(m^) %v != %v", #procedure, len(m^), len(w_m^)))
- tc.expect(t, m[0].name == w_m[0].name, fmt.tprintf("%v: m[0].name %v != %v", #procedure, m[0].name, w_m[0].name))
+	testing.expectf(t, len(m^) == len(w_m^), "len(m^) %v != %v", len(m^), len(w_m^))
+	testing.expectf(t, m[0].name == w_m[0].name, "m[0].name %v != %v", m[0].name, w_m[0].name)
m_v, m_v_ok := m[0].value.([]f64le)
- tc.expect(t, m_v_ok, fmt.tprintf("%v: m_v_ok %v != %v", #procedure, m_v_ok, true))
- tc.expect(t, len(m_v) == len(n1_m1_value), fmt.tprintf("%v: %v != len(m_v) %v",
- #procedure, len(m_v), len(n1_m1_value)))
+	testing.expectf(t, m_v_ok, "m_v_ok %v != %v", m_v_ok, true)
+	testing.expectf(t, len(m_v) == len(n1_m1_value), "len(m_v) %v != %v", len(m_v), len(n1_m1_value))
for i := 0; i < len(m_v); i += 1 {
- tc.expect(t, m_v[i] == n1_m1_value[i], fmt.tprintf("%v: m_v[%d] %v != %v",
- #procedure, i, m_v[i], n1_m1_value[i]))
+		testing.expectf(t, m_v[i] == n1_m1_value[i], "m_v[%d] %v != %v", i, m_v[i], n1_m1_value[i])
}
v, v_ok := file.nodes[0].content.(hxa.Node_Image)
- tc.expect(t, v_ok, fmt.tprintf("%v: v_ok %v != %v", #procedure, v_ok, true))
- tc.expect(t, v.type == n1_content.type, fmt.tprintf("%v: v.type %v != %v", #procedure, v.type, n1_content.type))
- tc.expect(t, len(v.resolution) == 3, fmt.tprintf("%v: len(v.resolution) %v != %v",
- #procedure, len(v.resolution), 3))
- tc.expect(t, len(v.image_stack) == len(n1_content.image_stack), fmt.tprintf("%v: len(v.image_stack) %v != %v",
- #procedure, len(v.image_stack), len(n1_content.image_stack)))
+	testing.expectf(t, v_ok, "v_ok %v != %v", v_ok, true)
+	testing.expectf(t, v.type == n1_content.type, "v.type %v != %v", v.type, n1_content.type)
+	testing.expectf(t, len(v.resolution) == 3, "len(v.resolution) %v != %v", len(v.resolution), 3)
+	testing.expectf(t, len(v.image_stack) == len(n1_content.image_stack), "len(v.image_stack) %v != %v", len(v.image_stack), len(n1_content.image_stack))
for i := 0; i < len(v.image_stack); i += 1 {
- tc.expect(t, v.image_stack[i].name == n1_content.image_stack[i].name,
- fmt.tprintf("%v: v.image_stack[%d].name %v != %v",
- #procedure, i, v.image_stack[i].name, n1_content.image_stack[i].name))
- tc.expect(t, v.image_stack[i].components == n1_content.image_stack[i].components,
- fmt.tprintf("%v: v.image_stack[%d].components %v != %v",
- #procedure, i, v.image_stack[i].components, n1_content.image_stack[i].components))
+		testing.expectf(t, v.image_stack[i].name == n1_content.image_stack[i].name,
+			"v.image_stack[%d].name %v != %v", i, v.image_stack[i].name, n1_content.image_stack[i].name)
+		testing.expectf(t, v.image_stack[i].components == n1_content.image_stack[i].components,
+			"v.image_stack[%d].components %v != %v", i, v.image_stack[i].components, n1_content.image_stack[i].components)
switch n1_t in n1_content.image_stack[i].data {
case []u8:
- tc.expect(t, false, fmt.tprintf("%v: n1_content.image_stack[i].data []u8", #procedure))
+			testing.expectf(t, false, "n1_content.image_stack[%d].data is []u8", i)
case []i32le:
- tc.expect(t, false, fmt.tprintf("%v: n1_content.image_stack[i].data []i32le", #procedure))
+			testing.expectf(t, false, "n1_content.image_stack[%d].data is []i32le", i)
case []f32le:
l, l_ok := v.image_stack[i].data.([]f32le)
- tc.expect(t, l_ok, fmt.tprintf("%v: l_ok %v != %v", #procedure, l_ok, true))
- tc.expect(t, len(l) == len(n1_t), fmt.tprintf("%v: len(l) %v != %v", #procedure, len(l), len(n1_t)))
+			testing.expectf(t, l_ok, "l_ok %v != %v", l_ok, true)
+			testing.expectf(t, len(l) == len(n1_t), "len(l) %v != %v", len(l), len(n1_t))
for j := 0; j < len(l); j += 1 {
- tc.expect(t, l[j] == n1_t[j], fmt.tprintf("%v: l[%d] %v (%h) != %v (%h)",
- #procedure, j, l[j], l[j], n1_t[j], n1_t[j]))
+				testing.expectf(t, l[j] == n1_t[j], "l[%d] %v (%h) != %v (%h)", j, l[j], l[j], n1_t[j], n1_t[j])
}
case []f64le:
l, l_ok := v.image_stack[i].data.([]f64le)
- tc.expect(t, l_ok, fmt.tprintf("%v: l_ok %v != %v", #procedure, l_ok, true))
- tc.expect(t, len(l) == len(n1_t), fmt.tprintf("%v: len(l) %v != %v", #procedure, len(l), len(n1_t)))
+			testing.expectf(t, l_ok, "l_ok %v != %v", l_ok, true)
+			testing.expectf(t, len(l) == len(n1_t), "len(l) %v != %v", len(l), len(n1_t))
for j := 0; j < len(l); j += 1 {
- tc.expect(t, l[j] == n1_t[j], fmt.tprintf("%v: l[%d] %v != %v", #procedure, j, l[j], n1_t[j]))
+				testing.expectf(t, l[j] == n1_t[j], "l[%d] %v != %v", j, l[j], n1_t[j])
}
}
}
-}
+} \ No newline at end of file
diff --git a/tests/core/encoding/json/test_core_json.odin b/tests/core/encoding/json/test_core_json.odin
index 813d11b2c..92c050952 100644
--- a/tests/core/encoding/json/test_core_json.odin
+++ b/tests/core/encoding/json/test_core_json.odin
@@ -2,46 +2,8 @@ package test_core_json
import "core:encoding/json"
import "core:testing"
-import "core:fmt"
-import "core:os"
import "core:mem/virtual"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- parse_json(&t)
- marshal_json(&t)
- unmarshal_json(&t)
- surrogate(&t)
- utf8_string_of_multibyte_characters(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
@test
parse_json :: proc(t: ^testing.T) {
@@ -72,10 +34,9 @@ parse_json :: proc(t: ^testing.T) {
}
`
- _, err := json.parse(transmute([]u8)json_data)
-
- msg := fmt.tprintf("Expected `json.parse` to return nil, got %v", err)
- expect(t, err == nil, msg)
+ val, err := json.parse(transmute([]u8)json_data)
+ json.destroy_value(val)
+ testing.expectf(t, err == nil, "Expected `json.parse` to return nil, got %v", err)
}
@test
@@ -83,7 +44,7 @@ out_of_memory_in_parse_json :: proc(t: ^testing.T) {
arena: virtual.Arena
arena_buffer: [256]byte
arena_init_error := virtual.arena_init_buffer(&arena, arena_buffer[:])
- testing.expect(t, arena_init_error == nil, fmt.tprintf("Expected arena initialization to not return error, got: %v\n", arena_init_error))
+ testing.expectf(t, arena_init_error == nil, "Expected arena initialization to not return error, got: %v\n", arena_init_error)
context.allocator = virtual.arena_allocator(&arena)
@@ -114,11 +75,11 @@ out_of_memory_in_parse_json :: proc(t: ^testing.T) {
}
`
- _, err := json.parse(transmute([]u8)json_data)
+ val, err := json.parse(transmute([]u8)json_data)
+ json.destroy_value(val)
expected_error := json.Error.Out_Of_Memory
- msg := fmt.tprintf("Expected `json.parse` to fail with %v, got %v", expected_error, err)
- expect(t, err == json.Error.Out_Of_Memory, msg)
+ testing.expectf(t, err == json.Error.Out_Of_Memory, "Expected `json.parse` to fail with %v, got %v", expected_error, err)
}
@test
@@ -134,9 +95,9 @@ marshal_json :: proc(t: ^testing.T) {
b = 5,
}
- _, err := json.marshal(my_struct)
- msg := fmt.tprintf("Expected `json.marshal` to return nil, got %v", err)
- expect(t, err == nil, msg)
+ data, err := json.marshal(my_struct)
+ defer delete(data)
+ testing.expectf(t, err == nil, "Expected `json.marshal` to return nil, got %v", err)
}
PRODUCTS := `
@@ -378,17 +339,12 @@ unmarshal_json :: proc(t: ^testing.T) {
err := json.unmarshal(transmute([]u8)PRODUCTS, &g, json.DEFAULT_SPECIFICATION)
defer cleanup(g)
- msg := fmt.tprintf("Expected `json.unmarshal` to return nil, got %v", err)
- expect(t, err == nil, msg)
-
- msg = fmt.tprintf("Expected %v products to have been unmarshaled, got %v", len(original_data.products), len(g.products))
- expect(t, len(g.products) == len(original_data.products), msg)
-
- msg = fmt.tprintf("Expected cash to have been unmarshaled as %v, got %v", original_data.cash, g.cash)
- expect(t, original_data.cash == g.cash, msg)
+ testing.expectf(t, err == nil, "Expected `json.unmarshal` to return nil, got %v", err)
+ testing.expectf(t, len(g.products) == len(original_data.products), "Expected %v products to have been unmarshaled, got %v", len(original_data.products), len(g.products))
+ testing.expectf(t, original_data.cash == g.cash, "Expected cash to have been unmarshaled as %v, got %v", original_data.cash, g.cash)
for p, i in g.products {
- expect(t, p == original_data.products[i], "Producted unmarshaled improperly")
+		testing.expect(t, p == original_data.products[i], "Product unmarshaled improperly")
}
}
@@ -397,17 +353,19 @@ surrogate :: proc(t: ^testing.T) {
input := `+ + * 😃 - /`
out, err := json.marshal(input)
- expect(t, err == nil, fmt.tprintf("Expected `json.marshal(%q)` to return a nil error, got %v", input, err))
+ defer delete(out)
+ testing.expectf(t, err == nil, "Expected `json.marshal(%q)` to return a nil error, got %v", input, err)
back: string
uerr := json.unmarshal(out, &back)
- expect(t, uerr == nil, fmt.tprintf("Expected `json.unmarshal(%q)` to return a nil error, got %v", string(out), uerr))
- expect(t, back == input, fmt.tprintf("Expected `json.unmarshal(%q)` to return %q, got %v", string(out), input, uerr))
+ defer delete(back)
+ testing.expectf(t, uerr == nil, "Expected `json.unmarshal(%q)` to return a nil error, got %v", string(out), uerr)
+ testing.expectf(t, back == input, "Expected `json.unmarshal(%q)` to return %q, got %v", string(out), input, uerr)
}
@test
utf8_string_of_multibyte_characters :: proc(t: ^testing.T) {
- _, err := json.parse_string(`"🐛✅"`)
- msg := fmt.tprintf("Expected `json.parse` to return nil, got %v", err)
- expect(t, err == nil, msg)
-}
+ val, err := json.parse_string(`"🐛✅"`)
+ defer json.destroy_value(val)
+ testing.expectf(t, err == nil, "Expected `json.parse` to return nil, got %v", err)
+} \ No newline at end of file
diff --git a/tests/core/encoding/varint/test_core_varint.odin b/tests/core/encoding/varint/test_core_varint.odin
index ee1798aa7..5058f3022 100644
--- a/tests/core/encoding/varint/test_core_varint.odin
+++ b/tests/core/encoding/varint/test_core_varint.odin
@@ -2,110 +2,74 @@ package test_core_varint
import "core:encoding/varint"
import "core:testing"
-import "core:fmt"
-import "core:os"
import "core:slice"
import "core:math/rand"
-TEST_count := 0
-TEST_fail := 0
-
-RANDOM_TESTS :: 100
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- test_leb128(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
+NUM_RANDOM_TESTS_PER_BYTE_SIZE :: 10_000
@(test)
-test_leb128 :: proc(t: ^testing.T) {
+test_uleb :: proc(t: ^testing.T) {
buf: [varint.LEB128_MAX_BYTES]u8
for vector in ULEB_Vectors {
val, size, err := varint.decode_uleb128(vector.encoded)
- msg := fmt.tprintf("Expected %02x to decode to %v consuming %v bytes, got %v and %v", vector.encoded, vector.value, vector.size, val, size)
- expect(t, size == vector.size && val == vector.value, msg)
-
- msg = fmt.tprintf("Expected decoder to return error %v, got %v for vector %v", vector.error, err, vector)
- expect(t, err == vector.error, msg)
+ testing.expectf(t, size == vector.size && val == vector.value, "Expected %02x to decode to %v consuming %v bytes, got %v and %v", vector.encoded, vector.value, vector.size, val, size)
+ testing.expectf(t, err == vector.error, "Expected decoder to return error %v, got %v for vector %v", vector.error, err, vector)
if err == .None { // Try to roundtrip
size, err = varint.encode_uleb128(buf[:], vector.value)
- msg = fmt.tprintf("Expected %v to encode to %02x, got %02x", vector.value, vector.encoded, buf[:size])
- expect(t, size == vector.size && slice.simple_equal(vector.encoded, buf[:size]), msg)
+ testing.expectf(t, size == vector.size && slice.simple_equal(vector.encoded, buf[:size]), "Expected %v to encode to %02x, got %02x", vector.value, vector.encoded, buf[:size])
}
}
+}
+
+@(test)
+test_ileb :: proc(t: ^testing.T) {
+ buf: [varint.LEB128_MAX_BYTES]u8
for vector in ILEB_Vectors {
val, size, err := varint.decode_ileb128(vector.encoded)
- msg := fmt.tprintf("Expected %02x to decode to %v consuming %v bytes, got %v and %v", vector.encoded, vector.value, vector.size, val, size)
- expect(t, size == vector.size && val == vector.value, msg)
-
- msg = fmt.tprintf("Expected decoder to return error %v, got %v", vector.error, err)
- expect(t, err == vector.error, msg)
+ testing.expectf(t, size == vector.size && val == vector.value, "Expected %02x to decode to %v consuming %v bytes, got %v and %v", vector.encoded, vector.value, vector.size, val, size)
+ testing.expectf(t, err == vector.error, "Expected decoder to return error %v, got %v", vector.error, err)
if err == .None { // Try to roundtrip
size, err = varint.encode_ileb128(buf[:], vector.value)
- msg = fmt.tprintf("Expected %v to encode to %02x, got %02x", vector.value, vector.encoded, buf[:size])
- expect(t, size == vector.size && slice.simple_equal(vector.encoded, buf[:size]), msg)
+ testing.expectf(t, size == vector.size && slice.simple_equal(vector.encoded, buf[:size]), "Expected %v to encode to %02x, got %02x", vector.value, vector.encoded, buf[:size])
}
}
+}
+
+@(test)
+test_random :: proc(t: ^testing.T) {
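+	// Round-trips random values of 1 to 16 bytes through both the unsigned and signed LEB128 codecs.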
+ buf: [varint.LEB128_MAX_BYTES]u8
for num_bytes in 1..=uint(16) {
- for _ in 0..=RANDOM_TESTS {
+ for _ in 0..=NUM_RANDOM_TESTS_PER_BYTE_SIZE {
unsigned, signed := get_random(num_bytes)
-
{
encode_size, encode_err := varint.encode_uleb128(buf[:], unsigned)
- msg := fmt.tprintf("%v failed to encode as an unsigned LEB128 value, got %v", unsigned, encode_err)
- expect(t, encode_err == .None, msg)
+ testing.expectf(t, encode_err == .None, "%v failed to encode as an unsigned LEB128 value, got %v", unsigned, encode_err)
decoded, decode_size, decode_err := varint.decode_uleb128(buf[:])
- msg = fmt.tprintf("Expected %02x to decode as %v, got %v", buf[:encode_size], unsigned, decoded)
- expect(t, decode_err == .None && decode_size == encode_size && decoded == unsigned, msg)
+ testing.expectf(t, decode_err == .None && decode_size == encode_size && decoded == unsigned, "Expected %02x to decode as %v, got %v", buf[:encode_size], unsigned, decoded)
}
{
encode_size, encode_err := varint.encode_ileb128(buf[:], signed)
- msg := fmt.tprintf("%v failed to encode as a signed LEB128 value, got %v", signed, encode_err)
- expect(t, encode_err == .None, msg)
+ testing.expectf(t, encode_err == .None, "%v failed to encode as a signed LEB128 value, got %v", signed, encode_err)
decoded, decode_size, decode_err := varint.decode_ileb128(buf[:])
- msg = fmt.tprintf("Expected %02x to decode as %v, got %v, err: %v", buf[:encode_size], signed, decoded, decode_err)
- expect(t, decode_err == .None && decode_size == encode_size && decoded == signed, msg)
+ testing.expectf(t, decode_err == .None && decode_size == encode_size && decoded == signed, "Expected %02x to decode as %v, got %v, err: %v", buf[:encode_size], signed, decoded, decode_err)
}
}
}
}
+@(private)
get_random :: proc(byte_count: uint) -> (u: u128, i: i128) {
assert(byte_count >= 0 && byte_count <= size_of(u128))
diff --git a/tests/core/encoding/xml/test_core_xml.odin b/tests/core/encoding/xml/test_core_xml.odin
index c62033491..b29431e10 100644
--- a/tests/core/encoding/xml/test_core_xml.odin
+++ b/tests/core/encoding/xml/test_core_xml.odin
@@ -2,10 +2,10 @@ package test_core_xml
import "core:encoding/xml"
import "core:testing"
-import "core:mem"
import "core:strings"
import "core:io"
import "core:fmt"
+import "core:log"
import "core:hash"
Silent :: proc(pos: xml.Pos, format: string, args: ..any) {}
@@ -14,9 +14,6 @@ OPTIONS :: xml.Options{ flags = { .Ignore_Unsupported, .Intern_Comments, },
expected_doctype = "",
}
-TEST_count := 0
-TEST_fail := 0
-
TEST :: struct {
filename: string,
options: xml.Options,
@@ -24,22 +21,14 @@ TEST :: struct {
crc32: u32,
}
-/*
- Relative to ODIN_ROOT
-*/
-TEST_FILE_PATH_PREFIX :: "tests/core/assets"
-
-TESTS :: []TEST{
- /*
- First we test that certain files parse without error.
- */
+TEST_SUITE_PATH :: ODIN_ROOT + "tests/core/assets/"
- {
- /*
- Tests UTF-8 idents and values.
- Test namespaced ident.
- Tests that nested partial CDATA start doesn't trip up parser.
- */
+@(test)
+xml_test_utf8_normal :: proc(t: ^testing.T) {
+ run_test(t, {
+ // Tests UTF-8 idents and values.
+ // Test namespaced ident.
+ // Tests that nested partial CDATA start doesn't trip up parser.
filename = "XML/utf8.xml",
options = {
flags = {
@@ -47,14 +36,15 @@ TESTS :: []TEST{
},
expected_doctype = "恥ずべきフクロウ",
},
- crc32 = 0xe9b62f03,
- },
-
- {
- /*
- Same as above.
- Unbox CDATA in data tag.
- */
+ crc32 = 0xefa55f27,
+ })
+}
+
+@(test)
+xml_test_utf8_unbox_cdata :: proc(t: ^testing.T) {
+ run_test(t, {
+ // Same as above.
+ // Unbox CDATA in data tag.
filename = "XML/utf8.xml",
options = {
flags = {
@@ -62,14 +52,15 @@ TESTS :: []TEST{
},
expected_doctype = "恥ずべきフクロウ",
},
- crc32 = 0x9c2643ed,
- },
-
- {
- /*
- Simple Qt TS translation file.
- `core:i18n` requires it to be parsed properly.
- */
+ crc32 = 0x2dd27770,
+ })
+}
+
+@(test)
+xml_test_nl_qt_ts :: proc(t: ^testing.T) {
+ run_test(t, {
+ // Simple Qt TS translation file.
+ // `core:i18n` requires it to be parsed properly.
filename = "I18N/nl_NL-qt-ts.ts",
options = {
flags = {
@@ -78,13 +69,14 @@ TESTS :: []TEST{
expected_doctype = "TS",
},
crc32 = 0x859b7443,
- },
+ })
+}
- {
- /*
- Simple XLiff 1.2 file.
- `core:i18n` requires it to be parsed properly.
- */
+@(test)
+xml_test_xliff_1_2 :: proc(t: ^testing.T) {
+ run_test(t, {
+ // Simple XLiff 1.2 file.
+ // `core:i18n` requires it to be parsed properly.
filename = "I18N/nl_NL-xliff-1.2.xliff",
options = {
flags = {
@@ -93,13 +85,14 @@ TESTS :: []TEST{
expected_doctype = "xliff",
},
crc32 = 0x3deaf329,
- },
+ })
+}
- {
- /*
- Simple XLiff 2.0 file.
- `core:i18n` requires it to be parsed properly.
- */
+@(test)
+xml_test_xliff_2_0 :: proc(t: ^testing.T) {
+ run_test(t, {
+ // Simple XLiff 2.0 file.
+ // `core:i18n` requires it to be parsed properly.
filename = "I18N/nl_NL-xliff-2.0.xliff",
options = {
flags = {
@@ -108,9 +101,12 @@ TESTS :: []TEST{
expected_doctype = "xliff",
},
crc32 = 0x0c55e287,
- },
+ })
+}
- {
+@(test)
+xml_test_entities :: proc(t: ^testing.T) {
+ run_test(t, {
filename = "XML/entities.html",
options = {
flags = {
@@ -119,9 +115,12 @@ TESTS :: []TEST{
expected_doctype = "html",
},
crc32 = 0x05373317,
- },
+ })
+}
- {
+@(test)
+xml_test_entities_unbox :: proc(t: ^testing.T) {
+ run_test(t, {
filename = "XML/entities.html",
options = {
flags = {
@@ -129,10 +128,13 @@ TESTS :: []TEST{
},
expected_doctype = "html",
},
- crc32 = 0x3b6d4a90,
- },
+ crc32 = 0x350ca83e,
+ })
+}
- {
+@(test)
+xml_test_entities_unbox_decode :: proc(t: ^testing.T) {
+ run_test(t, {
filename = "XML/entities.html",
options = {
flags = {
@@ -140,13 +142,27 @@ TESTS :: []TEST{
},
expected_doctype = "html",
},
- crc32 = 0x5be2ffdc,
- },
+ crc32 = 0x7f58db7d,
+ })
+}
- /*
- Then we test that certain errors are returned as expected.
- */
- {
+@(test)
+xml_test_attribute_whitespace :: proc(t: ^testing.T) {
+ run_test(t, {
+		// Tests handling of whitespace in and around attribute values.
+ filename = "XML/attribute-whitespace.xml",
+ options = {
+ flags = {},
+ expected_doctype = "foozle",
+ },
+ crc32 = 0x8f5fd6c1,
+ })
+}
+
+@(test)
+xml_test_invalid_doctype :: proc(t: ^testing.T) {
+ run_test(t, {
filename = "XML/utf8.xml",
options = {
flags = {
@@ -156,12 +172,12 @@ TESTS :: []TEST{
},
err = .Invalid_DocType,
crc32 = 0x49b83d0a,
- },
+ })
+}
- /*
- Parse the 9.08 MiB unicode.xml for good measure.
- */
- {
+@(test)
+xml_test_unicode :: proc(t: ^testing.T) {
+ run_test(t, {
filename = "XML/unicode.xml",
options = {
flags = {
@@ -170,40 +186,38 @@ TESTS :: []TEST{
expected_doctype = "",
},
err = .None,
- crc32 = 0x0b6100ab,
- },
+ crc32 = 0x73070b55,
+ })
}
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] LOG:\n\t%v\n", loc, v)
- }
-}
+@(private)
+run_test :: proc(t: ^testing.T, test: TEST) {
+ path := strings.concatenate({TEST_SUITE_PATH, test.filename})
+ defer delete(path)
-test_file_path :: proc(filename: string) -> (path: string) {
+ doc, err := xml.load_from_file(path, test.options, Silent)
+ defer xml.destroy(doc)
- path = fmt.tprintf("%v%v/%v", ODIN_ROOT, TEST_FILE_PATH_PREFIX, filename)
- temp := transmute([]u8)path
+ tree_string := doc_to_string(doc)
+ tree_bytes := transmute([]u8)tree_string
+ defer delete(tree_bytes)
- for r, i in path {
- if r == '\\' {
- temp[i] = '/'
- }
+ crc32 := hash.crc32(tree_bytes)
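+	// The expected CRC is computed over the debug-printed tree, so any change in parsing or entity decoding shows up as a mismatch.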
+
+ failed := err != test.err
+ testing.expectf(t, err == test.err, "%v: Expected return value %v, got %v", test.filename, test.err, err)
+
+ failed |= crc32 != test.crc32
+ testing.expectf(t, crc32 == test.crc32, "%v: Expected CRC 0x%08x, got 0x%08x, with options %v", test.filename, test.crc32, crc32, test.options)
+
+ if failed {
+ // Don't fully print big trees.
+ tree_string = tree_string[:min(2_048, len(tree_string))]
+ log.error(tree_string)
}
- return path
}
+@(private)
doc_to_string :: proc(doc: ^xml.Document) -> (result: string) {
/*
Effectively a clone of the debug printer in the xml package.
@@ -284,56 +298,4 @@ doc_to_string :: proc(doc: ^xml.Document) -> (result: string) {
print(strings.to_writer(&buf), doc)
return strings.clone(strings.to_string(buf))
-}
-
-@test
-run_tests :: proc(t: ^testing.T) {
- for test in TESTS {
- path := test_file_path(test.filename)
- log(t, fmt.tprintf("Trying to parse %v", path))
-
- doc, err := xml.load_from_file(path, test.options, Silent)
- defer xml.destroy(doc)
-
- tree_string := doc_to_string(doc)
- tree_bytes := transmute([]u8)tree_string
- defer delete(tree_bytes)
-
- crc32 := hash.crc32(tree_bytes)
-
- failed := err != test.err
- err_msg := fmt.tprintf("Expected return value %v, got %v", test.err, err)
- expect(t, err == test.err, err_msg)
-
- failed |= crc32 != test.crc32
- err_msg = fmt.tprintf("Expected CRC 0x%08x, got 0x%08x, with options %v", test.crc32, crc32, test.options)
- expect(t, crc32 == test.crc32, err_msg)
-
- if failed {
- /*
- Don't fully print big trees.
- */
- tree_string = tree_string[:min(2_048, len(tree_string))]
- fmt.println(tree_string)
- }
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
-
- run_tests(&t)
-
- if len(track.allocation_map) > 0 {
- for _, v in track.allocation_map {
- err_msg := fmt.tprintf("%v Leaked %v bytes.", v.location, v.size)
- expect(&t, false, err_msg)
- }
- }
-
- fmt.printf("\n%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
} \ No newline at end of file
diff --git a/tests/core/fmt/test_core_fmt.odin b/tests/core/fmt/test_core_fmt.odin
index 82d009ac6..507e0f433 100644
--- a/tests/core/fmt/test_core_fmt.odin
+++ b/tests/core/fmt/test_core_fmt.odin
@@ -1,47 +1,10 @@
package test_core_fmt
+import "base:runtime"
import "core:fmt"
-import "core:os"
-import "core:testing"
+import "core:math"
import "core:mem"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_fmt_memory(&t)
- test_fmt_doc_examples(&t)
- test_fmt_options(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-check :: proc(t: ^testing.T, exp: string, format: string, args: ..any, loc := #caller_location) {
- got := fmt.tprintf(format, ..args)
- expect(t, got == exp, fmt.tprintf("(%q, %v): %q != %q", format, args, got, exp), loc)
-}
+import "core:testing"
@(test)
test_fmt_memory :: proc(t: ^testing.T) {
@@ -62,6 +25,70 @@ test_fmt_memory :: proc(t: ^testing.T) {
}
@(test)
+test_fmt_complex_quaternion :: proc(t: ^testing.T) {
+ neg_inf := math.inf_f64(-1)
+ pos_inf := math.inf_f64(+1)
+ neg_zero := f64(0h80000000_00000000)
+ nan := math.nan_f64()
+
+ // NOTE(Feoramund): Doing it this way, because complex construction is broken.
+ // Reported in issue #3665.
+ c: complex128
+ cptr := cast(^runtime.Raw_Complex128)&c
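+	// runtime.Raw_Complex128 lays out the real part first, so the assignments below read as {re, im}.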
+
+ cptr^ = {0, 0}
+ check(t, "0+0i", "%v", c)
+ cptr^ = {1, 1}
+ check(t, "1+1i", "%v", c)
+ cptr^ = {1, 0}
+ check(t, "1+0i", "%v", c)
+ cptr^ = {-1, -1}
+ check(t, "-1-1i", "%v", c)
+ cptr^ = {0, neg_zero}
+ check(t, "0-0i", "%v", c)
+ cptr^ = {nan, nan}
+ check(t, "NaNNaNi", "%v", c)
+ cptr^ = {pos_inf, pos_inf}
+ check(t, "+Inf+Infi", "%v", c)
+ cptr^ = {neg_inf, neg_inf}
+ check(t, "-Inf-Infi", "%v", c)
+
+ // Check forced plus signs.
+ cptr^ = {0, neg_zero}
+ check(t, "+0-0i", "%+v", c)
+ cptr^ = {1, 1}
+ check(t, "+1+1i", "%+v", c)
+ cptr^ = {nan, nan}
+ check(t, "NaNNaNi", "%+v", c)
+ cptr^ = {pos_inf, pos_inf}
+ check(t, "+Inf+Infi", "%+v", c)
+ cptr^ = {neg_inf, neg_inf}
+ check(t, "-Inf-Infi", "%+v", c)
+
+ // Remember that the real number is the last in a quaternion's data layout,
+ // opposed to a complex, where it is the first.
+ q: quaternion256
+ qptr := cast(^runtime.Raw_Quaternion256)&q
+
+ qptr^ = {0, 0, 0, 0}
+ check(t, "0+0i+0j+0k", "%v", q)
+ qptr^ = {1, 1, 1, 1}
+ check(t, "1+1i+1j+1k", "%v", q)
+ qptr^ = {2, 3, 4, 1}
+ check(t, "1+2i+3j+4k", "%v", q)
+ qptr^ = {-1, -1, -1, -1}
+ check(t, "-1-1i-1j-1k", "%v", q)
+ qptr^ = {2, neg_zero, neg_zero, 1}
+ check(t, "1+2i-0j-0k", "%v", q)
+ qptr^ = {neg_inf, neg_inf, neg_inf, -1}
+ check(t, "-1-Infi-Infj-Infk", "%v", q)
+ qptr^ = {pos_inf, pos_inf, pos_inf, -1}
+ check(t, "-1+Infi+Infj+Infk", "%v", q)
+ qptr^ = {nan, nan, nan, -1}
+ check(t, "-1NaNiNaNjNaNk", "%v", q)
+}
+
+@(test)
test_fmt_doc_examples :: proc(t: ^testing.T) {
// C-like syntax
check(t, "37 13", "%[1]d %[0]d", 13, 37)
@@ -75,7 +102,7 @@ test_fmt_doc_examples :: proc(t: ^testing.T) {
}
@(test)
-test_fmt_options :: proc(t: ^testing.T) {
+test_fmt_escaping_prefixes :: proc(t: ^testing.T) {
// Escaping
check(t, "% { } 0 { } } {", "%% {{ }} {} {{ }} }} {{", 0 )
@@ -86,7 +113,10 @@ test_fmt_options :: proc(t: ^testing.T) {
check(t, "+3", "%+i", 3 )
check(t, "0b11", "%#b", 3 )
check(t, "0xA", "%#X", 10 )
+}
+@(test)
+test_fmt_indexing :: proc(t: ^testing.T) {
// Specific index formatting
check(t, "1 2 3", "%i %i %i", 1, 2, 3)
check(t, "1 2 3", "%[0]i %[1]i %[2]i", 1, 2, 3)
@@ -95,7 +125,10 @@ test_fmt_options :: proc(t: ^testing.T) {
check(t, "1 2 3", "%i %[1]i %i", 1, 2, 3)
check(t, "1 3 2", "%i %[2]i %i", 1, 2, 3)
check(t, "1 1 1", "%[0]i %[0]i %[0]i", 1)
+}
+@(test)
+test_fmt_width_precision :: proc(t: ^testing.T) {
// Width
check(t, "3.140", "%f", 3.14)
check(t, "3.140", "%4f", 3.14)
@@ -133,7 +166,10 @@ test_fmt_options :: proc(t: ^testing.T) {
check(t, "3.140", "%*[1].*[2][0]f", 3.14, 5, 3)
check(t, "3.140", "%*[2].*[1]f", 3.14, 3, 5)
check(t, "3.140", "%5.*[1]f", 3.14, 3)
+}
+@(test)
+test_fmt_arg_errors :: proc(t: ^testing.T) {
// Error checking
check(t, "%!(MISSING ARGUMENT)%!(NO VERB)", "%" )
@@ -156,7 +192,10 @@ test_fmt_options :: proc(t: ^testing.T) {
check(t, "%!(BAD ARGUMENT NUMBER)%!(NO VERB)%!(EXTRA 0)", "%[1]", 0)
check(t, "3.1%!(EXTRA 3.14)", "%.1f", 3.14, 3.14)
+}
+@(test)
+test_fmt_python_syntax :: proc(t: ^testing.T) {
// Python-like syntax
check(t, "1 2 3", "{} {} {}", 1, 2, 3)
check(t, "3 2 1", "{2} {1} {0}", 1, 2, 3)
@@ -181,3 +220,46 @@ test_fmt_options :: proc(t: ^testing.T) {
check(t, "%!(MISSING CLOSE BRACE)%!(EXTRA 1)", "{", 1)
check(t, "%!(MISSING CLOSE BRACE)%!(EXTRA 1)", "{0", 1 )
}
+
+@(test)
+test_pointers :: proc(t: ^testing.T) {
+ S :: struct { i: int }
+ a: rawptr
+ b: ^int
+ c: ^S
+ d: ^S = cast(^S)cast(uintptr)0xFFFF
+
+ check(t, "0x0", "%p", a)
+ check(t, "0x0", "%p", b)
+ check(t, "0x0", "%p", c)
+ check(t, "0xFFFF", "%p", d)
+
+ check(t, "0x0", "%#p", a)
+ check(t, "0x0", "%#p", b)
+ check(t, "0x0", "%#p", c)
+ check(t, "0xFFFF", "%#p", d)
+
+ check(t, "0x0", "%v", a)
+ check(t, "0x0", "%v", b)
+ check(t, "<nil>", "%v", c)
+
+ check(t, "0x0", "%#v", a)
+ check(t, "0x0", "%#v", b)
+ check(t, "<nil>", "%#v", c)
+
+ check(t, "0x0000", "%4p", a)
+ check(t, "0x0000", "%4p", b)
+ check(t, "0x0000", "%4p", c)
+ check(t, "0xFFFF", "%4p", d)
+
+ check(t, "0x0000", "%#4p", a)
+ check(t, "0x0000", "%#4p", b)
+ check(t, "0x0000", "%#4p", c)
+ check(t, "0xFFFF", "%#4p", d)
+}
+
+@(private)
+check :: proc(t: ^testing.T, exp: string, format: string, args: ..any, loc := #caller_location) {
+ got := fmt.tprintf(format, ..args)
+ testing.expectf(t, got == exp, "(%q, %v): %q != %q", format, args, got, exp, loc = loc)
+}
diff --git a/tests/core/hash/test_core_hash.odin b/tests/core/hash/test_core_hash.odin
index 932d2f34c..c332383e7 100644
--- a/tests/core/hash/test_core_hash.odin
+++ b/tests/core/hash/test_core_hash.odin
@@ -2,201 +2,12 @@ package test_core_hash
import "core:hash/xxhash"
import "core:hash"
-import "core:time"
import "core:testing"
-import "core:fmt"
-import "core:os"
import "core:math/rand"
import "base:intrinsics"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_benchmark_runner(&t)
- test_crc64_vectors(&t)
- test_xxhash_vectors(&t)
- test_xxhash_large(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-/*
- Benchmarks
-*/
-
-setup_xxhash :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
- assert(options != nil)
-
- options.input = make([]u8, options.bytes, allocator)
- return nil if len(options.input) == options.bytes else .Allocation_Error
-}
-
-teardown_xxhash :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
- assert(options != nil)
-
- delete(options.input)
- return nil
-}
-
-benchmark_xxh32 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
- buf := options.input
-
- h: u32
- for _ in 0..=options.rounds {
- h = xxhash.XXH32(buf)
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- options.hash = u128(h)
- return nil
-}
-
-benchmark_xxh64 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
- buf := options.input
-
- h: u64
- for _ in 0..=options.rounds {
- h = xxhash.XXH64(buf)
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- options.hash = u128(h)
- return nil
-}
-
-benchmark_xxh3_64 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
- buf := options.input
-
- h: u64
- for _ in 0..=options.rounds {
- h = xxhash.XXH3_64(buf)
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- options.hash = u128(h)
- return nil
-}
-
-benchmark_xxh3_128 :: proc(options: ^time.Benchmark_Options, allocator := context.allocator) -> (err: time.Benchmark_Error) {
- buf := options.input
-
- h: u128
- for _ in 0..=options.rounds {
- h = xxhash.XXH3_128(buf)
- }
- options.count = options.rounds
- options.processed = options.rounds * options.bytes
- options.hash = h
- return nil
-}
-
-benchmark_print :: proc(name: string, options: ^time.Benchmark_Options) {
- fmt.printf("\t[%v] %v rounds, %v bytes processed in %v ns\n\t\t%5.3f rounds/s, %5.3f MiB/s\n",
- name,
- options.rounds,
- options.processed,
- time.duration_nanoseconds(options.duration),
- options.rounds_per_second,
- options.megabytes_per_second,
- )
-}
-
-@test
-test_benchmark_runner :: proc(t: ^testing.T) {
- fmt.println("Starting benchmarks:")
-
- name := "XXH32 100 zero bytes"
- options := &time.Benchmark_Options{
- rounds = 1_000,
- bytes = 100,
- setup = setup_xxhash,
- bench = benchmark_xxh32,
- teardown = teardown_xxhash,
- }
-
- err := time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x85f6413c, name)
- benchmark_print(name, options)
-
- name = "XXH32 1 MiB zero bytes"
- options.bytes = 1_048_576
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x9430f97f, name)
- benchmark_print(name, options)
-
- name = "XXH64 100 zero bytes"
- options.bytes = 100
- options.bench = benchmark_xxh64
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x17bb1103c92c502f, name)
- benchmark_print(name, options)
-
- name = "XXH64 1 MiB zero bytes"
- options.bytes = 1_048_576
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x87d2a1b6e1163ef1, name)
- benchmark_print(name, options)
-
- name = "XXH3_64 100 zero bytes"
- options.bytes = 100
- options.bench = benchmark_xxh3_64
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x801fedc74ccd608c, name)
- benchmark_print(name, options)
-
- name = "XXH3_64 1 MiB zero bytes"
- options.bytes = 1_048_576
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x918780b90550bf34, name)
- benchmark_print(name, options)
-
- name = "XXH3_128 100 zero bytes"
- options.bytes = 100
- options.bench = benchmark_xxh3_128
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0x6ba30a4e9dffe1ff801fedc74ccd608c, name)
- benchmark_print(name, options)
-
- name = "XXH3_128 1 MiB zero bytes"
- options.bytes = 1_048_576
- err = time.benchmark(options, context.allocator)
- expect(t, err == nil, name)
- expect(t, options.hash == 0xb6ef17a3448492b6918780b90550bf34, name)
- benchmark_print(name, options)
-}
-
@test
-test_xxhash_large :: proc(t: ^testing.T) {
+test_xxhash_zero_fixed :: proc(t: ^testing.T) {
many_zeroes := make([]u8, 16 * 1024 * 1024)
defer delete(many_zeroes)
@@ -204,62 +15,45 @@ test_xxhash_large :: proc(t: ^testing.T) {
for i, v in ZERO_VECTORS {
b := many_zeroes[:i]
- fmt.printf("[test_xxhash_large] All at once. Size: %v\n", i)
-
xxh32 := xxhash.XXH32(b)
xxh64 := xxhash.XXH64(b)
xxh3_64 := xxhash.XXH3_64(b)
xxh3_128 := xxhash.XXH3_128(b)
- xxh32_error := fmt.tprintf("[ XXH32(%03d) ] Expected: %08x. Got: %08x.", i, v.xxh_32, xxh32)
- xxh64_error := fmt.tprintf("[ XXH64(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh_64, xxh64)
- xxh3_64_error := fmt.tprintf("[XXH3_64(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh3_64, xxh3_64)
- xxh3_128_error := fmt.tprintf("[XXH3_128(%03d) ] Expected: %32x. Got: %32x.", i, v.xxh3_128, xxh3_128)
-
- expect(t, xxh32 == v.xxh_32, xxh32_error)
- expect(t, xxh64 == v.xxh_64, xxh64_error)
- expect(t, xxh3_64 == v.xxh3_64, xxh3_64_error)
- expect(t, xxh3_128 == v.xxh3_128, xxh3_128_error)
+ testing.expectf(t, xxh32 == v.xxh_32, "[ XXH32(%03d) ] Expected: %08x, got: %08x", i, v.xxh_32, xxh32)
+ testing.expectf(t, xxh64 == v.xxh_64, "[ XXH64(%03d) ] Expected: %16x, got: %16x", i, v.xxh_64, xxh64)
+ testing.expectf(t, xxh3_64 == v.xxh3_64, "[XXH3_64(%03d) ] Expected: %16x, got: %16x", i, v.xxh3_64, xxh3_64)
+ testing.expectf(t, xxh3_128 == v.xxh3_128, "[XXH3_128(%03d) ] Expected: %32x, got: %32x", i, v.xxh3_128, xxh3_128)
}
+}
- when #config(RAND_STATE, -1) >= 0 && #config(RAND_INC, -1) >= 0 {
- random_seed := rand.Rand{
- state = u64(#config(RAND_STATE, -1)),
- inc = u64(#config(RAND_INC, -1)),
- }
- fmt.printf("Using user-selected seed {{%v,%v}} for update size randomness.\n", random_seed.state, random_seed.inc)
- } else {
- random_seed := rand.create(u64(intrinsics.read_cycle_counter()))
- fmt.printf("Randonly selected seed {{%v,%v}} for update size randomness.\n", random_seed.state, random_seed.inc)
- }
+@(test)
+test_xxhash_zero_streamed_random_updates :: proc(t: ^testing.T) {
+ many_zeroes := make([]u8, 16 * 1024 * 1024)
+ defer delete(many_zeroes)
// Streamed
for i, v in ZERO_VECTORS {
b := many_zeroes[:i]
- fmt.printf("[test_xxhash_large] Streamed. Size: %v\n", i)
-
- // bytes_per_update := []int{1, 42, 13, 7, 16, 5, 23, 74, 1024, 511, 1023, 47}
- // update_size_idx: int
-
xxh_32_state, xxh_32_err := xxhash.XXH32_create_state()
defer xxhash.XXH32_destroy_state(xxh_32_state)
- expect(t, xxh_32_err == nil, "Problem initializing XXH_32 state.")
+ testing.expect(t, xxh_32_err == nil, "Problem initializing XXH_32 state")
xxh_64_state, xxh_64_err := xxhash.XXH64_create_state()
defer xxhash.XXH64_destroy_state(xxh_64_state)
- expect(t, xxh_64_err == nil, "Problem initializing XXH_64 state.")
+ testing.expect(t, xxh_64_err == nil, "Problem initializing XXH_64 state")
xxh3_64_state, xxh3_64_err := xxhash.XXH3_create_state()
defer xxhash.XXH3_destroy_state(xxh3_64_state)
- expect(t, xxh3_64_err == nil, "Problem initializing XXH3_64 state.")
+ testing.expect(t, xxh3_64_err == nil, "Problem initializing XXH3_64 state")
xxh3_128_state, xxh3_128_err := xxhash.XXH3_create_state()
defer xxhash.XXH3_destroy_state(xxh3_128_state)
- expect(t, xxh3_128_err == nil, "Problem initializing XXH3_128 state.")
+ testing.expect(t, xxh3_128_err == nil, "Problem initializing XXH3_128 state")
// XXH3_128_update
-
+ random_seed := rand.create(t.seed)
for len(b) > 0 {
update_size := min(len(b), rand.int_max(8192, &random_seed))
if update_size > 4096 {
@@ -281,28 +75,19 @@ test_xxhash_large :: proc(t: ^testing.T) {
xxh3_64 := xxhash.XXH3_64_digest(xxh3_64_state)
xxh3_128 := xxhash.XXH3_128_digest(xxh3_128_state)
- xxh32_error := fmt.tprintf("[ XXH32(%03d) ] Expected: %08x. Got: %08x.", i, v.xxh_32, xxh32)
- xxh64_error := fmt.tprintf("[ XXH64(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh_64, xxh64)
- xxh3_64_error := fmt.tprintf("[XXH3_64(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh3_64, xxh3_64)
- xxh3_128_error := fmt.tprintf("[XXH3_128(%03d) ] Expected: %32x. Got: %32x.", i, v.xxh3_128, xxh3_128)
-
- expect(t, xxh32 == v.xxh_32, xxh32_error)
- expect(t, xxh64 == v.xxh_64, xxh64_error)
- expect(t, xxh3_64 == v.xxh3_64, xxh3_64_error)
- expect(t, xxh3_128 == v.xxh3_128, xxh3_128_error)
+ testing.expectf(t, xxh32 == v.xxh_32, "[ XXH32(%03d) ] Expected: %08x, got: %08x", i, v.xxh_32, xxh32)
+ testing.expectf(t, xxh64 == v.xxh_64, "[ XXH64(%03d) ] Expected: %16x, got: %16x", i, v.xxh_64, xxh64)
+ testing.expectf(t, xxh3_64 == v.xxh3_64, "[XXH3_64(%03d) ] Expected: %16x, got: %16x", i, v.xxh3_64, xxh3_64)
+ testing.expectf(t, xxh3_128 == v.xxh3_128, "[XXH3_128(%03d) ] Expected: %32x, got: %32x", i, v.xxh3_128, xxh3_128)
}
}
@test
-test_xxhash_vectors :: proc(t: ^testing.T) {
- fmt.println("Verifying against XXHASH_TEST_VECTOR_SEEDED:")
-
+test_xxhash_seeded :: proc(t: ^testing.T) {
buf := make([]u8, 256)
defer delete(buf)
for seed, table in XXHASH_TEST_VECTOR_SEEDED {
- fmt.printf("\tSeed: %v\n", seed)
-
for v, i in table {
b := buf[:i]
@@ -311,60 +96,48 @@ test_xxhash_vectors :: proc(t: ^testing.T) {
xxh3_64 := xxhash.XXH3_64(b, seed)
xxh3_128 := xxhash.XXH3_128(b, seed)
- xxh32_error := fmt.tprintf("[ XXH32(%03d) ] Expected: %08x. Got: %08x.", i, v.xxh_32, xxh32)
- xxh64_error := fmt.tprintf("[ XXH64(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh_64, xxh64)
-
- xxh3_64_error := fmt.tprintf("[XXH3_64(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh3_64, xxh3_64)
- xxh3_128_error := fmt.tprintf("[XXH3_128(%03d) ] Expected: %32x. Got: %32x.", i, v.xxh3_128, xxh3_128)
-
- expect(t, xxh32 == v.xxh_32, xxh32_error)
- expect(t, xxh64 == v.xxh_64, xxh64_error)
- expect(t, xxh3_64 == v.xxh3_64, xxh3_64_error)
- expect(t, xxh3_128 == v.xxh3_128, xxh3_128_error)
+ testing.expectf(t, xxh32 == v.xxh_32, "[ XXH32(%03d) ] Expected: %08x, got: %08x", i, v.xxh_32, xxh32)
+ testing.expectf(t, xxh64 == v.xxh_64, "[ XXH64(%03d) ] Expected: %16x, got: %16x", i, v.xxh_64, xxh64)
+ testing.expectf(t, xxh3_64 == v.xxh3_64, "[XXH3_64(%03d) ] Expected: %16x, got: %16x", i, v.xxh3_64, xxh3_64)
+ testing.expectf(t, xxh3_128 == v.xxh3_128, "[XXH3_128(%03d) ] Expected: %32x, got: %32x", i, v.xxh3_128, xxh3_128)
if len(b) > xxhash.XXH3_MIDSIZE_MAX {
- fmt.printf("XXH3 - size: %v\n", len(b))
-
xxh3_state, _ := xxhash.XXH3_create_state()
xxhash.XXH3_64_reset_with_seed(xxh3_state, seed)
xxhash.XXH3_64_update(xxh3_state, b)
xxh3_64_streamed := xxhash.XXH3_64_digest(xxh3_state)
xxhash.XXH3_destroy_state(xxh3_state)
- xxh3_64s_error := fmt.tprintf("[XXH3_64s(%03d) ] Expected: %16x. Got: %16x.", i, v.xxh3_64, xxh3_64_streamed)
- expect(t, xxh3_64_streamed == v.xxh3_64, xxh3_64s_error)
+ testing.expectf(t, xxh3_64_streamed == v.xxh3_64, "[XXH3_64s(%03d) ] Expected: %16x, got: %16x", i, v.xxh3_64, xxh3_64_streamed)
xxh3_state2, _ := xxhash.XXH3_create_state()
xxhash.XXH3_128_reset_with_seed(xxh3_state2, seed)
xxhash.XXH3_128_update(xxh3_state2, b)
xxh3_128_streamed := xxhash.XXH3_128_digest(xxh3_state2)
xxhash.XXH3_destroy_state(xxh3_state2)
- xxh3_128s_error := fmt.tprintf("[XXH3_128s(%03d) ] Expected: %32x. Got: %32x.", i, v.xxh3_128, xxh3_128_streamed)
- expect(t, xxh3_128_streamed == v.xxh3_128, xxh3_128s_error)
+ testing.expectf(t, xxh3_128_streamed == v.xxh3_128, "[XXH3_128s(%03d) ] Expected: %32x, got: %32x", i, v.xxh3_128, xxh3_128_streamed)
}
}
}
+}
- fmt.println("Verifying against XXHASH_TEST_VECTOR_SECRET:")
- for secret, table in XXHASH_TEST_VECTOR_SECRET {
- fmt.printf("\tSecret:\n\t\t\"%v\"\n", secret)
+@test
+test_xxhash_secret :: proc(t: ^testing.T) {
+ buf := make([]u8, 256)
+ defer delete(buf)
+ for secret, table in XXHASH_TEST_VECTOR_SECRET {
secret_bytes := transmute([]u8)secret
-
for v, i in table {
b := buf[:i]
xxh3_128 := xxhash.XXH3_128(b, secret_bytes)
- xxh3_128_error := fmt.tprintf("[XXH3_128(%03d)] Expected: %32x. Got: %32x.", i, v.xxh3_128_secret, xxh3_128)
-
- expect(t, xxh3_128 == v.xxh3_128_secret, xxh3_128_error)
+ testing.expectf(t, xxh3_128 == v.xxh3_128_secret, "[XXH3_128(%03d)] Expected: %32x, got: %32x", i, v.xxh3_128_secret, xxh3_128)
}
}
}
@test
test_crc64_vectors :: proc(t: ^testing.T) {
- fmt.println("Verifying CRC-64:")
-
vectors := map[string][4]u64 {
"123456789" = {
0x6c40df5f0b497347, // ECMA-182,
@@ -379,23 +152,18 @@ test_crc64_vectors :: proc(t: ^testing.T) {
0xe7fcf1006b503b61, // ISO 3306, input and output inverted
},
}
+ defer delete(vectors)
for vector, expected in vectors {
- fmt.println("\tVector:", vector)
b := transmute([]u8)vector
ecma := hash.crc64_ecma_182(b)
xz := hash.crc64_xz(b)
iso := hash.crc64_iso_3306(b)
iso2 := hash.crc64_iso_3306_inverse(b)
- ecma_error := fmt.tprintf("[ CRC-64 ECMA ] Expected: %016x. Got: %016x.", expected[0], ecma)
- xz_error := fmt.tprintf("[ CRC-64 XZ ] Expected: %016x. Got: %016x.", expected[1], xz)
- iso_error := fmt.tprintf("[ CRC-64 ISO 3306] Expected: %016x. Got: %016x.", expected[2], iso)
- iso2_error := fmt.tprintf("[~CRC-64 ISO 3306] Expected: %016x. Got: %016x.", expected[3], iso2)
-
- expect(t, ecma == expected[0], ecma_error)
- expect(t, xz == expected[1], xz_error)
- expect(t, iso == expected[2], iso_error)
- expect(t, iso2 == expected[3], iso2_error)
+ testing.expectf(t, ecma == expected[0], "[ CRC-64 ECMA ] Expected: %016x, got: %016x", expected[0], ecma)
+ testing.expectf(t, xz == expected[1], "[ CRC-64 XZ ] Expected: %016x, got: %016x", expected[1], xz)
+ testing.expectf(t, iso == expected[2], "[ CRC-64 ISO 3306] Expected: %016x, got: %016x", expected[2], iso)
+ testing.expectf(t, iso2 == expected[3], "[~CRC-64 ISO 3306] Expected: %016x, got: %016x", expected[3], iso2)
}
}
\ No newline at end of file
diff --git a/tests/core/hash/test_vectors_xxhash.odin b/tests/core/hash/test_vectors_xxhash.odin
index 6a37aef30..f72e2699a 100644
--- a/tests/core/hash/test_vectors_xxhash.odin
+++ b/tests/core/hash/test_vectors_xxhash.odin
@@ -1,6 +1,4 @@
-/*
- Hash Test Vectors
-*/
+// Hash Test Vectors
package test_core_hash
XXHASH_Test_Vectors :: struct #packed {
@@ -6789,4 +6787,4 @@ XXHASH_TEST_VECTOR_SECRET := map[string][257]XXHASH_Test_Vectors_With_Secret{
/* XXH3_128_with_secret */ 0x0f9b41191242ade48bbde48dff0d38ec,
},
},
-}
+}
\ No newline at end of file
diff --git a/tests/core/image/build.bat b/tests/core/image/build.bat
deleted file mode 100644
index 03ee6b9a5..000000000
--- a/tests/core/image/build.bat
+++ /dev/null
@@ -1,4 +0,0 @@
-@echo off
-pushd ..
-odin run image
-popd
\ No newline at end of file
diff --git a/tests/core/image/test_core_image.odin b/tests/core/image/test_core_image.odin
index ae92ca617..899596229 100644
--- a/tests/core/image/test_core_image.odin
+++ b/tests/core/image/test_core_image.odin
@@ -1,11 +1,11 @@
/*
- Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
+ Copyright 2021-2024 Jeroen van Rijn <nom@duclavier.com>.
Made available under Odin's BSD-3 license.
List of contributors:
Jeroen van Rijn: Initial implementation.
- A test suite for PNG + QOI.
+ A test suite for PNG, TGA, NetPBM, QOI and BMP.
*/
package test_core_image
@@ -13,6 +13,7 @@ import "core:testing"
import "core:compress"
import "core:image"
+import "core:image/bmp"
import pbm "core:image/netpbm"
import "core:image/png"
import "core:image/qoi"
@@ -20,59 +21,24 @@ import "core:image/tga"
import "core:bytes"
import "core:hash"
-import "core:fmt"
import "core:strings"
-
import "core:mem"
-import "core:os"
import "core:time"
-import "base:runtime"
-
-TEST_SUITE_PATH :: "assets/PNG"
-
-TEST_count := 0
-TEST_fail := 0
+TEST_SUITE_PATH_PNG :: ODIN_ROOT + "tests/core/assets/PNG"
+TEST_SUITE_PATH_BMP :: ODIN_ROOT + "tests/core/assets/BMP"
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
I_Error :: image.Error
-main :: proc() {
- t := testing.T{}
- png_test(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-PNG_Test :: struct {
+Test :: struct {
file: string,
tests: []struct {
options: image.Options,
expected_error: image.Error,
- dims: PNG_Dims,
+ dims: Dims,
hash: u32,
},
}
-
Default :: image.Options{}
Alpha_Add :: image.Options{.alpha_add_if_missing}
Premul_Drop :: image.Options{.alpha_premultiply, .alpha_drop_if_present}
@@ -82,19 +48,18 @@ Blend_BG_Keep :: image.Options{.blend_background, .alpha_add_if_missing}
Return_Metadata :: image.Options{.return_metadata}
No_Channel_Expansion :: image.Options{.do_not_expand_channels, .return_metadata}
-PNG_Dims :: struct {
+Dims :: struct {
width: int,
height: int,
channels: int,
depth: int,
}
-Basic_PNG_Tests := []PNG_Test{
+Basic_PNG_Tests := []Test{
/*
Basic format tests:
http://www.schaik.com/pngsuite/pngsuite_bas_png.html
*/
-
{
"basn0g01", // Black and white.
{
@@ -202,7 +167,7 @@ Basic_PNG_Tests := []PNG_Test{
},
}
-Interlaced_PNG_Tests := []PNG_Test{
+Interlaced_PNG_Tests := []Test{
/*
Interlaced format tests:
http://www.schaik.com/pngsuite/pngsuite_int_png.html
@@ -320,9 +285,9 @@ Interlaced_PNG_Tests := []PNG_Test{
},
}
-Odd_Sized_PNG_Tests := []PNG_Test{
+Odd_Sized_PNG_Tests := []Test{
/*
-" PngSuite", // Odd sizes / PNG-files:
+ "PngSuite", // Odd sizes / PNG-files:
http://www.schaik.com/pngsuite/pngsuite_siz_png.html
This tests curious sizes with and without interlacing.
@@ -546,7 +511,7 @@ Odd_Sized_PNG_Tests := []PNG_Test{
},
}
-PNG_bKGD_Tests := []PNG_Test{
+PNG_bKGD_Tests := []Test{
/*
" PngSuite", // Background colors / PNG-files:
http://www.schaik.com/pngsuite/pngsuite_bck_png.html
@@ -633,7 +598,7 @@ PNG_bKGD_Tests := []PNG_Test{
},
}
-PNG_tRNS_Tests := []PNG_Test{
+PNG_tRNS_Tests := []Test{
/*
PngSuite - Transparency:
http://www.schaik.com/pngsuite/pngsuite_trn_png.html
@@ -795,7 +760,7 @@ PNG_tRNS_Tests := []PNG_Test{
},
}
-PNG_Filter_Tests := []PNG_Test{
+PNG_Filter_Tests := []Test{
/*
PngSuite - Image filtering:
@@ -874,7 +839,7 @@ PNG_Filter_Tests := []PNG_Test{
},
}
-PNG_Varied_IDAT_Tests := []PNG_Test{
+PNG_Varied_IDAT_Tests := []Test{
/*
PngSuite - Chunk ordering:
@@ -933,7 +898,7 @@ PNG_Varied_IDAT_Tests := []PNG_Test{
},
}
-PNG_ZLIB_Levels_Tests := []PNG_Test{
+PNG_ZLIB_Levels_Tests := []Test{
/*
PngSuite - Zlib compression:
@@ -974,7 +939,7 @@ PNG_ZLIB_Levels_Tests := []PNG_Test{
},
}
-PNG_sPAL_Tests := []PNG_Test{
+PNG_sPAL_Tests := []Test{
/*
PngSuite - Additional palettes:
@@ -1021,7 +986,7 @@ PNG_sPAL_Tests := []PNG_Test{
},
}
-PNG_Ancillary_Tests := []PNG_Test{
+PNG_Ancillary_Tests := []Test{
/*
PngSuite" - Ancillary chunks:
@@ -1189,7 +1154,7 @@ PNG_Ancillary_Tests := []PNG_Test{
}
-Corrupt_PNG_Tests := []PNG_Test{
+Corrupt_PNG_Tests := []Test{
/*
PngSuite - Corrupted files / PNG-files:
@@ -1285,7 +1250,7 @@ Corrupt_PNG_Tests := []PNG_Test{
}
-No_Postprocesing_Tests := []PNG_Test{
+No_Postprocesing_Tests := []Test{
/*
These are some custom tests where we skip expanding to RGB(A).
*/
@@ -1309,8 +1274,6 @@ No_Postprocesing_Tests := []PNG_Test{
},
}
-
-
Text_Title :: "PngSuite"
Text_Software :: "Created on a NeXTstation color using \"pnmtopng\"."
Text_Descrption :: "A compilation of a set of images created to test the\nvarious color-types of the PNG format. Included are\nblack&white, color, paletted, with alpha channel, with\ntransparency formats. All bit-depths allowed according\nto the spec are present."
@@ -1430,82 +1393,92 @@ Expected_Text := map[string]map[string]png.Text {
}
@test
-png_test :: proc(t: ^testing.T) {
-
- total_tests := 0
- total_expected := 235
-
- PNG_Suites := [][]PNG_Test{
- Basic_PNG_Tests,
- Interlaced_PNG_Tests,
- Odd_Sized_PNG_Tests,
- PNG_bKGD_Tests,
- PNG_tRNS_Tests,
- PNG_Filter_Tests,
- PNG_Varied_IDAT_Tests,
- PNG_ZLIB_Levels_Tests,
- PNG_sPAL_Tests,
- PNG_Ancillary_Tests,
- Corrupt_PNG_Tests,
-
- No_Postprocesing_Tests,
+png_test_basic :: proc(t: ^testing.T) {
+ run_png_suite(t, Basic_PNG_Tests)
+}
- }
+@test
+png_test_interlaced :: proc(t: ^testing.T) {
+ run_png_suite(t, Interlaced_PNG_Tests)
+}
- for suite in PNG_Suites {
- total_tests += run_png_suite(t, suite)
- }
+@test
+png_test_odd_sized :: proc(t: ^testing.T) {
+ run_png_suite(t, Odd_Sized_PNG_Tests)
+}
+
+@test
+png_test_bKGD :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_bKGD_Tests)
+}
+
+@test
+png_test_tRNS :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_tRNS_Tests)
+}
+
+@test
+png_test_sPAL :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_sPAL_Tests)
+}
+
+@test
+png_test_filter :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_Filter_Tests)
+}
+
+@test
+png_test_varied_idat :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_Varied_IDAT_Tests)
+}
+
+@test
+png_test_zlib_levels :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_ZLIB_Levels_Tests)
+}
- error := fmt.tprintf("Expected %v PNG tests, %v ran.", total_expected, total_tests)
- expect(t, total_tests == total_expected, error)
+@test
+png_test_ancillary :: proc(t: ^testing.T) {
+ run_png_suite(t, PNG_Ancillary_Tests)
}
-run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
+@test
+png_test_corrupt :: proc(t: ^testing.T) {
+ run_png_suite(t, Corrupt_PNG_Tests)
+}
- context = runtime.default_context()
+@test
+png_test_no_postproc :: proc(t: ^testing.T) {
+ run_png_suite(t, No_Postprocesing_Tests)
+}
+run_png_suite :: proc(t: ^testing.T, suite: []Test) {
for file in suite {
- test_file := strings.concatenate({TEST_SUITE_PATH, "/", file.file, ".png"}, context.temp_allocator)
+ test_file := strings.concatenate({TEST_SUITE_PATH_PNG, "/", file.file, ".png"}, context.allocator)
+ defer delete(test_file)
img: ^png.Image
err: png.Error
count := 0
for test in file.tests {
- count += 1
- subtotal += 1
- passed := false
-
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
+ count += 1
img, err = png.load(test_file, test.options)
- error := fmt.tprintf("%v failed with %v.", test_file, err)
-
- passed = (test.expected_error == nil && err == nil) || (test.expected_error == err)
-
- expect(t, passed, error)
+ passed := (test.expected_error == nil && err == nil) || (test.expected_error == err)
+ testing.expectf(t, passed, "%q failed to load with error %v.", file.file, err)
if err == nil { // No point in running the other tests if it didn't load.
pixels := bytes.buffer_to_bytes(&img.pixels)
- // This struct compare fails at -opt:2 if PNG_Dims is not #packed.
-
- dims := PNG_Dims{img.width, img.height, img.channels, img.depth}
- error = fmt.tprintf("%v has %v, expected: %v.", file.file, dims, test.dims)
-
+ dims := Dims{img.width, img.height, img.channels, img.depth}
dims_pass := test.dims == dims
-
- expect(t, dims_pass, error)
-
+ testing.expectf(t, dims_pass, "%v has %v, expected: %v", file.file, dims, test.dims)
passed &= dims_pass
- png_hash := hash.crc32(pixels)
- error = fmt.tprintf("%v test %v hash is %08x, expected %08x with %v.", file.file, count, png_hash, test.hash, test.options)
- expect(t, test.hash == png_hash, error)
-
+ png_hash := hash.crc32(pixels)
+ testing.expectf(t, test.hash == png_hash, "%v test %v hash is %08x, expected %08x with %v", file.file, count, png_hash, test.hash, test.options)
passed &= test.hash == png_hash
if passed {
@@ -1515,19 +1488,16 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
defer bytes.buffer_destroy(&qoi_buffer)
qoi_save_err := qoi.save(&qoi_buffer, img)
- error = fmt.tprintf("%v test %v QOI save failed with %v.", file.file, count, qoi_save_err)
- expect(t, qoi_save_err == nil, error)
+ testing.expectf(t, qoi_save_err == nil, "%v test %v QOI save failed with %v", file.file, count, qoi_save_err)
if qoi_save_err == nil {
qoi_img, qoi_load_err := qoi.load(qoi_buffer.buf[:])
defer qoi.destroy(qoi_img)
- error = fmt.tprintf("%v test %v QOI load failed with %v.", file.file, count, qoi_load_err)
- expect(t, qoi_load_err == nil, error)
+ testing.expectf(t, qoi_load_err == nil, "%v test %v QOI load failed with %v", file.file, count, qoi_load_err)
qoi_hash := hash.crc32(qoi_img.pixels.buf[:])
- error = fmt.tprintf("%v test %v QOI load hash is %08x, expected it match PNG's %08x with %v.", file.file, count, qoi_hash, png_hash, test.options)
- expect(t, qoi_hash == png_hash, error)
+ testing.expectf(t, qoi_hash == png_hash, "%v test %v QOI load hash is %08x, expected it to match PNG's %08x with %v", file.file, count, qoi_hash, png_hash, test.options)
}
}
@@ -1537,19 +1507,15 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
defer bytes.buffer_destroy(&tga_buffer)
tga_save_err := tga.save(&tga_buffer, img)
- error = fmt.tprintf("%v test %v TGA save failed with %v.", file.file, count, tga_save_err)
- expect(t, tga_save_err == nil, error)
-
+ testing.expectf(t, tga_save_err == nil, "%v test %v TGA save failed with %v", file.file, count, tga_save_err)
if tga_save_err == nil {
tga_img, tga_load_err := tga.load(tga_buffer.buf[:])
defer tga.destroy(tga_img)
- error = fmt.tprintf("%v test %v TGA load failed with %v.", file.file, count, tga_load_err)
- expect(t, tga_load_err == nil, error)
+ testing.expectf(t, tga_load_err == nil, "%v test %v TGA load failed with %v", file.file, count, tga_load_err)
tga_hash := hash.crc32(tga_img.pixels.buf[:])
- error = fmt.tprintf("%v test %v TGA load hash is %08x, expected it match PNG's %08x with %v.", file.file, count, tga_hash, png_hash, test.options)
- expect(t, tga_hash == png_hash, error)
+ testing.expectf(t, tga_hash == png_hash, "%v test %v TGA load hash is %08x, expected it to match PNG's %08x with %v", file.file, count, tga_hash, png_hash, test.options)
}
}
@@ -1558,22 +1524,18 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
pbm_buf, pbm_save_err := pbm.save_to_buffer(img)
defer delete(pbm_buf)
- error = fmt.tprintf("%v test %v PBM save failed with %v.", file.file, count, pbm_save_err)
- expect(t, pbm_save_err == nil, error)
+ testing.expectf(t, pbm_save_err == nil, "%v test %v PBM save failed with %v", file.file, count, pbm_save_err)
if pbm_save_err == nil {
// Try to load it again.
pbm_img, pbm_load_err := pbm.load(pbm_buf)
defer pbm.destroy(pbm_img)
- error = fmt.tprintf("%v test %v PBM load failed with %v.", file.file, count, pbm_load_err)
- expect(t, pbm_load_err == nil, error)
+ testing.expectf(t, pbm_load_err == nil, "%v test %v PBM load failed with %v", file.file, count, pbm_load_err)
if pbm_load_err == nil {
pbm_hash := hash.crc32(pbm_img.pixels.buf[:])
-
- error = fmt.tprintf("%v test %v PBM load hash is %08x, expected it match PNG's %08x with %v.", file.file, count, pbm_hash, png_hash, test.options)
- expect(t, pbm_hash == png_hash, error)
+ testing.expectf(t, pbm_hash == png_hash, "%v test %v PBM load hash is %08x, expected it to match PNG's %08x with %v", file.file, count, pbm_hash, png_hash, test.options)
}
}
}
@@ -1587,22 +1549,18 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
pbm_buf, pbm_save_err := pbm.save_to_buffer(img, pbm_info)
defer delete(pbm_buf)
- error = fmt.tprintf("%v test %v PBM save failed with %v.", file.file, count, pbm_save_err)
- expect(t, pbm_save_err == nil, error)
+ testing.expectf(t, pbm_save_err == nil, "%v test %v PBM save failed with %v", file.file, count, pbm_save_err)
if pbm_save_err == nil {
// Try to load it again.
pbm_img, pbm_load_err := pbm.load(pbm_buf)
defer pbm.destroy(pbm_img)
- error = fmt.tprintf("%v test %v PBM load failed with %v.", file.file, count, pbm_load_err)
- expect(t, pbm_load_err == nil, error)
+ testing.expectf(t, pbm_load_err == nil, "%v test %v PBM load failed with %v", file.file, count, pbm_load_err)
if pbm_load_err == nil {
pbm_hash := hash.crc32(pbm_img.pixels.buf[:])
-
- error = fmt.tprintf("%v test %v PBM load hash is %08x, expected it match PNG's %08x with %v.", file.file, count, pbm_hash, png_hash, test.options)
- expect(t, pbm_hash == png_hash, error)
+ testing.expectf(t, pbm_hash == png_hash, "%v test %v PBM load hash is %08x, expected it to match PNG's %08x with %v", file.file, count, pbm_hash, png_hash, test.options)
}
}
}
@@ -1655,21 +1613,18 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
float_pbm_buf, float_pbm_save_err := pbm.save_to_buffer(float_img, pbm_info)
defer delete(float_pbm_buf)
- error = fmt.tprintf("%v test %v save as PFM failed with %v", file.file, count, float_pbm_save_err)
- expect(t, float_pbm_save_err == nil, error)
+ testing.expectf(t, float_pbm_save_err == nil, "%v test %v save as PFM failed with %v", file.file, count, float_pbm_save_err)
if float_pbm_save_err == nil {
// Load float image and compare.
float_pbm_img, float_pbm_load_err := pbm.load(float_pbm_buf)
defer pbm.destroy(float_pbm_img)
- error = fmt.tprintf("%v test %v PFM load failed with %v", file.file, count, float_pbm_load_err)
- expect(t, float_pbm_load_err == nil, error)
+ testing.expectf(t, float_pbm_load_err == nil, "%v test %v PFM load failed with %v", file.file, count, float_pbm_load_err)
load_float := mem.slice_data_cast([]f32, float_pbm_img.pixels.buf[:])
- error = fmt.tprintf("%v test %v PFM load returned %v floats, expected %v", file.file, count, len(load_float), len(orig_float))
- expect(t, len(load_float) == len(orig_float), error)
+ testing.expectf(t, len(load_float) == len(orig_float), "%v test %v PFM load returned %v floats, expected %v", file.file, count, len(load_float), len(orig_float))
// Compare floats
equal := true
@@ -1679,15 +1634,13 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
break
}
}
- error = fmt.tprintf("%v test %v PFM loaded floats to match", file.file, count)
- expect(t, equal, error)
+ testing.expectf(t, equal, "%v test %v PFM loaded floats to match", file.file, count)
}
}
}
}
if .return_metadata in test.options {
-
if v, ok := img.metadata.(^image.PNG_Info); ok {
for c in v.chunks {
#partial switch(c.header.type) {
@@ -1696,8 +1649,7 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
case "pp0n2c16", "pp0n6a08":
gamma, gamma_ok := png.gamma(c)
expected_gamma := f32(1.0)
- error = fmt.tprintf("%v test %v gAMA is %v, expected %v.", file.file, count, gamma, expected_gamma)
- expect(t, gamma == expected_gamma && gamma_ok, error)
+ testing.expectf(t, gamma == expected_gamma && gamma_ok, "%v test %v gAMA is %v, expected %v", file.file, count, gamma, expected_gamma)
}
case .PLTE:
switch(file.file) {
@@ -1705,8 +1657,7 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
plte, plte_ok := png.plte(c)
expected_plte_len := u16(216)
- error = fmt.tprintf("%v test %v PLTE length is %v, expected %v.", file.file, count, plte.used, expected_plte_len)
- expect(t, expected_plte_len == plte.used && plte_ok, error)
+ testing.expectf(t, expected_plte_len == plte.used && plte_ok, "%v test %v PLTE length is %v, expected %v", file.file, count, plte.used, expected_plte_len)
}
case .sPLT:
switch(file.file) {
@@ -1714,12 +1665,10 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
splt, splt_ok := png.splt(c)
expected_splt_len := u16(216)
- error = fmt.tprintf("%v test %v sPLT length is %v, expected %v.", file.file, count, splt.used, expected_splt_len)
- expect(t, expected_splt_len == splt.used && splt_ok, error)
+ testing.expectf(t, expected_splt_len == splt.used && splt_ok, "%v test %v sPLT length is %v, expected %v", file.file, count, splt.used, expected_splt_len)
expected_splt_name := "six-cube"
- error = fmt.tprintf("%v test %v sPLT name is %v, expected %v.", file.file, count, splt.name, expected_splt_name)
- expect(t, expected_splt_name == splt.name && splt_ok, error)
+ testing.expectf(t, expected_splt_name == splt.name && splt_ok, "%v test %v sPLT name is %v, expected %v", file.file, count, splt.name, expected_splt_name)
png.splt_destroy(splt)
}
@@ -1733,48 +1682,37 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
g = png.CIE_1931{x = 0.3000, y = 0.6000},
b = png.CIE_1931{x = 0.1500, y = 0.0600},
}
- error = fmt.tprintf("%v test %v cHRM is %v, expected %v.", file.file, count, chrm, expected_chrm)
- expect(t, expected_chrm == chrm && chrm_ok, error)
+ testing.expectf(t, expected_chrm == chrm && chrm_ok, "%v test %v cHRM is %v, expected %v", file.file, count, chrm, expected_chrm)
}
case .pHYs:
phys, phys_ok := png.phys(c)
- phys_err := "%v test %v cHRM is %v, expected %v."
switch (file.file) {
case "cdfn2c08":
expected_phys := png.pHYs{ppu_x = 1, ppu_y = 4, unit = .Unknown}
- error = fmt.tprintf(phys_err, file.file, count, phys, expected_phys)
- expect(t, expected_phys == phys && phys_ok, error)
+ testing.expectf(t, expected_phys == phys && phys_ok, "%v test %v pHYs is %v, expected %v", file.file, count, phys, expected_phys)
case "cdhn2c08":
expected_phys := png.pHYs{ppu_x = 4, ppu_y = 1, unit = .Unknown}
- error = fmt.tprintf(phys_err, file.file, count, phys, expected_phys)
- expect(t, expected_phys == phys && phys_ok, error)
+ testing.expectf(t, expected_phys == phys && phys_ok, "%v test %v pHYs is %v, expected %v", file.file, count, phys, expected_phys)
case "cdsn2c08":
expected_phys := png.pHYs{ppu_x = 1, ppu_y = 1, unit = .Unknown}
- error = fmt.tprintf(phys_err, file.file, count, phys, expected_phys)
- expect(t, expected_phys == phys && phys_ok, error)
+ testing.expectf(t, expected_phys == phys && phys_ok, "%v test %v pHYs is %v, expected %v", file.file, count, phys, expected_phys)
case "cdun2c08":
expected_phys := png.pHYs{ppu_x = 1000, ppu_y = 1000, unit = .Meter}
- error = fmt.tprintf(phys_err, file.file, count, phys, expected_phys)
- expect(t, expected_phys == phys && phys_ok, error)
+ testing.expectf(t, expected_phys == phys && phys_ok, "%v test %v pHYs is %v, expected %v", file.file, count, phys, expected_phys)
}
case .hIST:
hist, hist_ok := png.hist(c)
- hist_err := "%v test %v hIST has %v entries, expected %v."
switch (file.file) {
case "ch1n3p04":
- error = fmt.tprintf(hist_err, file.file, count, hist.used, 15)
- expect(t, hist.used == 15 && hist_ok, error)
+ testing.expectf(t, hist.used == 15 && hist_ok, "%v test %v hIST has %v entries, expected %v", file.file, count, hist.used, 15)
case "ch2n3p08":
- error = fmt.tprintf(hist_err, file.file, count, hist.used, 256)
- expect(t, hist.used == 256 && hist_ok, error)
+ testing.expectf(t, hist.used == 256 && hist_ok, "%v test %v hIST has %v entries, expected %v", file.file, count, hist.used, 256)
}
case .tIME:
png_time, png_time_ok := png.time(c)
- time_err := "%v test %v tIME was %v, expected %v."
expected_time: png.tIME
core_time, core_time_ok := png.core_time(c)
- time_core_err := "%v test %v tIME->core:time is %v, expected %v."
expected_core: time.Time
switch(file.file) {
@@ -1789,14 +1727,10 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
expected_core = time.Time{_nsec = 946684799000000000}
}
- error = fmt.tprintf(time_err, file.file, count, png_time, expected_time)
- expect(t, png_time == expected_time && png_time_ok, error)
-
- error = fmt.tprintf(time_core_err, file.file, count, core_time, expected_core)
- expect(t, core_time == expected_core && core_time_ok, error)
+ testing.expectf(t, png_time == expected_time && png_time_ok, "%v test %v tIME was %v, expected %v", file.file, count, png_time, expected_time)
+ testing.expectf(t, core_time == expected_core && core_time_ok, "%v test %v tIME->core:time is %v, expected %v", file.file, count, core_time, expected_core)
case .sBIT:
sbit, sbit_ok := png.sbit(c)
- sbit_err := "%v test %v sBIT was %v, expected %v."
expected_sbit: [4]u8
switch (file.file) {
@@ -1815,8 +1749,7 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
case "cdfn2c08", "cdhn2c08", "cdsn2c08", "cdun2c08", "ch1n3p04", "basn3p04":
expected_sbit = [4]u8{ 4, 4, 4, 0}
}
- error = fmt.tprintf(sbit_err, file.file, count, sbit, expected_sbit)
- expect(t, sbit == expected_sbit && sbit_ok, error)
+ testing.expectf(t, sbit == expected_sbit && sbit_ok, "%v test %v sBIT was %v, expected %v", file.file, count, sbit, expected_sbit)
case .tEXt, .zTXt:
text, text_ok := png.text(c)
defer png.text_destroy(text)
@@ -1828,8 +1761,7 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
if file.file in Expected_Text {
if text.keyword in Expected_Text[file.file] {
test_text := Expected_Text[file.file][text.keyword].text
- error = fmt.tprintf("%v test %v text keyword {{%v}}:'%v', expected '%v'.", file.file, count, text.keyword, text.text, test_text)
- expect(t, text.text == test_text && text_ok, error)
+ testing.expectf(t, text.text == test_text && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text.text, test_text)
}
}
}
@@ -1842,74 +1774,623 @@ run_png_suite :: proc(t: ^testing.T, suite: []PNG_Test) -> (subtotal: int) {
if file.file in Expected_Text {
if text.keyword in Expected_Text[file.file] {
test := Expected_Text[file.file][text.keyword]
- error = fmt.tprintf("%v test %v text keyword {{%v}}:'%v', expected '%v'.", file.file, count, text.keyword, text, test)
- expect(t, text.language == test.language && text_ok, error)
- expect(t, text.keyword_localized == test.keyword_localized && text_ok, error)
+ testing.expectf(t, text.language == test.language && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.keyword_localized == test.keyword_localized && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
}
}
case "ctfn0g04": // international UTF-8, finnish
if file.file in Expected_Text {
if text.keyword in Expected_Text[file.file] {
test := Expected_Text[file.file][text.keyword]
- error = fmt.tprintf("%v test %v text keyword {{%v}}:'%v', expected '%v'.", file.file, count, text.keyword, text, test)
- expect(t, text.text == test.text && text_ok, error)
- expect(t, text.language == test.language && text_ok, error)
- expect(t, text.keyword_localized == test.keyword_localized && text_ok, error)
+ testing.expectf(t, text.text == test.text && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.language == test.language && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.keyword_localized == test.keyword_localized && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
}
}
case "ctgn0g04": // international UTF-8, greek
if file.file in Expected_Text {
if text.keyword in Expected_Text[file.file] {
test := Expected_Text[file.file][text.keyword]
- error = fmt.tprintf("%v test %v text keyword {{%v}}:'%v', expected '%v'.", file.file, count, text.keyword, text, test)
- expect(t, text.text == test.text && text_ok, error)
- expect(t, text.language == test.language && text_ok, error)
- expect(t, text.keyword_localized == test.keyword_localized && text_ok, error)
+ testing.expectf(t, text.text == test.text && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.language == test.language && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.keyword_localized == test.keyword_localized && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
}
}
case "cthn0g04": // international UTF-8, hindi
if file.file in Expected_Text {
if text.keyword in Expected_Text[file.file] {
test := Expected_Text[file.file][text.keyword]
- error = fmt.tprintf("%v test %v text keyword {{%v}}:'%v', expected '%v'.", file.file, count, text.keyword, text, test)
- expect(t, text.text == test.text && text_ok, error)
- expect(t, text.language == test.language && text_ok, error)
- expect(t, text.keyword_localized == test.keyword_localized && text_ok, error)
+ testing.expectf(t, text.text == test.text && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.language == test.language && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.keyword_localized == test.keyword_localized && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
}
}
case "ctjn0g04": // international UTF-8, japanese
if file.file in Expected_Text {
if text.keyword in Expected_Text[file.file] {
test := Expected_Text[file.file][text.keyword]
- error = fmt.tprintf("%v test %v text keyword {{%v}}:'%v', expected '%v'.", file.file, count, text.keyword, text, test)
- expect(t, text.text == test.text && text_ok, error)
- expect(t, text.language == test.language && text_ok, error)
- expect(t, text.keyword_localized == test.keyword_localized && text_ok, error)
+ testing.expectf(t, text.text == test.text && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.language == test.language && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
+ testing.expectf(t, text.keyword_localized == test.keyword_localized && text_ok, "%v test %v text keyword {{%v}}:'%v', expected '%v'", file.file, count, text.keyword, text, test)
}
}
}
case .eXIf:
if file.file == "exif2c08" { // chunk with jpeg exif data
exif, exif_ok := png.exif(c)
- error = fmt.tprintf("%v test %v eXIf byte order '%v', expected 'big_endian'.", file.file, count, exif.byte_order)
- error_len := fmt.tprintf("%v test %v eXIf data length '%v', expected '%v'.", file.file, len(exif.data), 978)
- expect(t, exif.byte_order == .big_endian && exif_ok, error)
- expect(t, len(exif.data) == 978 && exif_ok, error_len)
+ testing.expectf(t, exif.byte_order == .big_endian && exif_ok, "%v test %v eXIf byte order '%v', expected 'big_endian'.", file.file, count, exif.byte_order)
+ testing.expectf(t, len(exif.data) == 978 && exif_ok, "%v test %v eXIf data length '%v', expected '%v'", file.file, count, len(exif.data), 978)
}
}
}
}
}
}
-
png.destroy(img)
+ }
+ }
+ return
+}
+
+/*
+ Basic format tests:
+ https://entropymine.com/jason/bmpsuite/bmpsuite/html/bmpsuite.html - Version 2.8; 2023-11-28
+
+ The BMP Suite image generator itself is GPL, and isn't included, nor did it have its code referenced.
+ We do thank the author for the well-researched test suite, which we are free to include:
+
+ "Image files generated by this program are not covered by this license, and are
+ in the public domain (except for the embedded ICC profiles)."
+
+ The files with embedded ICC profiles aren't part of Odin's test assets. We don't support BMP metadata.
+ We don't support all "possibly correct" images, and thus only ship a subset of these from the BMP Suite.
+*/
+Basic_BMP_Tests := []Test{
+ {
+ "pal1", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "pal1wb", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "pal1bg", {
+ {Default, nil, {127, 64, 3, 8}, 0x_9e91_174a},
+ },
+ },
+ {
+ "pal4", {
+ {Default, nil, {127, 64, 3, 8}, 0x_288e_4371},
+ },
+ },
+ {
+ "pal4gs", {
+ {Default, nil, {127, 64, 3, 8}, 0x_452d_a01a},
+ },
+ },
+ {
+ "pal4rle", {
+ {Default, nil, {127, 64, 3, 8}, 0x_288e_4371},
+ },
+ },
+ {
+ "pal8", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8-0", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8gs", {
+ {Default, nil, {127, 64, 3, 8}, 0x_09c2_7834},
+ },
+ },
+ {
+ "pal8rle", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8w126", {
+ {Default, nil, {126, 63, 3, 8}, 0x_bb66_4cda},
+ },
+ },
+ {
+ "pal8w125", {
+ {Default, nil, {125, 62, 3, 8}, 0x_3ab8_f7c5},
+ },
+ },
+ {
+ "pal8w124", {
+ {Default, nil, {124, 61, 3, 8}, 0x_b53e_e6c8},
+ },
+ },
+ {
+ "pal8topdown", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8nonsquare", {
+ {Default, nil, {127, 32, 3, 8}, 0x_8409_c689},
+ },
+ },
+ {
+ "pal8v4", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8v5", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "rgb16", {
+ {Default, nil, {127, 64, 3, 8}, 0x_8b6f_81a2},
+ },
+ },
+ {
+ "rgb16bfdef", {
+ {Default, nil, {127, 64, 3, 8}, 0x_8b6f_81a2},
+ },
+ },
+ {
+ "rgb16-565", {
+ {Default, nil, {127, 64, 3, 8}, 0x_8c73_a2ff},
+ },
+ },
+ {
+ "rgb16-565pal", {
+ {Default, nil, {127, 64, 3, 8}, 0x_8c73_a2ff},
+ },
+ },
+ {
+ "rgb24", {
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgb24pal", {
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgb32", {
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgb32bf", {
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgb32bfdef", {
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+}
+
+OS2_Tests := []Test{
+ {
+ "pal8os2", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2-sz", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2-hs", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2sp", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2v2", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2v2-16", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2v2-sz", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8os2v2-40sz", { // An OS/2-style bitmap. This format can be called OS/2 BMPv1, or Windows BMPv2.
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+}
+
+// BMP files that aren't 100% to spec. Some we support, some we don't.
+Questionable_BMP_Tests := []Test{
+ {
+ "pal1p1", { // Spec says 1-bit image has 2 palette entries. This one has 1.
+ {Default, nil, {127, 64, 3, 8}, 0x_2b54_2560},
+ },
+ },
+ {
+ "pal2", { // 2-bit. Allowed on Windows CE. Irfanview doesn't support it.
+ {Default, nil, {127, 64, 3, 8}, 0x_0da2_7594},
+ },
+ },
+ {
+ "pal2color", { // 2-bit, with color palette.
+ {Default, nil, {127, 64, 3, 8}, 0x_f0d8_c5d6},
+ },
+ },
+ {
+ "pal8offs", { // 300 palette entries (yes, only 256 can be used)
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal8oversizepal", { // Some padding between palette and image data
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "pal4rletrns", { // Using palette tricks to skip pixels
+ {Default, nil, {127, 64, 3, 8}, 0x_eed4_e744},
+ },
+ },
+ {
+ "pal4rlecut", { // Using palette tricks to skip pixels
+ {Default, nil, {127, 64, 3, 8}, 0x_473f_bc7d},
+ },
+ },
+ {
+ "pal8rletrns", { // Using palette tricks to skip pixels
+ {Default, nil, {127, 64, 3, 8}, 0x_fe1f_e560},
+ },
+ },
+ {
+ "pal8rlecut", { // Using palette tricks to skip pixels
+ {Default, nil, {127, 64, 3, 8}, 0x_bd04_3619},
+ },
+ },
+ {
+ "rgb16faketrns", { // Using palette tricks to skip pixels
+ {Default, nil, {127, 64, 3, 8}, 0x_8b6f_81a2},
+ },
+ },
+ {
+ "rgb16-231", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_7393_a163},
+ },
+ },
+ {
+ "rgb16-3103", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_3b66_2189},
+ },
+ },
+ {
+ "rgba16-4444", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_b785_1f9f},
+ },
+ },
+ {
+ "rgba16-5551", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_8b6f_81a2},
+ },
+ },
+ {
+ "rgba16-1924", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_f038_2bed},
+ },
+ },
+ {
+ "rgb32-xbgr", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgb32fakealpha", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgb32-111110", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_b2c7_a8ff},
+ },
+ },
+ {
+ "rgb32-7187", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_b93a_4291},
+ },
+ },
+ {
+ "rgba32-1", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_7b67_823d},
+ },
+ },
+ {
+ "rgba32-2", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_7b67_823d},
+ },
+ },
+ {
+ "rgba32-1010102", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_aa42_0b16},
+ },
+ },
+ {
+ "rgba32-81284", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_28a2_4c16},
+ },
+ },
+ {
+ "rgba32-61754", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_4aae_26ed},
+ },
+ },
+ {
+ "rgba32abf", { // Custom bit fields
+ {Default, nil, {127, 64, 3, 8}, 0x_7b67_823d},
+ },
+ },
+ {
+ "rgb32h52", { // Truncated header (RGB bit fields included)
+ {Default, nil, {127, 64, 3, 8}, 0x_025b_ba0a},
+ },
+ },
+ {
+ "rgba32h56", { // Truncated header (RGBA bit fields included)
+ {Default, nil, {127, 64, 3, 8}, 0x_7b67_823d},
+ },
+ },
+}
- for _, v in track.allocation_map {
- error = fmt.tprintf("%v test %v leaked %v bytes @ loc %v.", file.file, count, v.size, v.location)
- expect(t, false, error)
+// Unsupported BMP features, or malformed images.
+Unsupported_BMP_Tests := []Test{
+ {
+ "ba-bm", { // An OS/2 Bitmap array. We don't support this BA format.
+ {Default, .Unsupported_OS2_File, {127, 32, 3, 8}, 0x_0000_0000},
+ },
+ },
+ {
+ "pal1huffmsb", { // An OS/2 file with Huffman 1D compression
+ {Default, .Unsupported_Compression, {127, 32, 3, 8}, 0x_0000_0000},
+ },
+ },
+ {
+ "rgb24rle24", { // An OS/2 file with RLE24 compression
+ {Default, .Unsupported_Compression, {127, 64, 3, 8}, 0x_0000_0000},
+ },
+ },
+ {
+ "rgba64", { // An OS/2 file with RLE24 compression
+ {Default, .Unsupported_BPP, {127, 64, 3, 8}, 0x_0000_0000},
+ },
+ },
+}
+
+// Malformed / malicious files
+Known_Bad_BMP_Tests := []Test{
+ {
+ "badbitcount", {
+ {Default, .Unsupported_BPP, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "badbitssize", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "baddens1", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "baddens2", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "badfilesize", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "badheadersize", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "badpalettesize", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "badplanes", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "badrle", {
+ {Default, nil, {127, 64, 3, 8}, 0x_1457_aae4},
+ },
+ },
+ {
+ "badrle4", {
+ {Default, nil, {127, 64, 3, 8}, 0x_6764_d2ac},
+ },
+ },
+ {
+ "badrle4bis", {
+ {Default, nil, {127, 64, 3, 8}, 0x_935d_bb37},
+ },
+ },
+ {
+ "badrle4ter", {
+ {Default, nil, {127, 64, 3, 8}, 0x_f2ba_5b08},
+ },
+ },
+ {
+ "badrlebis", {
+ {Default, nil, {127, 64, 3, 8}, 0x_07e2_d730},
+ },
+ },
+ {
+ "badrleter", {
+ {Default, nil, {127, 64, 3, 8}, 0x_a874_2742},
+ },
+ },
+ {
+ "badwidth", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3ce8_1fae},
+ },
+ },
+ {
+ "pal8badindex", {
+ {Default, nil, {127, 64, 3, 8}, 0x_0450_0d02},
+ },
+ },
+ {
+ "reallybig", {
+ {Default, .Image_Dimensions_Too_Large, {3000000, 2000000, 1, 24}, 0x_0000_0000},
+ },
+ },
+ {
+ "rgb16-880", {
+ {Default, nil, {127, 64, 3, 8}, 0x_f1c2_0c73},
+ },
+ },
+ {
+ "rletopdown", {
+ {Default, nil, {127, 64, 3, 8}, 0x_3845_4155},
+ },
+ },
+ {
+ "shortfile", {
+ {Default, .Short_Buffer, {127, 64, 1, 1}, 0x_0000_0000},
+ },
+ },
+}
+
+@test
+bmp_test_basic :: proc(t: ^testing.T) {
+ run_bmp_suite(t, Basic_BMP_Tests)
+}
+
+@test
+bmp_test_os2 :: proc(t: ^testing.T) {
+ run_bmp_suite(t, OS2_Tests)
+}
+
+@test
+bmp_test_questionable :: proc(t: ^testing.T) {
+ run_bmp_suite(t, Questionable_BMP_Tests)
+}
+
+@test
+bmp_test_unsupported :: proc(t: ^testing.T) {
+ run_bmp_suite(t, Unsupported_BMP_Tests)
+}
+
+@test
+bmp_test_known_bad :: proc(t: ^testing.T) {
+ run_bmp_suite(t, Known_Bad_BMP_Tests)
+}
+
+run_bmp_suite :: proc(t: ^testing.T, suite: []Test) {
+ for file in suite {
+ test_file := strings.concatenate({TEST_SUITE_PATH_BMP, "/", file.file, ".bmp"}, context.allocator)
+ defer delete(test_file)
+
+ for test in file.tests {
+ img, err := bmp.load(test_file, test.options)
+
+ passed := (test.expected_error == nil && err == nil) || (test.expected_error == err)
+ testing.expectf(t, passed, "%q failed to load with error %v.", file.file, err)
+
+ if err == nil { // No point in running the other tests if it didn't load.
+ pixels := bytes.buffer_to_bytes(&img.pixels)
+
+ dims := Dims{img.width, img.height, img.channels, img.depth}
+ testing.expectf(t, test.dims == dims, "%v has %v, expected: %v.", file.file, dims, test.dims)
+
+ img_hash := hash.crc32(pixels)
+				testing.expectf(t, test.hash == img_hash, "%v's hash is %08x, expected %08x with %v.", file.file, img_hash, test.hash, test.options)
+
+ // Save to BMP file in memory
+ buf: bytes.Buffer
+ save_err := bmp.save(&buf, img)
+ testing.expectf(t, save_err == nil, "expected saving to BMP in memory not to raise error, got %v", save_err)
+
+ // Reload BMP from memory
+ reload_img, reload_err := bmp.load(buf.buf[:])
+ testing.expectf(t, reload_err == nil, "expected reloading BMP from memory not to raise error, got %v", reload_err)
+
+ testing.expect(t, img.width == reload_img.width && img.height == reload_img.height, "expected saved BMP to have the same dimensions")
+				testing.expect(t, img.channels == reload_img.channels && img.depth == reload_img.depth, "expected saved BMP to have the same channel count and bit depth")
+
+ reload_pixels := bytes.buffer_to_bytes(&reload_img.pixels)
+ reload_hash := hash.crc32(reload_pixels)
+
+ testing.expectf(t, img_hash == reload_hash, "expected saved BMP to have the same pixel hash (%08x), got %08x", img_hash, reload_hash)
+
+ bytes.buffer_destroy(&buf)
+ bmp.destroy(reload_img)
}
+ bmp.destroy(img)
}
}
-
return
}
+
+@test
+will_it_blend :: proc(t: ^testing.T) {
+ Pixel :: image.RGB_Pixel
+ Pixel_16 :: image.RGB_Pixel_16
+
+ {
+ bg := Pixel{255, 255, 0}
+ fg := Pixel{ 0, 0, 255}
+
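+		// With fg = 0 and bg = 255 per channel, a linear alpha blend gives
+		// blend(0, a, 255) == 255 - a, so the red and blue channels should mirror each other.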
+ for a in 0..=255 {
+ blended := Pixel{
+ image.blend(fg.r, u8(a), bg.r),
+ image.blend(fg.g, u8(a), bg.g),
+ image.blend(fg.b, u8(a), bg.b),
+ }
+ testing.expectf(t, blended.r == bg.r - u8(a), "Expected blend(%v, %3d, %v) = %v, got %v", fg.r, a, bg.r, bg.r - u8(a), blended.r)
+ testing.expectf(t, blended.b == 255 - blended.r, "Expected blend(%v, %3d, %v) = %v, got %v", fg.b, a, bg.b, 255 - blended.r, blended.b)
+ }
+ }
+
+ {
+ bg := Pixel_16{65535, 65535, 0}
+ fg := Pixel_16{ 0, 0, 65535}
+
+ for a in 0..=65535 {
+ blended := Pixel_16{
+ image.blend(fg.r, u16(a), bg.r),
+ image.blend(fg.g, u16(a), bg.g),
+ image.blend(fg.b, u16(a), bg.b),
+ }
+ testing.expectf(t, blended.r == bg.r - u16(a), "Expected blend(%v, %3d, %v) = %v, got %v", fg.r, a, bg.r, bg.r - u16(a), blended.r)
+ testing.expectf(t, blended.b == 65535 - blended.r, "Expected blend(%v, %3d, %v) = %v, got %v", fg.b, a, bg.b, 65535 - blended.r, blended.b)
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/core/math/big/build.bat b/tests/core/math/big/build.bat
index ad199d775..54b715a4f 100644
--- a/tests/core/math/big/build.bat
+++ b/tests/core/math/big/build.bat
@@ -5,7 +5,7 @@ set TEST_ARGS=-fast-tests
set TEST_ARGS=-no-random
set TEST_ARGS=
set OUT_NAME=math_big_test_library.dll
-set COMMON=-build-mode:shared -show-timings -no-bounds-check -define:MATH_BIG_EXE=false -vet -strict-style
+set COMMON=-build-mode:shared -show-timings -no-bounds-check -define:MATH_BIG_EXE=false -vet -strict-style -define:ODIN_TEST_FANCY=false
echo ---
echo Running core:math/big tests
echo ---
diff --git a/tests/core/math/big/test.odin b/tests/core/math/big/test.odin
index e0762a66d..f35f0b72b 100644
--- a/tests/core/math/big/test.odin
+++ b/tests/core/math/big/test.odin
@@ -8,7 +8,7 @@
This file exports procedures for use with the test.py test suite.
*/
-package math_big_tests
+package test_core_math_big
/*
TODO: Write tests for `internal_*` and test reusing parameters with the public implementations.
diff --git a/tests/core/math/big/test_core_math_big.odin b/tests/core/math/big/test_core_math_big.odin
new file mode 100644
index 000000000..9a1e7b01b
--- /dev/null
+++ b/tests/core/math/big/test_core_math_big.odin
@@ -0,0 +1,37 @@
+package test_core_math_big
+
+import "core:math/big"
+import "core:testing"
+
+@(test)
+test_permutations_and_combinations :: proc(t: ^testing.T) {
+ {
+ calc, exp := &big.Int{}, &big.Int{}
+ defer big.destroy(calc, exp)
+ big.permutations_without_repetition(calc, 9000, 10)
+ big.int_atoi(exp, "3469387884476822917768284664849390080000")
+ equals, error := big.equals(calc, exp)
+ testing.expect(t, equals)
+ testing.expect_value(t, error, nil)
+ }
+
+ {
+ calc, exp := &big.Int{}, &big.Int{}
+ defer big.destroy(calc, exp)
+ big.combinations_with_repetition(calc, 9000, 10)
+ big.int_atoi(exp, "965678962435231708695393645683400")
+ equals, error := big.equals(calc, exp)
+ testing.expect(t, equals)
+ testing.expect_value(t, error, nil)
+ }
+
+ {
+ calc, exp := &big.Int{}, &big.Int{}
+ defer big.destroy(calc, exp)
+ big.combinations_without_repetition(calc, 9000, 10)
+ big.int_atoi(exp, "956070294443568925751842114431600")
+ equals, error := big.equals(calc, exp)
+ testing.expect(t, equals)
+ testing.expect_value(t, error, nil)
+ }
+}
diff --git a/tests/core/math/linalg/glsl/test_linalg_glsl_math.odin b/tests/core/math/linalg/glsl/test_linalg_glsl_math.odin
index cf91b8a97..6d4571b24 100644
--- a/tests/core/math/linalg/glsl/test_linalg_glsl_math.odin
+++ b/tests/core/math/linalg/glsl/test_linalg_glsl_math.odin
@@ -1,24 +1,10 @@
// Tests "linalg_glsl_math.odin" in "core:math/linalg/glsl".
-// Must be run with `-collection:tests=` flag, e.g.
-// ./odin run tests/core/math/linalg/glsl/test_linalg_glsl_math.odin -collection:tests=./tests
package test_core_math_linalg_glsl_math
import glsl "core:math/linalg/glsl"
-import "core:fmt"
import "core:math"
import "core:testing"
-import tc "tests:common"
-
-main :: proc() {
-
- t := testing.T{}
-
- test_fract_f32(&t)
- test_fract_f64(&t)
-
- tc.report(&t)
-}
@test
test_fract_f32 :: proc(t: ^testing.T) {
@@ -45,7 +31,7 @@ test_fract_f32 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = glsl.fract(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%v (%h)) -> %v (%h) != %v", i, #procedure, d.v, d.v, r, r, d.e))
+ testing.expectf(t, r == d.e, "%v (%h) -> %v (%h) != %v", d.v, d.v, r, r, d.e)
}
}
@@ -74,6 +60,6 @@ test_fract_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = glsl.fract(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%v (%h)) -> %v (%h) != %v", i, #procedure, d.v, d.v, r, r, d.e))
+ testing.expectf(t, r == d.e, "%v (%h) -> %v (%h) != %v", d.v, d.v, r, r, d.e)
}
-}
+}
\ No newline at end of file
diff --git a/tests/core/math/noise/test_core_math_noise.odin b/tests/core/math/noise/test_core_math_noise.odin
index a0360e695..f835cf58c 100644
--- a/tests/core/math/noise/test_core_math_noise.odin
+++ b/tests/core/math/noise/test_core_math_noise.odin
@@ -2,42 +2,6 @@ package test_core_math_noise
import "core:testing"
import "core:math/noise"
-import "core:fmt"
-import "core:os"
-
-TEST_count := 0
-TEST_fail := 0
-
-V2 :: noise.Vec2
-V3 :: noise.Vec3
-V4 :: noise.Vec4
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- noise_test(&t)
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
Test_Vector :: struct {
seed: i64,
@@ -51,6 +15,10 @@ Test_Vector :: struct {
},
}
+V2 :: noise.Vec2
+V3 :: noise.Vec3
+V4 :: noise.Vec4
+
SEED_1 :: 2324223232
SEED_2 :: 932466901
SEED_3 :: 9321
@@ -59,93 +27,78 @@ COORD_1 :: V4{ 242.0, 3433.0, 920.0, 222312.0}
COORD_2 :: V4{ 590.0, 9411.0, 5201.0, 942124256.0}
COORD_3 :: V4{12090.0, 19411.0, 81950901.0, 4224219.0}
-Noise_Tests := []Test_Vector{
- /*
- `noise_2d` tests.
- */
- {SEED_1, COORD_1.xy, 0.25010583, noise.noise_2d},
- {SEED_2, COORD_2.xy, -0.92513955, noise.noise_2d},
- {SEED_3, COORD_3.xy, 0.67327416, noise.noise_2d},
-
- /*
- `noise_2d_improve_x` tests.
- */
- {SEED_1, COORD_1.xy, 0.17074019, noise.noise_2d_improve_x},
- {SEED_2, COORD_2.xy, 0.72330487, noise.noise_2d_improve_x},
- {SEED_3, COORD_3.xy, -0.032076947, noise.noise_2d_improve_x},
-
- /*
- `noise_3d_improve_xy` tests.
- */
- {SEED_1, COORD_1.xyz, 0.14819577, noise.noise_3d_improve_xy},
- {SEED_2, COORD_2.xyz, -0.065345764, noise.noise_3d_improve_xy},
- {SEED_3, COORD_3.xyz, -0.37761918, noise.noise_3d_improve_xy},
-
- /*
- `noise_3d_improve_xz` tests.
- */
- {SEED_1, COORD_1.xyz, -0.50075006, noise.noise_3d_improve_xz},
- {SEED_2, COORD_2.xyz, -0.36039603, noise.noise_3d_improve_xz},
- {SEED_3, COORD_3.xyz, -0.3479203, noise.noise_3d_improve_xz},
-
- /*
- `noise_3d_fallback` tests.
- */
- {SEED_1, COORD_1.xyz, 0.6557345, noise.noise_3d_fallback},
- {SEED_2, COORD_2.xyz, 0.55452216, noise.noise_3d_fallback},
- {SEED_3, COORD_3.xyz, -0.26408964, noise.noise_3d_fallback},
-
- /*
- `noise_3d_fallback` tests.
- */
- {SEED_1, COORD_1.xyz, 0.6557345, noise.noise_3d_fallback},
- {SEED_2, COORD_2.xyz, 0.55452216, noise.noise_3d_fallback},
- {SEED_3, COORD_3.xyz, -0.26408964, noise.noise_3d_fallback},
-
- /*
- `noise_4d_improve_xyz_improve_xy` tests.
- */
- {SEED_1, COORD_1, 0.44929826, noise.noise_4d_improve_xyz_improve_xy},
- {SEED_2, COORD_2, -0.13270882, noise.noise_4d_improve_xyz_improve_xy},
- {SEED_3, COORD_3, 0.10298563, noise.noise_4d_improve_xyz_improve_xy},
-
- /*
- `noise_4d_improve_xyz_improve_xz` tests.
- */
- {SEED_1, COORD_1, -0.078514606, noise.noise_4d_improve_xyz_improve_xz},
- {SEED_2, COORD_2, -0.032157656, noise.noise_4d_improve_xyz_improve_xz},
- {SEED_3, COORD_3, -0.38607058, noise.noise_4d_improve_xyz_improve_xz},
-
- /*
- `noise_4d_improve_xyz` tests.
- */
- {SEED_1, COORD_1, -0.4442258, noise.noise_4d_improve_xyz},
- {SEED_2, COORD_2, 0.36822623, noise.noise_4d_improve_xyz},
- {SEED_3, COORD_3, 0.22628775, noise.noise_4d_improve_xyz},
-
- /*
- `noise_4d_fallback` tests.
- */
- {SEED_1, COORD_1, -0.14233987, noise.noise_4d_fallback},
- {SEED_2, COORD_2, 0.1354035, noise.noise_4d_fallback},
- {SEED_3, COORD_3, 0.14565045, noise.noise_4d_fallback},
+@(test)
+test_noise_2d :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1.xy, 0.25010583, noise.noise_2d})
+ test(t, {SEED_2, COORD_2.xy, -0.92513955, noise.noise_2d})
+ test(t, {SEED_3, COORD_3.xy, 0.67327416, noise.noise_2d})
+}
+
+@(test)
+test_noise_2d_improve_x :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1.xy, 0.17074019, noise.noise_2d_improve_x})
+ test(t, {SEED_2, COORD_2.xy, 0.72330487, noise.noise_2d_improve_x})
+ test(t, {SEED_3, COORD_3.xy, -0.032076947, noise.noise_2d_improve_x})
+}
+
+@(test)
+test_noise_3d_improve_xy :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1.xyz, 0.14819577, noise.noise_3d_improve_xy})
+ test(t, {SEED_2, COORD_2.xyz, -0.065345764, noise.noise_3d_improve_xy})
+ test(t, {SEED_3, COORD_3.xyz, -0.37761918, noise.noise_3d_improve_xy})
+}
+
+@(test)
+test_noise_3d_improve_xz :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1.xyz, -0.50075006, noise.noise_3d_improve_xz})
+ test(t, {SEED_2, COORD_2.xyz, -0.36039603, noise.noise_3d_improve_xz})
+ test(t, {SEED_3, COORD_3.xyz, -0.3479203, noise.noise_3d_improve_xz})
+}
+
+@(test)
+test_noise_3d_fallback :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1.xyz, 0.6557345, noise.noise_3d_fallback})
+ test(t, {SEED_2, COORD_2.xyz, 0.55452216, noise.noise_3d_fallback})
+ test(t, {SEED_3, COORD_3.xyz, -0.26408964, noise.noise_3d_fallback})
+}
+
+@(test)
+test_noise_4d_improve_xyz_improve_xy :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1, 0.44929826, noise.noise_4d_improve_xyz_improve_xy})
+ test(t, {SEED_2, COORD_2, -0.13270882, noise.noise_4d_improve_xyz_improve_xy})
+ test(t, {SEED_3, COORD_3, 0.10298563, noise.noise_4d_improve_xyz_improve_xy})
+}
+
+@(test)
+test_noise_4d_improve_xyz_improve_xz :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1, -0.078514606, noise.noise_4d_improve_xyz_improve_xz})
+ test(t, {SEED_2, COORD_2, -0.032157656, noise.noise_4d_improve_xyz_improve_xz})
+ test(t, {SEED_3, COORD_3, -0.38607058, noise.noise_4d_improve_xyz_improve_xz})
+}
+
+@(test)
+test_noise_4d_improve_xyz :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1, -0.4442258, noise.noise_4d_improve_xyz})
+ test(t, {SEED_2, COORD_2, 0.36822623, noise.noise_4d_improve_xyz})
+ test(t, {SEED_3, COORD_3, 0.22628775, noise.noise_4d_improve_xyz})
+}
+
+@(test)
+test_noise_4d_fallback :: proc(t: ^testing.T) {
+ test(t, {SEED_1, COORD_1, -0.14233987, noise.noise_4d_fallback})
+ test(t, {SEED_2, COORD_2, 0.1354035, noise.noise_4d_fallback})
+ test(t, {SEED_3, COORD_3, 0.14565045, noise.noise_4d_fallback})
}
-noise_test :: proc(t: ^testing.T) {
- for test in Noise_Tests {
- output: f32
-
- switch coord in test.coord {
- case V2:
- output = test.test_proc.(proc(_: i64, _: V2) -> f32)(test.seed, test.coord.(V2))
- case V3:
- output = test.test_proc.(proc(_: i64, _: V3) -> f32)(test.seed, test.coord.(V3))
- case V4:
- output = test.test_proc.(proc(_: i64, _: V4) -> f32)(test.seed, test.coord.(V4))
- }
-
- error := fmt.tprintf("Seed %v, Coord: %v, Expected: %3.8f. Got %3.8f", test.seed, test.coord, test.expected, output)
- expect(t, test.expected == output, error)
+test :: proc(t: ^testing.T, test: Test_Vector) {
+ output: f32
+ switch coord in test.coord {
+ case V2:
+ output = test.test_proc.(proc(_: i64, _: V2) -> f32)(test.seed, test.coord.(V2))
+ case V3:
+ output = test.test_proc.(proc(_: i64, _: V3) -> f32)(test.seed, test.coord.(V3))
+ case V4:
+ output = test.test_proc.(proc(_: i64, _: V4) -> f32)(test.seed, test.coord.(V4))
}
+ testing.expectf(t, test.expected == output, "Seed %v, Coord: %v, Expected: %3.8f. Got %3.8f", test.seed, test.coord, test.expected, output)
}
\ No newline at end of file
diff --git a/tests/core/math/test_core_math.odin b/tests/core/math/test_core_math.odin
index df989bff6..2a752e366 100644
--- a/tests/core/math/test_core_math.odin
+++ b/tests/core/math/test_core_math.odin
@@ -1,49 +1,8 @@
// Tests "math.odin" in "core:math".
-// Must be run with `-collection:tests=` flag, e.g.
-// ./odin run tests/core/math/test_core_math.odin -collection:tests=./tests
package test_core_math
-import "core:fmt"
import "core:math"
import "core:testing"
-import tc "tests:common"
-
-main :: proc() {
- t := testing.T{}
-
- test_classify_f16(&t)
- test_classify_f32(&t)
- test_classify_f64(&t)
-
- test_trunc_f16(&t)
- test_trunc_f32(&t)
- test_trunc_f64(&t)
-
- test_round_f16(&t)
- test_round_f32(&t)
- test_round_f64(&t)
-
- test_nan(&t)
- test_acos(&t)
- test_acosh(&t)
- test_asin(&t)
- test_asinh(&t)
- test_atan(&t)
- test_atanh(&t)
- test_atan2(&t)
- test_cos(&t)
- test_cosh(&t)
- test_sin(&t)
- test_sinh(&t)
- test_sqrt(&t)
- test_tan(&t)
- test_tanh(&t)
- test_large_cos(&t)
- test_large_sin(&t)
- test_large_tan(&t)
-
- tc.report(&t)
-}
@test
test_classify_f16 :: proc(t: ^testing.T) {
@@ -68,7 +27,7 @@ test_classify_f16 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.classify_f16(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %v != %v", d.v, r, d.e)
}
/* Check all subnormals (exponent 0, 10-bit significand non-zero) */
@@ -76,7 +35,7 @@ test_classify_f16 :: proc(t: ^testing.T) {
v := transmute(f16)i
r = math.classify_f16(v)
e :: math.Float_Class.Subnormal
- tc.expect(t, r == e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, v, r, e))
+ testing.expectf(t, r == e, "%h -> %v != %v", v, r, e)
}
}
@@ -103,7 +62,7 @@ test_classify_f32 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.classify_f32(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %v != %v", d.v, r, d.e)
}
}
@@ -130,7 +89,7 @@ test_classify_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.classify_f64(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %v != %v", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %v != %v", d.v, r, d.e)
}
}
@@ -175,16 +134,16 @@ test_trunc_f16 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.trunc_f16(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %h != %h", d.v, r, d.e)
}
v = math.SNAN_F16
r = math.trunc_f16(v)
- tc.expect(t, math.is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	testing.expectf(t, math.is_nan_f16(r), "%f -> %f != NaN", v, r)
v = math.QNAN_F16
r = math.trunc_f16(v)
- tc.expect(t, math.is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+	testing.expectf(t, math.is_nan_f16(r), "%f -> %f != NaN", v, r)
}
@test
@@ -237,16 +196,16 @@ test_trunc_f32 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.trunc_f32(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %h != %h", d.v, r, d.e)
}
v = math.SNAN_F32
r = math.trunc_f32(v)
- tc.expect(t, math.is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f32(r), "%f -> %f != NaN", v, r)
v = math.QNAN_F32
r = math.trunc_f32(v)
- tc.expect(t, math.is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f32(r), "%f -> %f != NaN", v, r)
}
@test
@@ -299,16 +258,16 @@ test_trunc_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.trunc_f64(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %h != %h", d.v, r, d.e)
}
v = math.SNAN_F64
r = math.trunc_f64(v)
- tc.expect(t, math.is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f64(r), "%f -> %f != NaN", v, r)
v = math.QNAN_F64
r = math.trunc_f64(v)
- tc.expect(t, math.is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f64(r), "%f -> %f != NaN", v, r)
}
@test
@@ -352,16 +311,16 @@ test_round_f16 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.round_f16(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %h != %h", d.v, r, d.e)
}
v = math.SNAN_F16
r = math.round_f16(v)
- tc.expect(t, math.is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f16(r), "%f -> %f != NaN", v, r)
v = math.QNAN_F16
r = math.round_f16(v)
- tc.expect(t, math.is_nan_f16(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f16(r), "%f -> %f != NaN", v, r)
}
@test
@@ -414,16 +373,16 @@ test_round_f32 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.round_f32(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
+		testing.expectf(t, r == d.e, "%h -> %h != %h", d.v, r, d.e)
}
v = math.SNAN_F32
r = math.round_f32(v)
- tc.expect(t, math.is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f32(r), "%f -> %f != NaN", v, r)
v = math.QNAN_F32
r = math.round_f32(v)
- tc.expect(t, math.is_nan_f32(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f32(r), "%f -> %f != NaN", v, r)
}
@test
@@ -476,16 +435,16 @@ test_round_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r = math.round_f64(d.v)
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(%h) -> %h != %h", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, r == d.e, "%h -> %h != %h", d.v, r, d.e)
}
v = math.SNAN_F64
r = math.round_f64(v)
- tc.expect(t, math.is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f64(r), "%f -> %f != NaN", v, r)
v = math.QNAN_F64
r = math.round_f64(v)
- tc.expect(t, math.is_nan_f64(r), fmt.tprintf("%s(%f) -> %f != NaN", #procedure, v, r))
+ testing.expectf(t, math.is_nan_f64(r), "%f -> %f != NaN", v, r)
}
@@ -1033,17 +992,17 @@ tolerance :: proc(a, b, e: f64) -> bool {
}
close :: proc(t: ^testing.T, a, b: f64, loc := #caller_location) -> bool {
ok := tolerance(a, b, 1e-9)
- // tc.expect(t, ok, fmt.tprintf("%.15g is not close to %.15g", a, b), loc)
+ testing.expectf(t, ok, "%.15g is not close to %.15g", a, b, loc=loc)
return ok
}
veryclose :: proc(t: ^testing.T, a, b: f64, loc := #caller_location) -> bool {
ok := tolerance(a, b, 4e-14)
- // tc.expect(t, ok, fmt.tprintf("%.15g is not veryclose to %.15g", a, b), loc)
+ testing.expectf(t, ok, "%.15g is not veryclose to %.15g", a, b, loc=loc)
return ok
}
soclose :: proc(t: ^testing.T, a, b, e: f64, loc := #caller_location) -> bool {
ok := tolerance(a, b, e)
- // tc.expect(t, ok, fmt.tprintf("%.15g is not soclose to %.15g", a, b), loc)
+ testing.expectf(t, ok, "%.15g is not soclose to %.15g", a, b, loc=loc)
return ok
}
alike :: proc(t: ^testing.T, a, b: f64, loc := #caller_location) -> bool {
@@ -1054,34 +1013,34 @@ alike :: proc(t: ^testing.T, a, b: f64, loc := #caller_location) -> bool {
case a == b:
ok = math.signbit(a) == math.signbit(b)
}
- // tc.expect(t, ok, fmt.tprintf("%.15g is not alike to %.15g", a, b), loc)
+ testing.expectf(t, ok, "%.15g is not alike to %.15g", a, b, loc=loc)
return ok
}
@test
-test_nan :: proc(t: ^testing.T) {
+test_nan32 :: proc(t: ^testing.T) {
+ float32 := f32(NaN)
+ equal := float32 == float32
+ testing.expectf(t, !equal, "float32(NaN) is %.15g, expected NaN", float32)
+}
+
+@test
+test_nan64 :: proc(t: ^testing.T) {
float64 := NaN
- if float64 == float64 {
- tc.errorf(t, "NaN returns %.15g, expected NaN", float64)
- }
- float32 := f32(float64)
- if float32 == float32 {
- tc.errorf(t, "float32(NaN) is %.15g, expected NaN", float32)
- }
+ equal := float64 == float64
+ testing.expectf(t, !equal, "NaN returns %.15g, expected NaN", float64)
}
@test
test_acos :: proc(t: ^testing.T) {
for _, i in vf {
a := vf[i] / 10
- if f := math.acos(a); !close(t, acos[i], f) {
- tc.errorf(t, "math.acos(%.15g) = %.15g, want %.15g", a, f, acos[i])
- }
+ f := math.acos(a)
+ testing.expectf(t, close(t, acos[i], f), "math.acos(%.15g) = %.15g, want %.15g", a, f, acos[i])
}
for _, i in vfacos_sc {
- if f := math.acos(vfacos_sc[i]); !alike(t, acos_sc[i], f) {
- tc.errorf(t, "math.acos(%.15g) = %.15g, want %.15g", vfacos_sc[i], f, acos_sc[i])
- }
+ f := math.acos(vfacos_sc[i])
+ testing.expectf(t, alike(t, acos_sc[i], f), "math.acos(%.15g) = %.15g, want %.15g", vfacos_sc[i], f, acos_sc[i])
}
}
@@ -1089,14 +1048,12 @@ test_acos :: proc(t: ^testing.T) {
test_acosh :: proc(t: ^testing.T) {
for _, i in vf {
a := 1 + abs(vf[i])
- if f := math.acosh(a); !veryclose(t, acosh[i], f) {
- tc.errorf(t, "math.acosh(%.15g) = %.15g, want %.15g", a, f, acosh[i])
- }
+ f := math.acosh(a)
+ testing.expectf(t, veryclose(t, acosh[i], f), "math.acosh(%.15g) = %.15g, want %.15g", a, f, acosh[i])
}
for _, i in vfacosh_sc {
- if f := math.acosh(vfacosh_sc[i]); !alike(t, acosh_sc[i], f) {
- tc.errorf(t, "math.acosh(%.15g) = %.15g, want %.15g", vfacosh_sc[i], f, acosh_sc[i])
- }
+ f := math.acosh(vfacosh_sc[i])
+ testing.expectf(t, alike(t, acosh_sc[i], f), "math.acosh(%.15g) = %.15g, want %.15g", vfacosh_sc[i], f, acosh_sc[i])
}
}
@@ -1104,42 +1061,36 @@ test_acosh :: proc(t: ^testing.T) {
test_asin :: proc(t: ^testing.T) {
for _, i in vf {
a := vf[i] / 10
- if f := math.asin(a); !veryclose(t, asin[i], f) {
- tc.errorf(t, "math.asin(%.15g) = %.15g, want %.15g", a, f, asin[i])
- }
+ f := math.asin(a)
+ testing.expectf(t, veryclose(t, asin[i], f), "math.asin(%.15g) = %.15g, want %.15g", a, f, asin[i])
}
for _, i in vfasin_sc {
- if f := math.asin(vfasin_sc[i]); !alike(t, asin_sc[i], f) {
- tc.errorf(t, "math.asin(%.15g) = %.15g, want %.15g", vfasin_sc[i], f, asin_sc[i])
- }
+ f := math.asin(vfasin_sc[i])
+ testing.expectf(t, alike(t, asin_sc[i], f), "math.asin(%.15g) = %.15g, want %.15g", vfasin_sc[i], f, asin_sc[i])
}
}
@test
test_asinh :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.asinh(vf[i]); !veryclose(t, asinh[i], f) {
- tc.errorf(t, "math.asinh(%.15g) = %.15g, want %.15g", vf[i], f, asinh[i])
- }
+ f := math.asinh(vf[i])
+ testing.expectf(t, veryclose(t, asinh[i], f), "math.asinh(%.15g) = %.15g, want %.15g", vf[i], f, asinh[i])
}
for _, i in vfasinh_sc {
- if f := math.asinh(vfasinh_sc[i]); !alike(t, asinh_sc[i], f) {
- tc.errorf(t, "math.asinh(%.15g) = %.15g, want %.15g", vfasinh_sc[i], f, asinh_sc[i])
- }
+ f := math.asinh(vfasinh_sc[i])
+ testing.expectf(t, alike(t, asinh_sc[i], f), "math.asinh(%.15g) = %.15g, want %.15g", vfasinh_sc[i], f, asinh_sc[i])
}
}
@test
test_atan :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.atan(vf[i]); !veryclose(t, atan[i], f) {
- tc.errorf(t, "math.atan(%.15g) = %.15g, want %.15g", vf[i], f, atan[i])
- }
+ f := math.atan(vf[i])
+ testing.expectf(t, veryclose(t, atan[i], f), "math.atan(%.15g) = %.15g, want %.15g", vf[i], f, atan[i])
}
for _, i in vfatan_sc {
- if f := math.atan(vfatan_sc[i]); !alike(t, atan_sc[i], f) {
- tc.errorf(t, "math.atan(%.15g) = %.15g, want %.15g", vfatan_sc[i], f, atan_sc[i])
- }
+ f := math.atan(vfatan_sc[i])
+ testing.expectf(t, alike(t, atan_sc[i], f), "math.atan(%.15g) = %.15g, want %.15g", vfatan_sc[i], f, atan_sc[i])
}
}
@@ -1147,84 +1098,72 @@ test_atan :: proc(t: ^testing.T) {
test_atanh :: proc(t: ^testing.T) {
for _, i in vf {
a := vf[i] / 10
- if f := math.atanh(a); !veryclose(t, atanh[i], f) {
- tc.errorf(t, "math.atanh(%.15g) = %.15g, want %.15g", a, f, atanh[i])
- }
+ f := math.atanh(a)
+ testing.expectf(t, veryclose(t, atanh[i], f), "math.atanh(%.15g) = %.15g, want %.15g", a, f, atanh[i])
}
for _, i in vfatanh_sc {
- if f := math.atanh(vfatanh_sc[i]); !alike(t, atanh_sc[i], f) {
- tc.errorf(t, "math.atanh(%.15g) = %.15g, want %.15g", vfatanh_sc[i], f, atanh_sc[i])
- }
+ f := math.atanh(vfatanh_sc[i])
+ testing.expectf(t, alike(t, atanh_sc[i], f), "math.atanh(%.15g) = %.15g, want %.15g", vfatanh_sc[i], f, atanh_sc[i])
}
}
@test
test_atan2 :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.atan2(10, vf[i]); !veryclose(t, atan2[i], f) {
- tc.errorf(t, "math.atan2(10, %.15g) = %.15g, want %.15g", vf[i], f, atan2[i])
- }
+ f := math.atan2(10, vf[i])
+ testing.expectf(t, veryclose(t, atan2[i], f), "math.atan2(10, %.15g) = %.15g, want %.15g", vf[i], f, atan2[i])
}
for _, i in vfatan2_sc {
- if f := math.atan2(vfatan2_sc[i][0], vfatan2_sc[i][1]); !alike(t, atan2_sc[i], f) {
- tc.errorf(t, "math.atan2(%.15g, %.15g) = %.15g, want %.15g", vfatan2_sc[i][0], vfatan2_sc[i][1], f, atan2_sc[i])
- }
+ f := math.atan2(vfatan2_sc[i][0], vfatan2_sc[i][1])
+ testing.expectf(t, alike(t, atan2_sc[i], f), "math.atan2(%.15g, %.15g) = %.15g, want %.15g", vfatan2_sc[i][0], vfatan2_sc[i][1], f, atan2_sc[i])
}
}
@test
test_cos :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.cos(vf[i]); !veryclose(t, cos[i], f) {
- tc.errorf(t, "math.cos(%.15g) = %.15g, want %.15g", vf[i], f, cos[i])
- }
+ f := math.cos(vf[i])
+ testing.expectf(t, veryclose(t, cos[i], f), "math.cos(%.15g) = %.15g, want %.15g", vf[i], f, cos[i])
}
for _, i in vfcos_sc {
- if f := math.cos(vfcos_sc[i]); !alike(t, cos_sc[i], f) {
- tc.errorf(t, "math.cos(%.15g) = %.15g, want %.15g", vfcos_sc[i], f, cos_sc[i])
- }
+ f := math.cos(vfcos_sc[i])
+ testing.expectf(t, alike(t, cos_sc[i], f), "math.cos(%.15g) = %.15g, want %.15g", vfcos_sc[i], f, cos_sc[i])
}
}
@test
test_cosh :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.cosh(vf[i]); !close(t, cosh[i], f) {
- tc.errorf(t, "math.cosh(%.15g) = %.15g, want %.15g", vf[i], f, cosh[i])
- }
+ f := math.cosh(vf[i])
+ testing.expectf(t, close(t, cosh[i], f), "math.cosh(%.15g) = %.15g, want %.15g", vf[i], f, cosh[i])
}
for _, i in vfcosh_sc {
- if f := math.cosh(vfcosh_sc[i]); !alike(t, cosh_sc[i], f) {
- tc.errorf(t, "math.cosh(%.15g) = %.15g, want %.15g", vfcosh_sc[i], f, cosh_sc[i])
- }
+ f := math.cosh(vfcosh_sc[i])
+ testing.expectf(t, alike(t, cosh_sc[i], f), "math.cosh(%.15g) = %.15g, want %.15g", vfcosh_sc[i], f, cosh_sc[i])
}
}
@test
test_sin :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.sin(vf[i]); !veryclose(t, sin[i], f) {
- tc.errorf(t, "math.sin(%.15g) = %.15g, want %.15g", vf[i], f, sin[i])
- }
+ f := math.sin(vf[i])
+ testing.expectf(t, veryclose(t, sin[i], f), "math.sin(%.15g) = %.15g, want %.15g", vf[i], f, sin[i])
}
for _, i in vfsin_sc {
- if f := math.sin(vfsin_sc[i]); !alike(t, sin_sc[i], f) {
- tc.errorf(t, "math.sin(%.15g) = %.15g, want %.15g", vfsin_sc[i], f, sin_sc[i])
- }
+ f := math.sin(vfsin_sc[i])
+ testing.expectf(t, alike(t, sin_sc[i], f), "math.sin(%.15g) = %.15g, want %.15g", vfsin_sc[i], f, sin_sc[i])
}
}
@test
test_sinh :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.sinh(vf[i]); !close(t, sinh[i], f) {
- tc.errorf(t, "math.sinh(%.15g) = %.15g, want %.15g", vf[i], f, sinh[i])
- }
+ f := math.sinh(vf[i])
+ testing.expectf(t, close(t, sinh[i], f), "math.sinh(%.15g) = %.15g, want %.15g", vf[i], f, sinh[i])
}
for _, i in vfsinh_sc {
- if f := math.sinh(vfsinh_sc[i]); !alike(t, sinh_sc[i], f) {
- tc.errorf(t, "math.sinh(%.15g) = %.15g, want %.15g", vfsinh_sc[i], f, sinh_sc[i])
- }
+ f := math.sinh(vfsinh_sc[i])
+ testing.expectf(t, alike(t, sinh_sc[i], f), "math.sinh(%.15g) = %.15g, want %.15g", vfsinh_sc[i], f, sinh_sc[i])
}
}
@@ -1232,38 +1171,33 @@ test_sinh :: proc(t: ^testing.T) {
test_sqrt :: proc(t: ^testing.T) {
for _, i in vf {
a := abs(vf[i])
- if f := math.sqrt(a); !veryclose(t, sqrt[i], f) {
- tc.errorf(t, "math.sqrt(%.15g) = %.15g, want %.15g", a, f, sqrt[i])
- }
+ f := math.sqrt(a)
+ testing.expectf(t, veryclose(t, sqrt[i], f), "math.sqrt(%.15g) = %.15g, want %.15g", a, f, sqrt[i])
}
}
@test
test_tan :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.tan(vf[i]); !veryclose(t, tan[i], f) {
- tc.errorf(t, "math.tan(%.15g) = %.15g, want %.15g", vf[i], f, tan[i])
- }
+ f := math.tan(vf[i])
+ testing.expectf(t, veryclose(t, tan[i], f), "math.tan(%.15g) = %.15g, want %.15g", vf[i], f, tan[i])
}
// same special cases as Sin
for _, i in vfsin_sc {
- if f := math.tan(vfsin_sc[i]); !alike(t, sin_sc[i], f) {
- tc.errorf(t, "math.tan(%.15g) = %.15g, want %.15g", vfsin_sc[i], f, sin_sc[i])
- }
+ f := math.tan(vfsin_sc[i])
+ testing.expectf(t, alike(t, sin_sc[i], f), "math.tan(%.15g) = %.15g, want %.15g", vfsin_sc[i], f, sin_sc[i])
}
}
@test
test_tanh :: proc(t: ^testing.T) {
for _, i in vf {
- if f := math.tanh(vf[i]); !veryclose(t, tanh[i], f) {
- tc.errorf(t, "math.tanh(%.15g) = %.15g, want %.15g", vf[i], f, tanh[i])
- }
+ f := math.tanh(vf[i])
+ testing.expectf(t, veryclose(t, tanh[i], f), "math.tanh(%.15g) = %.15g, want %.15g", vf[i], f, tanh[i])
}
for _, i in vftanh_sc {
- if f := math.tanh(vftanh_sc[i]); !alike(t, tanh_sc[i], f) {
- tc.errorf(t, "math.tanh(%.15g) = %.15g, want %.15g", vftanh_sc[i], f, tanh_sc[i])
- }
+ f := math.tanh(vftanh_sc[i])
+ testing.expectf(t, alike(t, tanh_sc[i], f), "math.tanh(%.15g) = %.15g, want %.15g", vftanh_sc[i], f, tanh_sc[i])
}
}
@@ -1273,9 +1207,7 @@ test_large_cos :: proc(t: ^testing.T) {
for _, i in vf {
f1 := cosLarge[i]
f2 := math.cos(vf[i] + large)
- if !close(t, f1, f2) {
- tc.errorf(t, "math.cos(%.15g) = %.15g, want %.15g", vf[i]+large, f2, f1)
- }
+ testing.expectf(t, close(t, f1, f2), "math.cos(%.15g) = %.15g, want %.15g", vf[i]+large, f2, f1)
}
}
@@ -1285,9 +1217,7 @@ test_large_sin :: proc(t: ^testing.T) {
for _, i in vf {
f1 := sinLarge[i]
f2 := math.sin(vf[i] + large)
- if !close(t, f1, f2) {
- tc.errorf(t, "math.sin(%.15g) = %.15g, want %.15g", vf[i]+large, f2, f1)
- }
+ testing.expectf(t, close(t, f1, f2), "math.sin(%.15g) = %.15g, want %.15g", vf[i]+large, f2, f1)
}
}
@@ -1297,8 +1227,6 @@ test_large_tan :: proc(t: ^testing.T) {
for _, i in vf {
f1 := tanLarge[i]
f2 := math.tan(vf[i] + large)
- if !close(t, f1, f2) {
- tc.errorf(t, "math.tan(%.15g) = %.15g, want %.15g", vf[i]+large, f2, f1)
- }
+ testing.expectf(t, close(t, f1, f2), "math.tan(%.15g) = %.15g, want %.15g", vf[i]+large, f2, f1)
}
}
\ No newline at end of file
diff --git a/tests/core/mem/test_core_mem.odin b/tests/core/mem/test_core_mem.odin
new file mode 100644
index 000000000..d282ae1fd
--- /dev/null
+++ b/tests/core/mem/test_core_mem.odin
@@ -0,0 +1,41 @@
+package test_core_mem
+
+import "core:mem/tlsf"
+import "core:testing"
+
+@test
+test_tlsf_bitscan :: proc(t: ^testing.T) {
+ Vector :: struct {
+ op: enum{ffs, fls, fls_uint},
+ v: union{u32, uint},
+ exp: i32,
+ }
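+	// `ffs` finds the index of the lowest set bit, `fls`/`fls_uint` the highest; -1 means no bits are set.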
+ Tests := []Vector{
+ {.ffs, u32 (0x0000_0000_0000_0000), -1},
+ {.ffs, u32 (0x0000_0000_0000_0000), -1},
+ {.fls, u32 (0x0000_0000_0000_0000), -1},
+ {.ffs, u32 (0x0000_0000_0000_0001), 0},
+ {.fls, u32 (0x0000_0000_0000_0001), 0},
+ {.ffs, u32 (0x0000_0000_8000_0000), 31},
+ {.ffs, u32 (0x0000_0000_8000_8000), 15},
+ {.fls, u32 (0x0000_0000_8000_0008), 31},
+ {.fls, u32 (0x0000_0000_7FFF_FFFF), 30},
+ {.fls_uint, uint(0x0000_0000_8000_0000), 31},
+ {.fls_uint, uint(0x0000_0001_0000_0000), 32},
+ {.fls_uint, uint(0xffff_ffff_ffff_ffff), 63},
+ }
+
+ for test in Tests {
+ switch test.op {
+ case .ffs:
+ res := tlsf.ffs(test.v.?)
+ testing.expectf(t, res == test.exp, "Expected tlsf.ffs(0x%08x) == %v, got %v", test.v, test.exp, res)
+ case .fls:
+ res := tlsf.fls(test.v.?)
+ testing.expectf(t, res == test.exp, "Expected tlsf.fls(0x%08x) == %v, got %v", test.v, test.exp, res)
+ case .fls_uint:
+ res := tlsf.fls_uint(test.v.?)
+ testing.expectf(t, res == test.exp, "Expected tlsf.fls_uint(0x%16x) == %v, got %v", test.v, test.exp, res)
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/core/net/test_core_net.odin b/tests/core/net/test_core_net.odin
index 9df03414c..f806463e9 100644
--- a/tests/core/net/test_core_net.odin
+++ b/tests/core/net/test_core_net.odin
@@ -8,74 +8,16 @@
A test suite for `core:net`
*/
+//+build !netbsd !freebsd !openbsd
package test_core_net
import "core:testing"
-import "core:mem"
-import "core:fmt"
import "core:net"
import "core:strconv"
import "core:sync"
import "core:time"
import "core:thread"
-import "core:os"
-
-_, _ :: time, thread
-
-TEST_count := 0
-TEST_fail := 0
-
-t := &testing.T{}
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-_tracking_allocator := mem.Tracking_Allocator{}
-
-print_tracking_allocator_report :: proc() {
- for _, leak in _tracking_allocator.allocation_map {
- fmt.printf("%v leaked %v bytes\n", leak.location, leak.size)
- }
-
- for bf in _tracking_allocator.bad_free_array {
- fmt.printf("%v allocation %p was freed badly\n", bf.location, bf.memory)
- }
-}
-
-main :: proc() {
- mem.tracking_allocator_init(&_tracking_allocator, context.allocator)
- context.allocator = mem.tracking_allocator(&_tracking_allocator)
-
- address_parsing_test(t)
-
- tcp_tests(t)
-
- split_url_test(t)
- join_url_test(t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
-
- print_tracking_allocator_report()
-
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
+import "core:fmt"
@test
address_parsing_test :: proc(t: ^testing.T) {
@@ -89,127 +31,66 @@ address_parsing_test :: proc(t: ^testing.T) {
}
valid := len(vector.binstr) > 0
-
- fmt.printf("%v %v\n", kind, vector.input)
-
- msg := "-set a proper message-"
switch vector.family {
case .IP4, .IP4_Alt:
- /*
- Does `net.parse_ip4_address` think we parsed the address properly?
- */
+ // Does `net.parse_ip4_address` think we parsed the address properly?
non_decimal := vector.family == .IP4_Alt
+ any_addr := net.parse_address(vector.input, non_decimal)
+ parsed_ok := any_addr != nil
+ parsed: net.IP4_Address
- any_addr := net.parse_address(vector.input, non_decimal)
- parsed_ok := any_addr != nil
- parsed: net.IP4_Address
-
- /*
- Ensure that `parse_address` doesn't parse IPv4 addresses into IPv6 addreses by mistake.
- */
+			// Ensure that `parse_address` doesn't parse IPv4 addresses into IPv6 addresses by mistake.
switch addr in any_addr {
case net.IP4_Address:
parsed = addr
case net.IP6_Address:
parsed_ok = false
- msg = fmt.tprintf("parse_address mistook %v as IPv6 address %04x", vector.input, addr)
- expect(t, false, msg)
+ testing.expectf(t, false, "parse_address mistook %v as IPv6 address %04x", vector.input, addr)
}
if !parsed_ok && valid {
- msg = fmt.tprintf("parse_ip4_address failed to parse %v, expected %v", vector.input, binstr_to_address(vector.binstr))
+ testing.expectf(t, parsed_ok == valid, "parse_ip4_address failed to parse %v, expected %v", vector.input, binstr_to_address(t, vector.binstr))
} else if parsed_ok && !valid {
- msg = fmt.tprintf("parse_ip4_address parsed %v into %v, expected failure", vector.input, parsed)
+ testing.expectf(t, parsed_ok == valid, "parse_ip4_address parsed %v into %v, expected failure", vector.input, parsed)
}
- expect(t, parsed_ok == valid, msg)
if valid && parsed_ok {
actual_binary := address_to_binstr(parsed)
- msg = fmt.tprintf("parse_ip4_address parsed %v into %v, expected %v", vector.input, actual_binary, vector.binstr)
- expect(t, actual_binary == vector.binstr, msg)
+ testing.expectf(t, actual_binary == vector.binstr, "parse_ip4_address parsed %v into %v, expected %v", vector.input, actual_binary, vector.binstr)
- /*
- Do we turn an address back into the same string properly?
- No point in testing the roundtrip if the first part failed.
- */
+ // Do we turn an address back into the same string properly? No point in testing the roundtrip if the first part failed.
if len(vector.output) > 0 && actual_binary == vector.binstr {
stringified := net.address_to_string(parsed)
- msg = fmt.tprintf("address_to_string turned %v into %v, expected %v", parsed, stringified, vector.output)
- expect(t, stringified == vector.output, msg)
+ testing.expectf(t, stringified == vector.output, "address_to_string turned %v into %v, expected %v", parsed, stringified, vector.output)
}
}
case .IP6:
- /*
- Do we parse the address properly?
- */
+ // Do we parse the address properly?
parsed, parsed_ok := net.parse_ip6_address(vector.input)
if !parsed_ok && valid {
- msg = fmt.tprintf("parse_ip6_address failed to parse %v, expected %04x", vector.input, binstr_to_address(vector.binstr))
+ testing.expectf(t, parsed_ok == valid, "parse_ip6_address failed to parse %v, expected %04x", vector.input, binstr_to_address(t, vector.binstr))
} else if parsed_ok && !valid {
- msg = fmt.tprintf("parse_ip6_address parsed %v into %04x, expected failure", vector.input, parsed)
+ testing.expectf(t, parsed_ok == valid, "parse_ip6_address parsed %v into %04x, expected failure", vector.input, parsed)
}
- expect(t, parsed_ok == valid, msg)
if valid && parsed_ok {
actual_binary := address_to_binstr(parsed)
- msg = fmt.tprintf("parse_ip6_address parsed %v into %v, expected %v", vector.input, actual_binary, vector.binstr)
- expect(t, actual_binary == vector.binstr, msg)
+ testing.expectf(t, actual_binary == vector.binstr, "parse_ip6_address parsed %v into %v, expected %v", vector.input, actual_binary, vector.binstr)
- /*
- Do we turn an address back into the same string properly?
- No point in testing the roundtrip if the first part failed.
- */
+ // Do we turn an address back into the same string properly? No point in testing the roundtrip if the first part failed.
if len(vector.output) > 0 && actual_binary == vector.binstr {
stringified := net.address_to_string(parsed)
- msg = fmt.tprintf("address_to_string turned %v into %v, expected %v", parsed, stringified, vector.output)
- expect(t, stringified == vector.output, msg)
+ testing.expectf(t, stringified == vector.output, "address_to_string turned %v into %v, expected %v", parsed, stringified, vector.output)
}
}
}
}
}
-address_to_binstr :: proc(address: net.Address) -> (binstr: string) {
- switch t in address {
- case net.IP4_Address:
- b := transmute(u32be)t
- return fmt.tprintf("%08x", b)
- case net.IP6_Address:
- b := transmute(u128be)t
- return fmt.tprintf("%32x", b)
- case:
- return ""
- }
- unreachable()
-}
-
-binstr_to_address :: proc(binstr: string) -> (address: net.Address) {
- switch len(binstr) {
- case 8: // IPv4
- a, ok := strconv.parse_u64_of_base(binstr, 16)
- expect(t, ok, "failed to parse test case bin string")
-
- ipv4 := u32be(a)
- return net.IP4_Address(transmute([4]u8)ipv4)
-
-
- case 32: // IPv6
- a, ok := strconv.parse_u128_of_base(binstr, 16)
- expect(t, ok, "failed to parse test case bin string")
-
- ipv4 := u128be(a)
- return net.IP6_Address(transmute([8]u16be)ipv4)
-
- case 0:
- return nil
- }
- panic("Invalid test case")
-}
-
Kind :: enum {
IP4, // Decimal IPv4
IP4_Alt, // Non-decimal address
@@ -223,10 +104,7 @@ IP_Address_Parsing_Test_Vector :: struct {
// Input address to try and parse.
input: string,
- /*
- Hexadecimal representation of the expected numeric value of the address.
- Zero length means input is invalid and the parser should report failure.
- */
+ // Hexadecimal representation of the expected numeric value of the address. Zero length means input is invalid and the parser should report failure.
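+	// For example, 127.0.0.1 would be stored here as "7f000001".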
binstr: string,
// Expected `address_to_string` output, if a valid input and this string is non-empty.
@@ -335,38 +213,30 @@ IP_Address_Parsing_Test_Vectors :: []IP_Address_Parsing_Test_Vector{
{ .IP6, "c0a8", "", ""},
}
-tcp_tests :: proc(t: ^testing.T) {
- fmt.println("Testing two servers trying to bind to the same endpoint...")
- two_servers_binding_same_endpoint(t)
- fmt.println("Testing client connecting to a closed port...")
- client_connects_to_closed_port(t)
- fmt.println("Testing client sending server data...")
- client_sends_server_data(t)
-}
-
-ENDPOINT := net.Endpoint{
- net.IP4_Address{127, 0, 0, 1},
- 9999,
-}
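+// Each test binds its own port so the tests can run together without clashing on a shared endpoint.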
+ENDPOINT_TWO_SERVERS := net.Endpoint{net.IP4_Address{127, 0, 0, 1}, 9991}
+ENDPOINT_CLOSED_PORT := net.Endpoint{net.IP4_Address{127, 0, 0, 1}, 9992}
+ENDPOINT_SERVER_SENDS := net.Endpoint{net.IP4_Address{127, 0, 0, 1}, 9993}
@(test)
two_servers_binding_same_endpoint :: proc(t: ^testing.T) {
- skt1, err1 := net.listen_tcp(ENDPOINT)
+ skt1, err1 := net.listen_tcp(ENDPOINT_TWO_SERVERS)
defer net.close(skt1)
- skt2, err2 := net.listen_tcp(ENDPOINT)
+ skt2, err2 := net.listen_tcp(ENDPOINT_TWO_SERVERS)
defer net.close(skt2)
- expect(t, err1 == nil, "expected first server binding to endpoint to do so without error")
- expect(t, err2 == net.Bind_Error.Address_In_Use, "expected second server to bind to an endpoint to return .Address_In_Use")
+ testing.expect(t, err1 == nil, "expected first server binding to endpoint to do so without error")
+ testing.expect(t, err2 == net.Bind_Error.Address_In_Use, "expected second server to bind to an endpoint to return .Address_In_Use")
}
@(test)
client_connects_to_closed_port :: proc(t: ^testing.T) {
- skt, err := net.dial_tcp(ENDPOINT)
+
+ skt, err := net.dial_tcp(ENDPOINT_CLOSED_PORT)
defer net.close(skt)
- expect(t, err == net.Dial_Error.Refused, "expected dial of a closed endpoint to return .Refused")
+ testing.expect(t, err == net.Dial_Error.Refused, "expected dial of a closed endpoint to return .Refused")
}
+
@(test)
client_sends_server_data :: proc(t: ^testing.T) {
CONTENT: string: "Hellope!"
@@ -390,8 +260,8 @@ client_sends_server_data :: proc(t: ^testing.T) {
defer sync.wait_group_done(r.wg)
- if r.skt, r.err = net.dial_tcp(ENDPOINT); r.err != nil {
- log(r.t, r.err)
+ if r.skt, r.err = net.dial_tcp(ENDPOINT_SERVER_SENDS); r.err != nil {
+ testing.expectf(r.t, false, "[tcp_client:dial_tcp] %v", r.err)
return
}
@@ -405,19 +275,17 @@ client_sends_server_data :: proc(t: ^testing.T) {
defer sync.wait_group_done(r.wg)
- log(r.t, "tcp_server listen")
- if r.skt, r.err = net.listen_tcp(ENDPOINT); r.err != nil {
+ if r.skt, r.err = net.listen_tcp(ENDPOINT_SERVER_SENDS); r.err != nil {
sync.wait_group_done(r.wg)
- log(r.t, r.err)
+ testing.expectf(r.t, false, "[tcp_server:listen_tcp] %v", r.err)
return
}
sync.wait_group_done(r.wg)
- log(r.t, "tcp_server accept")
client: net.TCP_Socket
if client, _, r.err = net.accept_tcp(r.skt.(net.TCP_Socket)); r.err != nil {
- log(r.t, r.err)
+ testing.expectf(r.t, false, "[tcp_server:accept_tcp] %v", r.err)
return
}
defer net.close(client)
@@ -437,10 +305,7 @@ client_sends_server_data :: proc(t: ^testing.T) {
thread_data[0].wg = &wg
thread_data[0].tid = thread.create_and_start_with_data(&thread_data[0], tcp_server, context)
- log(t, "waiting for server to start listening")
sync.wait_group_wait(&wg)
- log(t, "starting up client")
-
sync.wait_group_add(&wg, 2)
thread_data[1].t = t
@@ -454,20 +319,15 @@ client_sends_server_data :: proc(t: ^testing.T) {
net.close(thread_data[1].skt)
thread.destroy(thread_data[1].tid)
}
-
- log(t, "waiting for threads to finish")
sync.wait_group_wait(&wg)
- log(t, "threads finished")
okay := thread_data[0].err == nil && thread_data[1].err == nil
- msg := fmt.tprintf("Expected client and server to return `nil`, got %v and %v", thread_data[0].err, thread_data[1].err)
- expect(t, okay, msg)
+ testing.expectf(t, okay, "Expected client and server to return `nil`, got %v and %v", thread_data[0].err, thread_data[1].err)
received := string(thread_data[0].data[:thread_data[0].length])
okay = received == CONTENT
- msg = fmt.tprintf("Expected client to send \"{}\", got \"{}\"", CONTENT, received)
- expect(t, okay, msg)
+ testing.expectf(t, okay, "Expected client to send \"{}\", got \"{}\"", CONTENT, received)
}
URL_Test :: struct {
@@ -559,22 +419,15 @@ split_url_test :: proc(t: ^testing.T) {
delete(test.queries)
}
- msg := fmt.tprintf("Expected `net.split_url` to return %s, got %s", test.scheme, scheme)
- expect(t, scheme == test.scheme, msg)
- msg = fmt.tprintf("Expected `net.split_url` to return %s, got %s", test.host, host)
- expect(t, host == test.host, msg)
- msg = fmt.tprintf("Expected `net.split_url` to return %s, got %s", test.path, path)
- expect(t, path == test.path, msg)
- msg = fmt.tprintf("Expected `net.split_url` to return %d queries, got %d queries", len(test.queries), len(queries))
- expect(t, len(queries) == len(test.queries), msg)
+ testing.expectf(t, scheme == test.scheme, "Expected `net.split_url` to return %s, got %s", test.scheme, scheme)
+ testing.expectf(t, host == test.host, "Expected `net.split_url` to return %s, got %s", test.host, host)
+ testing.expectf(t, path == test.path, "Expected `net.split_url` to return %s, got %s", test.path, path)
+ testing.expectf(t, len(queries) == len(test.queries), "Expected `net.split_url` to return %d queries, got %d queries", len(test.queries), len(queries))
for k, v in queries {
expected := test.queries[k]
- msg = fmt.tprintf("Expected `net.split_url` to return %s, got %s", expected, v)
- expect(t, v == expected, msg)
+ testing.expectf(t, v == expected, "Expected `net.split_url` to return %s, got %s", expected, v)
}
- msg = fmt.tprintf("Expected `net.split_url` to return %s, got %s", test.fragment, fragment)
- expect(t, fragment == test.fragment, msg)
-
+ testing.expectf(t, fragment == test.fragment, "Expected `net.split_url` to return %s, got %s", test.fragment, fragment)
}
}
@@ -659,7 +512,45 @@ join_url_test :: proc(t: ^testing.T) {
for test_url in test.url {
pass |= url == test_url
}
- msg := fmt.tprintf("Expected `net.join_url` to return one of %s, got %s", test.url, url)
- expect(t, pass, msg)
+ testing.expectf(t, pass, "Expected `net.join_url` to return one of %s, got %s", test.url, url)
+ }
+}
+
+@(private)
+address_to_binstr :: proc(address: net.Address) -> (binstr: string) {
+ switch t in address {
+ case net.IP4_Address:
+ b := transmute(u32be)t
+ return fmt.tprintf("%08x", b)
+ case net.IP6_Address:
+ b := transmute(u128be)t
+ return fmt.tprintf("%32x", b)
+ case:
+ return ""
+ }
+ unreachable()
+}
+
+@(private)
+binstr_to_address :: proc(t: ^testing.T, binstr: string) -> (address: net.Address) {
+ switch len(binstr) {
+ case 8: // IPv4
+ a, ok := strconv.parse_u64_of_base(binstr, 16)
+ testing.expect(t, ok, "failed to parse test case bin string")
+
+ ipv4 := u32be(a)
+ return net.IP4_Address(transmute([4]u8)ipv4)
+
+
+ case 32: // IPv6
+ a, ok := strconv.parse_u128_of_base(binstr, 16)
+ testing.expect(t, ok, "failed to parse test case bin string")
+
+		ipv6 := u128be(a)
+		return net.IP6_Address(transmute([8]u16be)ipv6)
+
+ case 0:
+ return nil
}
+ panic("Invalid test case")
}
diff --git a/tests/core/normal.odin b/tests/core/normal.odin
new file mode 100644
index 000000000..7620d7d6e
--- /dev/null
+++ b/tests/core/normal.odin
@@ -0,0 +1,39 @@
+package tests_core
+
+import rlibc "core:c/libc"
+
+@(init)
+download_assets :: proc() {
+ if rlibc.system("python3 " + ODIN_ROOT + "tests/core/download_assets.py " + ODIN_ROOT + "tests/core/assets") != 0 {
+ panic("downloading test assets failed!")
+ }
+}
+
+@(require) import "c/libc"
+@(require) import "compress"
+@(require) import "container"
+@(require) import "encoding/base64"
+@(require) import "encoding/cbor"
+@(require) import "encoding/hex"
+@(require) import "encoding/hxa"
+@(require) import "encoding/json"
+@(require) import "encoding/varint"
+@(require) import "encoding/xml"
+@(require) import "fmt"
+@(require) import "math"
+@(require) import "math/big"
+@(require) import "math/linalg/glsl"
+@(require) import "math/noise"
+@(require) import "mem"
+@(require) import "net"
+@(require) import "odin"
+@(require) import "path/filepath"
+@(require) import "reflect"
+@(require) import "runtime"
+@(require) import "slice"
+@(require) import "strconv"
+@(require) import "strings"
+@(require) import "text/i18n"
+@(require) import "text/match"
+@(require) import "thread"
+@(require) import "time"
diff --git a/tests/core/odin/test_parser.odin b/tests/core/odin/test_parser.odin
index 821b7a53c..772ae5982 100644
--- a/tests/core/odin/test_parser.odin
+++ b/tests/core/odin/test_parser.odin
@@ -1,58 +1,29 @@
package test_core_odin_parser
-import "core:fmt"
import "core:odin/ast"
import "core:odin/parser"
-import "core:os"
+import "base:runtime"
import "core:testing"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_parse_demo(&t)
- test_parse_bitfield(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-
@test
test_parse_demo :: proc(t: ^testing.T) {
- pkg, ok := parser.parse_package_from_path("examples/demo")
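+	// Parse into the temp allocator; the guard below rewinds it when the proc returns, freeing what the parser allocates.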
+ context.allocator = context.temp_allocator
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+
+ pkg, ok := parser.parse_package_from_path(ODIN_ROOT + "examples/demo")
- expect(t, ok == true, "parser.parse_package_from_path failed")
+ testing.expect(t, ok, "parser.parse_package_from_path failed")
for key, value in pkg.files {
- expect(t, value.syntax_error_count == 0, fmt.tprintf("%v should contain zero errors", key))
+ testing.expectf(t, value.syntax_error_count == 0, "%v should contain zero errors", key)
}
}
@test
test_parse_bitfield :: proc(t: ^testing.T) {
+ context.allocator = context.temp_allocator
+ runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
+
file := ast.File{
fullpath = "test.odin",
src = `
@@ -78,5 +49,5 @@ Foo :: bit_field uint {
p := parser.default_parser()
ok := parser.parse_file(&p, &file)
- expect(t, ok == true, "bad parse")
-}
+ testing.expect(t, ok, "bad parse")
+}
\ No newline at end of file
diff --git a/tests/core/os/test_core_os_exit.odin b/tests/core/os/test_core_os_exit.odin
deleted file mode 100644
index 2ab274f5e..000000000
--- a/tests/core/os/test_core_os_exit.odin
+++ /dev/null
@@ -1,10 +0,0 @@
-// Tests that Odin run returns exit code of built executable on Unix
-// Needs exit status to be inverted to return 0 on success, e.g.
-// $(./odin run tests/core/os/test_core_os_exit.odin && exit 1 || exit 0)
-package test_core_os_exit
-
-import "core:os"
-
-main :: proc() {
- os.exit(1)
-}
diff --git a/tests/core/path/filepath/test_core_filepath.odin b/tests/core/path/filepath/test_core_filepath.odin
index 4c70e5f28..94b9329bb 100644
--- a/tests/core/path/filepath/test_core_filepath.odin
+++ b/tests/core/path/filepath/test_core_filepath.odin
@@ -1,26 +1,19 @@
// Tests "path.odin" in "core:path/filepath".
-// Must be run with `-collection:tests=` flag, e.g.
-// ./odin run tests/core/path/filepath/test_core_filepath.odin -collection:tests=tests
package test_core_filepath
import "core:fmt"
import "core:path/filepath"
import "core:testing"
-import tc "tests:common"
-
-main :: proc() {
- t := testing.T{}
+@(test)
+test_split_list :: proc(t: ^testing.T) {
when ODIN_OS == .Windows {
- test_split_list_windows(&t)
+ test_split_list_windows(t)
} else {
- test_split_list_unix(&t)
+ test_split_list_unix(t)
}
-
- tc.report(&t)
}
-@test
test_split_list_windows :: proc(t: ^testing.T) {
Datum :: struct {
i: int,
@@ -41,12 +34,12 @@ test_split_list_windows :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i, fmt.tprintf("wrong data index: i %d != d.i %d\n", i, d.i))
r := filepath.split_list(d.v)
- defer delete(r)
- tc.expect(t, len(r) == len(d.e), fmt.tprintf("i:%d %s(%s) len(r) %d != len(d.e) %d",
+ defer delete_split(r)
+ testing.expect(t, len(r) == len(d.e), fmt.tprintf("i:%d %s(%s) len(r) %d != len(d.e) %d",
i, #procedure, d.v, len(r), len(d.e)))
if len(r) == len(d.e) {
for _, j in r {
- tc.expect(t, r[j] == d.e[j], fmt.tprintf("i:%d %s(%v) -> %v[%d] != %v",
+ testing.expect(t, r[j] == d.e[j], fmt.tprintf("i:%d %s(%v) -> %v[%d] != %v",
i, #procedure, d.v, r[j], j, d.e[j]))
}
}
@@ -55,47 +48,43 @@ test_split_list_windows :: proc(t: ^testing.T) {
{
v := ""
r := filepath.split_list(v)
- tc.expect(t, r == nil, fmt.tprintf("%s(%s) -> %v != nil", #procedure, v, r))
+ defer delete_split(r)
+ testing.expect(t, r == nil, fmt.tprintf("%s(%s) -> %v != nil", #procedure, v, r))
}
{
v := "a"
r := filepath.split_list(v)
- defer delete(r)
- tc.expect(t, len(r) == 1, fmt.tprintf("%s(%s) len(r) %d != 1", #procedure, v, len(r)))
+ defer delete_split(r)
+ testing.expect(t, len(r) == 1, fmt.tprintf("%s(%s) len(r) %d != 1", #procedure, v, len(r)))
if len(r) == 1 {
- tc.expect(t, r[0] == "a", fmt.tprintf("%s(%v) -> %v[0] != a", #procedure, v, r[0]))
+ testing.expect(t, r[0] == "a", fmt.tprintf("%s(%v) -> %v[0] != a", #procedure, v, r[0]))
}
}
}
-@test
test_split_list_unix :: proc(t: ^testing.T) {
Datum :: struct {
- i: int,
v: string,
e: [3]string,
}
@static data := []Datum{
- { 0, "/opt/butler:/home/fancykillerpanda/Projects/Odin/Odin:/usr/local/sbin",
+ { "/opt/butler:/home/fancykillerpanda/Projects/Odin/Odin:/usr/local/sbin",
[3]string{"/opt/butler", "/home/fancykillerpanda/Projects/Odin/Odin", "/usr/local/sbin"} }, // Issue #1537
- { 1, "a::b", [3]string{"a", "", "b"} },
- { 2, "a:b:", [3]string{"a", "b", ""} },
- { 3, ":a:b", [3]string{"", "a", "b"} },
- { 4, "::", [3]string{"", "", ""} },
- { 5, "\"a:b\"c:d:\"f\"", [3]string{"a:bc", "d", "f"} },
- { 6, "\"a:b:c\":d\":e\":f", [3]string{"a:b:c", "d:e", "f"} },
+ { "a::b", [3]string{"a", "", "b"} },
+ { "a:b:", [3]string{"a", "b", ""} },
+ { ":a:b", [3]string{"", "a", "b"} },
+ { "::", [3]string{"", "", ""} },
+ { "\"a:b\"c:d:\"f\"", [3]string{"a:bc", "d", "f"} },
+ { "\"a:b:c\":d\":e\":f", [3]string{"a:b:c", "d:e", "f"} },
}
- for d, i in data {
- assert(i == d.i, fmt.tprintf("wrong data index: i %d != d.i %d\n", i, d.i))
+ for d in data {
r := filepath.split_list(d.v)
- defer delete(r)
- tc.expect(t, len(r) == len(d.e), fmt.tprintf("i:%d %s(%s) len(r) %d != len(d.e) %d",
- i, #procedure, d.v, len(r), len(d.e)))
+ defer delete_split(r)
+ testing.expectf(t, len(r) == len(d.e), "%s len(r) %d != len(d.e) %d", d.v, len(r), len(d.e))
if len(r) == len(d.e) {
for _, j in r {
- tc.expect(t, r[j] == d.e[j], fmt.tprintf("i:%d %s(%v) -> %v[%d] != %v",
- i, #procedure, d.v, r[j], j, d.e[j]))
+ testing.expectf(t, r[j] == d.e[j], "%v -> %v[%d] != %v", d.v, r[j], j, d.e[j])
}
}
}
@@ -103,15 +92,23 @@ test_split_list_unix :: proc(t: ^testing.T) {
{
v := ""
r := filepath.split_list(v)
- tc.expect(t, r == nil, fmt.tprintf("%s(%s) -> %v != nil", #procedure, v, r))
+ testing.expectf(t, r == nil, "'%s' -> '%v' != nil", v, r)
}
{
v := "a"
r := filepath.split_list(v)
- defer delete(r)
- tc.expect(t, len(r) == 1, fmt.tprintf("%s(%s) len(r) %d != 1", #procedure, v, len(r)))
+ defer delete_split(r)
+ testing.expectf(t, len(r) == 1, "'%s' len(r) %d != 1", v, len(r))
if len(r) == 1 {
- tc.expect(t, r[0] == "a", fmt.tprintf("%s(%v) -> %v[0] != a", #procedure, v, r[0]))
+ testing.expectf(t, r[0] == "a", "'%v' -> %v[0] != a", v, r[0])
}
}
}
+
+@(private)
+delete_split :: proc(s: []string) {
+ for part in s {
+ delete(part)
+ }
+ delete(s)
+} \ No newline at end of file
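
The `delete_split` helper introduced above reflects that `filepath.split_list` hands back a slice whose elements are themselves allocated, so callers have to free both levels. A minimal, illustrative sketch of that cleanup pattern (the input string here is arbitrary):

package split_list_cleanup_example

import "core:fmt"
import "core:path/filepath"

main :: proc() {
	parts := filepath.split_list("/opt/butler:/usr/local/sbin")
	defer {
		// Free each cloned element first, then the backing slice itself.
		for part in parts {
			delete(part)
		}
		delete(parts)
	}

	for part in parts {
		fmt.println(part)
	}
}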
diff --git a/tests/core/reflect/test_core_reflect.odin b/tests/core/reflect/test_core_reflect.odin
index a3a66f968..7d2394688 100644
--- a/tests/core/reflect/test_core_reflect.odin
+++ b/tests/core/reflect/test_core_reflect.odin
@@ -1,21 +1,8 @@
// Tests "core:reflect/reflect".
-// Must be run with `-collection:tests=` flag, e.g.
-// ./odin run tests/core/reflect/test_core_reflect.odin -out=tests/core/test_core_reflect -collection:tests=./tests
package test_core_reflect
-import "core:fmt"
import "core:reflect"
import "core:testing"
-import tc "tests:common"
-
-main :: proc() {
- t := testing.T{}
-
- test_as_u64(&t)
- test_as_f64(&t)
-
- tc.report(&t)
-}
@test
test_as_u64 :: proc(t: ^testing.T) {
@@ -31,9 +18,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i8 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i8 %v) -> %v (0x%X) != %v (0x%X)\n",
- i, #procedure, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "i8 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i8 %v -> %v (0x%X) != %v (0x%X)", d.v, r, r, d.e, d.e)
}
}
{
@@ -48,9 +34,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i16 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i16 %v) -> %v (0x%X) != %v (0x%X)\n",
- i, #procedure, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "i16 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i16 %v -> %v (0x%X) != %v (0x%X)", d.v, r, r, d.e, d.e)
}
}
{
@@ -65,9 +50,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i32 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i32 %v) -> %v (0x%X) != %v (0x%X)\n",
- i, #procedure, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "i32 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i32 %v -> %v (0x%X) != %v (0x%X)", d.v, r, r, d.e, d.e)
}
}
{
@@ -82,9 +66,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i64 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i64 %v) -> %v (0x%X) != %v (0x%X)\n",
- i, #procedure, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "i64 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i64 %v -> %v (0x%X) != %v (0x%X)", d.v, r, r, d.e, d.e)
}
}
{
@@ -102,9 +85,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i128 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i128 %v) -> %v (0x%X) != %v (0x%X)\n",
- i, #procedure, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "i128 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i128 %v -> %v (0x%X) != %v (0x%X)", d.v, r, r, d.e, d.e)
}
}
{
@@ -118,8 +100,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(f16 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f16 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "f16 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "f16 %v -> %v != %v", d.v, r, d.e)
}
}
{
@@ -132,8 +114,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(f32 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f32 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "f32 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "f32 %v -> %v != %v", d.v, r, d.e)
}
}
{
@@ -146,8 +128,8 @@ test_as_u64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_u64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(f64 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f64 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "f64 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "f64 %v -> %v != %v", d.v, r, d.e)
}
}
}
@@ -166,8 +148,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i8 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i8 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "i8 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i8 %v -> %v != %v", d.v, r, d.e)
}
}
{
@@ -182,8 +164,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i16 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i16 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "i16 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i16 %v -> %v != %v", d.v, r, d.e)
}
}
{
@@ -198,8 +180,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i32 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i32 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "i32 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i32 %v -> %v != %v", d.v, r, d.e)
}
}
{
@@ -214,8 +196,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i64 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i64 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "i64 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i64 %v -> %v != %v", d.v, r, d.e)
}
}
{
@@ -231,9 +213,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(i128 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(i128 %v) -> %v (%H) != %v (%H)\n",
- i, #procedure, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "i128 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "i128 %v -> %v (%H) != %v (%H)", d.v, r, r, d.e, d.e)
}
}
{
@@ -247,9 +228,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(f16 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f16 %v (%H)) -> %v (%H) != %v (%H)\n",
- i, #procedure, d.v, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "f16 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "f16 %v (%H) -> %v (%H) != %v (%H)", d.v, d.v, r, r, d.e, d.e)
}
}
{
@@ -262,9 +242,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(f32 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f32 %v (%H)) -> %v (%H) != %v (%H)\n",
- i, #procedure, d.v, d.v, r, r, d.e, d.e))
+ testing.expectf(t, valid, "f32 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "f32 %v (%H) -> %v (%H) != %v (%H)", d.v, d.v, r, r, d.e, d.e)
}
}
{
@@ -277,8 +256,8 @@ test_as_f64 :: proc(t: ^testing.T) {
for d, i in data {
assert(i == d.i)
r, valid := reflect.as_f64(d.v)
- tc.expect(t, valid, fmt.tprintf("i:%d %s(f64 %v) !valid\n", i, #procedure, d.v))
- tc.expect(t, r == d.e, fmt.tprintf("i:%d %s(f64 %v) -> %v != %v\n", i, #procedure, d.v, r, d.e))
+ testing.expectf(t, valid, "f64 %v !valid", d.v)
+ testing.expectf(t, r == d.e, "f64 %v -> %v != %v", d.v, r, d.e)
}
}
-}
+} \ No newline at end of file
diff --git a/tests/core/runtime/test_core_runtime.odin b/tests/core/runtime/test_core_runtime.odin
index 786cf003a..008146dcf 100644
--- a/tests/core/runtime/test_core_runtime.odin
+++ b/tests/core/runtime/test_core_runtime.odin
@@ -1,43 +1,10 @@
package test_core_runtime
-import "core:fmt"
import "base:intrinsics"
import "core:mem"
-import "core:os"
-import "core:reflect"
import "base:runtime"
import "core:testing"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect_value :: testing.expect_value
-} else {
- expect_value :: proc(t: ^testing.T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) {
- TEST_count += 1
- ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected)
- if !ok {
- TEST_fail += 1
- fmt.printf("[%v] expected %v, got %v\n", loc, expected, value)
- }
- return ok
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- test_temp_allocator_big_alloc_and_alignment(&t)
- test_temp_allocator_alignment_boundary(&t)
- test_temp_allocator_returns_correct_size(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
// Tests that having space for the allocation, but not for the allocation and alignment
// is handled correctly.
@(test)
@@ -47,7 +14,7 @@ test_temp_allocator_alignment_boundary :: proc(t: ^testing.T) {
_, _ = mem.alloc(int(runtime.DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE)-120)
_, err := mem.alloc(112, 32)
- expect_value(t, err, nil)
+ testing.expect(t, err == nil)
}
// Tests that big allocations with big alignments are handled correctly.
@@ -58,7 +25,7 @@ test_temp_allocator_big_alloc_and_alignment :: proc(t: ^testing.T) {
mappy: map[[8]int]int
err := reserve(&mappy, 50000)
- expect_value(t, err, nil)
+ testing.expect(t, err == nil)
}
@(test)
@@ -67,6 +34,6 @@ test_temp_allocator_returns_correct_size :: proc(t: ^testing.T) {
context.allocator = runtime.arena_allocator(&arena)
bytes, err := mem.alloc_bytes(10, 16)
- expect_value(t, err, nil)
- expect_value(t, len(bytes), 10)
-}
+ testing.expect(t, err == nil)
+ testing.expect(t, len(bytes) == 10)
+} \ No newline at end of file
diff --git a/tests/core/slice/test_core_slice.odin b/tests/core/slice/test_core_slice.odin
index 06329ddda..23de1b482 100644
--- a/tests/core/slice/test_core_slice.odin
+++ b/tests/core/slice/test_core_slice.odin
@@ -1,56 +1,16 @@
package test_core_slice
import "core:slice"
-import "core:strings"
import "core:testing"
-import "core:fmt"
-import "core:os"
import "core:math/rand"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_sort_with_indices(&t)
- test_binary_search(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
@test
test_sort_with_indices :: proc(t: ^testing.T) {
- seed := rand.uint64()
- fmt.printf("Random seed: %v\n", seed)
-
// Test sizes are all prime.
test_sizes :: []int{7, 13, 347, 1031, 10111, 100003}
for test_size in test_sizes {
- fmt.printf("Sorting %v random u64 values along with index.\n", test_size)
-
- r := rand.create(seed)
+ r := rand.create(t.seed)
vals := make([]u64, test_size)
r_idx := make([]int, test_size) // Reverse index
@@ -61,7 +21,7 @@ test_sort_with_indices :: proc(t: ^testing.T) {
// Set up test values
for _, i in vals {
- vals[i] = rand.uint64(&r)
+ vals[i] = rand.uint64(&r)
}
// Sort
@@ -69,7 +29,7 @@ test_sort_with_indices :: proc(t: ^testing.T) {
defer delete(f_idx)
// Verify sorted test values
- rand.init(&r, seed)
+ rand.init(&r, t.seed)
for v, i in f_idx {
r_idx[v] = i
@@ -79,14 +39,14 @@ test_sort_with_indices :: proc(t: ^testing.T) {
for v, i in vals {
if i > 0 {
val_pass := v >= last
- expect(t, val_pass, "Expected values to have been sorted.")
+ testing.expect(t, val_pass, "Expected randomized test values to have been sorted")
if !val_pass {
break
}
}
idx_pass := vals[r_idx[i]] == rand.uint64(&r)
- expect(t, idx_pass, "Expected index to have been sorted.")
+ testing.expect(t, idx_pass, "Expected index to have been sorted")
if !idx_pass {
break
}
@@ -97,16 +57,11 @@ test_sort_with_indices :: proc(t: ^testing.T) {
@test
test_sort_by_indices :: proc(t: ^testing.T) {
- seed := rand.uint64()
- fmt.printf("Random seed: %v\n", seed)
-
// Test sizes are all prime.
test_sizes :: []int{7, 13, 347, 1031, 10111, 100003}
for test_size in test_sizes {
- fmt.printf("Sorting %v random u64 values along with index.\n", test_size)
-
- r := rand.create(seed)
+ r := rand.create(t.seed)
vals := make([]u64, test_size)
r_idx := make([]int, test_size) // Reverse index
@@ -117,7 +72,7 @@ test_sort_by_indices :: proc(t: ^testing.T) {
// Set up test values
for _, i in vals {
- vals[i] = rand.uint64(&r)
+ vals[i] = rand.uint64(&r)
}
// Sort
@@ -125,7 +80,7 @@ test_sort_by_indices :: proc(t: ^testing.T) {
defer delete(f_idx)
// Verify sorted test values
- rand.init(&r, seed)
+ rand.init(&r, t.seed)
{
indices := make([]int, test_size)
@@ -138,7 +93,7 @@ test_sort_by_indices :: proc(t: ^testing.T) {
defer delete(sorted_indices)
for v, i in sorted_indices {
idx_pass := v == f_idx[i]
- expect(t, idx_pass, "Expected the sorted index to be the same as the result from sort_with_indices")
+ testing.expect(t, idx_pass, "Expected the sorted index to be the same as the result from sort_with_indices")
if !idx_pass {
break
}
@@ -154,7 +109,7 @@ test_sort_by_indices :: proc(t: ^testing.T) {
slice.sort_by_indices_overwrite(indices, f_idx)
for v, i in indices {
idx_pass := v == f_idx[i]
- expect(t, idx_pass, "Expected the sorted index to be the same as the result from sort_with_indices")
+ testing.expect(t, idx_pass, "Expected the sorted index to be the same as the result from sort_with_indices")
if !idx_pass {
break
}
@@ -174,7 +129,7 @@ test_sort_by_indices :: proc(t: ^testing.T) {
slice.sort_by_indices(indices, swap, f_idx)
for v, i in swap {
idx_pass := v == f_idx[i]
- expect(t, idx_pass, "Expected the sorted index to be the same as the result from sort_with_indices")
+ testing.expect(t, idx_pass, "Expected the sorted index to be the same as the result from sort_with_indices")
if !idx_pass {
break
}
@@ -185,61 +140,78 @@ test_sort_by_indices :: proc(t: ^testing.T) {
@test
test_binary_search :: proc(t: ^testing.T) {
- builder := strings.Builder{}
- defer strings.builder_destroy(&builder)
-
- test_search :: proc(t: ^testing.T, b: ^strings.Builder, s: []i32, v: i32) -> (int, bool) {
- log(t, fmt.sbprintf(b, "Searching for %v in %v", v, s))
- strings.builder_reset(b)
- index, found := slice.binary_search(s, v)
- log(t, fmt.sbprintf(b, "index: %v, found: %v", index, found))
- strings.builder_reset(b )
-
- return index, found
- }
-
index: int
found: bool
s := []i32{0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55}
- index, found = test_search(t, &builder, s, 13)
- expect(t, index == 9, "Expected index to be 9.")
- expect(t, found == true, "Expected found to be true.")
+ index, found = slice.binary_search(s, 13)
+ testing.expect(t, index == 9, "Expected index to be 9")
+ testing.expect(t, found == true, "Expected found to be true")
- index, found = test_search(t, &builder, s, 4)
- expect(t, index == 7, "Expected index to be 7.")
- expect(t, found == false, "Expected found to be false.")
+ index, found = slice.binary_search(s, 4)
+ testing.expect(t, index == 7, "Expected index to be 7.")
+ testing.expect(t, found == false, "Expected found to be false.")
- index, found = test_search(t, &builder, s, 100)
- expect(t, index == 13, "Expected index to be 13.")
- expect(t, found == false, "Expected found to be false.")
+ index, found = slice.binary_search(s, 100)
+ testing.expect(t, index == 13, "Expected index to be 13.")
+ testing.expect(t, found == false, "Expected found to be false.")
- index, found = test_search(t, &builder, s, 1)
- expect(t, index >= 1 && index <= 4, "Expected index to be 1, 2, 3, or 4.")
- expect(t, found == true, "Expected found to be true.")
+ index, found = slice.binary_search(s, 1)
+ testing.expect(t, index >= 1 && index <= 4, "Expected index to be 1, 2, 3, or 4.")
+ testing.expect(t, found == true, "Expected found to be true.")
- index, found = test_search(t, &builder, s, -1)
- expect(t, index == 0, "Expected index to be 0.")
- expect(t, found == false, "Expected found to be false.")
+ index, found = slice.binary_search(s, -1)
+ testing.expect(t, index == 0, "Expected index to be 0.")
+ testing.expect(t, found == false, "Expected found to be false.")
a := []i32{}
- index, found = test_search(t, &builder, a, 13)
- expect(t, index == 0, "Expected index to be 0.")
- expect(t, found == false, "Expected found to be false.")
+ index, found = slice.binary_search(a, 13)
+ testing.expect(t, index == 0, "Expected index to be 0.")
+ testing.expect(t, found == false, "Expected found to be false.")
b := []i32{1}
- index, found = test_search(t, &builder, b, 13)
- expect(t, index == 1, "Expected index to be 1.")
- expect(t, found == false, "Expected found to be false.")
+ index, found = slice.binary_search(b, 13)
+ testing.expect(t, index == 1, "Expected index to be 1.")
+ testing.expect(t, found == false, "Expected found to be false.")
+
+ index, found = slice.binary_search(b, 1)
+ testing.expect(t, index == 0, "Expected index to be 0.")
+ testing.expect(t, found == true, "Expected found to be true.")
- index, found = test_search(t, &builder, b, 1)
- expect(t, index == 0, "Expected index to be 0.")
- expect(t, found == true, "Expected found to be true.")
+ index, found = slice.binary_search(b, 0)
+ testing.expect(t, index == 0, "Expected index to be 0.")
+ testing.expect(t, found == false, "Expected found to be false.")
+}
+
+@test
+test_permutation_iterator :: proc(t: ^testing.T) {
+ // Big enough to do some sanity checking but not overly large.
+ FAC_5 :: 120
+ s := []int{1, 2, 3, 4, 5}
+ seen: map[int]bool
+ defer delete(seen)
+
+ iter := slice.make_permutation_iterator(s)
+ defer slice.destroy_permutation_iterator(iter)
+
+ permutations_counted: int
+ for slice.permute(&iter) {
+ n := 0
+ for item in s {
+ n *= 10
+ n += item
+ }
+ if n in seen {
+ testing.fail_now(t, "Permutation iterator made a duplicate permutation.")
+ return
+ }
+ seen[n] = true
+ permutations_counted += 1
+ }
- index, found = test_search(t, &builder, b, 0)
- expect(t, index == 0, "Expected index to be 0.")
- expect(t, found == false, "Expected found to be false.")
+ testing.expect_value(t, len(seen), FAC_5)
+ testing.expect_value(t, permutations_counted, FAC_5)
}
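
Besides swapping the local `expect` wrappers for `core:testing` calls, the slice tests above now seed their generator from the runner-provided `t.seed` instead of a freshly drawn value, so a failing run can be reproduced. A minimal sketch of that pattern, with an illustrative test body:

package seeded_test_example

import "core:math/rand"
import "core:testing"

@(test)
uses_runner_seed :: proc(t: ^testing.T) {
	// Seed a local generator from the runner so failures can be replayed.
	r := rand.create(t.seed)
	first := rand.uint64(&r)

	// Re-initializing with the same seed replays the identical sequence,
	// which is what the sort_with_indices verification above relies on.
	rand.init(&r, t.seed)
	testing.expect_value(t, rand.uint64(&r), first)
}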
diff --git a/tests/core/speed.odin b/tests/core/speed.odin
new file mode 100644
index 000000000..a4b2b6a69
--- /dev/null
+++ b/tests/core/speed.odin
@@ -0,0 +1,6 @@
+// Tests intended to be run with optimizations on
+package tests_core
+
+@(require) import "crypto"
+@(require) import "hash"
+@(require) import "image" \ No newline at end of file
diff --git a/tests/core/strconv/test_core_strconv.odin b/tests/core/strconv/test_core_strconv.odin
new file mode 100644
index 000000000..6b70654cc
--- /dev/null
+++ b/tests/core/strconv/test_core_strconv.odin
@@ -0,0 +1,145 @@
+package test_core_strconv
+
+import "core:math"
+import "core:strconv"
+import "core:testing"
+
+@(test)
+test_float :: proc(t: ^testing.T) {
+ n: int
+ f: f64
+ ok: bool
+
+ f, ok = strconv.parse_f64("1.2", &n)
+ testing.expect_value(t, f, 1.2)
+ testing.expect_value(t, n, 3)
+ testing.expect_value(t, ok, true)
+
+ f, ok = strconv.parse_f64("1.2a", &n)
+ testing.expect_value(t, f, 1.2)
+ testing.expect_value(t, n, 3)
+ testing.expect_value(t, ok, false)
+
+ f, ok = strconv.parse_f64("+", &n)
+ testing.expect_value(t, f, 0)
+ testing.expect_value(t, n, 0)
+ testing.expect_value(t, ok, false)
+
+ f, ok = strconv.parse_f64("-", &n)
+ testing.expect_value(t, f, 0)
+ testing.expect_value(t, n, 0)
+ testing.expect_value(t, ok, false)
+
+}
+
+@(test)
+test_nan :: proc(t: ^testing.T) {
+ n: int
+ f: f64
+ ok: bool
+
+ f, ok = strconv.parse_f64("nan", &n)
+ testing.expect_value(t, math.classify(f), math.Float_Class.NaN)
+ testing.expect_value(t, n, 3)
+ testing.expect_value(t, ok, true)
+
+ f, ok = strconv.parse_f64("nAN", &n)
+ testing.expect_value(t, math.classify(f), math.Float_Class.NaN)
+ testing.expect_value(t, n, 3)
+ testing.expect_value(t, ok, true)
+
+ f, ok = strconv.parse_f64("Nani", &n)
+ testing.expect_value(t, math.classify(f), math.Float_Class.NaN)
+ testing.expect_value(t, n, 3)
+ testing.expect_value(t, ok, false)
+}
+
+@(test)
+test_infinity :: proc(t: ^testing.T) {
+ pos_inf := math.inf_f64(+1)
+ neg_inf := math.inf_f64(-1)
+
+ n: int
+ s := "infinity"
+
+ for i in 0 ..< len(s) + 1 {
+ ss := s[:i]
+ f, ok := strconv.parse_f64(ss, &n)
+ if i >= 3 { // "inf" .. "infinity"
+ expected_n := 8 if i == 8 else 3
+ expected_ok := i == 3 || i == 8
+ testing.expect_value(t, f, pos_inf)
+ testing.expect_value(t, n, expected_n)
+ testing.expect_value(t, ok, expected_ok)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Inf)
+ } else { // invalid substring
+ testing.expect_value(t, f, 0)
+ testing.expect_value(t, n, 0)
+ testing.expect_value(t, ok, false)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Zero)
+ }
+ }
+
+ s = "+infinity"
+ for i in 0 ..< len(s) + 1 {
+ ss := s[:i]
+ f, ok := strconv.parse_f64(ss, &n)
+ if i >= 4 { // "+inf" .. "+infinity"
+ expected_n := 9 if i == 9 else 4
+ expected_ok := i == 4 || i == 9
+ testing.expect_value(t, f, pos_inf)
+ testing.expect_value(t, n, expected_n)
+ testing.expect_value(t, ok, expected_ok)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Inf)
+ } else { // invalid substring
+ testing.expect_value(t, f, 0)
+ testing.expect_value(t, n, 0)
+ testing.expect_value(t, ok, false)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Zero)
+ }
+ }
+
+ s = "-infinity"
+ for i in 0 ..< len(s) + 1 {
+ ss := s[:i]
+ f, ok := strconv.parse_f64(ss, &n)
+ if i >= 4 { // "-inf" .. "infinity"
+ expected_n := 9 if i == 9 else 4
+ expected_ok := i == 4 || i == 9
+ testing.expect_value(t, f, neg_inf)
+ testing.expect_value(t, n, expected_n)
+ testing.expect_value(t, ok, expected_ok)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Neg_Inf)
+ } else { // invalid substring
+ testing.expect_value(t, f, 0)
+ testing.expect_value(t, n, 0)
+ testing.expect_value(t, ok, false)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Zero)
+ }
+ }
+
+ // Make sure odd casing works.
+ batch := [?]string {"INFiniTY", "iNfInItY", "InFiNiTy"}
+ for ss in batch {
+ f, ok := strconv.parse_f64(ss, &n)
+ testing.expect_value(t, f, pos_inf)
+ testing.expect_value(t, n, 8)
+ testing.expect_value(t, ok, true)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Inf)
+ }
+
+ // Explicitly check how trailing characters are handled.
+ s = "infinityyyy"
+ f, ok := strconv.parse_f64(s, &n)
+ testing.expect_value(t, f, pos_inf)
+ testing.expect_value(t, n, 8)
+ testing.expect_value(t, ok, false)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Inf)
+
+ s = "inflippity"
+ f, ok = strconv.parse_f64(s, &n)
+ testing.expect_value(t, f, pos_inf)
+ testing.expect_value(t, n, 3)
+ testing.expect_value(t, ok, false)
+ testing.expect_value(t, math.classify(f), math.Float_Class.Inf)
+}
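
The new strconv tests above pin down the `parse_f64` contract: it returns the parsed value and an `ok` flag, and writes the number of bytes consumed into the optional `&n` argument, so trailing garbage still yields a partial value but with `ok = false`. A small caller-side sketch of that behaviour:

package parse_f64_example

import "core:fmt"
import "core:strconv"

main :: proc() {
	n: int

	// Clean input: the whole string is consumed and ok is true.
	f, ok := strconv.parse_f64("1.2", &n)
	fmt.println(f, ok, n) // 1.2 true 3

	// Trailing characters: the leading "1.2" still parses (n == 3), but ok is false.
	f, ok = strconv.parse_f64("1.2a", &n)
	fmt.println(f, ok, n) // 1.2 false 3
}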
diff --git a/tests/core/strings/test_core_strings.odin b/tests/core/strings/test_core_strings.odin
index f49476765..0ee2b3eb9 100644
--- a/tests/core/strings/test_core_strings.odin
+++ b/tests/core/strings/test_core_strings.odin
@@ -2,81 +2,42 @@ package test_core_strings
import "core:strings"
import "core:testing"
-import "core:fmt"
-import "core:os"
import "base:runtime"
-import "core:mem"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_index_any_small_string_not_found(&t)
- test_index_any_larger_string_not_found(&t)
- test_index_any_small_string_found(&t)
- test_index_any_larger_string_found(&t)
- test_cut(&t)
- test_case_conversion(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
@test
test_index_any_small_string_not_found :: proc(t: ^testing.T) {
index := strings.index_any(".", "/:\"")
- expect(t, index == -1, "index_any should be negative")
+ testing.expect(t, index == -1, "index_any should be negative")
}
@test
test_index_any_larger_string_not_found :: proc(t: ^testing.T) {
index := strings.index_any("aaaaaaaa.aaaaaaaa", "/:\"")
- expect(t, index == -1, "index_any should be negative")
+ testing.expect(t, index == -1, "index_any should be negative")
}
@test
test_index_any_small_string_found :: proc(t: ^testing.T) {
index := strings.index_any(".", "/:.\"")
- expect(t, index == 0, "index_any should be 0")
+ testing.expect(t, index == 0, "index_any should be 0")
}
@test
test_index_any_larger_string_found :: proc(t: ^testing.T) {
index := strings.index_any("aaaaaaaa:aaaaaaaa", "/:\"")
- expect(t, index == 8, "index_any should be 8")
+ testing.expect(t, index == 8, "index_any should be 8")
}
@test
test_last_index_any_small_string_found :: proc(t: ^testing.T) {
index := strings.last_index_any(".", "/:.\"")
- expect(t, index == 0, "last_index_any should be 0")
+ testing.expect(t, index == 0, "last_index_any should be 0")
}
@test
test_last_index_any_small_string_not_found :: proc(t: ^testing.T) {
index := strings.last_index_any(".", "/:\"")
- expect(t, index == -1, "last_index_any should be -1")
+ testing.expect(t, index == -1, "last_index_any should be -1")
}
Cut_Test :: struct {
@@ -100,9 +61,12 @@ test_cut :: proc(t: ^testing.T) {
res := strings.cut(test.input, test.offset, test.length)
defer delete(res)
- msg := fmt.tprintf("cut(\"%v\", %v, %v) expected to return \"%v\", got \"%v\"",
- test.input, test.offset, test.length, test.output, res)
- expect(t, res == test.output, msg)
+ testing.expectf(
+ t,
+ res == test.output,
+ "cut(\"%v\", %v, %v) expected to return \"%v\", got \"%v\"",
+ test.input, test.offset, test.length, test.output, res,
+ )
}
}
@@ -118,7 +82,7 @@ Case_Kind :: enum {
Ada_Case,
}
-Case_Proc :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error)
+Case_Proc :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error)
test_cases := [Case_Kind]struct{s: string, p: Case_Proc}{
.Lower_Space_Case = {"hellope world", to_lower_space_case},
@@ -132,33 +96,31 @@ test_cases := [Case_Kind]struct{s: string, p: Case_Proc}{
.Ada_Case = {"Hellope_World", to_ada_case},
}
-to_lower_space_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) {
+to_lower_space_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) {
return strings.to_delimiter_case(r, ' ', false, allocator)
}
-to_upper_space_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) {
+to_upper_space_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) {
return strings.to_delimiter_case(r, ' ', true, allocator)
}
// NOTE: we have these wrappers as having #optional_allocator_error changes the type to not be equivalent
-to_snake_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_snake_case(r, allocator) }
-to_upper_snake_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_upper_snake_case(r, allocator) }
-to_kebab_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_kebab_case(r, allocator) }
-to_upper_kebab_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_upper_kebab_case(r, allocator) }
-to_camel_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_camel_case(r, allocator) }
-to_pascal_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_pascal_case(r, allocator) }
-to_ada_case :: proc(r: string, allocator: runtime.Allocator) -> (string, mem.Allocator_Error) { return strings.to_ada_case(r, allocator) }
+to_snake_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_snake_case(r, allocator) }
+to_upper_snake_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_upper_snake_case(r, allocator) }
+to_kebab_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_kebab_case(r, allocator) }
+to_upper_kebab_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_upper_kebab_case(r, allocator) }
+to_camel_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_camel_case(r, allocator) }
+to_pascal_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_pascal_case(r, allocator) }
+to_ada_case :: proc(r: string, allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) { return strings.to_ada_case(r, allocator) }
@test
test_case_conversion :: proc(t: ^testing.T) {
for entry in test_cases {
for test_case, case_kind in test_cases {
result, err := entry.p(test_case.s, context.allocator)
- msg := fmt.tprintf("ERROR: We got the allocation error '{}'\n", err)
- expect(t, err == nil, msg)
+ testing.expectf(t, err == nil, "ERROR: We got the allocation error '{}'\n", err)
defer delete(result)
- msg = fmt.tprintf("ERROR: Input `{}` to converter {} does not match `{}`, got `{}`.\n", test_case.s, case_kind, entry.s, result)
- expect(t, result == entry.s, msg)
+ testing.expectf(t, result == entry.s, "ERROR: Input `{}` to converter {} does not match `{}`, got `{}`.\n", test_case.s, case_kind, entry.s, result)
}
}
} \ No newline at end of file
diff --git a/tests/core/text/i18n/test_core_text_i18n.odin b/tests/core/text/i18n/test_core_text_i18n.odin
index dcbdeb0c4..f6cffc318 100644
--- a/tests/core/text/i18n/test_core_text_i18n.odin
+++ b/tests/core/text/i18n/test_core_text_i18n.odin
@@ -1,31 +1,9 @@
package test_core_text_i18n
-import "core:mem"
-import "core:fmt"
-import "core:os"
+import "base:runtime"
import "core:testing"
import "core:text/i18n"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
T :: i18n.get
Test :: struct {
@@ -37,25 +15,28 @@ Test :: struct {
Test_Suite :: struct {
file: string,
- loader: proc(string, i18n.Parse_Options, proc(int) -> int, mem.Allocator) -> (^i18n.Translation, i18n.Error),
+ loader: proc(string, i18n.Parse_Options, proc(int) -> int, runtime.Allocator) -> (^i18n.Translation, i18n.Error),
plural: proc(int) -> int,
err: i18n.Error,
options: i18n.Parse_Options,
tests: []Test,
}
-// Custom pluralizer for plur.mo
-plur_mo_pluralizer :: proc(n: int) -> (slot: int) {
- switch {
- case n == 1: return 0
- case n != 0 && n % 1_000_000 == 0: return 1
- case: return 2
+TEST_SUITE_PATH :: ODIN_ROOT + "tests/core/assets/I18N/"
+
+@(test)
+test_custom_pluralizer :: proc(t: ^testing.T) {
+ // Custom pluralizer for plur.mo
+ plur_mo_pluralizer :: proc(n: int) -> (slot: int) {
+ switch {
+ case n == 1: return 0
+ case n != 0 && n % 1_000_000 == 0: return 1
+ case: return 2
+ }
}
-}
-TESTS := []Test_Suite{
- {
- file = "assets/I18N/plur.mo",
+ test(t, {
+ file = TEST_SUITE_PATH + "plur.mo",
loader = i18n.parse_mo_file,
plural = plur_mo_pluralizer,
tests = {
@@ -66,14 +47,16 @@ TESTS := []Test_Suite{
{"", "Message1/plural", "This is message 1", 1},
{"", "Message1/plural", "This is message 1 - plural A", 1_000_000},
{"", "Message1/plural", "This is message 1 - plural B", 42},
-
// This isn't in the catalog, so should return the key.
{"", "Come visit us on Discord!", "Come visit us on Discord!", 1},
},
- },
+ })
+}
- {
- file = "assets/I18N/mixed_context.mo",
+@(test)
+test_mixed_context :: proc(t: ^testing.T) {
+ test(t, {
+ file = TEST_SUITE_PATH + "mixed_context.mo",
loader = i18n.parse_mo_file,
plural = nil,
tests = {
@@ -84,19 +67,25 @@ TESTS := []Test_Suite{
// This isn't in the catalog, so should return the key.
{"", "Come visit us on Discord!", "Come visit us on Discord!", 1},
},
- },
+ })
+}
- {
- file = "assets/I18N/mixed_context.mo",
+@(test)
+test_mixed_context_dupe :: proc(t: ^testing.T) {
+ test(t, {
+ file = TEST_SUITE_PATH + "mixed_context.mo",
loader = i18n.parse_mo_file,
plural = nil,
// Message1 exists twice, once within Context, which has been merged into ""
err = .Duplicate_Key,
options = {merge_sections = true},
- },
+ })
+}
- {
- file = "assets/I18N/nl_NL.mo",
+@(test)
+test_nl_mo :: proc(t: ^testing.T) {
+ test(t, {
+ file = TEST_SUITE_PATH + "nl_NL.mo",
loader = i18n.parse_mo_file,
plural = nil, // Default pluralizer
tests = {
@@ -111,12 +100,13 @@ TESTS := []Test_Suite{
// This isn't in the catalog, so should return the key.
{"", "Come visit us on Discord!", "Come visit us on Discord!", 1},
},
- },
-
+ })
+}
- // QT Linguist with default loader options.
- {
- file = "assets/I18N/nl_NL-qt-ts.ts",
+@(test)
+test_qt_linguist :: proc(t: ^testing.T) {
+ test(t, {
+ file = TEST_SUITE_PATH + "nl_NL-qt-ts.ts",
loader = i18n.parse_qt_linguist_file,
plural = nil, // Default pluralizer
tests = {
@@ -131,11 +121,13 @@ TESTS := []Test_Suite{
{"", "Come visit us on Discord!", "Come visit us on Discord!", 1},
{"Fake_Section", "Come visit us on Discord!", "Come visit us on Discord!", 1},
},
- },
+ })
+}
- // QT Linguist, merging sections.
- {
- file = "assets/I18N/nl_NL-qt-ts.ts",
+@(test)
+test_qt_linguist_merge_sections :: proc(t: ^testing.T) {
+ test(t, {
+ file = TEST_SUITE_PATH + "nl_NL-qt-ts.ts",
loader = i18n.parse_qt_linguist_file,
plural = nil, // Default pluralizer
options = {merge_sections = true},
@@ -154,65 +146,38 @@ TESTS := []Test_Suite{
{"apple_count", "%d apple(s)", "%d apple(s)", 1},
{"apple_count", "%d apple(s)", "%d apple(s)", 42},
},
- },
+ })
+}
- // QT Linguist, merging sections. Expecting .Duplicate_Key error because same key exists in more than 1 section.
- {
- file = "assets/I18N/duplicate-key.ts",
+@(test)
+test_qt_linguist_duplicate_key_err :: proc(t: ^testing.T) {
+ test(t, { // QT Linguist, merging sections. Expecting .Duplicate_Key error because same key exists in more than 1 section.
+ file = TEST_SUITE_PATH + "duplicate-key.ts",
loader = i18n.parse_qt_linguist_file,
plural = nil, // Default pluralizer
options = {merge_sections = true},
err = .Duplicate_Key,
- },
+ })
+}
- // QT Linguist, not merging sections. Shouldn't return error despite same key existing in more than 1 section.
- {
- file = "assets/I18N/duplicate-key.ts",
+@(test)
+test_qt_linguist_duplicate_key :: proc(t: ^testing.T) {
+ test(t, { // QT Linguist, not merging sections. Shouldn't return error despite same key existing in more than 1 section.
+ file = TEST_SUITE_PATH + "duplicate-key.ts",
loader = i18n.parse_qt_linguist_file,
plural = nil, // Default pluralizer
- },
+ })
}
-@test
-tests :: proc(t: ^testing.T) {
- cat: ^i18n.Translation
- err: i18n.Error
-
- for suite in TESTS {
- cat, err = suite.loader(suite.file, suite.options, suite.plural, context.allocator)
-
- msg := fmt.tprintf("Expected loading %v to return %v, got %v", suite.file, suite.err, err)
- expect(t, err == suite.err, msg)
-
- if err == .None {
- for test in suite.tests {
- val := T(test.section, test.key, test.n, cat)
-
- msg = fmt.tprintf("Expected key `%v` from section `%v`'s form for value `%v` to equal `%v`, got `%v`", test.key, test.section, test.n, test.val, val)
- expect(t, val == test.val, msg)
- }
- }
- i18n.destroy(cat)
- }
-}
-
-main :: proc() {
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
-
- t := testing.T{}
- tests(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
+test :: proc(t: ^testing.T, suite: Test_Suite, loc := #caller_location) {
+ cat, err := suite.loader(suite.file, suite.options, suite.plural, context.allocator)
+ testing.expectf(t, err == suite.err, "Expected loading %v to return %v, got %v", suite.file, suite.err, err, loc=loc)
- if len(track.allocation_map) > 0 {
- fmt.println()
- for _, v in track.allocation_map {
- fmt.printf("%v Leaked %v bytes.\n", v.location, v.size)
+ if err == .None {
+ for test in suite.tests {
+ val := T(test.section, test.key, test.n, cat)
+ testing.expectf(t, val == test.val, "Expected key `%v` from section `%v`'s form for value `%v` to equal `%v`, got `%v`", test.key, test.section, test.n, test.val, val, loc=loc)
}
}
+ i18n.destroy(cat)
} \ No newline at end of file
diff --git a/tests/core/text/match/test_core_text_match.odin b/tests/core/text/match/test_core_text_match.odin
index b72190f78..5716b06fb 100644
--- a/tests/core/text/match/test_core_text_match.odin
+++ b/tests/core/text/match/test_core_text_match.odin
@@ -2,31 +2,6 @@ package test_strlib
import "core:text/match"
import "core:testing"
-import "core:fmt"
-import "core:os"
-import "core:io"
-
-TEST_count: int
-TEST_fail: int
-
-// inline expect with custom props
-failed :: proc(t: ^testing.T, ok: bool, loc := #caller_location) -> bool {
- TEST_count += 1
-
- if !ok {
- fmt.wprintf(t.w, "%v: ", loc)
- t.error_count += 1
- TEST_fail += 1
- }
-
- return !ok
-}
-
-expect :: testing.expect
-
-logf :: proc(t: ^testing.T, format: string, args: ..any) {
- fmt.wprintf(t.w, format, ..args)
-}
// find correct byte offsets
@test
@@ -61,18 +36,17 @@ test_find :: proc(t: ^testing.T) {
{ "helelo", "h.-l", 0, { 0, 3, true } },
}
- for entry, i in ENTRIES {
+ for entry in ENTRIES {
matcher := match.matcher_init(entry.s, entry.p, entry.offset)
start, end, ok := match.matcher_find(&matcher)
success := entry.match.ok == ok && start == entry.match.start && end == entry.match.end
- if failed(t, success) {
- logf(t, "Find %d failed!\n", i)
- logf(t, "\tHAYSTACK %s\tPATTERN %s\n", entry.s, entry.p)
- logf(t, "\tSTART: %d == %d?\n", entry.match.start, start)
- logf(t, "\tEND: %d == %d?\n", entry.match.end, end)
- logf(t, "\tErr: %v\tLength %d\n", matcher.err, matcher.captures_length)
- }
+ testing.expectf(
+ t,
+ success,
+ "HAYSTACK %q PATTERN %q, START: %d == %d? END: %d == %d? Err: %v Length %d",
+ entry.s, entry.p, entry.match.start, start, entry.match.end, end, matcher.err, matcher.captures_length,
+ )
}
}
@@ -178,17 +152,17 @@ test_match :: proc(t: ^testing.T) {
{ "testing _this_ out", "%b_", "", false },
}
- for entry, i in ENTRIES {
+ for entry in ENTRIES {
matcher := match.matcher_init(entry.s, entry.p)
result, ok := match.matcher_match(&matcher)
success := entry.ok == ok && result == entry.result
- if failed(t, success) {
- logf(t, "Match %d failed!\n", i)
- logf(t, "\tHAYSTACK %s\tPATTERN %s\n", entry.s, entry.p)
- logf(t, "\tResults: WANTED %s\tGOT %s\n", entry.result, result)
- logf(t, "\tErr: %v\tLength %d\n", matcher.err, matcher.captures_length)
- }
+ testing.expectf(
+ t,
+ success,
+ "HAYSTACK %q PATTERN %q WANTED %q GOT %q Err: %v Length %d",
+ entry.s, entry.p, entry.result, result, matcher.err, matcher.captures_length,
+ )
}
}
@@ -203,19 +177,23 @@ test_captures :: proc(t: ^testing.T) {
compare_captures :: proc(t: ^testing.T, test: ^Temp, haystack: string, comp: []string, loc := #caller_location) {
length, err := match.find_aux(haystack, test.pattern, 0, false, &test.captures)
result := len(comp) == length && err == .OK
- if failed(t, result == true) {
- logf(t, "Captures Compare Failed!\n")
- logf(t, "\tErr: %v\n", err)
- logf(t, "\tLengths: %v != %v\n", len(comp), length)
- }
+ testing.expectf(
+ t,
+ result,
+ "Captures Compare Failed! Lengths: %v != %v Err: %v",
+ len(comp), length, err,
+ )
for i in 0..<length {
cap := test.captures[i]
text := haystack[cap.byte_start:cap.byte_end]
- if failed(t, comp[i] == text) {
- logf(t, "Capture don't equal -> %s != %s\n", comp[i], text)
- }
+ testing.expectf(
+ t,
+ comp[i] == text,
+ "Capture don't equal -> %q != %q\n",
+ comp[i], text,
+ )
}
}
@@ -224,11 +202,12 @@ test_captures :: proc(t: ^testing.T) {
length, err := match.find_aux(haystack, test.pattern, 0, false, &test.captures)
result := length > 0 && err == .OK
- if failed(t, result == ok) {
- logf(t, "Capture match failed!\n")
- logf(t, "\tErr: %v\n", err)
- logf(t, "\tLength: %v\n", length)
- }
+ testing.expectf(
+ t,
+ result == ok,
+ "Capture match failed! Length: %v Pattern: %q Haystack: %q Err: %v",
+ length, test.pattern, haystack, err,
+ )
}
temp := Temp { pattern = "(one).+" }
@@ -253,15 +232,8 @@ test_captures :: proc(t: ^testing.T) {
cap2 := captures[2]
text1 := haystack[cap1.byte_start:cap1.byte_end]
text2 := haystack[cap2.byte_start:cap2.byte_end]
- expect(t, text1 == "233", "Multi-Capture failed at 1")
- expect(t, text2 == "hello", "Multi-Capture failed at 2")
- }
-}
-
-gmatch_check :: proc(t: ^testing.T, index: int, a: []string, b: string) {
- if failed(t, a[index] == b) {
- logf(t, "GMATCH %d failed!\n", index)
- logf(t, "\t%s != %s\n", a[index], b)
+ testing.expect(t, text1 == "233", "Multi-Capture failed at 1")
+ testing.expect(t, text2 == "hello", "Multi-Capture failed at 2")
}
}
@@ -298,9 +270,9 @@ test_gmatch :: proc(t: ^testing.T) {
@test
test_gsub :: proc(t: ^testing.T) {
result := match.gsub("testing123testing", "%d+", " sup ", context.temp_allocator)
- expect(t, result == "testing sup testing", "GSUB 0: failed")
+ testing.expect(t, result == "testing sup testing", "GSUB 0: failed")
result = match.gsub("testing123testing", "%a+", "345", context.temp_allocator)
- expect(t, result == "345123345", "GSUB 1: failed")
+ testing.expect(t, result == "345123345", "GSUB 1: failed")
}
@test
@@ -313,10 +285,12 @@ test_gfind :: proc(t: ^testing.T) {
index: int
for word in match.gfind(s, pattern, &captures) {
- if failed(t, output[index] == word) {
- logf(t, "GFIND %d failed!\n", index)
- logf(t, "\t%s != %s\n", output[index], word)
- }
+ testing.expectf(
+ t,
+ output[index] == word,
+ "GFIND %d failed! %q != %q",
+ index, output[index], word,
+ )
index += 1
}
}
@@ -332,11 +306,12 @@ test_frontier :: proc(t: ^testing.T) {
call :: proc(data: rawptr, word: string, haystack: string, captures: []match.Match) {
temp := cast(^Temp) data
- if failed(temp.t, word == temp.output[temp.index]) {
- logf(temp.t, "GSUB_WITH %d failed!\n", temp.index)
- logf(temp.t, "\t%s != %s\n", temp.output[temp.index], word)
- }
-
+ testing.expectf(
+ temp.t,
+ word == temp.output[temp.index],
+ "GSUB_WITH %d failed! %q != %q",
+ temp.index, temp.output[temp.index], word,
+ )
temp.index += 1
}
@@ -369,31 +344,21 @@ test_case_insensitive :: proc(t: ^testing.T) {
pattern := match.pattern_case_insensitive("test", 256, context.temp_allocator)
goal := "[tT][eE][sS][tT]"
- if failed(t, pattern == goal) {
- logf(t, "Case Insensitive Pattern doesn't match result\n")
- logf(t, "\t%s != %s\n", pattern, goal)
- }
+ testing.expectf(
+ t,
+ pattern == goal,
+ "Case Insensitive Pattern doesn't match result. %q != %q",
+ pattern, goal,
+ )
}
}
-main :: proc() {
- t: testing.T
- stream := os.stream_from_handle(os.stdout)
- w := io.to_writer(stream)
- t.w = w
-
- test_find(&t)
- test_match(&t)
- test_captures(&t)
- test_gmatch(&t)
- test_gsub(&t)
- test_gfind(&t)
- test_frontier(&t)
- test_utf8(&t)
- test_case_insensitive(&t)
-
- fmt.wprintf(w, "%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
+@(private)
+gmatch_check :: proc(t: ^testing.T, index: int, a: []string, b: string) {
+ testing.expectf(
+ t,
+ a[index] == b,
+ "GMATCH %d failed! %q != %q",
+ index, a[index], b,
+ )
} \ No newline at end of file
diff --git a/tests/core/thread/test_core_thread.odin b/tests/core/thread/test_core_thread.odin
index c0c7396a7..0b77ad511 100644
--- a/tests/core/thread/test_core_thread.odin
+++ b/tests/core/thread/test_core_thread.odin
@@ -2,39 +2,7 @@ package test_core_thread
import "core:testing"
import "core:thread"
-import "core:fmt"
-import "core:os"
-
-TEST_count := 0
-TEST_fail := 0
-
-t := &testing.T{}
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- poly_data_test(t)
-
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
+import "base:intrinsics"
@(test)
poly_data_test :: proc(_t: ^testing.T) {
@@ -46,7 +14,7 @@ poly_data_test :: proc(_t: ^testing.T) {
b: [MAX]byte = 8
t1 := thread.create_and_start_with_poly_data(b, proc(b: [MAX]byte) {
b_expect: [MAX]byte = 8
- expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
})
defer free(t1)
@@ -55,8 +23,8 @@ poly_data_test :: proc(_t: ^testing.T) {
t2 := thread.create_and_start_with_poly_data2(b1, b2, proc(b: [3]uintptr, b2: [MAX / 2]byte) {
b_expect: [3]uintptr = 1
b2_expect: [MAX / 2]byte = 3
- expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
- expect(poly_data_test_t, b2 == b2_expect, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b2 == b2_expect, "thread poly data not correct")
})
defer free(t2)
@@ -64,21 +32,21 @@ poly_data_test :: proc(_t: ^testing.T) {
b_expect: [3]uintptr = 1
b2_expect: [MAX / 2]byte = 3
- expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
- expect(poly_data_test_t, b2 == b2_expect, "thread poly data not correct")
- expect(poly_data_test_t, b3 == 333, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b2 == b2_expect, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b3 == 333, "thread poly data not correct")
})
defer free(t3)
t4 := thread.create_and_start_with_poly_data4(uintptr(111), b1, uintptr(333), u8(5), proc(n: uintptr, b: [3]uintptr, n2: uintptr, n4: u8) {
b_expect: [3]uintptr = 1
- expect(poly_data_test_t, n == 111, "thread poly data not correct")
- expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
- expect(poly_data_test_t, n2 == 333, "thread poly data not correct")
- expect(poly_data_test_t, n4 == 5, "thread poly data not correct")
+ testing.expect(poly_data_test_t, n == 111, "thread poly data not correct")
+ testing.expect(poly_data_test_t, b == b_expect, "thread poly data not correct")
+ testing.expect(poly_data_test_t, n2 == 333, "thread poly data not correct")
+ testing.expect(poly_data_test_t, n4 == 5, "thread poly data not correct")
})
defer free(t4)
thread.join_multiple(t1, t2, t3, t4)
-}
+} \ No newline at end of file
diff --git a/tests/core/time/test_core_time.odin b/tests/core/time/test_core_time.odin
index c6c6869a7..aeae44ca1 100644
--- a/tests/core/time/test_core_time.odin
+++ b/tests/core/time/test_core_time.odin
@@ -1,68 +1,17 @@
package test_core_time
-import "core:fmt"
-import "core:mem"
-import "core:os"
import "core:testing"
import "core:time"
import dt "core:time/datetime"
is_leap_year :: time.is_leap_year
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- expect_value :: testing.expect_value
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- defer mem.tracking_allocator_destroy(&track)
- context.allocator = mem.tracking_allocator(&track)
-
- test_ordinal_date_roundtrip(&t)
- test_component_to_time_roundtrip(&t)
- test_parse_rfc3339_string(&t)
- test_parse_iso8601_string(&t)
-
- for _, leak in track.allocation_map {
- expect(&t, false, fmt.tprintf("%v leaked %m\n", leak.location, leak.size))
- }
- for bad_free in track.bad_free_array {
- expect(&t, false, fmt.tprintf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory))
- }
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
@test
test_ordinal_date_roundtrip :: proc(t: ^testing.T) {
- expect(t, dt.unsafe_ordinal_to_date(dt.unsafe_date_to_ordinal(dt.MIN_DATE)) == dt.MIN_DATE, "Roundtripping MIN_DATE failed.")
- expect(t, dt.unsafe_date_to_ordinal(dt.unsafe_ordinal_to_date(dt.MIN_ORD)) == dt.MIN_ORD, "Roundtripping MIN_ORD failed.")
- expect(t, dt.unsafe_ordinal_to_date(dt.unsafe_date_to_ordinal(dt.MAX_DATE)) == dt.MAX_DATE, "Roundtripping MAX_DATE failed.")
- expect(t, dt.unsafe_date_to_ordinal(dt.unsafe_ordinal_to_date(dt.MAX_ORD)) == dt.MAX_ORD, "Roundtripping MAX_ORD failed.")
+ testing.expect(t, dt.unsafe_ordinal_to_date(dt.unsafe_date_to_ordinal(dt.MIN_DATE)) == dt.MIN_DATE, "Roundtripping MIN_DATE failed.")
+ testing.expect(t, dt.unsafe_date_to_ordinal(dt.unsafe_ordinal_to_date(dt.MIN_ORD)) == dt.MIN_ORD, "Roundtripping MIN_ORD failed.")
+ testing.expect(t, dt.unsafe_ordinal_to_date(dt.unsafe_date_to_ordinal(dt.MAX_DATE)) == dt.MAX_DATE, "Roundtripping MAX_DATE failed.")
+ testing.expect(t, dt.unsafe_date_to_ordinal(dt.unsafe_ordinal_to_date(dt.MAX_ORD)) == dt.MAX_ORD, "Roundtripping MAX_ORD failed.")
}
/*
@@ -160,22 +109,51 @@ test_parse_rfc3339_string :: proc(t: ^testing.T) {
is_leap := false
if test.apply_offset {
res, consumed := time.rfc3339_to_time_utc(test.rfc_3339, &is_leap)
- msg := fmt.tprintf("[apply offet] Parsing failed: %v -> %v (nsec: %v). Expected %v consumed, got %v", test.rfc_3339, res, res._nsec, test.consumed, consumed)
- expect(t, test.consumed == consumed, msg)
+ testing.expectf(
+ t,
+ test.consumed == consumed,
+ "[apply offet] Parsing failed: %v -> %v (nsec: %v). Expected %v consumed, got %v",
+ test.rfc_3339, res, res._nsec, test.consumed, consumed,
+ )
if test.consumed == consumed {
- expect(t, test.datetime == res, fmt.tprintf("Time didn't match. Expected %v (%v), got %v (%v)", test.datetime, test.datetime._nsec, res, res._nsec))
- expect(t, test.is_leap == is_leap, "Expected a leap second, got none.")
+ testing.expectf(
+ t,
+ test.datetime == res,
+ "Time didn't match. Expected %v (%v), got %v (%v)",
+ test.datetime, test.datetime._nsec, res, res._nsec,
+ )
+ testing.expect(
+ t,
+ test.is_leap == is_leap,
+ "Expected a leap second, got none",
+ )
}
} else {
res, offset, consumed := time.rfc3339_to_time_and_offset(test.rfc_3339)
- msg := fmt.tprintf("Parsing failed: %v -> %v (nsec: %v), offset: %v. Expected %v consumed, got %v", test.rfc_3339, res, res._nsec, offset, test.consumed, consumed)
- expect(t, test.consumed == consumed, msg)
+ testing.expectf(
+ t,
+ test.consumed == consumed,
+ "Parsing failed: %v -> %v (nsec: %v), offset: %v. Expected %v consumed, got %v",
+ test.rfc_3339, res, res._nsec, offset, test.consumed, consumed,
+ )
if test.consumed == consumed {
- expect(t, test.datetime == res, fmt.tprintf("Time didn't match. Expected %v (%v), got %v (%v)", test.datetime, test.datetime._nsec, res, res._nsec))
- expect(t, test.utc_offset == offset, fmt.tprintf("UTC offset didn't match. Expected %v, got %v", test.utc_offset, offset))
- expect(t, test.is_leap == is_leap, "Expected a leap second, got none.")
+ testing.expectf(
+ t, test.datetime == res,
+ "Time didn't match. Expected %v (%v), got %v (%v)",
+ test.datetime, test.datetime._nsec, res, res._nsec,
+ )
+ testing.expectf(
+ t,
+ test.utc_offset == offset,
+ "UTC offset didn't match. Expected %v, got %v",
+ test.utc_offset, offset,
+ )
+ testing.expect(
+ t, test.is_leap == is_leap,
+ "Expected a leap second, got none",
+ )
}
}
}
@@ -187,22 +165,52 @@ test_parse_iso8601_string :: proc(t: ^testing.T) {
is_leap := false
if test.apply_offset {
res, consumed := time.iso8601_to_time_utc(test.iso_8601, &is_leap)
- msg := fmt.tprintf("[apply offet] Parsing failed: %v -> %v (nsec: %v). Expected %v consumed, got %v", test.iso_8601, res, res._nsec, test.consumed, consumed)
- expect(t, test.consumed == consumed, msg)
+ testing.expectf(
+ t,
+ test.consumed == consumed,
+ "[apply offet] Parsing failed: %v -> %v (nsec: %v). Expected %v consumed, got %v",
+ test.iso_8601, res, res._nsec, test.consumed, consumed,
+ )
if test.consumed == consumed {
- expect(t, test.datetime == res, fmt.tprintf("Time didn't match. Expected %v (%v), got %v (%v)", test.datetime, test.datetime._nsec, res, res._nsec))
- expect(t, test.is_leap == is_leap, "Expected a leap second, got none.")
+ testing.expectf(
+ t,
+ test.datetime == res,
+ "Time didn't match. Expected %v (%v), got %v (%v)",
+ test.datetime, test.datetime._nsec, res, res._nsec,
+ )
+ testing.expect(
+ t,
+ test.is_leap == is_leap,
+ "Expected a leap second, got none",
+ )
}
} else {
res, offset, consumed := time.iso8601_to_time_and_offset(test.iso_8601)
- msg := fmt.tprintf("Parsing failed: %v -> %v (nsec: %v), offset: %v. Expected %v consumed, got %v", test.iso_8601, res, res._nsec, offset, test.consumed, consumed)
- expect(t, test.consumed == consumed, msg)
+ testing.expectf(
+ t,
+ test.consumed == consumed,
+ "Parsing failed: %v -> %v (nsec: %v), offset: %v. Expected %v consumed, got %v",
+ test.iso_8601, res, res._nsec, offset, test.consumed, consumed,
+ )
if test.consumed == consumed {
- expect(t, test.datetime == res, fmt.tprintf("Time didn't match. Expected %v (%v), got %v (%v)", test.datetime, test.datetime._nsec, res, res._nsec))
- expect(t, test.utc_offset == offset, fmt.tprintf("UTC offset didn't match. Expected %v, got %v", test.utc_offset, offset))
- expect(t, test.is_leap == is_leap, "Expected a leap second, got none.")
+ testing.expectf(
+ t, test.datetime == res,
+ "Time didn't match. Expected %v (%v), got %v (%v)",
+ test.datetime, test.datetime._nsec, res, res._nsec,
+ )
+ testing.expectf(
+ t,
+ test.utc_offset == offset,
+ "UTC offset didn't match. Expected %v, got %v",
+ test.utc_offset, offset,
+ )
+ testing.expect(
+ t,
+ test.is_leap == is_leap,
+ "Expected a leap second, got none",
+ )
}
}
}
@@ -231,15 +239,21 @@ test_component_to_time_roundtrip :: proc(t: ^testing.T) {
date_component_roundtrip_test :: proc(t: ^testing.T, moment: dt.DateTime) {
res, ok := time.datetime_to_time(moment.year, moment.month, moment.day, moment.hour, moment.minute, moment.second)
- expect(t, ok, "Couldn't convert date components into date")
+ testing.expect(
+ t,
+ ok,
+ "Couldn't convert date components into date",
+ )
YYYY, MM, DD := time.date(res)
hh, mm, ss := time.clock(res)
- expected := fmt.tprintf("Expected %4d-%2d-%2d %2d:%2d:%2d, got %4d-%2d-%2d %2d:%2d:%2d",
- moment.year, moment.month, moment.day, moment.hour, moment.minute, moment.second, YYYY, MM, DD, hh, mm, ss)
-
ok = moment.year == i64(YYYY) && moment.month == i8(MM) && moment.day == i8(DD)
ok &= moment.hour == i8(hh) && moment.minute == i8(mm) && moment.second == i8(ss)
- expect(t, ok, expected)
+ testing.expectf(
+ t,
+ ok,
+ "Expected %4d-%2d-%2d %2d:%2d:%2d, got %4d-%2d-%2d %2d:%2d:%2d",
+ moment.year, moment.month, moment.day, moment.hour, moment.minute, moment.second, YYYY, MM, DD, hh, mm, ss,
+ )
}
\ No newline at end of file
diff --git a/tests/internal/Makefile b/tests/internal/Makefile
deleted file mode 100644
index 09182cd23..000000000
--- a/tests/internal/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-ODIN=../../odin
-
-all: all_bsd asan_test
-
-all_bsd: rtti_test map_test pow_test 128_test string_compare_test
-
-rtti_test:
- $(ODIN) run test_rtti.odin -file -vet -strict-style -o:minimal
-
-map_test:
- $(ODIN) run test_map.odin -file -vet -strict-style -o:minimal
-
-pow_test:
- $(ODIN) run test_pow.odin -file -vet -strict-style -o:minimal
-
-128_test:
- $(ODIN) run test_128.odin -file -vet -strict-style -o:minimal
-
-asan_test:
- $(ODIN) run test_asan.odin -file -sanitize:address -debug
-
-string_compare_test:
- $(ODIN) run test_string_compare.odin -file -vet -strict-style -o:minimal
diff --git a/tests/internal/build.bat b/tests/internal/build.bat
deleted file mode 100644
index 29b3e36c3..000000000
--- a/tests/internal/build.bat
+++ /dev/null
@@ -1,10 +0,0 @@
-@echo off
-set PATH_TO_ODIN==..\..\odin
-rem %PATH_TO_ODIN% run test_rtti.odin -file -vet -strict-style -o:minimal || exit /b
-%PATH_TO_ODIN% run test_map.odin -file -vet -strict-style -o:minimal || exit /b
-rem -define:SEED=42
-%PATH_TO_ODIN% run test_pow.odin -file -vet -strict-style -o:minimal || exit /b
-
-%PATH_TO_ODIN% run test_128.odin -file -vet -strict-style -o:minimal || exit /b
-
-%PATH_TO_ODIN% run test_string_compare.odin -file -vet -strict-style -o:minimal || exit /b
\ No newline at end of file
diff --git a/tests/internal/test_128.odin b/tests/internal/test_128.odin
index 11ef068ed..7b7d655e8 100644
--- a/tests/internal/test_128.odin
+++ b/tests/internal/test_128.odin
@@ -1,41 +1,7 @@
-package test_128
+package test_internal
-import "core:fmt"
-import "core:os"
import "core:testing"
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- test_128_align(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
@test
test_128_align :: proc(t: ^testing.T) {
Danger_Struct :: struct {
@@ -45,15 +11,15 @@ test_128_align :: proc(t: ^testing.T) {
list := [?]Danger_Struct{{0, 0}, {1, 0}, {2, 0}, {3, 0}}
- expect(t, list[0].x == 0, fmt.tprintf("[0].x (%v) != 0", list[0].x))
- expect(t, list[0].y == 0, fmt.tprintf("[0].y (%v) != 0", list[0].y))
+ testing.expectf(t, list[0].x == 0, "[0].x (%v) != 0", list[0].x)
+ testing.expectf(t, list[0].y == 0, "[0].y (%v) != 0", list[0].y)
- expect(t, list[1].x == 1, fmt.tprintf("[1].x (%v) != 1", list[1].x))
- expect(t, list[1].y == 0, fmt.tprintf("[1].y (%v) != 0", list[1].y))
+ testing.expectf(t, list[1].x == 1, "[1].x (%v) != 1", list[1].x)
+ testing.expectf(t, list[1].y == 0, "[1].y (%v) != 0", list[1].y)
- expect(t, list[2].x == 2, fmt.tprintf("[2].x (%v) != 2", list[2].x))
- expect(t, list[2].y == 0, fmt.tprintf("[2].y (%v) != 0", list[2].y))
+ testing.expectf(t, list[2].x == 2, "[2].x (%v) != 2", list[2].x)
+ testing.expectf(t, list[2].y == 0, "[2].y (%v) != 0", list[2].y)
- expect(t, list[3].x == 3, fmt.tprintf("[3].x (%v) != 3", list[3].x))
- expect(t, list[3].y == 0, fmt.tprintf("[3].y (%v) != 0", list[3].y))
+ testing.expectf(t, list[3].x == 3, "[3].x (%v) != 3", list[3].x)
+ testing.expectf(t, list[3].y == 0, "[3].y (%v) != 0", list[3].y)
}
diff --git a/tests/internal/test_asan.odin b/tests/internal/test_asan.odin
index 2384ada76..1ac599acf 100644
--- a/tests/internal/test_asan.odin
+++ b/tests/internal/test_asan.odin
@@ -1,42 +1,7 @@
// Intended to contain code that would trigger asan easily if the abi was set up badly.
-package test_asan
+package test_internal
-import "core:fmt"
import "core:testing"
-import "core:os"
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
-
- test_12_bytes(&t)
- test_12_bytes_two(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
@(test)
test_12_bytes :: proc(t: ^testing.T) {
@@ -45,9 +10,9 @@ test_12_bytes :: proc(t: ^testing.T) {
}
a, b, ok := internal()
- expect(t, a == max(f32), fmt.tprintf("a (%v) != max(f32)", a))
- expect(t, b == 0, fmt.tprintf("b (%v) != 0", b))
- expect(t, ok, fmt.tprintf("ok (%v) != true", ok))
+ testing.expectf(t, a == max(f32), "a (%v) != max(f32)", a)
+ testing.expectf(t, b == 0, "b (%v) != 0", b)
+ testing.expectf(t, ok, "ok (%v) != true", ok)
}
@(test)
@@ -57,6 +22,6 @@ test_12_bytes_two :: proc(t: ^testing.T) {
}
a, b := internal()
- expect(t, a == 100., fmt.tprintf("a (%v) != 100.", a))
- expect(t, b == max(int), fmt.tprintf("b (%v) != max(int)", b))
+ testing.expectf(t, a == 100., "a (%v) != 100.", a)
+ testing.expectf(t, b == max(int), "b (%v) != max(int)", b)
}
diff --git a/tests/internal/test_map.odin b/tests/internal/test_map.odin
index 7d1dbf470..a9a8cf5d4 100644
--- a/tests/internal/test_map.odin
+++ b/tests/internal/test_map.odin
@@ -1,26 +1,22 @@
-package test_internal_map
+package test_internal
-import "core:fmt"
+import "core:log"
import "base:intrinsics"
import "core:math/rand"
-import "core:mem"
-import "core:os"
import "core:testing"
-seed: u64
-
ENTRY_COUNTS := []int{11, 101, 1_001, 10_001, 100_001, 1_000_001}
@test
map_insert_random_key_value :: proc(t: ^testing.T) {
seed_incr := u64(0)
for entries in ENTRY_COUNTS {
- fmt.printf("[map_insert_random_key_value] Testing %v entries.\n", entries)
+ log.infof("Testing %v entries", entries)
m: map[i64]i64
defer delete(m)
unique_keys := 0
- r := rand.create(seed + seed_incr)
+ r := rand.create(t.seed + seed_incr)
for _ in 0..<entries {
k := rand.int63(&r)
v := rand.int63(&r)
@@ -36,11 +32,11 @@ map_insert_random_key_value :: proc(t: ^testing.T) {
key_count += 1
}
- expect(t, key_count == unique_keys, fmt.tprintf("Expected key_count to equal %v, got %v", unique_keys, key_count))
- expect(t, len(m) == unique_keys, fmt.tprintf("Expected len(map) to equal %v, got %v", unique_keys, len(m)))
+ testing.expectf(t, key_count == unique_keys, "Expected key_count to equal %v, got %v", unique_keys, key_count)
+ testing.expectf(t, len(m) == unique_keys, "Expected len(map) to equal %v, got %v", unique_keys, len(m))
// Reset randomizer and verify
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
num_fails := 0
for _ in 0..<entries {
@@ -51,10 +47,10 @@ map_insert_random_key_value :: proc(t: ^testing.T) {
if !cond {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Unexpected value. Expected m[%v] = %v, got %v", k, v, m[k]))
+ testing.expectf(t, false, "Unexpected value. Expected m[%v] = %v, got %v", k, v, m[k])
}
}
seed_incr += 1
@@ -65,12 +61,12 @@ map_insert_random_key_value :: proc(t: ^testing.T) {
map_update_random_key_value :: proc(t: ^testing.T) {
seed_incr := u64(0)
for entries in ENTRY_COUNTS {
- fmt.printf("[map_update_random_key_value] Testing %v entries.\n", entries)
+ log.infof("Testing %v entries", entries)
m: map[i64]i64
defer delete(m)
unique_keys := 0
- r := rand.create(seed + seed_incr)
+ r := rand.create(t.seed + seed_incr)
for _ in 0..<entries {
k := rand.int63(&r)
v := rand.int63(&r)
@@ -86,13 +82,13 @@ map_update_random_key_value :: proc(t: ^testing.T) {
key_count += 1
}
- expect(t, key_count == unique_keys, fmt.tprintf("Expected key_count to equal %v, got %v", unique_keys, key_count))
- expect(t, len(m) == unique_keys, fmt.tprintf("Expected len(map) to equal %v, got %v", unique_keys, len(m)))
+ testing.expectf(t, key_count == unique_keys, "Expected key_count to equal %v, got %v", unique_keys, key_count)
+ testing.expectf(t, len(m) == unique_keys, "Expected len(map) to equal %v, got %v", unique_keys, len(m))
half_entries := entries / 2
// Reset randomizer and update half the entries
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
for _ in 0..<half_entries {
k := rand.int63(&r)
v := rand.int63(&r)
@@ -101,7 +97,7 @@ map_update_random_key_value :: proc(t: ^testing.T) {
}
// Reset randomizer and verify
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
num_fails := 0
for i in 0..<entries {
@@ -113,10 +109,10 @@ map_update_random_key_value :: proc(t: ^testing.T) {
if !cond {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Unexpected value. Expected m[%v] = %v, got %v", k, v, m[k]))
+ testing.expectf(t, false, "Unexpected value. Expected m[%v] = %v, got %v", k, v, m[k])
}
}
seed_incr += 1
@@ -127,12 +123,12 @@ map_update_random_key_value :: proc(t: ^testing.T) {
map_delete_random_key_value :: proc(t: ^testing.T) {
seed_incr := u64(0)
for entries in ENTRY_COUNTS {
- fmt.printf("[map_delete_random_key_value] Testing %v entries.\n", entries)
+ log.infof("Testing %v entries", entries)
m: map[i64]i64
defer delete(m)
unique_keys := 0
- r := rand.create(seed + seed_incr)
+ r := rand.create(t.seed + seed_incr)
for _ in 0..<entries {
k := rand.int63(&r)
v := rand.int63(&r)
@@ -148,13 +144,13 @@ map_delete_random_key_value :: proc(t: ^testing.T) {
key_count += 1
}
- expect(t, key_count == unique_keys, fmt.tprintf("Expected key_count to equal %v, got %v", unique_keys, key_count))
- expect(t, len(m) == unique_keys, fmt.tprintf("Expected len(map) to equal %v, got %v", unique_keys, len(m)))
+ testing.expectf(t, key_count == unique_keys, "Expected key_count to equal %v, got %v", unique_keys, key_count)
+ testing.expectf(t, len(m) == unique_keys, "Expected len(map) to equal %v, got %v", unique_keys, len(m))
half_entries := entries / 2
// Reset randomizer and delete half the entries
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
for _ in 0..<half_entries {
k := rand.int63(&r)
_ = rand.int63(&r)
@@ -163,7 +159,7 @@ map_delete_random_key_value :: proc(t: ^testing.T) {
}
// Reset randomizer and verify
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
num_fails := 0
for i in 0..<entries {
@@ -174,26 +170,26 @@ map_delete_random_key_value :: proc(t: ^testing.T) {
if k in m {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Unexpected key present. Expected m[%v] to have been deleted, got %v", k, m[k]))
+ testing.expectf(t, false, "Unexpected key present. Expected m[%v] to have been deleted, got %v", k, m[k])
}
} else {
if k not_in m {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Expected key not present. Expected m[%v] = %v", k, v))
+ testing.expectf(t, false, "Expected key not present. Expected m[%v] = %v", k, v)
} else if m[k] != v {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Unexpected value. Expected m[%v] = %v, got %v", k, v, m[k]))
+ testing.expectf(t, false, "Unexpected value. Expected m[%v] = %v, got %v", k, v, m[k])
}
}
}
@@ -205,12 +201,12 @@ map_delete_random_key_value :: proc(t: ^testing.T) {
set_insert_random_key_value :: proc(t: ^testing.T) {
seed_incr := u64(0)
for entries in ENTRY_COUNTS {
- fmt.printf("[set_insert_random_key_value] Testing %v entries.\n", entries)
+ log.infof("Testing %v entries", entries)
m: map[i64]struct{}
defer delete(m)
unique_keys := 0
- r := rand.create(seed + seed_incr)
+ r := rand.create(t.seed + seed_incr)
for _ in 0..<entries {
k := rand.int63(&r)
if k not_in m {
@@ -224,11 +220,11 @@ set_insert_random_key_value :: proc(t: ^testing.T) {
key_count += 1
}
- expect(t, key_count == unique_keys, fmt.tprintf("Expected key_count to equal %v, got %v", unique_keys, key_count))
- expect(t, len(m) == unique_keys, fmt.tprintf("Expected len(map) to equal %v, got %v", unique_keys, len(m)))
+ testing.expectf(t, key_count == unique_keys, "Expected key_count to equal %v, got %v", unique_keys, key_count)
+ testing.expectf(t, len(m) == unique_keys, "Expected len(map) to equal %v, got %v", unique_keys, len(m))
// Reset randomizer and verify
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
num_fails := 0
for _ in 0..<entries {
@@ -238,10 +234,10 @@ set_insert_random_key_value :: proc(t: ^testing.T) {
if !cond {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Unexpected value. Expected m[%v] to exist", k))
+ testing.expectf(t, false, "Unexpected value. Expected m[%v] to exist", k)
}
}
seed_incr += 1
@@ -252,12 +248,12 @@ set_insert_random_key_value :: proc(t: ^testing.T) {
set_delete_random_key_value :: proc(t: ^testing.T) {
seed_incr := u64(0)
for entries in ENTRY_COUNTS {
- fmt.printf("[set_delete_random_key_value] Testing %v entries.\n", entries)
+ log.infof("Testing %v entries", entries)
m: map[i64]struct{}
defer delete(m)
unique_keys := 0
- r := rand.create(seed + seed_incr)
+ r := rand.create(t.seed + seed_incr)
for _ in 0..<entries {
k := rand.int63(&r)
@@ -272,20 +268,20 @@ set_delete_random_key_value :: proc(t: ^testing.T) {
key_count += 1
}
- expect(t, key_count == unique_keys, fmt.tprintf("Expected key_count to equal %v, got %v", unique_keys, key_count))
- expect(t, len(m) == unique_keys, fmt.tprintf("Expected len(map) to equal %v, got %v", unique_keys, len(m)))
+ testing.expectf(t, key_count == unique_keys, "Expected key_count to equal %v, got %v", unique_keys, key_count)
+ testing.expectf(t, len(m) == unique_keys, "Expected len(map) to equal %v, got %v", unique_keys, len(m))
half_entries := entries / 2
// Reset randomizer and delete half the entries
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
for _ in 0..<half_entries {
k := rand.int63(&r)
delete_key(&m, k)
}
// Reset randomizer and verify
- r = rand.create(seed + seed_incr)
+ r = rand.create(t.seed + seed_incr)
num_fails := 0
for i in 0..<entries {
@@ -295,88 +291,22 @@ set_delete_random_key_value :: proc(t: ^testing.T) {
if k in m {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Unexpected key present. Expected m[%v] to have been deleted", k))
+ testing.expectf(t, false, "Unexpected key present. Expected m[%v] to have been deleted", k)
}
} else {
if k not_in m {
num_fails += 1
if num_fails > 5 {
- fmt.println("... and more")
+ log.info("... and more")
break
}
- expect(t, false, fmt.tprintf("Expected key not present. Expected m[%v] to exist", k))
+ testing.expectf(t, false, "Expected key not present. Expected m[%v] to exist", k)
}
}
}
seed_incr += 1
}
}
-
-// -------- -------- -------- -------- -------- -------- -------- -------- -------- --------
-
-main :: proc() {
- t := testing.T{}
-
- // Allow tests to be repeatable
- SEED :: #config(SEED, -1)
- when SEED > 0 {
- seed = u64(SEED)
- } else {
- seed = u64(intrinsics.read_cycle_counter())
- }
- fmt.println("Initialized seed to", seed)
-
- mem_track_test(&t, map_insert_random_key_value)
- mem_track_test(&t, map_update_random_key_value)
- mem_track_test(&t, map_delete_random_key_value)
-
- mem_track_test(&t, set_insert_random_key_value)
- mem_track_test(&t, set_delete_random_key_value)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-mem_track_test :: proc(t: ^testing.T, test: proc(t: ^testing.T)) {
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
-
- test(t)
-
- expect(t, len(track.allocation_map) == 0, "Expected no leaks.")
- expect(t, len(track.bad_free_array) == 0, "Expected no leaks.")
-
- for _, leak in track.allocation_map {
- fmt.printf("%v leaked %v bytes\n", leak.location, leak.size)
- }
- for bad_free in track.bad_free_array {
- fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
- }
-}
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
diff --git a/tests/internal/test_pow.odin b/tests/internal/test_pow.odin
index 70b81258d..d1939ace5 100644
--- a/tests/internal/test_pow.odin
+++ b/tests/internal/test_pow.odin
@@ -1,8 +1,7 @@
-package test_internal_math_pow
+package test_internal
-import "core:fmt"
+@(require) import "core:log"
import "core:math"
-import "core:os"
import "core:testing"
@test
@@ -19,14 +18,14 @@ pow_test :: proc(t: ^testing.T) {
// pow2_f64 returns the same float on all platforms because it isn't this stupid
_v1 = 0h00000000_00000000
}
- expect(t, _v1 == _v2, fmt.tprintf("Expected math.pow2_f64(%d) == math.pow(2, %d) (= %16x), got %16x", exp, exp, _v1, _v2))
+ testing.expectf(t, _v1 == _v2, "Expected math.pow2_f64(%d) == math.pow(2, %d) (= %16x), got %16x", exp, exp, _v1, _v2)
}
{
v1 := math.pow(2, f32(exp))
v2 := math.pow2_f32(exp)
_v1 := transmute(u32)v1
_v2 := transmute(u32)v2
- expect(t, _v1 == _v2, fmt.tprintf("Expected math.pow2_f32(%d) == math.pow(2, %d) (= %08x), got %08x", exp, exp, _v1, _v2))
+ testing.expectf(t, _v1 == _v2, "Expected math.pow2_f32(%d) == math.pow(2, %d) (= %08x), got %08x", exp, exp, _v1, _v2)
}
{
v1 := math.pow(2, f16(exp))
@@ -36,46 +35,11 @@ pow_test :: proc(t: ^testing.T) {
when ODIN_OS == .Darwin && ODIN_ARCH == .arm64 {
if exp == -25 {
- testing.logf(t, "skipping known test failure on darwin+arm64, Expected math.pow2_f16(-25) == math.pow(2, -25) (= 0000), got 0001")
+ log.info("skipping known test failure on darwin+arm64, Expected math.pow2_f16(-25) == math.pow(2, -25) (= 0000), got 0001")
_v2 = 0
}
}
-
- expect(t, _v1 == _v2, fmt.tprintf("Expected math.pow2_f16(%d) == math.pow(2, %d) (= %04x), got %04x", exp, exp, _v1, _v2))
+ testing.expectf(t, _v1 == _v2, "Expected math.pow2_f16(%d) == math.pow(2, %d) (= %04x), got %04x", exp, exp, _v1, _v2)
}
}
}
-
-// -------- -------- -------- -------- -------- -------- -------- -------- -------- --------
-
-main :: proc() {
- t := testing.T{}
-
- pow_test(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
diff --git a/tests/internal/test_rtti.odin b/tests/internal/test_rtti.odin
index 12f64462b..e72c107b2 100644
--- a/tests/internal/test_rtti.odin
+++ b/tests/internal/test_rtti.odin
@@ -1,11 +1,8 @@
-package test_internal_rtti
+package test_internal
import "core:fmt"
-import "core:mem"
-import "core:os"
import "core:testing"
-
Buggy_Struct :: struct {
a: int,
b: bool,
@@ -28,74 +25,22 @@ rtti_test :: proc(t: ^testing.T) {
for v, i in g_b {
checksum += (i+1) * int(v)
}
- expect(t, checksum == 0, fmt.tprintf("Expected g_b to be zero-initialized, got %v", g_b))
+ testing.expectf(t, checksum == 0, "Expected g_b to be zero-initialized, got %v", g_b)
}
{
checksum := 0
for v, i in l_b {
checksum += (i+1) * int(v)
}
- expect(t, checksum == 0, fmt.tprintf("Expected l_b to be zero-initialized, got %v", l_b))
+ testing.expectf(t, checksum == 0, "Expected l_b to be zero-initialized, got %v", l_b)
}
- expect(t, size_of(Buggy_Struct) == 40, fmt.tprintf("Expected size_of(Buggy_Struct) == 40, got %v", size_of(Buggy_Struct)))
- expect(t, size_of(g_buggy) == 40, fmt.tprintf("Expected size_of(g_buggy) == 40, got %v", size_of(g_buggy)))
- expect(t, size_of(l_buggy) == 40, fmt.tprintf("Expected size_of(l_buggy) == 40, got %v", size_of(l_buggy)))
+ testing.expectf(t, size_of(Buggy_Struct) == 40, "Expected size_of(Buggy_Struct) == 40, got %v", size_of(Buggy_Struct))
+ testing.expectf(t, size_of(g_buggy) == 40, "Expected size_of(g_buggy) == 40, got %v", size_of(g_buggy))
+ testing.expectf(t, size_of(l_buggy) == 40, "Expected size_of(l_buggy) == 40, got %v", size_of(l_buggy))
g_s := fmt.tprintf("%s", g_buggy)
l_s := fmt.tprintf("%s", l_buggy)
- expect(t, g_s == EXPECTED_REPR, fmt.tprintf("Expected fmt.tprintf(\"%%s\", g_s)) to return \"%v\", got \"%v\"", EXPECTED_REPR, g_s))
- expect(t, l_s == EXPECTED_REPR, fmt.tprintf("Expected fmt.tprintf(\"%%s\", l_s)) to return \"%v\", got \"%v\"", EXPECTED_REPR, l_s))
-}
-
-// -------- -------- -------- -------- -------- -------- -------- -------- -------- --------
-
-main :: proc() {
- t := testing.T{}
-
- mem_track_test(&t, rtti_test)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
+ testing.expectf(t, g_s == EXPECTED_REPR, "Expected fmt.tprintf(\"%%s\", g_s)) to return \"%v\", got \"%v\"", EXPECTED_REPR, g_s)
+ testing.expectf(t, l_s == EXPECTED_REPR, "Expected fmt.tprintf(\"%%s\", l_s)) to return \"%v\", got \"%v\"", EXPECTED_REPR, l_s)
}
-
-mem_track_test :: proc(t: ^testing.T, test: proc(t: ^testing.T)) {
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- context.allocator = mem.tracking_allocator(&track)
-
- test(t)
-
- expect(t, len(track.allocation_map) == 0, "Expected no leaks.")
- expect(t, len(track.bad_free_array) == 0, "Expected no leaks.")
-
- for _, leak in track.allocation_map {
- fmt.printf("%v leaked %v bytes\n", leak.location, leak.size)
- }
- for bad_free in track.bad_free_array {
- fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
- }
-}
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
\ No newline at end of file
diff --git a/tests/internal/test_string_compare.odin b/tests/internal/test_string_compare.odin
index ff65b41c2..32a93ddf5 100644
--- a/tests/internal/test_string_compare.odin
+++ b/tests/internal/test_string_compare.odin
@@ -1,7 +1,5 @@
-package test_internal_string_compare
+package test_internal
-import "core:fmt"
-import "core:os"
import "core:testing"
Op :: enum { Eq, Lt, Gt }
@@ -29,65 +27,31 @@ string_compare :: proc(t: ^testing.T) {
for res, op in v.res {
switch op {
case .Eq:
- expect(t, (v.a == v.b) == res, fmt.tprintf("Expected cstring(\"%v\") == cstring(\"%v\") to be %v", v.a, v.b, res))
- expect(t, (s_a == s_b) == res, fmt.tprintf("Expected string(\"%v\") == string(\"%v\") to be %v", v.a, v.b, res))
+ testing.expectf(t, (v.a == v.b) == res, "Expected cstring(\"%v\") == cstring(\"%v\") to be %v", v.a, v.b, res)
+ testing.expectf(t, (s_a == s_b) == res, "Expected string(\"%v\") == string(\"%v\") to be %v", v.a, v.b, res)
// If a == b then a != b
- expect(t, (v.a != v.b) == !res, fmt.tprintf("Expected cstring(\"%v\") != cstring(\"%v\") to be %v", v.a, v.b, !res))
- expect(t, (s_a != s_b) == !res, fmt.tprintf("Expected string(\"%v\") != string(\"%v\") to be %v", v.a, v.b, !res))
+ testing.expectf(t, (v.a != v.b) == !res, "Expected cstring(\"%v\") != cstring(\"%v\") to be %v", v.a, v.b, !res)
+ testing.expectf(t, (s_a != s_b) == !res, "Expected string(\"%v\") != string(\"%v\") to be %v", v.a, v.b, !res)
case .Lt:
- expect(t, (v.a < v.b) == res, fmt.tprintf("Expected cstring(\"%v\") < cstring(\"%v\") to be %v", v.a, v.b, res))
- expect(t, (s_a < s_b) == res, fmt.tprintf("Expected string(\"%v\") < string(\"%v\") to be %v", v.a, v.b, res))
+ testing.expectf(t, (v.a < v.b) == res, "Expected cstring(\"%v\") < cstring(\"%v\") to be %v", v.a, v.b, res)
+ testing.expectf(t, (s_a < s_b) == res, "Expected string(\"%v\") < string(\"%v\") to be %v", v.a, v.b, res)
// .Lt | .Eq == .LtEq
lteq := v.res[.Eq] | res
- expect(t, (v.a <= v.b) == lteq, fmt.tprintf("Expected cstring(\"%v\") <= cstring(\"%v\") to be %v", v.a, v.b, lteq))
- expect(t, (s_a <= s_b) == lteq, fmt.tprintf("Expected string(\"%v\") <= string(\"%v\") to be %v", v.a, v.b, lteq))
+ testing.expectf(t, (v.a <= v.b) == lteq, "Expected cstring(\"%v\") <= cstring(\"%v\") to be %v", v.a, v.b, lteq)
+ testing.expectf(t, (s_a <= s_b) == lteq, "Expected string(\"%v\") <= string(\"%v\") to be %v", v.a, v.b, lteq)
case .Gt:
- expect(t, (v.a > v.b) == res, fmt.tprintf("Expected cstring(\"%v\") > cstring(\"%v\") to be %v", v.a, v.b, res))
- expect(t, (s_a > s_b) == res, fmt.tprintf("Expected string(\"%v\") > string(\"%v\") to be %v", v.a, v.b, res))
+ testing.expectf(t, (v.a > v.b) == res, "Expected cstring(\"%v\") > cstring(\"%v\") to be %v", v.a, v.b, res)
+ testing.expectf(t, (s_a > s_b) == res, "Expected string(\"%v\") > string(\"%v\") to be %v", v.a, v.b, res)
// .Gt | .Eq == .GtEq
gteq := v.res[.Eq] | res
- expect(t, (v.a >= v.b) == gteq, fmt.tprintf("Expected cstring(\"%v\") >= cstring(\"%v\") to be %v", v.a, v.b, gteq))
- expect(t, (s_a >= s_b) == gteq, fmt.tprintf("Expected string(\"%v\") >= string(\"%v\") to be %v", v.a, v.b, gteq))
+ testing.expectf(t, (v.a >= v.b) == gteq, "Expected cstring(\"%v\") >= cstring(\"%v\") to be %v", v.a, v.b, gteq)
+ testing.expectf(t, (s_a >= s_b) == gteq, "Expected string(\"%v\") >= string(\"%v\") to be %v", v.a, v.b, gteq)
}
}
}
}
-
-// -------- -------- -------- -------- -------- -------- -------- -------- -------- --------
-
-main :: proc() {
- t := testing.T{}
-
- string_compare(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.printf("[%v] %v\n", loc, message)
- return
- }
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
\ No newline at end of file
diff --git a/tests/issues/run.bat b/tests/issues/run.bat
index 41c52c02f..299e08791 100644
--- a/tests/issues/run.bat
+++ b/tests/issues/run.bat
@@ -3,19 +3,18 @@
if not exist "build\" mkdir build
pushd build
-set COMMON=-collection:tests=..\..
+set COMMON=-define:ODIN_TEST_FANCY=false -file -vet -strict-style
@echo on
-..\..\..\odin test ..\test_issue_829.odin %COMMON% -file || exit /b
-..\..\..\odin test ..\test_issue_1592.odin %COMMON% -file || exit /b
-..\..\..\odin test ..\test_issue_2056.odin %COMMON% -file || exit /b
-..\..\..\odin test ..\test_issue_2087.odin %COMMON% -file || exit /b
-..\..\..\odin build ..\test_issue_2113.odin %COMMON% -file -debug || exit /b
-..\..\..\odin test ..\test_issue_2466.odin %COMMON% -file || exit /b
-..\..\..\odin test ..\test_issue_2615.odin %COMMON% -file || exit /b
-..\..\..\odin test ..\test_issue_2637.odin %COMMON% -file || exit /b
-..\..\..\odin test ..\test_issue_2666.odin %COMMON% -file || exit /b
+..\..\..\odin test ..\test_issue_829.odin %COMMON% || exit /b
+..\..\..\odin test ..\test_issue_1592.odin %COMMON% || exit /b
+..\..\..\odin test ..\test_issue_2056.odin %COMMON% || exit /b
+..\..\..\odin build ..\test_issue_2113.odin %COMMON% -debug || exit /b
+..\..\..\odin test ..\test_issue_2466.odin %COMMON% || exit /b
+..\..\..\odin test ..\test_issue_2615.odin %COMMON% || exit /b
+..\..\..\odin test ..\test_issue_2637.odin %COMMON% || exit /b
+..\..\..\odin test ..\test_issue_2666.odin %COMMON% || exit /b
@echo off
diff --git a/tests/issues/run.sh b/tests/issues/run.sh
index 6d53388a7..8b4c1e7f2 100755
--- a/tests/issues/run.sh
+++ b/tests/issues/run.sh
@@ -1,28 +1,26 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -eu
mkdir -p build
pushd build
ODIN=../../../odin
-COMMON="-collection:tests=../.."
-
-NO_NIL_ERR="Error: "
+COMMON="-define:ODIN_TEST_FANCY=false -file -vet -strict-style"
set -x
-$ODIN test ../test_issue_829.odin $COMMON -file
-$ODIN test ../test_issue_1592.odin $COMMON -file
-$ODIN test ../test_issue_2056.odin $COMMON -file
-$ODIN test ../test_issue_2087.odin $COMMON -file
-$ODIN build ../test_issue_2113.odin $COMMON -file -debug
-$ODIN test ../test_issue_2466.odin $COMMON -file
-$ODIN test ../test_issue_2615.odin $COMMON -file
-$ODIN test ../test_issue_2637.odin $COMMON -file
-$ODIN test ../test_issue_2666.odin $COMMON -file
-if [[ $($ODIN build ../test_issue_2395.odin $COMMON -file 2>&1 >/dev/null | grep -c "$NO_NIL_ERR") -eq 2 ]] ; then
+$ODIN test ../test_issue_829.odin $COMMON
+$ODIN test ../test_issue_1592.odin $COMMON
+$ODIN test ../test_issue_2056.odin $COMMON
+$ODIN build ../test_issue_2113.odin $COMMON -debug
+$ODIN test ../test_issue_2466.odin $COMMON
+$ODIN test ../test_issue_2615.odin $COMMON
+$ODIN test ../test_issue_2637.odin $COMMON
+$ODIN test ../test_issue_2666.odin $COMMON
+if [[ $($ODIN build ../test_issue_2395.odin $COMMON 2>&1 >/dev/null | grep -c "Error:") -eq 2 ]] ; then
echo "SUCCESSFUL 1/1"
else
echo "SUCCESSFUL 0/1"
+ exit 1
fi
set +x
diff --git a/tests/issues/test_issue_1592.odin b/tests/issues/test_issue_1592.odin
index 800314a93..79eff33df 100644
--- a/tests/issues/test_issue_1592.odin
+++ b/tests/issues/test_issue_1592.odin
@@ -1,7 +1,6 @@
// Tests issue #1592 https://github.com/odin-lang/Odin/issues/1592
package test_issues
-import "core:fmt"
import "core:testing"
/* Original issue #1592 example */
@@ -31,428 +30,428 @@ true_result :: proc() -> bool {
@test
test_simple_const_false :: proc(t: ^testing.T) {
if CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if (CONSTANT_FALSE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if (!CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !!CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_FALSE == true {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_FALSE == false {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(CONSTANT_FALSE == true) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(CONSTANT_FALSE == false) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
}
@test
test_simple_const_true :: proc(t: ^testing.T) {
if CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if (CONSTANT_TRUE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if (!CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if (!CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !!CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_TRUE == true {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_TRUE == false {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_TRUE == true) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_TRUE == false) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_simple_proc_false :: proc(t: ^testing.T) {
if false_result() {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !false_result() {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_simple_proc_true :: proc(t: ^testing.T) {
if true_result() {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !true_result() {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
}
@test
test_const_false_const_false :: proc(t: ^testing.T) {
if CONSTANT_FALSE || CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_FALSE && CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !CONSTANT_FALSE || CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !CONSTANT_FALSE && CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_FALSE || !CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_FALSE && !CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_FALSE || CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(CONSTANT_FALSE && CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_const_false_const_true :: proc(t: ^testing.T) {
if CONSTANT_FALSE || CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_FALSE && CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !CONSTANT_FALSE || CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !CONSTANT_FALSE && CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_FALSE || !CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_FALSE && !CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_FALSE || CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_FALSE && CONSTANT_TRUE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_const_true_const_false :: proc(t: ^testing.T) {
if CONSTANT_TRUE || CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_TRUE && CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !CONSTANT_TRUE || CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !CONSTANT_TRUE && CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_TRUE || !CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_TRUE && !CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(CONSTANT_TRUE || CONSTANT_FALSE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_TRUE && CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_const_true_const_true :: proc(t: ^testing.T) {
if CONSTANT_TRUE || CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_TRUE && CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !CONSTANT_TRUE || CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !CONSTANT_TRUE && CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if CONSTANT_TRUE || !CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if CONSTANT_TRUE && !CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_TRUE || CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(CONSTANT_TRUE && CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
}
@test
test_proc_false_const_false :: proc(t: ^testing.T) {
if false_result() || CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if false_result() && CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(false_result() || CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(false_result() && CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_proc_false_const_true :: proc(t: ^testing.T) {
if false_result() || CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if false_result() && CONSTANT_TRUE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(false_result() || CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(false_result() && CONSTANT_TRUE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_proc_true_const_false :: proc(t: ^testing.T) {
if true_result() || CONSTANT_FALSE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if true_result() && CONSTANT_FALSE {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(true_result() || CONSTANT_FALSE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(true_result() && CONSTANT_FALSE) {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
}
@test
test_proc_true_const_true :: proc(t: ^testing.T) {
if true_result() || CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if true_result() && CONSTANT_TRUE {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
} else {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
}
if !(true_result() || CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
if !(true_result() && CONSTANT_TRUE) {
- testing.expect(t, false, fmt.tprintf("%s: !false\n", #procedure))
+ testing.expect(t, false, "!false")
} else {
- testing.expect(t, true, fmt.tprintf("%s: !true\n", #procedure))
+ testing.expect(t, true, "!true")
}
}
diff --git a/tests/issues/test_issue_2056.odin b/tests/issues/test_issue_2056.odin
index 4869b557e..06bc11fba 100644
--- a/tests/issues/test_issue_2056.odin
+++ b/tests/issues/test_issue_2056.odin
@@ -1,7 +1,6 @@
// Tests issue #2056 https://github.com/odin-lang/Odin/issues/2056
package test_issues
-import "core:fmt"
import "core:testing"
@test
@@ -12,9 +11,9 @@ test_scalar_matrix_conversion :: proc(t: ^testing.T) {
for i in 0..<4 {
for j in 0..<4 {
if i == j {
- testing.expect(t, m[i,j] == 1, fmt.tprintf("expected 1 at m[%d,%d], found %f\n", i, j, m[i,j]))
+ testing.expectf(t, m[i,j] == 1, "expected 1 at m[%d,%d], found %f\n", i, j, m[i,j])
} else {
- testing.expect(t, m[i,j] == 0, fmt.tprintf("expected 0 at m[%d,%d], found %f\n", i, j, m[i,j]))
+ testing.expectf(t, m[i,j] == 0, "expected 0 at m[%d,%d], found %f\n", i, j, m[i,j])
}
}
}
diff --git a/tests/issues/test_issue_2087.odin b/tests/issues/test_issue_2087.odin
deleted file mode 100644
index 26b6d487d..000000000
--- a/tests/issues/test_issue_2087.odin
+++ /dev/null
@@ -1,62 +0,0 @@
-// Tests issue #2087 https://github.com/odin-lang/Odin/issues/2087
-package test_issues
-
-import "core:math"
-import "core:strconv"
-import "core:testing"
-
-@(test)
-test_parse_float :: proc(t: ^testing.T) {
- {
- f, ok := strconv.parse_f64("1.2")
- testing.expect(t, ok && f == 1.2, "expected f64(1.2), fully consumed")
- f, ok = strconv.parse_f64("1.2a")
- testing.expect(t, !ok && f == 1.2, "expected f64(1.2), partially consumed")
- f, ok = strconv.parse_f64("+")
- testing.expect(t, !ok && f == 0.0, "expected f64(0.0), with ok=false")
- f, ok = strconv.parse_f64("-")
- testing.expect(t, !ok && f == 0.0, "expected f64(0.0), with ok=false")
-
-
- f, ok = strconv.parse_f64("inf")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.Inf, "expected f64(+inf), fully consumed")
- f, ok = strconv.parse_f64("+inf")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.Inf, "expected f64(+inf), fully consumed")
- f, ok = strconv.parse_f64("-inf")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.Neg_Inf, "expected f64(-inf), fully consumed")
- f, ok = strconv.parse_f64("inFinity")
- testing.expect(t, !ok && math.classify(f) == math.Float_Class.Inf, "expected f64(+inf), partially consumed")
- f, ok = strconv.parse_f64("+InFinity")
- testing.expect(t, !ok && math.classify(f) == math.Float_Class.Inf, "expected f64(+inf), partially consumed")
- f, ok = strconv.parse_f64("-InfiniTy")
- testing.expect(t, !ok && math.classify(f) == math.Float_Class.Neg_Inf, "expected f64(-inf), partially consumed")
- f, ok = strconv.parse_f64("nan")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.NaN, "expected f64(nan), fully consumed")
- f, ok = strconv.parse_f64("nAN")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.NaN, "expected f64(nan), fully consumed")
- }
- {
- f, ok := strconv.parse_f32("1.2")
- testing.expect(t, ok && f == 1.2, "expected f32(1.2), fully consumed")
-
- f, ok = strconv.parse_f32("1.2a")
- testing.expect(t, !ok && f == 1.2, "expected f32(1.2), partially consumed")
-
- f, ok = strconv.parse_f32("inf")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.Inf, "expected f32(+inf), fully consumed")
- f, ok = strconv.parse_f32("+inf")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.Inf, "expected f32(+inf), fully consumed")
- f, ok = strconv.parse_f32("-inf")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.Neg_Inf, "expected f32(-inf), fully consumed")
- f, ok = strconv.parse_f32("inFinity")
- testing.expect(t, !ok && math.classify(f) == math.Float_Class.Inf, "expected f32(+inf), partially consumed")
- f, ok = strconv.parse_f32("+InFinity")
- testing.expect(t, !ok && math.classify(f) == math.Float_Class.Inf, "expected f32(+inf), partially consumed")
- f, ok = strconv.parse_f32("-InfiniTy")
- testing.expect(t, !ok && math.classify(f) == math.Float_Class.Neg_Inf, "expected f32(-inf), partially consumed")
- f, ok = strconv.parse_f32("nan")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.NaN, "expected f32(nan), fully consumed")
- f, ok = strconv.parse_f32("nAN")
- testing.expect(t, ok && math.classify(f) == math.Float_Class.NaN, "expected f32(nan), fully consumed")
- }
-}
\ No newline at end of file
diff --git a/tests/issues/test_issue_2395.odin b/tests/issues/test_issue_2395.odin
index 48e1ee516..bbbcb3aea 100644
--- a/tests/issues/test_issue_2395.odin
+++ b/tests/issues/test_issue_2395.odin
@@ -5,8 +5,6 @@
// exactly 2 errors from the invalid unions
package test_issues
-import "core:testing"
-
ValidUnion :: union($T: typeid) #no_nil {
T,
f32,
diff --git a/tests/issues/test_issue_2466.odin b/tests/issues/test_issue_2466.odin
index 4810cfea9..f5987903a 100644
--- a/tests/issues/test_issue_2466.odin
+++ b/tests/issues/test_issue_2466.odin
@@ -1,7 +1,6 @@
// Tests issue #2466 https://github.com/odin-lang/Odin/issues/2466
package test_issues
-import "core:fmt"
import "core:testing"
Bug :: struct {
@@ -16,7 +15,7 @@ test_compound_literal_local_reuse :: proc(t: ^testing.T) {
val = v,
arr = {42},
}
- testing.expect(t, bug.val == 123, fmt.tprintf("expected 123, found %d", bug.val))
- testing.expect(t, bug.arr[0] == 42, fmt.tprintf("expected 42, found %d", bug.arr[0]))
+ testing.expectf(t, bug.val == 123, "expected 123, found %d", bug.val)
+ testing.expectf(t, bug.arr[0] == 42, "expected 42, found %d", bug.arr[0])
}
diff --git a/tests/issues/test_issue_829.odin b/tests/issues/test_issue_829.odin
index 273b3b3b5..229d8e9b4 100644
--- a/tests/issues/test_issue_829.odin
+++ b/tests/issues/test_issue_829.odin
@@ -1,7 +1,6 @@
// Tests issue #829 https://github.com/odin-lang/Odin/issues/829
package test_issues
-import "core:fmt"
import "core:testing"
/* Original issue #829 example */
@@ -13,6 +12,6 @@ env : map[string]proc(a, b : int) -> int = {
@(test)
test_orig_ret :: proc(t: ^testing.T) {
- r := fmt.tprint(env["+"](1, 2))
- testing.expect(t, r == "3", fmt.tprintf("%s: \"%s\" != \"3\"\n", #procedure, r))
-}
+ r := env["+"](1, 2)
+ testing.expectf(t, r == 3, "%q != 3", r)
+}
\ No newline at end of file
diff --git a/tests/vendor/Makefile b/tests/vendor/Makefile
deleted file mode 100644
index 7d6b84978..000000000
--- a/tests/vendor/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-ODIN=../../odin
-ODINFLAGS=
-
-OS=$(shell uname)
-
-ifeq ($(OS), OpenBSD)
- ODINFLAGS:=$(ODINFLAGS) -extra-linker-flags:-L/usr/local/lib
-endif
-
-all:
diff --git a/tests/vendor/all.odin b/tests/vendor/all.odin
new file mode 100644
index 000000000..1ce56e786
--- /dev/null
+++ b/tests/vendor/all.odin
@@ -0,0 +1,3 @@
+package tests_vendor
+
+@(require) import "glfw"
diff --git a/tests/vendor/build.bat b/tests/vendor/build.bat
deleted file mode 100644
index 693d344f4..000000000
--- a/tests/vendor/build.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-@echo off
-set COMMON=-show-timings -no-bounds-check -vet -strict-style
-set PATH_TO_ODIN==..\..\odin
-
-echo ---
-echo Running vendor:glfw tests
-echo ---
-%PATH_TO_ODIN% run glfw %COMMON% -out:vendor_glfw.exe || exit /b
\ No newline at end of file
diff --git a/tests/vendor/glfw/test_vendor_glfw.odin b/tests/vendor/glfw/test_vendor_glfw.odin
index ce55ad7ef..8a7fb0d0a 100644
--- a/tests/vendor/glfw/test_vendor_glfw.odin
+++ b/tests/vendor/glfw/test_vendor_glfw.odin
@@ -1,49 +1,22 @@
+//+build darwin, windows
package test_vendor_glfw
import "core:testing"
-import "core:fmt"
import "vendor:glfw"
-import "core:os"
GLFW_MAJOR :: 3
GLFW_MINOR :: 4
GLFW_PATCH :: 0
-TEST_count := 0
-TEST_fail := 0
-
-when ODIN_TEST {
- expect :: testing.expect
- log :: testing.log
-} else {
- expect :: proc(t: ^testing.T, condition: bool, message: string, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- TEST_count += 1
- if !condition {
- TEST_fail += 1
- fmt.println(message)
- return
- }
- fmt.println(" PASS")
- }
- log :: proc(t: ^testing.T, v: any, loc := #caller_location) {
- fmt.printf("[%v] ", loc)
- fmt.printf("log: %v\n", v)
- }
-}
-
-main :: proc() {
- t := testing.T{}
- test_glfw(&t)
-
- fmt.printf("%v/%v tests successful.\n", TEST_count - TEST_fail, TEST_count)
- if TEST_fail > 0 {
- os.exit(1)
- }
-}
-
@(test)
test_glfw :: proc(t: ^testing.T) {
major, minor, patch := glfw.GetVersion()
- expect(t, major == GLFW_MAJOR && minor == GLFW_MINOR, fmt.tprintf("Expected GLFW.GetVersion: %v.%v.%v, got %v.%v.%v instead", GLFW_MAJOR, GLFW_MINOR, GLFW_PATCH, major, minor, patch))
+ testing.expectf(
+ t,
+ major == GLFW_MAJOR && \
+ minor == GLFW_MINOR,
+ "Expected GLFW.GetVersion: %v.%v.%v, got %v.%v.%v instead",
+ GLFW_MAJOR, GLFW_MINOR, GLFW_PATCH,
+ major, minor, patch,
+ )
}
diff --git a/vendor/cgltf/cgltf.odin b/vendor/cgltf/cgltf.odin
index 024e8dfaa..a5d474a7b 100644
--- a/vendor/cgltf/cgltf.odin
+++ b/vendor/cgltf/cgltf.odin
@@ -1,9 +1,23 @@
package cgltf
-when ODIN_OS == .Windows { foreign import lib "lib/cgltf.lib" }
-else when ODIN_OS == .Linux { foreign import lib "lib/cgltf.a" }
-else when ODIN_OS == .Darwin { foreign import lib "lib/darwin/cgltf.a" }
-else { foreign import lib "system:cgltf" }
+@(private)
+LIB :: (
+ "lib/cgltf.lib" when ODIN_OS == .Windows
+ else "lib/cgltf.a" when ODIN_OS == .Linux
+ else "lib/darwin/cgltf.a" when ODIN_OS == .Darwin
+ else ""
+)
+
+when LIB != "" {
+ when !#exists(LIB) {
+ // Windows library is shipped with the compiler, so a Windows specific message should not be needed.
+ #panic("Could not find the compiled cgltf library, it can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/cgltf/src\"`")
+ }
+
+ foreign import lib { LIB }
+} else {
+ foreign import lib "system:cgltf"
+}
import "core:c"
diff --git a/vendor/darwin/Metal/MetalClasses.odin b/vendor/darwin/Metal/MetalClasses.odin
index ea1711bbc..2d681b0ee 100644
--- a/vendor/darwin/Metal/MetalClasses.odin
+++ b/vendor/darwin/Metal/MetalClasses.odin
@@ -4949,12 +4949,12 @@ CommandQueue_commandBuffer :: #force_inline proc "c" (self: ^CommandQueue) -> ^C
return msgSend(^CommandBuffer, self, "commandBuffer")
}
@(objc_type=CommandQueue, objc_name="commandBufferWithDescriptor")
-CommandQueue_commandBufferWithDescriptor :: #force_inline proc "c" (self: ^CommandQueue, descriptor: ^CommandBufferDescriptor) -> ^CommandQueue {
- return msgSend(^CommandQueue, self, "commandBufferWithDescriptor:", descriptor)
+CommandQueue_commandBufferWithDescriptor :: #force_inline proc "c" (self: ^CommandQueue, descriptor: ^CommandBufferDescriptor) -> ^CommandBuffer {
+ return msgSend(^CommandBuffer, self, "commandBufferWithDescriptor:", descriptor)
}
@(objc_type=CommandQueue, objc_name="commandBufferWithUnretainedReferences")
-CommandQueue_commandBufferWithUnretainedReferences :: #force_inline proc "c" (self: ^CommandQueue) -> ^CommandQueue {
- return msgSend(^CommandQueue, self, "commandBufferWithUnretainedReferences")
+CommandQueue_commandBufferWithUnretainedReferences :: #force_inline proc "c" (self: ^CommandQueue) -> ^CommandBuffer {
+ return msgSend(^CommandBuffer, self, "commandBufferWithUnretainedReferences")
}
@(objc_type=CommandQueue, objc_name="device")
CommandQueue_device :: #force_inline proc "c" (self: ^CommandQueue) -> ^Device {
diff --git a/vendor/directx/d3d11/d3d11.odin b/vendor/directx/d3d11/d3d11.odin
index a1e3cf039..3af0f2965 100644
--- a/vendor/directx/d3d11/d3d11.odin
+++ b/vendor/directx/d3d11/d3d11.odin
@@ -3374,7 +3374,7 @@ CREATE_DEVICE_FLAG :: enum u32 {
DEBUGGABLE = 6,
PREVENT_ALTERING_LAYER_SETTINGS_FROM_REGISTRY = 7,
DISABLE_GPU_TIMEOUT = 8,
- VIDEO_SUPPORT = 12,
+ VIDEO_SUPPORT = 11,
}
PFN_CREATE_DEVICE :: #type proc "c" (a0: ^dxgi.IAdapter, a1: DRIVER_TYPE, a2: HMODULE, a3: u32, a4: ^FEATURE_LEVEL, a5: u32, a6: u32, a7: ^^IDevice, a8: ^FEATURE_LEVEL, a9: ^^IDeviceContext) -> HRESULT
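
The VIDEO_SUPPORT change above is a bit-index correction rather than a renumbering: the flag's C mask, D3D11_CREATE_DEVICE_VIDEO_SUPPORT = 0x800, is bit 11, and CREATE_DEVICE_FLAG enumerates bit positions (as the neighbouring values 0..8 suggest) so it can back a bit_set. A quick standalone check of the correspondence, assuming the bit_set usage:

	package flag_bit_example

	Create_Device_Flag :: enum u32 {
		VIDEO_SUPPORT = 11,
	}
	Create_Device_Flags :: distinct bit_set[Create_Device_Flag; u32]

	main :: proc() {
		flags: Create_Device_Flags = {.VIDEO_SUPPORT}
		// Bit 11 set in a u32 is 0x800, matching D3D11_CREATE_DEVICE_VIDEO_SUPPORT.
		assert(transmute(u32)flags == 0x800)
	}
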
diff --git a/vendor/egl/egl.odin b/vendor/egl/egl.odin
index 3174fa60b..82181b1c5 100644
--- a/vendor/egl/egl.odin
+++ b/vendor/egl/egl.odin
@@ -47,7 +47,7 @@ foreign egl {
GetDisplay :: proc(display: NativeDisplayType) -> Display ---
Initialize :: proc(display: Display, major: ^i32, minor: ^i32) -> i32 ---
BindAPI :: proc(api: u32) -> i32 ---
- ChooseConfig :: proc(display: Display, attrib_list: ^i32, configs: ^Context, config_size: i32, num_config: ^i32) -> i32 ---
+ ChooseConfig :: proc(display: Display, attrib_list: ^i32, configs: ^Config, config_size: i32, num_config: ^i32) -> i32 ---
CreateWindowSurface :: proc(display: Display, config: Config, native_window: NativeWindowType, attrib_list: ^i32) -> Surface ---
CreateContext :: proc(display: Display, config: Config, share_context: Context, attrib_list: ^i32) -> Context ---
MakeCurrent :: proc(display: Display, draw: Surface, read: Surface, ctx: Context) -> i32 ---
diff --git a/vendor/microui/microui.odin b/vendor/microui/microui.odin
index cf39e2f55..08a96acf2 100644
--- a/vendor/microui/microui.odin
+++ b/vendor/microui/microui.odin
@@ -319,7 +319,12 @@ default_draw_frame :: proc(ctx: ^Context, rect: Rect, colorid: Color_Type) {
}
}
-init :: proc(ctx: ^Context, set_clipboard: proc(user_data: rawptr, text: string) -> (ok: bool), get_clipboard: proc(user_data: rawptr) -> (text: string, ok: bool), clipboard_user_data: rawptr) {
+init :: proc(
+ ctx: ^Context,
+ set_clipboard: proc(user_data: rawptr, text: string) -> (ok: bool) = nil,
+ get_clipboard: proc(user_data: rawptr) -> (text: string, ok: bool) = nil,
+ clipboard_user_data: rawptr = nil,
+) {
ctx^ = {} // zero memory
ctx.draw_frame = default_draw_frame
ctx._style = default_style
@@ -617,7 +622,7 @@ push_command :: proc(ctx: ^Context, $Type: typeid, extra_size := 0) -> ^Type {
return cmd
}
-next_command :: proc(ctx: ^Context, pcmd: ^^Command) -> bool {
+next_command :: proc "contextless" (ctx: ^Context, pcmd: ^^Command) -> bool {
cmd := pcmd^
defer pcmd^ = cmd
if cmd != nil {
@@ -625,7 +630,7 @@ next_command :: proc(ctx: ^Context, pcmd: ^^Command) -> bool {
} else {
cmd = (^Command)(&ctx.command_list.items[0])
}
- invalid_command :: #force_inline proc(ctx: ^Context) -> ^Command {
+ invalid_command :: #force_inline proc "contextless" (ctx: ^Context) -> ^Command {
return (^Command)(&ctx.command_list.items[ctx.command_list.idx])
}
for cmd != invalid_command(ctx) {
@@ -638,7 +643,7 @@ next_command :: proc(ctx: ^Context, pcmd: ^^Command) -> bool {
return false
}
-next_command_iterator :: proc(ctx: ^Context, pcm: ^^Command) -> (Command_Variant, bool) {
+next_command_iterator :: proc "contextless" (ctx: ^Context, pcm: ^^Command) -> (Command_Variant, bool) {
if next_command(ctx, pcm) {
return pcm^.variant, true
}
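
Two language features carry the microui changes above: procedure parameters can take default values (so init can now be called with just the context pointer), and proc "contextless" drops the implicit context, letting next_command and next_command_iterator be called from code that has no context available. An illustrative sketch of both, unrelated to the microui API:

	package contextless_example

	// Trailing parameters with defaults may be omitted by the caller.
	scale :: proc(x: int, factor := 2) -> int {
		return x * factor
	}

	// A "contextless" procedure receives no implicit `context`, so it cannot
	// use context-dependent facilities (allocators, logger), but it can be
	// called where no context exists, e.g. from other "contextless" or "c" procs.
	double :: proc "contextless" (x: int) -> int {
		return x * 2
	}

	main :: proc() {
		assert(scale(3) == 6)   // factor defaulted
		assert(scale(3, 3) == 9)
		assert(double(21) == 42)
	}
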
diff --git a/vendor/miniaudio/common.odin b/vendor/miniaudio/common.odin
index b38599d96..d72c3f251 100644
--- a/vendor/miniaudio/common.odin
+++ b/vendor/miniaudio/common.odin
@@ -8,12 +8,16 @@ when MINIAUDIO_SHARED {
#panic("Shared linking for miniaudio is not supported yet")
}
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
+@(private)
+LIB :: "lib/miniaudio.lib" when ODIN_OS == .Windows else "lib/miniaudio.a"
+
+when !#exists(LIB) {
+ // Windows library is shipped with the compiler, so a Windows specific message should not be needed.
+ #panic("Could not find the compiled miniaudio library, it can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/miniaudio/src\"`")
}
+foreign import lib { LIB }
+
BINDINGS_VERSION_MAJOR :: 0
BINDINGS_VERSION_MINOR :: 11
BINDINGS_VERSION_REVISION :: 21
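
Only common.odin declares the library path now; because every miniaudio file belongs to the same package, the @(private) LIB constant (and the #exists guard) defined here is visible to all of them, so each of the files below can shrink its per-OS when block to a single `foreign import lib { LIB }`. A sketch of the layout, with placeholder names:

	package single_lib_example

	// Declared once for the whole package; @(private) hides it from importers,
	// but every file in this package can still reference it.
	@(private)
	LIB :: "lib/example.lib" when ODIN_OS == .Windows else "lib/example.a"

	// Any other file in the same package can then import the library with:
	//     foreign import lib { LIB }
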
diff --git a/vendor/miniaudio/data_conversion.odin b/vendor/miniaudio/data_conversion.odin
index aee26bc8c..c33f54707 100644
--- a/vendor/miniaudio/data_conversion.odin
+++ b/vendor/miniaudio/data_conversion.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
*************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/decoding.odin b/vendor/miniaudio/decoding.odin
index 4433aa5a7..4860680c9 100644
--- a/vendor/miniaudio/decoding.odin
+++ b/vendor/miniaudio/decoding.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/device_io_procs.odin b/vendor/miniaudio/device_io_procs.odin
index 0d572ae2c..21ac1afd7 100644
--- a/vendor/miniaudio/device_io_procs.odin
+++ b/vendor/miniaudio/device_io_procs.odin
@@ -1,10 +1,6 @@
package miniaudio
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
import "core:c"
diff --git a/vendor/miniaudio/effects.odin b/vendor/miniaudio/effects.odin
index 273845001..a3710ad88 100644
--- a/vendor/miniaudio/effects.odin
+++ b/vendor/miniaudio/effects.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/*
Delay
diff --git a/vendor/miniaudio/encoding.odin b/vendor/miniaudio/encoding.odin
index 63aa45c6d..da8389b06 100644
--- a/vendor/miniaudio/encoding.odin
+++ b/vendor/miniaudio/encoding.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/engine.odin b/vendor/miniaudio/engine.odin
index 6eabd75c2..ecd3fb39d 100644
--- a/vendor/miniaudio/engine.odin
+++ b/vendor/miniaudio/engine.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/filtering.odin b/vendor/miniaudio/filtering.odin
index 31ddbd7a4..d1c053d20 100644
--- a/vendor/miniaudio/filtering.odin
+++ b/vendor/miniaudio/filtering.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/**************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/generation.odin b/vendor/miniaudio/generation.odin
index 69be85234..746efcca7 100644
--- a/vendor/miniaudio/generation.odin
+++ b/vendor/miniaudio/generation.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
waveform_type :: enum c.int {
sine,
diff --git a/vendor/miniaudio/job_queue.odin b/vendor/miniaudio/job_queue.odin
index baa71c5f1..01ee31216 100644
--- a/vendor/miniaudio/job_queue.odin
+++ b/vendor/miniaudio/job_queue.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/*
Slot Allocator
diff --git a/vendor/miniaudio/logging.odin b/vendor/miniaudio/logging.odin
index 52b1c7980..afddf8e68 100644
--- a/vendor/miniaudio/logging.odin
+++ b/vendor/miniaudio/logging.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c/libc"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
MAX_LOG_CALLBACKS :: 4
diff --git a/vendor/miniaudio/node_graph.odin b/vendor/miniaudio/node_graph.odin
index 09ab50a3b..63482413b 100644
--- a/vendor/miniaudio/node_graph.odin
+++ b/vendor/miniaudio/node_graph.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/resource_manager.odin b/vendor/miniaudio/resource_manager.odin
index f27f3a53a..0284db86b 100644
--- a/vendor/miniaudio/resource_manager.odin
+++ b/vendor/miniaudio/resource_manager.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
diff --git a/vendor/miniaudio/synchronization.odin b/vendor/miniaudio/synchronization.odin
index cd4b0a5f0..012f52c2c 100644
--- a/vendor/miniaudio/synchronization.odin
+++ b/vendor/miniaudio/synchronization.odin
@@ -1,10 +1,6 @@
package miniaudio
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
@(default_calling_convention="c", link_prefix="ma_")
foreign lib {
diff --git a/vendor/miniaudio/utilities.odin b/vendor/miniaudio/utilities.odin
index d518a514a..8728f40dc 100644
--- a/vendor/miniaudio/utilities.odin
+++ b/vendor/miniaudio/utilities.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
@(default_calling_convention="c", link_prefix="ma_")
foreign lib {
diff --git a/vendor/miniaudio/vfs.odin b/vendor/miniaudio/vfs.odin
index 475d118fc..b045a1501 100644
--- a/vendor/miniaudio/vfs.odin
+++ b/vendor/miniaudio/vfs.odin
@@ -2,11 +2,7 @@ package miniaudio
import "core:c"
-when ODIN_OS == .Windows {
- foreign import lib "lib/miniaudio.lib"
-} else {
- foreign import lib "lib/miniaudio.a"
-}
+foreign import lib { LIB }
/************************************************************************************************************************************************************
diff --git a/vendor/raylib/raygui.odin b/vendor/raylib/raygui.odin
index 41a4250a1..8cda9c072 100644
--- a/vendor/raylib/raygui.odin
+++ b/vendor/raylib/raygui.odin
@@ -5,47 +5,21 @@ import "core:c"
RAYGUI_SHARED :: #config(RAYGUI_SHARED, false)
when ODIN_OS == .Windows {
- when RAYGUI_SHARED {
- foreign import lib {
- "windows/rayguidll.lib",
- }
- } else {
- foreign import lib {
- "windows/raygui.lib",
- }
+ foreign import lib {
+ "windows/rayguidll.lib" when RAYGUI_SHARED else "windows/raygui.lib",
}
} else when ODIN_OS == .Linux {
- when RAYGUI_SHARED {
- foreign import lib "linux/libraygui.so"
- } else {
- foreign import lib "linux/libraygui.a"
+ foreign import lib {
+ "linux/libraygui.so" when RAYGUI_SHARED else "linux/libraygui.a",
}
} else when ODIN_OS == .Darwin {
when ODIN_ARCH == .arm64 {
- when RAYGUI_SHARED {
- foreign import lib {
- "macos-arm64/libraygui.dylib",
- }
- } else {
- foreign import lib {
- "macos-arm64/libraygui.a",
- // "system:Cocoa.framework",
- // "system:OpenGL.framework",
- // "system:IOKit.framework",
- }
+ foreign import lib {
+ "macos-arm64/libraygui.dylib" when RAYGUI_SHARED else "macos-arm64/libraygui.a",
}
} else {
- when RAYGUI_SHARED {
- foreign import lib {
- "macos/libraygui.dylib",
- }
- } else {
- foreign import lib {
- "macos/libraygui.a",
- // "system:Cocoa.framework",
- // "system:OpenGL.framework",
- // "system:IOKit.framework",
- }
+ foreign import lib {
+ "macos/libraygui.dylib" when RAYGUI_SHARED else "macos/libraygui.a",
}
}
} else {
@@ -56,8 +30,8 @@ RAYGUI_VERSION :: "4.0"
// Style property
GuiStyleProp :: struct {
- controlId: u16,
- propertyId: u16,
+ controlId: u16,
+ propertyId: u16,
propertyValue: c.int,
}
@@ -226,7 +200,7 @@ GuiColorPickerProperty :: enum c.int {
HUEBAR_SELECTOR_OVERFLOW, // ColorPicker right hue bar selector overflow
}
-SCROLLBAR_LEFT_SIDE :: 0
+SCROLLBAR_LEFT_SIDE :: 0
SCROLLBAR_RIGHT_SIDE :: 1
//----------------------------------------------------------------------------------
@@ -262,8 +236,8 @@ foreign lib {
// Style set/get functions
- GuiSetStyle :: proc(control: c.int, property: c.int, value: c.int) --- // Set one style property
- GuiGetStyle :: proc(control: c.int, property: c.int) -> c.int --- // Get one style property
+ GuiSetStyle :: proc(control: GuiControl, property: GuiStyleProp, value: c.int) --- // Set one style property
+ GuiGetStyle :: proc(control: GuiControl, property: GuiStyleProp) -> c.int --- // Get one style property
// Styles loading functions
@@ -278,11 +252,11 @@ foreign lib {
// Icons functionality
- GuiIconText :: proc(iconId: c.int, text: cstring) -> cstring --- // Get text with icon id prepended (if supported)
+ GuiIconText :: proc(iconId: GuiIconName, text: cstring) -> cstring --- // Get text with icon id prepended (if supported)
GuiSetIconScale :: proc(scale: c.int) --- // Set default icon drawing size
GuiGetIcons :: proc() -> [^]u32 --- // Get raygui icons data pointer
GuiLoadIcons :: proc(fileName: cstring, loadIconsName: bool) -> [^]cstring --- // Load raygui icons file (.rgi) into internal icons data
- GuiDrawIcon :: proc(iconId: c.int, posX: c.int, posY: c.int, pixelSize: c.int, color: Color) --- // Draw icon using pixel size at specified position
+ GuiDrawIcon :: proc(iconId: GuiIconName, posX, posY: c.int, pixelSize: c.int, color: Color) --- // Draw icon using pixel size at specified position
// Controls
@@ -300,11 +274,11 @@ foreign lib {
GuiLabel :: proc(bounds: Rectangle, text: cstring) -> c.int --- // Label control, shows text
GuiButton :: proc(bounds: Rectangle, text: cstring) -> bool --- // Button control, returns true when clicked
- GuiLabelButton :: proc(bounds: Rectangle, text: cstring) -> bool --- // Label button control, show true when clicked
+ GuiLabelButton :: proc(bounds: Rectangle, text: cstring) -> bool --- // Label button control, show true when clicked
GuiToggle :: proc(bounds: Rectangle, text: cstring, active: ^bool) -> c.int --- // Toggle Button control, returns true when active
GuiToggleGroup :: proc(bounds: Rectangle, text: cstring, active: ^c.int) -> c.int --- // Toggle Group control, returns active toggle index
GuiToggleSlider :: proc(bounds: Rectangle, text: cstring, active: ^c.int) -> c.int ---
- GuiCheckBox :: proc(bounds: Rectangle, text: cstring, checked: ^bool) -> bool --- // Check Box control, returns true when active
+ GuiCheckBox :: proc(bounds: Rectangle, text: cstring, checked: ^bool) -> bool --- // Check Box control, returns true when active
GuiComboBox :: proc(bounds: Rectangle, text: cstring, active: ^c.int) -> c.int --- // Combo Box control, returns selected item index
GuiDropdownBox :: proc(bounds: Rectangle, text: cstring, active: ^c.int, editMode: bool) -> bool --- // Dropdown Box control, returns selected item
diff --git a/vendor/raylib/raylib.odin b/vendor/raylib/raylib.odin
index b98770271..3d1b74058 100644
--- a/vendor/raylib/raylib.odin
+++ b/vendor/raylib/raylib.odin
@@ -97,76 +97,32 @@ MAX_TEXT_BUFFER_LENGTH :: #config(RAYLIB_MAX_TEXT_BUFFER_LENGTH, 1024)
RAYLIB_SHARED :: #config(RAYLIB_SHARED, false)
when ODIN_OS == .Windows {
- when RAYLIB_SHARED {
- @(extra_linker_flags="/NODEFAULTLIB:msvcrt")
- foreign import lib {
- "windows/raylibdll.lib",
- "system:Winmm.lib",
- "system:Gdi32.lib",
- "system:User32.lib",
- "system:Shell32.lib",
- }
- } else {
- @(extra_linker_flags="/NODEFAULTLIB:libcmt")
- foreign import lib {
- "windows/raylib.lib",
- "system:Winmm.lib",
- "system:Gdi32.lib",
- "system:User32.lib",
- "system:Shell32.lib",
- }
+ @(extra_linker_flags="/NODEFAULTLIB:" + ("msvcrt" when RAYLIB_SHARED else "libcmt"))
+ foreign import lib {
+ "windows/raylibdll.lib" when RAYLIB_SHARED else "windows/raylib.lib" ,
+ "system:Winmm.lib",
+ "system:Gdi32.lib",
+ "system:User32.lib",
+ "system:Shell32.lib",
}
} else when ODIN_OS == .Linux {
- when RAYLIB_SHARED {
- foreign import lib {
- // Note(bumbread): I'm not sure why in `linux/` folder there are
- // multiple copies of raylib.so, but since these bindings are for
- // particular version of the library, I better specify it. Ideally,
- // though, it's best specified in terms of major (.so.4)
- "linux/libraylib.so.500",
- "system:dl",
- "system:pthread",
- }
- } else {
- foreign import lib {
- "linux/libraylib.a",
- "system:dl",
- "system:pthread",
- }
+ foreign import lib {
+ // Note(bumbread): I'm not sure why in `linux/` folder there are
+ // multiple copies of raylib.so, but since these bindings are for
+ // particular version of the library, I better specify it. Ideally,
+ // though, it's best specified in terms of major (.so.4)
+ "linux/libraylib.so.500" when RAYLIB_SHARED else "linux/libraylib.a",
+ "system:dl",
+ "system:pthread",
}
} else when ODIN_OS == .Darwin {
- when ODIN_ARCH == .arm64 {
- when RAYLIB_SHARED {
- foreign import lib {
- "macos-arm64/libraylib.500.dylib",
- "system:Cocoa.framework",
- "system:OpenGL.framework",
- "system:IOKit.framework",
- }
- } else {
- foreign import lib {
- "macos-arm64/libraylib.a",
- "system:Cocoa.framework",
- "system:OpenGL.framework",
- "system:IOKit.framework",
- }
- }
- } else {
- when RAYLIB_SHARED {
- foreign import lib {
- "macos/libraylib.500.dylib",
- "system:Cocoa.framework",
- "system:OpenGL.framework",
- "system:IOKit.framework",
- }
- } else {
- foreign import lib {
- "macos/libraylib.a",
- "system:Cocoa.framework",
- "system:OpenGL.framework",
- "system:IOKit.framework",
- }
- }
+ foreign import lib {
+ "macos" +
+ ("-arm64" when ODIN_ARCH == .arm64 else "") +
+ "/libraylib" + (".500.dylib" when RAYLIB_SHARED else ".a"),
+ "system:Cocoa.framework",
+ "system:OpenGL.framework",
+ "system:IOKit.framework",
}
} else {
foreign import lib "system:raylib"
@@ -951,8 +907,8 @@ foreign lib {
SetWindowTitle :: proc(title: cstring) --- // Set title for window (only PLATFORM_DESKTOP and PLATFORM_WEB)
SetWindowPosition :: proc(x, y: c.int) --- // Set window position on screen (only PLATFORM_DESKTOP)
SetWindowMonitor :: proc(monitor: c.int) --- // Set monitor for the current window
- SetWindowMinSize :: proc(width, height: c.int) --- // Set window minimum dimensions (for FLAG_WINDOW_RESIZABLE)
- SetWindowMaxSize :: proc(width, height: c.int) --- // Set window maximum dimensions (for FLAG_WINDOW_RESIZABLE)
+ SetWindowMinSize :: proc(width, height: c.int) --- // Set window minimum dimensions (for WINDOW_RESIZABLE)
+ SetWindowMaxSize :: proc(width, height: c.int) --- // Set window maximum dimensions (for WINDOW_RESIZABLE)
SetWindowSize :: proc(width, height: c.int) --- // Set window dimensions
SetWindowOpacity :: proc(opacity: f32) --- // Set window opacity [0.0f..1.0f] (only PLATFORM_DESKTOP)
SetWindowFocused :: proc() --- // Set window focused (only PLATFORM_DESKTOP)
@@ -1028,12 +984,14 @@ foreign lib {
LoadShader :: proc(vsFileName, fsFileName: cstring) -> Shader --- // Load shader from files and bind default locations
LoadShaderFromMemory :: proc(vsCode, fsCode: cstring) -> Shader --- // Load shader from code strings and bind default locations
IsShaderReady :: proc(shader: Shader) -> bool --- // Check if a shader is ready
- GetShaderLocation :: proc(shader: Shader, uniformName: cstring) -> ShaderLocationIndex --- // Get shader uniform location
- GetShaderLocationAttrib :: proc(shader: Shader, attribName: cstring) -> ShaderLocationIndex --- // Get shader attribute location
- SetShaderValue :: proc(shader: Shader, locIndex: ShaderLocationIndex, value: rawptr, uniformType: ShaderUniformDataType) --- // Set shader uniform value
- SetShaderValueV :: proc(shader: Shader, locIndex: ShaderLocationIndex, value: rawptr, uniformType: ShaderUniformDataType, count: c.int) --- // Set shader uniform value vector
- SetShaderValueMatrix :: proc(shader: Shader, locIndex: ShaderLocationIndex, mat: Matrix) --- // Set shader uniform value (matrix 4x4)
- SetShaderValueTexture :: proc(shader: Shader, locIndex: ShaderLocationIndex, texture: Texture2D) --- // Set shader uniform value for texture (sampler2d)
+ GetShaderLocation :: proc(shader: Shader, uniformName: cstring) -> c.int --- // Get shader uniform location
+ GetShaderLocationAttrib :: proc(shader: Shader, attribName: cstring) -> c.int --- // Get shader attribute location
+
+ // We use #any_int here so we can pass ShaderLocationIndex
+ SetShaderValue :: proc(shader: Shader, #any_int locIndex: c.int, value: rawptr, uniformType: ShaderUniformDataType) --- // Set shader uniform value
+ SetShaderValueV :: proc(shader: Shader, #any_int locIndex: c.int, value: rawptr, uniformType: ShaderUniformDataType, count: c.int) --- // Set shader uniform value vector
+ SetShaderValueMatrix :: proc(shader: Shader, #any_int locIndex: c.int, mat: Matrix) --- // Set shader uniform value (matrix 4x4)
+ SetShaderValueTexture :: proc(shader: Shader, #any_int locIndex: c.int, texture: Texture2D) --- // Set shader uniform value for texture (sampler2d)
UnloadShader :: proc(shader: Shader) --- // Unload shader from GPU memory (VRAM)
// Screen-space-related functions
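
In the shader hunk above, GetShaderLocation and GetShaderLocationAttrib go back to returning a plain c.int, and the SetShaderValue* setters use the #any_int directive instead: the parameter stays c.int at the ABI level, but callers may pass any integer-backed value, including a ShaderLocationIndex enum, and the compiler inserts the conversion. A small standalone sketch of #any_int with hypothetical names:

	package any_int_example

	Shader_Loc :: enum i32 {
		MAP_ALBEDO = 10,
	}

	// #any_int accepts any integer (or integer-backed enum) argument and
	// converts it to the declared parameter type, here i32.
	set_slot :: proc(#any_int index: i32) -> i32 {
		return index
	}

	main :: proc() {
		assert(set_slot(3) == 3)
		assert(set_slot(Shader_Loc.MAP_ALBEDO) == 10)
	}
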
diff --git a/vendor/raylib/raymath.odin b/vendor/raylib/raymath.odin
index 9682ffe4f..eef5c2fcd 100644
--- a/vendor/raylib/raymath.odin
+++ b/vendor/raylib/raymath.odin
@@ -668,7 +668,7 @@ MatrixLookAt :: proc "c" (eye, target, up: Vector3) -> Matrix {
// Get float array of matrix data
@(require_results)
MatrixToFloatV :: proc "c" (mat: Matrix) -> [16]f32 {
- return transmute([16]f32)mat
+ return transmute([16]f32)linalg.transpose(mat)
}
diff --git a/vendor/raylib/rlgl.odin b/vendor/raylib/rlgl.odin
deleted file mode 100644
index 97ab0fd07..000000000
--- a/vendor/raylib/rlgl.odin
+++ /dev/null
@@ -1,564 +0,0 @@
-/**********************************************************************************************
-*
-* rlgl v5.0 - A multi-OpenGL abstraction layer with an immediate-mode style API
-*
-* DESCRIPTION:
-* An abstraction layer for multiple OpenGL versions (1.1, 2.1, 3.3 Core, 4.3 Core, ES 2.0)
-* that provides a pseudo-OpenGL 1.1 immediate-mode style API (rlVertex, rlTranslate, rlRotate...)
-*
-* ADDITIONAL NOTES:
-* When choosing an OpenGL backend different than OpenGL 1.1, some internal buffer are
-* initialized on rlglInit() to accumulate vertex data.
-*
-* When an internal state change is required all the stored vertex data is renderer in batch,
-* additionally, rlDrawRenderBatchActive() could be called to force flushing of the batch.
-*
-* Some resources are also loaded for convenience, here the complete list:
-* - Default batch (RLGL.defaultBatch): RenderBatch system to accumulate vertex data
-* - Default texture (RLGL.defaultTextureId): 1x1 white pixel R8G8B8A8
-* - Default shader (RLGL.State.defaultShaderId, RLGL.State.defaultShaderLocs)
-*
-* Internal buffer (and resources) must be manually unloaded calling rlglClose().
-*
-* CONFIGURATION:
-* #define GRAPHICS_API_OPENGL_11
-* #define GRAPHICS_API_OPENGL_21
-* #define GRAPHICS_API_OPENGL_33
-* #define GRAPHICS_API_OPENGL_43
-* #define GRAPHICS_API_OPENGL_ES2
-* #define GRAPHICS_API_OPENGL_ES3
-* Use selected OpenGL graphics backend, should be supported by platform
-* Those preprocessor defines are only used on rlgl module, if OpenGL version is
-* required by any other module, use rlGetVersion() to check it
-*
-* #define RLGL_IMPLEMENTATION
-* Generates the implementation of the library into the included file.
-* If not defined, the library is in header only mode and can be included in other headers
-* or source files without problems. But only ONE file should hold the implementation.
-*
-* #define RLGL_RENDER_TEXTURES_HINT
-* Enable framebuffer objects (fbo) support (enabled by default)
-* Some GPUs could not support them despite the OpenGL version
-*
-* #define RLGL_SHOW_GL_DETAILS_INFO
-* Show OpenGL extensions and capabilities detailed logs on init
-*
-* #define RLGL_ENABLE_OPENGL_DEBUG_CONTEXT
-* Enable debug context (only available on OpenGL 4.3)
-*
-* rlgl capabilities could be customized just defining some internal
-* values before library inclusion (default values listed):
-*
-* #define RL_DEFAULT_BATCH_BUFFER_ELEMENTS 8192 // Default internal render batch elements limits
-* #define RL_DEFAULT_BATCH_BUFFERS 1 // Default number of batch buffers (multi-buffering)
-* #define RL_DEFAULT_BATCH_DRAWCALLS 256 // Default number of batch draw calls (by state changes: mode, texture)
-* #define RL_DEFAULT_BATCH_MAX_TEXTURE_UNITS 4 // Maximum number of textures units that can be activated on batch drawing (SetShaderValueTexture())
-*
-* #define RL_MAX_MATRIX_STACK_SIZE 32 // Maximum size of internal Matrix stack
-* #define RL_MAX_SHADER_LOCATIONS 32 // Maximum number of shader locations supported
-* #define RL_CULL_DISTANCE_NEAR 0.01 // Default projection matrix near cull distance
-* #define RL_CULL_DISTANCE_FAR 1000.0 // Default projection matrix far cull distance
-*
-* When loading a shader, the following vertex attributes and uniform
-* location names are tried to be set automatically:
-*
-* #define RL_DEFAULT_SHADER_ATTRIB_NAME_POSITION "vertexPosition" // Bound by default to shader location: 0
-* #define RL_DEFAULT_SHADER_ATTRIB_NAME_TEXCOORD "vertexTexCoord" // Bound by default to shader location: 1
-* #define RL_DEFAULT_SHADER_ATTRIB_NAME_NORMAL "vertexNormal" // Bound by default to shader location: 2
-* #define RL_DEFAULT_SHADER_ATTRIB_NAME_COLOR "vertexColor" // Bound by default to shader location: 3
-* #define RL_DEFAULT_SHADER_ATTRIB_NAME_TANGENT "vertexTangent" // Bound by default to shader location: 4
-* #define RL_DEFAULT_SHADER_ATTRIB_NAME_TEXCOORD2 "vertexTexCoord2" // Bound by default to shader location: 5
-* #define RL_DEFAULT_SHADER_UNIFORM_NAME_MVP "mvp" // model-view-projection matrix
-* #define RL_DEFAULT_SHADER_UNIFORM_NAME_VIEW "matView" // view matrix
-* #define RL_DEFAULT_SHADER_UNIFORM_NAME_PROJECTION "matProjection" // projection matrix
-* #define RL_DEFAULT_SHADER_UNIFORM_NAME_MODEL "matModel" // model matrix
-* #define RL_DEFAULT_SHADER_UNIFORM_NAME_NORMAL "matNormal" // normal matrix (transpose(inverse(matModelView))
-* #define RL_DEFAULT_SHADER_UNIFORM_NAME_COLOR "colDiffuse" // color diffuse (base tint color, multiplied by texture color)
-* #define RL_DEFAULT_SHADER_SAMPLER2D_NAME_TEXTURE0 "texture0" // texture0 (texture slot active 0)
-* #define RL_DEFAULT_SHADER_SAMPLER2D_NAME_TEXTURE1 "texture1" // texture1 (texture slot active 1)
-* #define RL_DEFAULT_SHADER_SAMPLER2D_NAME_TEXTURE2 "texture2" // texture2 (texture slot active 2)
-*
-* DEPENDENCIES:
-* - OpenGL libraries (depending on platform and OpenGL version selected)
-* - GLAD OpenGL extensions loading library (only for OpenGL 3.3 Core, 4.3 Core)
-*
-*
-* LICENSE: zlib/libpng
-*
-* Copyright (c) 2014-2023 Ramon Santamaria (@raysan5)
-*
-* This software is provided "as-is", without any express or implied warranty. In no event
-* will the authors be held liable for any damages arising from the use of this software.
-*
-* Permission is granted to anyone to use this software for any purpose, including commercial
-* applications, and to alter it and redistribute it freely, subject to the following restrictions:
-*
-* 1. The origin of this software must not be misrepresented; you must not claim that you
-* wrote the original software. If you use this software in a product, an acknowledgment
-* in the product documentation would be appreciated but is not required.
-*
-* 2. Altered source versions must be plainly marked as such, and must not be misrepresented
-* as being the original software.
-*
-* 3. This notice may not be removed or altered from any source distribution.
-*
-**********************************************************************************************/
-
-
-package raylib
-
-import "core:c"
-
-RLGL_VERSION :: "4.5"
-
-when ODIN_OS == .Windows {
- foreign import lib {
- "windows/raylib.lib",
- "system:Winmm.lib",
- "system:Gdi32.lib",
- "system:User32.lib",
- "system:Shell32.lib",
- }
-} else when ODIN_OS == .Linux {
- foreign import lib "linux/libraylib.a"
-} else when ODIN_OS == .Darwin {
- when ODIN_ARCH == .arm64 {
- foreign import lib {
- "macos-arm64/libraylib.a",
- "system:Cocoa.framework",
- "system:OpenGL.framework",
- "system:IOKit.framework",
- }
- } else {
- foreign import lib {
- "macos/libraylib.a",
- "system:Cocoa.framework",
- "system:OpenGL.framework",
- "system:IOKit.framework",
- }
- }
-} else {
- foreign import lib "system:raylib"
-}
-
-RL_GRAPHICS_API_OPENGL_11 :: false
-RL_GRAPHICS_API_OPENGL_21 :: true
-RL_GRAPHICS_API_OPENGL_33 :: RL_GRAPHICS_API_OPENGL_21 // default currently
-RL_GRAPHICS_API_OPENGL_ES2 :: false
-RL_GRAPHICS_API_OPENGL_43 :: false
-RL_GRAPHICS_API_OPENGL_ES3 :: false
-
-when RL_GRAPHICS_API_OPENGL_ES3 {
- RL_GRAPHICS_API_OPENGL_ES2 :: true
-}
-
-when !RL_GRAPHICS_API_OPENGL_ES2 {
- // This is the maximum amount of elements (quads) per batch
- // NOTE: Be careful with text, every letter maps to a quad
- RL_DEFAULT_BATCH_BUFFER_ELEMENTS :: 8192
-} else {
- // We reduce memory sizes for embedded systems (RPI and HTML5)
- // NOTE: On HTML5 (emscripten) this is allocated on heap,
- // by default it's only 16MB!...just take care...
- RL_DEFAULT_BATCH_BUFFER_ELEMENTS :: 2048
-}
-
-RL_DEFAULT_BATCH_BUFFERS :: 1 // Default number of batch buffers (multi-buffering)
-RL_DEFAULT_BATCH_DRAWCALLS :: 256 // Default number of batch draw calls (by state changes: mode, texture)
-RL_DEFAULT_BATCH_MAX_TEXTURE_UNITS :: 4 // Maximum number of additional textures that can be activated on batch drawing (SetShaderValueTexture())
-
-// Internal Matrix stack
-RL_MAX_MATRIX_STACK_SIZE :: 32 // Maximum size of Matrix stack
-
-// Shader limits
-RL_MAX_SHADER_LOCATIONS :: 32 // Maximum number of shader locations supported
-
-// Projection matrix culling
-RL_CULL_DISTANCE_NEAR :: 0.01 // Default near cull distance
-RL_CULL_DISTANCE_FAR :: 1000.0 // Default far cull distance
-
-// Texture parameters (equivalent to OpenGL defines)
-RL_TEXTURE_WRAP_S :: 0x2802 // GL_TEXTURE_WRAP_S
-RL_TEXTURE_WRAP_T :: 0x2803 // GL_TEXTURE_WRAP_T
-RL_TEXTURE_MAG_FILTER :: 0x2800 // GL_TEXTURE_MAG_FILTER
-RL_TEXTURE_MIN_FILTER :: 0x2801 // GL_TEXTURE_MIN_FILTER
-
-RL_TEXTURE_FILTER_NEAREST :: 0x2600 // GL_NEAREST
-RL_TEXTURE_FILTER_LINEAR :: 0x2601 // GL_LINEAR
-RL_TEXTURE_FILTER_MIP_NEAREST :: 0x2700 // GL_NEAREST_MIPMAP_NEAREST
-RL_TEXTURE_FILTER_NEAREST_MIP_LINEAR :: 0x2702 // GL_NEAREST_MIPMAP_LINEAR
-RL_TEXTURE_FILTER_LINEAR_MIP_NEAREST :: 0x2701 // GL_LINEAR_MIPMAP_NEAREST
-RL_TEXTURE_FILTER_MIP_LINEAR :: 0x2703 // GL_LINEAR_MIPMAP_LINEAR
-RL_TEXTURE_FILTER_ANISOTROPIC :: 0x3000 // Anisotropic filter (custom identifier)
-
-RL_TEXTURE_WRAP_REPEAT :: 0x2901 // GL_REPEAT
-RL_TEXTURE_WRAP_CLAMP :: 0x812F // GL_CLAMP_TO_EDGE
-RL_TEXTURE_WRAP_MIRROR_REPEAT :: 0x8370 // GL_MIRRORED_REPEAT
-RL_TEXTURE_WRAP_MIRROR_CLAMP :: 0x8742 // GL_MIRROR_CLAMP_EXT
-
-// Matrix modes (equivalent to OpenGL)
-RL_MODELVIEW :: 0x1700 // GL_MODELVIEW
-RL_PROJECTION :: 0x1701 // GL_PROJECTION
-RL_TEXTURE :: 0x1702 // GL_TEXTURE
-
-// Primitive assembly draw modes
-RL_LINES :: 0x0001 // GL_LINES
-RL_TRIANGLES :: 0x0004 // GL_TRIANGLES
-RL_QUADS :: 0x0007 // GL_QUADS
-
-// GL equivalent data types
-RL_UNSIGNED_BYTE :: 0x1401 // GL_UNSIGNED_BYTE
-RL_FLOAT :: 0x1406 // GL_FLOAT
-
-// Buffer usage hint
-RL_STREAM_DRAW :: 0x88E0 // GL_STREAM_DRAW
-RL_STREAM_READ :: 0x88E1 // GL_STREAM_READ
-RL_STREAM_COPY :: 0x88E2 // GL_STREAM_COPY
-RL_STATIC_DRAW :: 0x88E4 // GL_STATIC_DRAW
-RL_STATIC_READ :: 0x88E5 // GL_STATIC_READ
-RL_STATIC_COPY :: 0x88E6 // GL_STATIC_COPY
-RL_DYNAMIC_DRAW :: 0x88E8 // GL_DYNAMIC_DRAW
-RL_DYNAMIC_READ :: 0x88E9 // GL_DYNAMIC_READ
-RL_DYNAMIC_COPY :: 0x88EA // GL_DYNAMIC_COPY
-
-// GL Shader type
-RL_FRAGMENT_SHADER :: 0x8B30 // GL_FRAGMENT_SHADER
-RL_VERTEX_SHADER :: 0x8B31 // GL_VERTEX_SHADER
-RL_COMPUTE_SHADER :: 0x91B9 // GL_COMPUTE_SHADER
-
-// GL blending factors
-RL_ZERO :: 0 // GL_ZERO
-RL_ONE :: 1 // GL_ONE
-RL_SRC_COLOR :: 0x0300 // GL_SRC_COLOR
-RL_ONE_MINUS_SRC_COLOR :: 0x0301 // GL_ONE_MINUS_SRC_COLOR
-RL_SRC_ALPHA :: 0x0302 // GL_SRC_ALPHA
-RL_ONE_MINUS_SRC_ALPHA :: 0x0303 // GL_ONE_MINUS_SRC_ALPHA
-RL_DST_ALPHA :: 0x0304 // GL_DST_ALPHA
-RL_ONE_MINUS_DST_ALPHA :: 0x0305 // GL_ONE_MINUS_DST_ALPHA
-RL_DST_COLOR :: 0x0306 // GL_DST_COLOR
-RL_ONE_MINUS_DST_COLOR :: 0x0307 // GL_ONE_MINUS_DST_COLOR
-RL_SRC_ALPHA_SATURATE :: 0x0308 // GL_SRC_ALPHA_SATURATE
-RL_CONSTANT_COLOR :: 0x8001 // GL_CONSTANT_COLOR
-RL_ONE_MINUS_CONSTANT_COLOR :: 0x8002 // GL_ONE_MINUS_CONSTANT_COLOR
-RL_CONSTANT_ALPHA :: 0x8003 // GL_CONSTANT_ALPHA
-RL_ONE_MINUS_CONSTANT_ALPHA :: 0x8004 // GL_ONE_MINUS_CONSTANT_ALPHA
-
-// GL blending functions/equations
-RL_FUNC_ADD :: 0x8006 // GL_FUNC_ADD
-RL_MIN :: 0x8007 // GL_MIN
-RL_MAX :: 0x8008 // GL_MAX
-RL_FUNC_SUBTRACT :: 0x800A // GL_FUNC_SUBTRACT
-RL_FUNC_REVERSE_SUBTRACT :: 0x800B // GL_FUNC_REVERSE_SUBTRACT
-RL_BLEND_EQUATION :: 0x8009 // GL_BLEND_EQUATION
-RL_BLEND_EQUATION_RGB :: 0x8009 // GL_BLEND_EQUATION_RGB // (Same as BLEND_EQUATION)
-RL_BLEND_EQUATION_ALPHA :: 0x883D // GL_BLEND_EQUATION_ALPHA
-RL_BLEND_DST_RGB :: 0x80C8 // GL_BLEND_DST_RGB
-RL_BLEND_SRC_RGB :: 0x80C9 // GL_BLEND_SRC_RGB
-RL_BLEND_DST_ALPHA :: 0x80CA // GL_BLEND_DST_ALPHA
-RL_BLEND_SRC_ALPHA :: 0x80CB // GL_BLEND_SRC_ALPHA
-RL_BLEND_COLOR :: 0x8005 // GL_BLEND_COLOR
-
-
-//----------------------------------------------------------------------------------
-// Types and Structures Definition
-//----------------------------------------------------------------------------------
-
-
-VertexBufferIndexType :: c.ushort when RL_GRAPHICS_API_OPENGL_ES2 else c.uint
-
-// Dynamic vertex buffers (position + texcoords + colors + indices arrays)
-VertexBuffer :: struct {
- elementCount: c.int, // Number of elements in the buffer (QUADS)
-
- vertices: [^]f32, // Vertex position (XYZ - 3 components per vertex) (shader-location = 0)
- texcoords: [^]f32, // Vertex texture coordinates (UV - 2 components per vertex) (shader-location = 1)
- colors: [^]u8, // Vertex colors (RGBA - 4 components per vertex) (shader-location = 3)
- indices: [^]VertexBufferIndexType, // Vertex indices (in case vertex data comes indexed) (6 indices per quad)
- vaoId: c.uint, // OpenGL Vertex Array Object id
- vboId: [4]c.uint, // OpenGL Vertex Buffer Objects id (4 types of vertex data)
-}
-
-// Draw call type
-// NOTE: Only texture changes register a new draw, other state-change-related elements are not
-// used at this moment (vaoId, shaderId, matrices), raylib just forces a batch draw call if any
-// of those state-change happens (this is done in core module)
-DrawCall :: struct {
- mode: c.int, // Drawing mode: LINES, TRIANGLES, QUADS
- vertexCount: c.int, // Number of vertex of the draw
- vertexAlignment: c.int, // Number of vertex required for index alignment (LINES, TRIANGLES)
- textureId: c.uint, // Texture id to be used on the draw -> Use to create new draw call if changes
-}
-
-// RenderBatch type
-RenderBatch :: struct {
- bufferCount: c.int, // Number of vertex buffers (multi-buffering support)
- currentBuffer: c.int, // Current buffer tracking in case of multi-buffering
- vertexBuffer: [^]VertexBuffer, // Dynamic buffer(s) for vertex data
-
- draws: [^]DrawCall, // Draw calls array, depends on textureId
- drawCounter: c.int, // Draw calls counter
- currentDepth: f32, // Current depth value for next draw
-}
-
-
-// OpenGL version
-GlVersion :: enum c.int {
- OPENGL_11 = 1, // OpenGL 1.1
- OPENGL_21, // OpenGL 2.1 (GLSL 120)
- OPENGL_33, // OpenGL 3.3 (GLSL 330)
- OPENGL_43, // OpenGL 4.3 (using GLSL 330)
- OPENGL_ES_20, // OpenGL ES 2.0 (GLSL 100)
- OPENGL_ES_30, // OpenGL ES 3.0 (GLSL 300 es)
-}
-
-
-// Shader attribute data types
-ShaderAttributeDataType :: enum c.int {
- FLOAT = 0, // Shader attribute type: float
- VEC2, // Shader attribute type: vec2 (2 float)
- VEC3, // Shader attribute type: vec3 (3 float)
- VEC4, // Shader attribute type: vec4 (4 float)
-}
-
-// Framebuffer attachment type
-// NOTE: By default up to 8 color channels defined, but it can be more
-FramebufferAttachType :: enum c.int {
- COLOR_CHANNEL0 = 0, // Framebuffer attachment type: color 0
- COLOR_CHANNEL1 = 1, // Framebuffer attachment type: color 1
- COLOR_CHANNEL2 = 2, // Framebuffer attachment type: color 2
- COLOR_CHANNEL3 = 3, // Framebuffer attachment type: color 3
- COLOR_CHANNEL4 = 4, // Framebuffer attachment type: color 4
- COLOR_CHANNEL5 = 5, // Framebuffer attachment type: color 5
- COLOR_CHANNEL6 = 6, // Framebuffer attachment type: color 6
- COLOR_CHANNEL7 = 7, // Framebuffer attachment type: color 7
- DEPTH = 100, // Framebuffer attachment type: depth
- STENCIL = 200, // Framebuffer attachment type: stencil
-}
-
-// Framebuffer texture attachment type
-FramebufferAttachTextureType :: enum c.int {
- CUBEMAP_POSITIVE_X = 0, // Framebuffer texture attachment type: cubemap, +X side
- CUBEMAP_NEGATIVE_X = 1, // Framebuffer texture attachment type: cubemap, -X side
- CUBEMAP_POSITIVE_Y = 2, // Framebuffer texture attachment type: cubemap, +Y side
- CUBEMAP_NEGATIVE_Y = 3, // Framebuffer texture attachment type: cubemap, -Y side
- CUBEMAP_POSITIVE_Z = 4, // Framebuffer texture attachment type: cubemap, +Z side
- CUBEMAP_NEGATIVE_Z = 5, // Framebuffer texture attachment type: cubemap, -Z side
- TEXTURE2D = 100, // Framebuffer texture attachment type: texture2d
- RENDERBUFFER = 200, // Framebuffer texture attachment type: renderbuffer
-}
-
-CullMode :: enum c.int {
- FRONT = 0,
- BACK,
-}
-
-@(default_calling_convention="c")
-foreign lib {
- //------------------------------------------------------------------------------------
- // Functions Declaration - Matrix operations
- //------------------------------------------------------------------------------------
- rlMatrixMode :: proc(mode: c.int) --- // Choose the current matrix to be transformed
- rlPushMatrix :: proc() --- // Push the current matrix to stack
- rlPopMatrix :: proc() --- // Pop lattest inserted matrix from stack
- rlLoadIdentity :: proc() --- // Reset current matrix to identity matrix
- rlTranslatef :: proc(x, y, z: f32) --- // Multiply the current matrix by a translation matrix
- rlRotatef :: proc(angleDeg: f32, x, y, z: f32) --- // Multiply the current matrix by a rotation matrix
- rlScalef :: proc(x, y, z: f32) --- // Multiply the current matrix by a scaling matrix
- rlMultMatrixf :: proc(matf: [^]f32) --- // Multiply the current matrix by another matrix
- rlFrustum :: proc(left, right, bottom, top, znear, zfar: f64) ---
- rlOrtho :: proc(left, right, bottom, top, znear, zfar: f64) ---
- rlViewport :: proc(x, y, width, height: c.int) --- // Set the viewport area
-
- //------------------------------------------------------------------------------------
- // Functions Declaration - Vertex level operations
- //------------------------------------------------------------------------------------
- rlBegin :: proc(mode: c.int) --- // Initialize drawing mode (how to organize vertex)
- rlEnd :: proc() --- // Finish vertex providing
- rlVertex2i :: proc(x, y: c.int) --- // Define one vertex (position) - 2 int
- rlVertex2f :: proc(x, y: f32) --- // Define one vertex (position) - 2 f32
- rlVertex3f :: proc(x, y, z: f32) --- // Define one vertex (position) - 3 f32
- rlTexCoord2f :: proc(x, y: f32) --- // Define one vertex (texture coordinate) - 2 f32
- rlNormal3f :: proc(x, y, z: f32) --- // Define one vertex (normal) - 3 f32
- rlColor4ub :: proc(r, g, b, a: u8) --- // Define one vertex (color) - 4 byte
- rlColor3f :: proc(x, y, z: f32) --- // Define one vertex (color) - 3 f32
- rlColor4f :: proc(x, y, z, w: f32) --- // Define one vertex (color) - 4 f32
-
- //------------------------------------------------------------------------------------
- // Functions Declaration - OpenGL style functions (common to 1.1, 3.3+, ES2)
- // NOTE: This functions are used to completely abstract raylib code from OpenGL layer,
- // some of them are direct wrappers over OpenGL calls, some others are custom
- //------------------------------------------------------------------------------------
-
- // Vertex buffers state
- rlEnableVertexArray :: proc(vaoId: c.uint) -> bool --- // Enable vertex array (VAO, if supported)
- rlDisableVertexArray :: proc() --- // Disable vertex array (VAO, if supported)
- rlEnableVertexBuffer :: proc(id: c.uint) --- // Enable vertex buffer (VBO)
- rlDisableVertexBuffer :: proc() --- // Disable vertex buffer (VBO)
- rlEnableVertexBufferElement :: proc(id: c.uint) --- // Enable vertex buffer element (VBO element)
- rlDisableVertexBufferElement :: proc() --- // Disable vertex buffer element (VBO element)
- rlEnableVertexAttribute :: proc(index: c.uint) --- // Enable vertex attribute index
- rlDisableVertexAttribute :: proc(index: c.uint) --- // Disable vertex attribute index
- when RL_GRAPHICS_API_OPENGL_11 {
- rlEnableStatePointer :: proc(vertexAttribType: c.int, buffer: rawptr) ---
- rlDisableStatePointer :: proc(vertexAttribType: c.int) ---
- }
-
- // Textures state
- rlActiveTextureSlot :: proc(slot: c.int) --- // Select and active a texture slot
- rlEnableTexture :: proc(id: c.uint) --- // Enable texture
- rlDisableTexture :: proc() --- // Disable texture
- rlEnableTextureCubemap :: proc(id: c.uint) --- // Enable texture cubemap
- rlDisableTextureCubemap :: proc() --- // Disable texture cubemap
- rlTextureParameters :: proc(id: c.uint, param: c.int, value: c.int) --- // Set texture parameters (filter, wrap)
- rlCubemapParameters :: proc(id: i32, param: c.int, value: c.int) --- // Set cubemap parameters (filter, wrap)
-
- // Shader state
- rlEnableShader :: proc(id: c.uint) --- // Enable shader program
- rlDisableShader :: proc() --- // Disable shader program
-
- // Framebuffer state
- rlEnableFramebuffer :: proc(id: c.uint) --- // Enable render texture (fbo)
- rlDisableFramebuffer :: proc() --- // Disable render texture (fbo), return to default framebuffer
- rlActiveDrawBuffers :: proc(count: c.int) --- // Activate multiple draw color buffers
- rlBlitFramebuffer :: proc(srcX, srcY, srcWidth, srcHeight, dstX, dstY, dstWidth, dstHeight, bufferMask: c.int) --- // Blit active framebuffer to main framebuffer
-
- // General render state
- rlDisableColorBlend :: proc() --- // Disable color blending
- rlEnableDepthTest :: proc() --- // Enable depth test
- rlDisableDepthTest :: proc() --- // Disable depth test
- rlEnableDepthMask :: proc() --- // Enable depth write
- rlDisableDepthMask :: proc() --- // Disable depth write
- rlEnableBackfaceCulling :: proc() --- // Enable backface culling
- rlDisableBackfaceCulling :: proc() --- // Disable backface culling
- rlSetCullFace :: proc(mode: CullMode) --- // Set face culling mode
- rlEnableScissorTest :: proc() --- // Enable scissor test
- rlDisableScissorTest :: proc() --- // Disable scissor test
- rlScissor :: proc(x, y, width, height: c.int) --- // Scissor test
- rlEnableWireMode :: proc() --- // Enable wire mode
- rlEnablePointMode :: proc() --- // Enable point mode
- rlDisableWireMode :: proc() --- // Disable wire and point modes
- rlSetLineWidth :: proc(width: f32) --- // Set the line drawing width
- rlGetLineWidth :: proc() -> f32 --- // Get the line drawing width
- rlEnableSmoothLines :: proc() --- // Enable line aliasing
- rlDisableSmoothLines :: proc() --- // Disable line aliasing
- rlEnableStereoRender :: proc() --- // Enable stereo rendering
- rlDisableStereoRender :: proc() --- // Disable stereo rendering
- rlIsStereoRenderEnabled :: proc() -> bool --- // Check if stereo render is enabled
-
-
- rlClearColor :: proc(r, g, b, a: u8) --- // Clear color buffer with color
- rlClearScreenBuffers :: proc() --- // Clear used screen buffers (color and depth)
- rlCheckErrors :: proc() --- // Check and log OpenGL error codes
- rlSetBlendMode :: proc(mode: c.int) --- // Set blending mode
- rlSetBlendFactors :: proc(glSrcFactor, glDstFactor, glEquation: c.int) --- // Set blending mode factor and equation (using OpenGL factors)
- rlSetBlendFactorsSeparate :: proc(glSrcRGB, glDstRGB, glSrcAlpha, glDstAlpha, glEqRGB, glEqAlpha: c.int) --- // Set blending mode factors and equations separately (using OpenGL factors)
-
- //------------------------------------------------------------------------------------
- // Functions Declaration - rlgl functionality
- //------------------------------------------------------------------------------------
- // rlgl initialization functions
- rlglInit :: proc(width, height: c.int) --- // Initialize rlgl (buffers, shaders, textures, states)
- rlglClose :: proc() --- // De-initialize rlgl (buffers, shaders, textures)
- rlLoadExtensions :: proc(loader: rawptr) --- // Load OpenGL extensions (loader function required)
- rlGetVersion :: proc() -> GlVersion --- // Get current OpenGL version
- rlSetFramebufferWidth :: proc(width: c.int) --- // Set current framebuffer width
- rlGetFramebufferWidth :: proc() -> c.int --- // Get default framebuffer width
- rlSetFramebufferHeight :: proc(height: c.int) --- // Set current framebuffer height
- rlGetFramebufferHeight :: proc() -> c.int --- // Get default framebuffer height
-
-
- rlGetTextureIdDefault :: proc() -> c.uint --- // Get default texture id
- rlGetShaderIdDefault :: proc() -> c.uint --- // Get default shader id
- rlGetShaderLocsDefault :: proc() -> [^]c.int --- // Get default shader locations
-
- // Render batch management
- // NOTE: rlgl provides a default render batch to behave like OpenGL 1.1 immediate mode
- // but this render batch API is exposed in case of custom batches are required
- rlLoadRenderBatch :: proc(numBuffers, bufferElements: c.int) -> RenderBatch --- // Load a render batch system
- rlUnloadRenderBatch :: proc(batch: RenderBatch) --- // Unload render batch system
- rlDrawRenderBatch :: proc(batch: ^RenderBatch) --- // Draw render batch data (Update->Draw->Reset)
- rlSetRenderBatchActive :: proc(batch: ^RenderBatch) --- // Set the active render batch for rlgl (NULL for default internal)
- rlDrawRenderBatchActive :: proc() --- // Update and draw internal render batch
- rlCheckRenderBatchLimit :: proc(vCount: c.int) -> c.int --- // Check internal buffer overflow for a given number of vertex
-
- rlSetTexture :: proc(id: c.uint) --- // Set current texture for render batch and check buffers limits
-
- //------------------------------------------------------------------------------------------------------------------------
-
- // Vertex buffers management
- rlLoadVertexArray :: proc() -> c.uint --- // Load vertex array (vao) if supported
- rlLoadVertexBuffer :: proc(buffer: rawptr, size: c.int, is_dynamic: bool) -> c.uint --- // Load a vertex buffer attribute
- rlLoadVertexBufferElement :: proc(buffer: rawptr, size: c.int, is_dynamic: bool) -> c.uint --- // Load a new attributes element buffer
- rlUpdateVertexBuffer :: proc(bufferId: c.uint, data: rawptr, dataSize: c.int, offset: c.int) --- // Update GPU buffer with new data
- rlUpdateVertexBufferElements :: proc(id: c.uint, data: rawptr, dataSize: c.int, offset: c.int) --- // Update vertex buffer elements with new data
- rlUnloadVertexArray :: proc(vaoId: c.uint) ---
- rlUnloadVertexBuffer :: proc(vboId: c.uint) ---
- rlSetVertexAttribute :: proc(index: c.uint, compSize: c.int, type: c.int, normalized: bool, stride: c.int, pointer: rawptr) ---
- rlSetVertexAttributeDivisor :: proc(index: c.uint, divisor: c.int) ---
- rlSetVertexAttributeDefault :: proc(locIndex: c.int, value: rawptr, attribType: c.int, count: c.int) --- // Set vertex attribute default value
- rlDrawVertexArray :: proc(offset: c.int, count: c.int) ---
- rlDrawVertexArrayElements :: proc(offset: c.int, count: c.int, buffer: rawptr) ---
- rlDrawVertexArrayInstanced :: proc(offset: c.int, count: c.int, instances: c.int) ---
- rlDrawVertexArrayElementsInstanced :: proc(offset: c.int, count: c.int, buffer: rawptr, instances: c.int) ---
-
- // Textures management
- rlLoadTexture :: proc(data: rawptr, width, height: c.int, format: c.int, mipmapCount: c.int) -> c.uint --- // Load texture in GPU
- rlLoadTextureDepth :: proc(width, height: c.int, useRenderBuffer: bool) -> c.uint --- // Load depth texture/renderbuffer (to be attached to fbo)
- rlLoadTextureCubemap :: proc(data: rawptr, size: c.int, format: c.int) -> c.uint --- // Load texture cubemap
- rlUpdateTexture :: proc(id: c.uint, offsetX, offsetY: c.int, width, height: c.int, format: c.int, data: rawptr) --- // Update GPU texture with new data
- rlGetGlTextureFormats :: proc(format: c.int, glInternalFormat, glFormat, glType: ^c.uint) --- // Get OpenGL internal formats
- rlGetPixelFormatName :: proc(format: c.uint) -> cstring --- // Get name string for pixel format
- rlUnloadTexture :: proc(id: c.uint) --- // Unload texture from GPU memory
- rlGenTextureMipmaps :: proc(id: c.uint, width, height: c.int, format: c.int, mipmaps: ^c.int) --- // Generate mipmap data for selected texture
- rlReadTexturePixels :: proc(id: c.uint, width, height: c.int, format: c.int) -> rawptr --- // Read texture pixel data
- rlReadScreenPixels :: proc(width, height: c.int) -> [^]byte --- // Read screen pixel data (color buffer)
-
- // Framebuffer management (fbo)
- rlLoadFramebuffer :: proc(width, height: c.int) -> c.uint --- // Load an empty framebuffer
- rlFramebufferAttach :: proc(fboId, texId: c.uint, attachType: c.int, texType: c.int, mipLevel: c.int) --- // Attach texture/renderbuffer to a framebuffer
- rlFramebufferComplete :: proc(id: c.uint) -> bool --- // Verify framebuffer is complete
- rlUnloadFramebuffer :: proc(id: c.uint) --- // Delete framebuffer from GPU
-
- // Shaders management
- rlLoadShaderCode :: proc(vsCode, fsCode: cstring) -> c.uint --- // Load shader from code strings
- rlCompileShader :: proc(shaderCode: cstring, type: c.int) -> c.uint --- // Compile custom shader and return shader id (type: RL_VERTEX_SHADER, RL_FRAGMENT_SHADER, RL_COMPUTE_SHADER)
- rlLoadShaderProgram :: proc(vShaderId, fShaderId: c.uint) -> c.uint --- // Load custom shader program
- rlUnloadShaderProgram :: proc(id: c.uint) --- // Unload shader program
- rlGetLocationUniform :: proc(shaderId: c.uint, uniformName: cstring) -> c.int --- // Get shader location uniform
- rlGetLocationAttrib :: proc(shaderId: c.uint, attribName: cstring) -> c.int --- // Get shader location attribute
- rlSetUniform :: proc(locIndex: c.int, value: rawptr, uniformType: c.int, count: c.int) --- // Set shader value uniform
- rlSetUniformMatrix :: proc(locIndex: c.int, mat: Matrix) --- // Set shader value matrix
- rlSetUniformSampler :: proc(locIndex: c.int, textureId: c.uint) --- // Set shader value sampler
- rlSetShader :: proc(id: c.uint, locs: [^]c.int) --- // Set shader currently active (id and locations)
-
- // Compute shader management
- rlLoadComputeShaderProgram :: proc(shaderId: c.uint) -> c.uint --- // Load compute shader program
- rlComputeShaderDispatch :: proc(groupX, groupY, groupZ: c.uint) --- // Dispatch compute shader (equivalent to *draw* for graphics pipeline)
-
- // Shader buffer storage object management (ssbo)
- rlLoadShaderBuffer :: proc(size: c.uint, data: rawptr, usageHint: c.int) -> c.uint --- // Load shader storage buffer object (SSBO)
- rlUnloadShaderBuffer :: proc(ssboId: c.uint) --- // Unload shader storage buffer object (SSBO)
- rlUpdateShaderBuffer :: proc(id: c.uint, data: rawptr, dataSize: c.uint, offset: c.uint) --- // Update SSBO buffer data
- rlBindShaderBuffer :: proc(id: c.uint, index: c.uint) --- // Bind SSBO buffer
- rlReadShaderBuffer :: proc(id: c.uint, dest: rawptr, count: c.uint, offset: c.uint) --- // Read SSBO buffer data (GPU->CPU)
- rlCopyShaderBuffer :: proc(destId, srcId: c.uint, destOffset, srcOffset: c.uint, count: c.uint) --- // Copy SSBO data between buffers
- rlGetShaderBufferSize :: proc(id: c.uint) -> c.uint --- // Get SSBO buffer size
-
- // Buffer management
- rlBindImageTexture :: proc(id: c.uint, index: c.uint, format: c.int, readonly: bool) --- // Bind image texture
-
- // Matrix state management
- rlGetMatrixModelview :: proc() -> Matrix --- // Get internal modelview matrix
- rlGetMatrixProjection :: proc() -> Matrix --- // Get internal projection matrix
- rlGetMatrixTransform :: proc() -> Matrix --- // Get internal accumulated transform matrix
- rlGetMatrixProjectionStereo :: proc(eye: c.int) -> Matrix --- // Get internal projection matrix for stereo render (selected eye)
- rlGetMatrixViewOffsetStereo :: proc(eye: c.int) -> Matrix --- // Get internal view offset matrix for stereo render (selected eye)
- rlSetMatrixProjection :: proc(proj: Matrix) --- // Set a custom projection matrix (replaces internal projection matrix)
- rlSetMatrixModelview :: proc(view: Matrix) --- // Set a custom modelview matrix (replaces internal modelview matrix)
- rlSetMatrixProjectionStereo :: proc(right, left: Matrix) --- // Set eyes projection matrices for stereo rendering
- rlSetMatrixViewOffsetStereo :: proc(right, left: Matrix) --- // Set eyes view offsets matrices for stereo rendering
-
- // Quick and dirty cube/quad buffers load->draw->unload
- rlLoadDrawCube :: proc() --- // Load and draw a cube
- rlLoadDrawQuad :: proc() --- // Load and draw a quad
-}
diff --git a/vendor/raylib/rlgl/rlgl.odin b/vendor/raylib/rlgl/rlgl.odin
new file mode 100644
index 000000000..cef31c238
--- /dev/null
+++ b/vendor/raylib/rlgl/rlgl.odin
@@ -0,0 +1,581 @@
+/**********************************************************************************************
+*
+* rlgl v5.0 - A multi-OpenGL abstraction layer with an immediate-mode style API
+*
+* DESCRIPTION:
+* An abstraction layer for multiple OpenGL versions (1.1, 2.1, 3.3 Core, 4.3 Core, ES 2.0)
+* that provides a pseudo-OpenGL 1.1 immediate-mode style API (rlVertex, rlTranslate, rlRotate...)
+*
+* ADDITIONAL NOTES:
+* When choosing an OpenGL backend other than OpenGL 1.1, some internal buffers are
+* initialized on rlglInit() to accumulate vertex data.
+*
+* When an internal state change is required, all the stored vertex data is rendered in a batch;
+* additionally, rlDrawRenderBatchActive() can be called to force flushing of the batch.
+*
+* Some resources are also loaded for convenience; here is the complete list:
+* - Default batch (RLGL.defaultBatch): RenderBatch system to accumulate vertex data
+* - Default texture (RLGL.defaultTextureId): 1x1 white pixel R8G8B8A8
+* - Default shader (RLGL.State.defaultShaderId, RLGL.State.defaultShaderLocs)
+*
+* Internal buffers (and resources) must be manually unloaded by calling rlglClose().
+*
+* CONFIGURATION:
+* #define GRAPHICS_API_OPENGL_11
+* #define GRAPHICS_API_OPENGL_21
+* #define GRAPHICS_API_OPENGL_33
+* #define GRAPHICS_API_OPENGL_43
+* #define GRAPHICS_API_OPENGL_ES2
+* #define GRAPHICS_API_OPENGL_ES3
+* Use the selected OpenGL graphics backend; it should be supported by the platform.
+* Those preprocessor defines are only used by the rlgl module; if the OpenGL version is
+* required by any other module, use rlGetVersion() to check it.
+*
+* #define RLGL_IMPLEMENTATION
+* Generates the implementation of the library into the included file.
+* If not defined, the library is in header only mode and can be included in other headers
+* or source files without problems. But only ONE file should hold the implementation.
+*
+* #define RLGL_RENDER_TEXTURES_HINT
+* Enable framebuffer object (fbo) support (enabled by default)
+* Some GPUs may not support them despite the OpenGL version
+*
+* #define RLGL_SHOW_GL_DETAILS_INFO
+* Show OpenGL extensions and capabilities detailed logs on init
+*
+* #define RLGL_ENABLE_OPENGL_DEBUG_CONTEXT
+* Enable debug context (only available on OpenGL 4.3)
+*
+* rlgl capabilities can be customized by defining some internal
+* values before library inclusion (default values listed):
+*
+* #define RL_DEFAULT_BATCH_BUFFER_ELEMENTS 8192 // Default internal render batch elements limits
+* #define RL_DEFAULT_BATCH_BUFFERS 1 // Default number of batch buffers (multi-buffering)
+* #define RL_DEFAULT_BATCH_DRAWCALLS 256 // Default number of batch draw calls (by state changes: mode, texture)
+* #define RL_DEFAULT_BATCH_MAX_TEXTURE_UNITS 4 // Maximum number of texture units that can be activated on batch drawing (SetShaderValueTexture())
+*
+* #define RL_MAX_MATRIX_STACK_SIZE 32 // Maximum size of internal Matrix stack
+* #define RL_MAX_SHADER_LOCATIONS 32 // Maximum number of shader locations supported
+* #define RL_CULL_DISTANCE_NEAR 0.01 // Default projection matrix near cull distance
+* #define RL_CULL_DISTANCE_FAR 1000.0 // Default projection matrix far cull distance
+*
+* When loading a shader, rlgl tries to set the following vertex attribute and uniform
+* location names automatically:
+*
+* #define RL_DEFAULT_SHADER_ATTRIB_NAME_POSITION "vertexPosition" // Bound by default to shader location: 0
+* #define RL_DEFAULT_SHADER_ATTRIB_NAME_TEXCOORD "vertexTexCoord" // Bound by default to shader location: 1
+* #define RL_DEFAULT_SHADER_ATTRIB_NAME_NORMAL "vertexNormal" // Bound by default to shader location: 2
+* #define RL_DEFAULT_SHADER_ATTRIB_NAME_COLOR "vertexColor" // Bound by default to shader location: 3
+* #define RL_DEFAULT_SHADER_ATTRIB_NAME_TANGENT "vertexTangent" // Bound by default to shader location: 4
+* #define RL_DEFAULT_SHADER_ATTRIB_NAME_TEXCOORD2 "vertexTexCoord2" // Bound by default to shader location: 5
+* #define RL_DEFAULT_SHADER_UNIFORM_NAME_MVP "mvp" // model-view-projection matrix
+* #define RL_DEFAULT_SHADER_UNIFORM_NAME_VIEW "matView" // view matrix
+* #define RL_DEFAULT_SHADER_UNIFORM_NAME_PROJECTION "matProjection" // projection matrix
+* #define RL_DEFAULT_SHADER_UNIFORM_NAME_MODEL "matModel" // model matrix
+* #define RL_DEFAULT_SHADER_UNIFORM_NAME_NORMAL "matNormal" // normal matrix (transpose(inverse(matModelView)))
+* #define RL_DEFAULT_SHADER_UNIFORM_NAME_COLOR "colDiffuse" // color diffuse (base tint color, multiplied by texture color)
+* #define RL_DEFAULT_SHADER_SAMPLER2D_NAME_TEXTURE0 "texture0" // texture0 (texture slot active 0)
+* #define RL_DEFAULT_SHADER_SAMPLER2D_NAME_TEXTURE1 "texture1" // texture1 (texture slot active 1)
+* #define RL_DEFAULT_SHADER_SAMPLER2D_NAME_TEXTURE2 "texture2" // texture2 (texture slot active 2)
+*
+* DEPENDENCIES:
+* - OpenGL libraries (depending on platform and OpenGL version selected)
+* - GLAD OpenGL extensions loading library (only for OpenGL 3.3 Core, 4.3 Core)
+*
+*
+* LICENSE: zlib/libpng
+*
+* Copyright (c) 2014-2023 Ramon Santamaria (@raysan5)
+*
+* This software is provided "as-is", without any express or implied warranty. In no event
+* will the authors be held liable for any damages arising from the use of this software.
+*
+* Permission is granted to anyone to use this software for any purpose, including commercial
+* applications, and to alter it and redistribute it freely, subject to the following restrictions:
+*
+* 1. The origin of this software must not be misrepresented; you must not claim that you
+* wrote the original software. If you use this software in a product, an acknowledgment
+* in the product documentation would be appreciated but is not required.
+*
+* 2. Altered source versions must be plainly marked as such, and must not be misrepresented
+* as being the original software.
+*
+* 3. This notice may not be removed or altered from any source distribution.
+*
+**********************************************************************************************/
+
+
+package rlgl
+
+import "core:c"
+import rl "../."
+
+VERSION :: "5.0"
+
+RAYLIB_SHARED :: #config(RAYLIB_SHARED, false)
+
+// Note: We pull in the full raylib library. If you want a truly stand-alone rlgl, then:
+// - Compile a separate rlgl library and use that in the foreign import blocks below.
+// - Remove the `import rl "../."` line
+// - Copy the code from raylib.odin for any types we alias from that package (see PixelFormat etc)
+
+when ODIN_OS == .Windows {
+ @(extra_linker_flags="/NODEFAULTLIB:" + ("msvcrt" when RAYLIB_SHARED else "libcmt"))
+ foreign import lib {
+ "../windows/raylibdll.lib" when RAYLIB_SHARED else "../windows/raylib.lib" ,
+ "system:Winmm.lib",
+ "system:Gdi32.lib",
+ "system:User32.lib",
+ "system:Shell32.lib",
+ }
+} else when ODIN_OS == .Linux {
+ foreign import lib {
+		// Note(bumbread): I'm not sure why the `linux/` folder contains
+		// multiple copies of raylib.so, but since these bindings are for a
+		// particular version of the library, it is better to specify it. Ideally,
+		// though, it should be specified in terms of the major version (.so.4)
+ "../linux/libraylib.so.500" when RAYLIB_SHARED else "../linux/libraylib.a",
+ "system:dl",
+ "system:pthread",
+ }
+} else when ODIN_OS == .Darwin {
+ foreign import lib {
+ "../macos" +
+ ("-arm64" when ODIN_ARCH == .arm64 else "") +
+ "/libraylib" + (".500.dylib" when RAYLIB_SHARED else ".a"),
+ "system:Cocoa.framework",
+ "system:OpenGL.framework",
+ "system:IOKit.framework",
+ }
+} else {
+ foreign import lib "system:raylib"
+}
+
+GRAPHICS_API_OPENGL_11 :: false
+GRAPHICS_API_OPENGL_21 :: true
+GRAPHICS_API_OPENGL_33 :: GRAPHICS_API_OPENGL_21 // default currently
+GRAPHICS_API_OPENGL_ES2 :: false
+GRAPHICS_API_OPENGL_43 :: false
+GRAPHICS_API_OPENGL_ES3 :: false
+
+when GRAPHICS_API_OPENGL_ES3 {
+ GRAPHICS_API_OPENGL_ES2 :: true
+}
+
+when !GRAPHICS_API_OPENGL_ES2 {
+ // This is the maximum amount of elements (quads) per batch
+ // NOTE: Be careful with text, every letter maps to a quad
+ DEFAULT_BATCH_BUFFER_ELEMENTS :: 8192
+} else {
+ // We reduce memory sizes for embedded systems (RPI and HTML5)
+ // NOTE: On HTML5 (emscripten) this is allocated on heap,
+ // by default it's only 16MB!...just take care...
+ DEFAULT_BATCH_BUFFER_ELEMENTS :: 2048
+}
+
+DEFAULT_BATCH_BUFFERS :: 1 // Default number of batch buffers (multi-buffering)
+DEFAULT_BATCH_DRAWCALLS :: 256 // Default number of batch draw calls (by state changes: mode, texture)
+DEFAULT_BATCH_MAX_TEXTURE_UNITS :: 4 // Maximum number of additional textures that can be activated on batch drawing (SetShaderValueTexture())
+
+// Internal Matrix stack
+MAX_MATRIX_STACK_SIZE :: 32 // Maximum size of Matrix stack
+
+// Shader limits
+MAX_SHADER_LOCATIONS :: 32 // Maximum number of shader locations supported
+
+// Projection matrix culling
+CULL_DISTANCE_NEAR :: 0.01 // Default near cull distance
+CULL_DISTANCE_FAR :: 1000.0 // Default far cull distance
+
+// Texture parameters (equivalent to OpenGL defines)
+TEXTURE_WRAP_S :: 0x2802 // GL_TEXTURE_WRAP_S
+TEXTURE_WRAP_T :: 0x2803 // GL_TEXTURE_WRAP_T
+TEXTURE_MAG_FILTER :: 0x2800 // GL_TEXTURE_MAG_FILTER
+TEXTURE_MIN_FILTER :: 0x2801 // GL_TEXTURE_MIN_FILTER
+
+TEXTURE_FILTER_NEAREST :: 0x2600 // GL_NEAREST
+TEXTURE_FILTER_LINEAR :: 0x2601 // GL_LINEAR
+TEXTURE_FILTER_MIP_NEAREST :: 0x2700 // GL_NEAREST_MIPMAP_NEAREST
+TEXTURE_FILTER_NEAREST_MIP_LINEAR :: 0x2702 // GL_NEAREST_MIPMAP_LINEAR
+TEXTURE_FILTER_LINEAR_MIP_NEAREST :: 0x2701 // GL_LINEAR_MIPMAP_NEAREST
+TEXTURE_FILTER_MIP_LINEAR :: 0x2703 // GL_LINEAR_MIPMAP_LINEAR
+TEXTURE_FILTER_ANISOTROPIC :: 0x3000 // Anisotropic filter (custom identifier)
+
+TEXTURE_WRAP_REPEAT :: 0x2901 // GL_REPEAT
+TEXTURE_WRAP_CLAMP :: 0x812F // GL_CLAMP_TO_EDGE
+TEXTURE_WRAP_MIRROR_REPEAT :: 0x8370 // GL_MIRRORED_REPEAT
+TEXTURE_WRAP_MIRROR_CLAMP :: 0x8742 // GL_MIRROR_CLAMP_EXT
+
+// Matrix modes (equivalent to OpenGL)
+MODELVIEW :: 0x1700 // GL_MODELVIEW
+PROJECTION :: 0x1701 // GL_PROJECTION
+TEXTURE :: 0x1702 // GL_TEXTURE
+
+// Primitive assembly draw modes
+LINES :: 0x0001 // GL_LINES
+TRIANGLES :: 0x0004 // GL_TRIANGLES
+QUADS :: 0x0007 // GL_QUADS
+
+// GL equivalent data types
+UNSIGNED_BYTE :: 0x1401 // GL_UNSIGNED_BYTE
+FLOAT :: 0x1406 // GL_FLOAT
+
+// Buffer usage hint
+STREAM_DRAW :: 0x88E0 // GL_STREAM_DRAW
+STREAM_READ :: 0x88E1 // GL_STREAM_READ
+STREAM_COPY :: 0x88E2 // GL_STREAM_COPY
+STATIC_DRAW :: 0x88E4 // GL_STATIC_DRAW
+STATIC_READ :: 0x88E5 // GL_STATIC_READ
+STATIC_COPY :: 0x88E6 // GL_STATIC_COPY
+DYNAMIC_DRAW :: 0x88E8 // GL_DYNAMIC_DRAW
+DYNAMIC_READ :: 0x88E9 // GL_DYNAMIC_READ
+DYNAMIC_COPY :: 0x88EA // GL_DYNAMIC_COPY
+
+// GL Shader type
+FRAGMENT_SHADER :: 0x8B30 // GL_FRAGMENT_SHADER
+VERTEX_SHADER :: 0x8B31 // GL_VERTEX_SHADER
+COMPUTE_SHADER :: 0x91B9 // GL_COMPUTE_SHADER
+
+// GL blending factors
+ZERO :: 0 // GL_ZERO
+ONE :: 1 // GL_ONE
+SRC_COLOR :: 0x0300 // GL_SRC_COLOR
+ONE_MINUS_SRC_COLOR :: 0x0301 // GL_ONE_MINUS_SRC_COLOR
+SRC_ALPHA :: 0x0302 // GL_SRC_ALPHA
+ONE_MINUS_SRC_ALPHA :: 0x0303 // GL_ONE_MINUS_SRC_ALPHA
+DST_ALPHA :: 0x0304 // GL_DST_ALPHA
+ONE_MINUS_DST_ALPHA :: 0x0305 // GL_ONE_MINUS_DST_ALPHA
+DST_COLOR :: 0x0306 // GL_DST_COLOR
+ONE_MINUS_DST_COLOR :: 0x0307 // GL_ONE_MINUS_DST_COLOR
+SRC_ALPHA_SATURATE :: 0x0308 // GL_SRC_ALPHA_SATURATE
+CONSTANT_COLOR :: 0x8001 // GL_CONSTANT_COLOR
+ONE_MINUS_CONSTANT_COLOR :: 0x8002 // GL_ONE_MINUS_CONSTANT_COLOR
+CONSTANT_ALPHA :: 0x8003 // GL_CONSTANT_ALPHA
+ONE_MINUS_CONSTANT_ALPHA :: 0x8004 // GL_ONE_MINUS_CONSTANT_ALPHA
+
+// GL blending functions/equations
+FUNC_ADD :: 0x8006 // GL_FUNC_ADD
+MIN :: 0x8007 // GL_MIN
+MAX :: 0x8008 // GL_MAX
+FUNC_SUBTRACT :: 0x800A // GL_FUNC_SUBTRACT
+FUNC_REVERSE_SUBTRACT :: 0x800B // GL_FUNC_REVERSE_SUBTRACT
+BLEND_EQUATION :: 0x8009 // GL_BLEND_EQUATION
+BLEND_EQUATION_RGB :: 0x8009 // GL_BLEND_EQUATION_RGB // (Same as BLEND_EQUATION)
+BLEND_EQUATION_ALPHA :: 0x883D // GL_BLEND_EQUATION_ALPHA
+BLEND_DST_RGB :: 0x80C8 // GL_BLEND_DST_RGB
+BLEND_SRC_RGB :: 0x80C9 // GL_BLEND_SRC_RGB
+BLEND_DST_ALPHA :: 0x80CA // GL_BLEND_DST_ALPHA
+BLEND_SRC_ALPHA :: 0x80CB // GL_BLEND_SRC_ALPHA
+BLEND_COLOR :: 0x8005 // GL_BLEND_COLOR
+
+//----------------------------------------------------------------------------------
+// Types and Structures Definition
+//----------------------------------------------------------------------------------
+
+
+VertexBufferIndexType :: c.ushort when GRAPHICS_API_OPENGL_ES2 else c.uint
+
+// Dynamic vertex buffers (position + texcoords + colors + indices arrays)
+VertexBuffer :: struct {
+ elementCount: c.int, // Number of elements in the buffer (QUADS)
+
+ vertices: [^]f32, // Vertex position (XYZ - 3 components per vertex) (shader-location = 0)
+ texcoords: [^]f32, // Vertex texture coordinates (UV - 2 components per vertex) (shader-location = 1)
+ colors: [^]u8, // Vertex colors (RGBA - 4 components per vertex) (shader-location = 3)
+ indices: [^]VertexBufferIndexType, // Vertex indices (in case vertex data comes indexed) (6 indices per quad)
+ vaoId: c.uint, // OpenGL Vertex Array Object id
+ vboId: [4]c.uint, // OpenGL Vertex Buffer Objects id (4 types of vertex data)
+}
+
+// Draw call type
+// NOTE: Only texture changes register a new draw; other state-change-related elements are not
+// used at this moment (vaoId, shaderId, matrices), raylib just forces a batch draw call if any
+// of those state changes happens (this is done in the core module)
+DrawCall :: struct {
+ mode: c.int, // Drawing mode: LINES, TRIANGLES, QUADS
+	vertexCount: c.int, // Number of vertices in the draw
+	vertexAlignment: c.int, // Number of vertices required for index alignment (LINES, TRIANGLES)
+	textureId: c.uint, // Texture id to be used on the draw -> used to create a new draw call if it changes
+}
+
+// RenderBatch type
+RenderBatch :: struct {
+ bufferCount: c.int, // Number of vertex buffers (multi-buffering support)
+ currentBuffer: c.int, // Current buffer tracking in case of multi-buffering
+ vertexBuffer: [^]VertexBuffer, // Dynamic buffer(s) for vertex data
+
+ draws: [^]DrawCall, // Draw calls array, depends on textureId
+ drawCounter: c.int, // Draw calls counter
+ currentDepth: f32, // Current depth value for next draw
+}
+
+// OpenGL version
+GlVersion :: enum c.int {
+ OPENGL_11 = 1, // OpenGL 1.1
+ OPENGL_21, // OpenGL 2.1 (GLSL 120)
+ OPENGL_33, // OpenGL 3.3 (GLSL 330)
+ OPENGL_43, // OpenGL 4.3 (using GLSL 330)
+ OPENGL_ES_20, // OpenGL ES 2.0 (GLSL 100)
+ OPENGL_ES_30, // OpenGL ES 3.0 (GLSL 300 es)
+}
+
+PixelFormat :: rl.PixelFormat
+TextureFilter :: rl.TextureFilter
+BlendMode :: rl.BlendMode
+ShaderLocationIndex :: rl.ShaderLocationIndex
+ShaderUniformDataType :: rl.ShaderUniformDataType
+
+// Shader attribute data types
+ShaderAttributeDataType :: enum c.int {
+ FLOAT = 0, // Shader attribute type: float
+ VEC2, // Shader attribute type: vec2 (2 float)
+ VEC3, // Shader attribute type: vec3 (3 float)
+ VEC4, // Shader attribute type: vec4 (4 float)
+}
+
+// Framebuffer attachment type
+// NOTE: By default up to 8 color channels are defined, but there can be more
+FramebufferAttachType :: enum c.int {
+ COLOR_CHANNEL0 = 0, // Framebuffer attachment type: color 0
+ COLOR_CHANNEL1 = 1, // Framebuffer attachment type: color 1
+ COLOR_CHANNEL2 = 2, // Framebuffer attachment type: color 2
+ COLOR_CHANNEL3 = 3, // Framebuffer attachment type: color 3
+ COLOR_CHANNEL4 = 4, // Framebuffer attachment type: color 4
+ COLOR_CHANNEL5 = 5, // Framebuffer attachment type: color 5
+ COLOR_CHANNEL6 = 6, // Framebuffer attachment type: color 6
+ COLOR_CHANNEL7 = 7, // Framebuffer attachment type: color 7
+ DEPTH = 100, // Framebuffer attachment type: depth
+ STENCIL = 200, // Framebuffer attachment type: stencil
+}
+
+// Framebuffer texture attachment type
+FramebufferAttachTextureType :: enum c.int {
+ CUBEMAP_POSITIVE_X = 0, // Framebuffer texture attachment type: cubemap, +X side
+ CUBEMAP_NEGATIVE_X = 1, // Framebuffer texture attachment type: cubemap, -X side
+ CUBEMAP_POSITIVE_Y = 2, // Framebuffer texture attachment type: cubemap, +Y side
+ CUBEMAP_NEGATIVE_Y = 3, // Framebuffer texture attachment type: cubemap, -Y side
+ CUBEMAP_POSITIVE_Z = 4, // Framebuffer texture attachment type: cubemap, +Z side
+ CUBEMAP_NEGATIVE_Z = 5, // Framebuffer texture attachment type: cubemap, -Z side
+ TEXTURE2D = 100, // Framebuffer texture attachment type: texture2d
+ RENDERBUFFER = 200, // Framebuffer texture attachment type: renderbuffer
+}
+
+CullMode :: enum c.int {
+ FRONT = 0,
+ BACK,
+}
+
+Matrix :: rl.Matrix
+
+@(default_calling_convention="c", link_prefix="rl")
+foreign lib {
+ //------------------------------------------------------------------------------------
+ // Functions Declaration - Matrix operations
+ //------------------------------------------------------------------------------------
+ MatrixMode :: proc(mode: c.int) --- // Choose the current matrix to be transformed
+ PushMatrix :: proc() --- // Push the current matrix to stack
+	PopMatrix :: proc() --- // Pop latest inserted matrix from stack
+ LoadIdentity :: proc() --- // Reset current matrix to identity matrix
+ Translatef :: proc(x, y, z: f32) --- // Multiply the current matrix by a translation matrix
+ Rotatef :: proc(angleDeg: f32, x, y, z: f32) --- // Multiply the current matrix by a rotation matrix
+ Scalef :: proc(x, y, z: f32) --- // Multiply the current matrix by a scaling matrix
+ MultMatrixf :: proc(matf: [^]f32) --- // Multiply the current matrix by another matrix
+ Frustum :: proc(left, right, bottom, top, znear, zfar: f64) ---
+ Ortho :: proc(left, right, bottom, top, znear, zfar: f64) ---
+ Viewport :: proc(x, y, width, height: c.int) --- // Set the viewport area
+
+ //------------------------------------------------------------------------------------
+ // Functions Declaration - Vertex level operations
+ //------------------------------------------------------------------------------------
+ Begin :: proc(mode: c.int) --- // Initialize drawing mode (how to organize vertex)
+ End :: proc() --- // Finish vertex providing
+ Vertex2i :: proc(x, y: c.int) --- // Define one vertex (position) - 2 int
+ Vertex2f :: proc(x, y: f32) --- // Define one vertex (position) - 2 f32
+ Vertex3f :: proc(x, y, z: f32) --- // Define one vertex (position) - 3 f32
+ TexCoord2f :: proc(x, y: f32) --- // Define one vertex (texture coordinate) - 2 f32
+ Normal3f :: proc(x, y, z: f32) --- // Define one vertex (normal) - 3 f32
+ Color4ub :: proc(r, g, b, a: u8) --- // Define one vertex (color) - 4 byte
+ Color3f :: proc(x, y, z: f32) --- // Define one vertex (color) - 3 f32
+ Color4f :: proc(x, y, z, w: f32) --- // Define one vertex (color) - 4 f32
+
+ //------------------------------------------------------------------------------------
+ // Functions Declaration - OpenGL style functions (common to 1.1, 3.3+, ES2)
+	// NOTE: These functions are used to completely abstract raylib code from the OpenGL layer;
+	// some of them are direct wrappers over OpenGL calls, others are custom
+ //------------------------------------------------------------------------------------
+
+ // Vertex buffers state
+ EnableVertexArray :: proc(vaoId: c.uint) -> bool --- // Enable vertex array (VAO, if supported)
+ DisableVertexArray :: proc() --- // Disable vertex array (VAO, if supported)
+ EnableVertexBuffer :: proc(id: c.uint) --- // Enable vertex buffer (VBO)
+ DisableVertexBuffer :: proc() --- // Disable vertex buffer (VBO)
+ EnableVertexBufferElement :: proc(id: c.uint) --- // Enable vertex buffer element (VBO element)
+ DisableVertexBufferElement :: proc() --- // Disable vertex buffer element (VBO element)
+ EnableVertexAttribute :: proc(index: c.uint) --- // Enable vertex attribute index
+ DisableVertexAttribute :: proc(index: c.uint) --- // Disable vertex attribute index
+ when GRAPHICS_API_OPENGL_11 {
+ EnableStatePointer :: proc(vertexAttribType: c.int, buffer: rawptr) ---
+ DisableStatePointer :: proc(vertexAttribType: c.int) ---
+ }
+
+ // Textures state
+	ActiveTextureSlot :: proc(slot: c.int) --- // Select and activate a texture slot
+ EnableTexture :: proc(id: c.uint) --- // Enable texture
+ DisableTexture :: proc() --- // Disable texture
+ EnableTextureCubemap :: proc(id: c.uint) --- // Enable texture cubemap
+ DisableTextureCubemap :: proc() --- // Disable texture cubemap
+ TextureParameters :: proc(id: c.uint, param: c.int, value: c.int) --- // Set texture parameters (filter, wrap)
+ CubemapParameters :: proc(id: i32, param: c.int, value: c.int) --- // Set cubemap parameters (filter, wrap)
+
+ // Shader state
+ EnableShader :: proc(id: c.uint) --- // Enable shader program
+ DisableShader :: proc() --- // Disable shader program
+
+ // Framebuffer state
+ EnableFramebuffer :: proc(id: c.uint) --- // Enable render texture (fbo)
+ DisableFramebuffer :: proc() --- // Disable render texture (fbo), return to default framebuffer
+ ActiveDrawBuffers :: proc(count: c.int) --- // Activate multiple draw color buffers
+ BlitFramebuffer :: proc(srcX, srcY, srcWidth, srcHeight, dstX, dstY, dstWidth, dstHeight, bufferMask: c.int) --- // Blit active framebuffer to main framebuffer
+
+ // General render state
+ DisableColorBlend :: proc() --- // Disable color blending
+ EnableDepthTest :: proc() --- // Enable depth test
+ DisableDepthTest :: proc() --- // Disable depth test
+ EnableDepthMask :: proc() --- // Enable depth write
+ DisableDepthMask :: proc() --- // Disable depth write
+ EnableBackfaceCulling :: proc() --- // Enable backface culling
+ DisableBackfaceCulling :: proc() --- // Disable backface culling
+ SetCullFace :: proc(mode: CullMode) --- // Set face culling mode
+ EnableScissorTest :: proc() --- // Enable scissor test
+ DisableScissorTest :: proc() --- // Disable scissor test
+ Scissor :: proc(x, y, width, height: c.int) --- // Scissor test
+ EnableWireMode :: proc() --- // Enable wire mode
+ EnablePointMode :: proc() --- // Enable point mode
+ DisableWireMode :: proc() --- // Disable wire and point modes
+ SetLineWidth :: proc(width: f32) --- // Set the line drawing width
+ GetLineWidth :: proc() -> f32 --- // Get the line drawing width
+ EnableSmoothLines :: proc() --- // Enable line aliasing
+ DisableSmoothLines :: proc() --- // Disable line aliasing
+ EnableStereoRender :: proc() --- // Enable stereo rendering
+ DisableStereoRender :: proc() --- // Disable stereo rendering
+ IsStereoRenderEnabled :: proc() -> bool --- // Check if stereo render is enabled
+
+
+ ClearColor :: proc(r, g, b, a: u8) --- // Clear color buffer with color
+ ClearScreenBuffers :: proc() --- // Clear used screen buffers (color and depth)
+ CheckErrors :: proc() --- // Check and log OpenGL error codes
+ SetBlendMode :: proc(mode: c.int) --- // Set blending mode
+ SetBlendFactors :: proc(glSrcFactor, glDstFactor, glEquation: c.int) --- // Set blending mode factor and equation (using OpenGL factors)
+ SetBlendFactorsSeparate :: proc(glSrcRGB, glDstRGB, glSrcAlpha, glDstAlpha, glEqRGB, glEqAlpha: c.int) --- // Set blending mode factors and equations separately (using OpenGL factors)
+
+ //------------------------------------------------------------------------------------
+ // Functions Declaration - rlgl functionality
+ //------------------------------------------------------------------------------------
+ // rlgl initialization functions
+ @(link_prefix="rlgl")
+ Init :: proc(width, height: c.int) --- // Initialize rlgl (buffers, shaders, textures, states)
+ @(link_prefix="rlgl")
+ Close :: proc() --- // De-initialize rlgl (buffers, shaders, textures)
+ LoadExtensions :: proc(loader: rawptr) --- // Load OpenGL extensions (loader function required)
+ GetVersion :: proc() -> GlVersion --- // Get current OpenGL version
+ SetFramebufferWidth :: proc(width: c.int) --- // Set current framebuffer width
+ GetFramebufferWidth :: proc() -> c.int --- // Get default framebuffer width
+ SetFramebufferHeight :: proc(height: c.int) --- // Set current framebuffer height
+ GetFramebufferHeight :: proc() -> c.int --- // Get default framebuffer height
+
+
+ GetTextureIdDefault :: proc() -> c.uint --- // Get default texture id
+ GetShaderIdDefault :: proc() -> c.uint --- // Get default shader id
+ GetShaderLocsDefault :: proc() -> [^]c.int --- // Get default shader locations
+
+ // Render batch management
+	// NOTE: rlgl provides a default render batch to behave like OpenGL 1.1 immediate mode,
+	// but this render batch API is exposed in case custom batches are required
+ LoadRenderBatch :: proc(numBuffers, bufferElements: c.int) -> RenderBatch --- // Load a render batch system
+ UnloadRenderBatch :: proc(batch: RenderBatch) --- // Unload render batch system
+ DrawRenderBatch :: proc(batch: ^RenderBatch) --- // Draw render batch data (Update->Draw->Reset)
+ SetRenderBatchActive :: proc(batch: ^RenderBatch) --- // Set the active render batch for rlgl (NULL for default internal)
+ DrawRenderBatchActive :: proc() --- // Update and draw internal render batch
+	CheckRenderBatchLimit :: proc(vCount: c.int) -> c.int --- // Check internal buffer overflow for a given number of vertices
+
+ SetTexture :: proc(id: c.uint) --- // Set current texture for render batch and check buffers limits
+
+ //------------------------------------------------------------------------------------------------------------------------
+
+ // Vertex buffers management
+ LoadVertexArray :: proc() -> c.uint --- // Load vertex array (vao) if supported
+ LoadVertexBuffer :: proc(buffer: rawptr, size: c.int, is_dynamic: bool) -> c.uint --- // Load a vertex buffer attribute
+ LoadVertexBufferElement :: proc(buffer: rawptr, size: c.int, is_dynamic: bool) -> c.uint --- // Load a new attributes element buffer
+ UpdateVertexBuffer :: proc(bufferId: c.uint, data: rawptr, dataSize: c.int, offset: c.int) --- // Update GPU buffer with new data
+ UpdateVertexBufferElements :: proc(id: c.uint, data: rawptr, dataSize: c.int, offset: c.int) --- // Update vertex buffer elements with new data
+ UnloadVertexArray :: proc(vaoId: c.uint) ---
+ UnloadVertexBuffer :: proc(vboId: c.uint) ---
+ SetVertexAttribute :: proc(index: c.uint, compSize: c.int, type: c.int, normalized: bool, stride: c.int, pointer: rawptr) ---
+ SetVertexAttributeDivisor :: proc(index: c.uint, divisor: c.int) ---
+ SetVertexAttributeDefault :: proc(locIndex: c.int, value: rawptr, attribType: c.int, count: c.int) --- // Set vertex attribute default value
+ DrawVertexArray :: proc(offset: c.int, count: c.int) ---
+ DrawVertexArrayElements :: proc(offset: c.int, count: c.int, buffer: rawptr) ---
+ DrawVertexArrayInstanced :: proc(offset: c.int, count: c.int, instances: c.int) ---
+ DrawVertexArrayElementsInstanced :: proc(offset: c.int, count: c.int, buffer: rawptr, instances: c.int) ---
+
+ // Textures management
+ LoadTexture :: proc(data: rawptr, width, height: c.int, format: c.int, mipmapCount: c.int) -> c.uint --- // Load texture in GPU
+ LoadTextureDepth :: proc(width, height: c.int, useRenderBuffer: bool) -> c.uint --- // Load depth texture/renderbuffer (to be attached to fbo)
+ LoadTextureCubemap :: proc(data: rawptr, size: c.int, format: c.int) -> c.uint --- // Load texture cubemap
+ UpdateTexture :: proc(id: c.uint, offsetX, offsetY: c.int, width, height: c.int, format: c.int, data: rawptr) --- // Update GPU texture with new data
+ GetGlTextureFormats :: proc(format: c.int, glInternalFormat, glFormat, glType: ^c.uint) --- // Get OpenGL internal formats
+ GetPixelFormatName :: proc(format: c.uint) -> cstring --- // Get name string for pixel format
+ UnloadTexture :: proc(id: c.uint) --- // Unload texture from GPU memory
+ GenTextureMipmaps :: proc(id: c.uint, width, height: c.int, format: c.int, mipmaps: ^c.int) --- // Generate mipmap data for selected texture
+ ReadTexturePixels :: proc(id: c.uint, width, height: c.int, format: c.int) -> rawptr --- // Read texture pixel data
+ ReadScreenPixels :: proc(width, height: c.int) -> [^]byte --- // Read screen pixel data (color buffer)
+
+ // Framebuffer management (fbo)
+ LoadFramebuffer :: proc(width, height: c.int) -> c.uint --- // Load an empty framebuffer
+ FramebufferAttach :: proc(fboId, texId: c.uint, attachType: c.int, texType: c.int, mipLevel: c.int) --- // Attach texture/renderbuffer to a framebuffer
+ FramebufferComplete :: proc(id: c.uint) -> bool --- // Verify framebuffer is complete
+ UnloadFramebuffer :: proc(id: c.uint) --- // Delete framebuffer from GPU
+
+ // Shaders management
+ LoadShaderCode :: proc(vsCode, fsCode: cstring) -> c.uint --- // Load shader from code strings
+ CompileShader :: proc(shaderCode: cstring, type: c.int) -> c.uint --- // Compile custom shader and return shader id (type: VERTEX_SHADER, FRAGMENT_SHADER, COMPUTE_SHADER)
+ LoadShaderProgram :: proc(vShaderId, fShaderId: c.uint) -> c.uint --- // Load custom shader program
+ UnloadShaderProgram :: proc(id: c.uint) --- // Unload shader program
+ GetLocationUniform :: proc(shaderId: c.uint, uniformName: cstring) -> c.int --- // Get shader location uniform
+ GetLocationAttrib :: proc(shaderId: c.uint, attribName: cstring) -> c.int --- // Get shader location attribute
+ SetUniform :: proc(locIndex: c.int, value: rawptr, uniformType: c.int, count: c.int) --- // Set shader value uniform
+ SetUniformMatrix :: proc(locIndex: c.int, mat: Matrix) --- // Set shader value matrix
+ SetUniformSampler :: proc(locIndex: c.int, textureId: c.uint) --- // Set shader value sampler
+ SetShader :: proc(id: c.uint, locs: [^]c.int) --- // Set shader currently active (id and locations)
+
+ // Compute shader management
+ LoadComputeShaderProgram :: proc(shaderId: c.uint) -> c.uint --- // Load compute shader program
+ ComputeShaderDispatch :: proc(groupX, groupY, groupZ: c.uint) --- // Dispatch compute shader (equivalent to *draw* for graphics pipeline)
+
+ // Shader buffer storage object management (ssbo)
+ LoadShaderBuffer :: proc(size: c.uint, data: rawptr, usageHint: c.int) -> c.uint --- // Load shader storage buffer object (SSBO)
+ UnloadShaderBuffer :: proc(ssboId: c.uint) --- // Unload shader storage buffer object (SSBO)
+ UpdateShaderBuffer :: proc(id: c.uint, data: rawptr, dataSize: c.uint, offset: c.uint) --- // Update SSBO buffer data
+ BindShaderBuffer :: proc(id: c.uint, index: c.uint) --- // Bind SSBO buffer
+ ReadShaderBuffer :: proc(id: c.uint, dest: rawptr, count: c.uint, offset: c.uint) --- // Read SSBO buffer data (GPU->CPU)
+ CopyShaderBuffer :: proc(destId, srcId: c.uint, destOffset, srcOffset: c.uint, count: c.uint) --- // Copy SSBO data between buffers
+ GetShaderBufferSize :: proc(id: c.uint) -> c.uint --- // Get SSBO buffer size
+
+ // Buffer management
+ BindImageTexture :: proc(id: c.uint, index: c.uint, format: c.int, readonly: bool) --- // Bind image texture
+
+ // Matrix state management
+ GetMatrixModelview :: proc() -> Matrix --- // Get internal modelview matrix
+ GetMatrixProjection :: proc() -> Matrix --- // Get internal projection matrix
+ GetMatrixTransform :: proc() -> Matrix --- // Get internal accumulated transform matrix
+ GetMatrixProjectionStereo :: proc(eye: c.int) -> Matrix --- // Get internal projection matrix for stereo render (selected eye)
+ GetMatrixViewOffsetStereo :: proc(eye: c.int) -> Matrix --- // Get internal view offset matrix for stereo render (selected eye)
+ SetMatrixProjection :: proc(proj: Matrix) --- // Set a custom projection matrix (replaces internal projection matrix)
+ SetMatrixModelview :: proc(view: Matrix) --- // Set a custom modelview matrix (replaces internal modelview matrix)
+ SetMatrixProjectionStereo :: proc(right, left: Matrix) --- // Set eyes projection matrices for stereo rendering
+ SetMatrixViewOffsetStereo :: proc(right, left: Matrix) --- // Set eyes view offsets matrices for stereo rendering
+
+ // Quick and dirty cube/quad buffers load->draw->unload
+ LoadDrawCube :: proc() --- // Load and draw a cube
+ LoadDrawQuad :: proc() --- // Load and draw a quad
+}
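For orientation, here is a minimal usage sketch of the new bindings' immediate-mode API, assuming a window and GL context are created through the main raylib package. Only procedures and constants declared in the file above are used; the example package name, window size, and colors are illustrative rather than part of the bindings:

    package rlgl_example

    import rl   "vendor:raylib"
    import rlgl "vendor:raylib/rlgl"

    main :: proc() {
    	rl.InitWindow(800, 450, "rlgl immediate-mode sketch")
    	defer rl.CloseWindow()

    	for !rl.WindowShouldClose() {
    		rl.BeginDrawing()
    		rl.ClearBackground(rl.RAYWHITE)

    		// Accumulate one triangle in the default render batch.
    		rlgl.Begin(rlgl.TRIANGLES)
    		rlgl.Color4ub(230, 41, 55, 255)
    		rlgl.Vertex2f(400, 100)
    		rlgl.Vertex2f(200, 350)
    		rlgl.Vertex2f(600, 350)
    		rlgl.End()

    		// Optional: force the batched vertex data to be flushed now
    		// instead of waiting for rl.EndDrawing() to do it.
    		rlgl.DrawRenderBatchActive()

    		rl.EndDrawing()
    	}
    }

Since RAYLIB_SHARED is read through #config, it can be flipped at build time (e.g. `odin build . -define:RAYLIB_SHARED=true`) to link against the shared raylib instead of the static archive.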
diff --git a/vendor/sdl2/sdl_render.odin b/vendor/sdl2/sdl_render.odin
index f948b39b0..cceebf3ac 100644
--- a/vendor/sdl2/sdl_render.odin
+++ b/vendor/sdl2/sdl_render.odin
@@ -76,7 +76,7 @@ foreign lib {
GetRenderer :: proc(window: ^Window) -> ^Renderer ---
GetRendererInfo :: proc(renderer: ^Renderer, info: ^RendererInfo) -> c.int ---
GetRendererOutputSize :: proc(renderer: ^Renderer, w, h: ^c.int) -> c.int ---
- CreateTexture :: proc(renderer: ^Renderer, format: u32, access: TextureAccess, w, h: c.int) -> ^Texture ---
+ CreateTexture :: proc(renderer: ^Renderer, format: PixelFormatEnum, access: TextureAccess, w, h: c.int) -> ^Texture ---
CreateTextureFromSurface :: proc(renderer: ^Renderer, surface: ^Surface) -> ^Texture ---
QueryTexture :: proc(texture: ^Texture, format: ^u32, access, w, h: ^c.int) -> c.int ---
SetTextureColorMod :: proc(texture: ^Texture, r, g, b: u8) -> c.int ---
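With the format parameter now typed as PixelFormatEnum rather than a raw u32, call sites can pass enum members directly. A hedged sketch, assuming `import sdl2 "vendor:sdl2"`, an existing `renderer: ^sdl2.Renderer`, and arbitrary dimensions (.RGBA8888 and .STREAMING are members of the existing PixelFormatEnum and TextureAccess enums):

    tex := sdl2.CreateTexture(renderer, .RGBA8888, .STREAMING, 256, 256)
    if tex == nil {
    	// sdl2.GetError() reports why texture creation failed.
    }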
diff --git a/vendor/stb/image/stb_image.odin b/vendor/stb/image/stb_image.odin
index c7caf801e..828a1c2bd 100644
--- a/vendor/stb/image/stb_image.odin
+++ b/vendor/stb/image/stb_image.odin
@@ -2,13 +2,26 @@ package stb_image
import c "core:c/libc"
-#assert(size_of(c.int) == size_of(b32))
-
- when ODIN_OS == .Windows { foreign import stbi "../lib/stb_image.lib" }
-else when ODIN_OS == .Linux { foreign import stbi "../lib/stb_image.a" }
-else when ODIN_OS == .Darwin { foreign import stbi "../lib/darwin/stb_image.a" }
-else { foreign import stbi "system:stb_image" }
+@(private)
+LIB :: (
+ "../lib/stb_image.lib" when ODIN_OS == .Windows
+ else "../lib/stb_image.a" when ODIN_OS == .Linux
+ else "../lib/darwin/stb_image.a" when ODIN_OS == .Darwin
+ else ""
+)
+
+when LIB != "" {
+ when !#exists(LIB) {
+		// The STB libraries are shipped with the compiler on Windows, so a Windows-specific message should not be needed.
+ #panic("Could not find the compiled STB libraries, they can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/stb/src\"`")
+ }
+
+ foreign import stbi { LIB }
+} else {
+ foreign import stbi "system:stb_image"
+}
+#assert(size_of(c.int) == size_of(b32))
#assert(size_of(b32) == size_of(c.int))
//
diff --git a/vendor/stb/image/stb_image_resize.odin b/vendor/stb/image/stb_image_resize.odin
index c464964df..d407a1852 100644
--- a/vendor/stb/image/stb_image_resize.odin
+++ b/vendor/stb/image/stb_image_resize.odin
@@ -2,11 +2,24 @@ package stb_image
import c "core:c/libc"
- when ODIN_OS == .Windows { foreign import lib "../lib/stb_image_resize.lib" }
-else when ODIN_OS == .Linux { foreign import lib "../lib/stb_image_resize.a" }
-else when ODIN_OS == .Darwin { foreign import lib "../lib/darwin/stb_image_resize.a" }
-else { foreign import lib "system:stb_image_resize" }
-
+@(private)
+RESIZE_LIB :: (
+ "../lib/stb_image_resize.lib" when ODIN_OS == .Windows
+ else "../lib/stb_image_resize.a" when ODIN_OS == .Linux
+ else "../lib/darwin/stb_image_resize.a" when ODIN_OS == .Darwin
+ else ""
+)
+
+when RESIZE_LIB != "" {
+ when !#exists(RESIZE_LIB) {
+		// The STB libraries are shipped with the compiler on Windows, so a Windows-specific message should not be needed.
+ #panic("Could not find the compiled STB libraries, they can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/stb/src\"`")
+ }
+
+ foreign import lib { RESIZE_LIB }
+} else {
+ foreign import lib "system:stb_image_resize"
+}
//////////////////////////////////////////////////////////////////////////////
//
diff --git a/vendor/stb/image/stb_image_write.odin b/vendor/stb/image/stb_image_write.odin
index 9ed97eb48..f030f1e28 100644
--- a/vendor/stb/image/stb_image_write.odin
+++ b/vendor/stb/image/stb_image_write.odin
@@ -2,11 +2,24 @@ package stb_image
import c "core:c/libc"
- when ODIN_OS == .Windows { foreign import stbiw "../lib/stb_image_write.lib" }
-else when ODIN_OS == .Linux { foreign import stbiw "../lib/stb_image_write.a" }
-else when ODIN_OS == .Darwin { foreign import stbiw "../lib/darwin/stb_image_write.a" }
-else { foreign import stbiw "system:stb_image_write" }
+@(private)
+WRITE_LIB :: (
+ "../lib/stb_image_write.lib" when ODIN_OS == .Windows
+ else "../lib/stb_image_write.a" when ODIN_OS == .Linux
+ else "../lib/darwin/stb_image_write.a" when ODIN_OS == .Darwin
+ else ""
+)
+when WRITE_LIB != "" {
+ when !#exists(WRITE_LIB) {
+		// The STB libraries are shipped with the compiler on Windows, so a Windows-specific message should not be needed.
+ #panic("Could not find the compiled STB libraries, they can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/stb/src\"`")
+ }
+
+ foreign import stbiw { WRITE_LIB }
+} else {
+ foreign import stbiw "system:stb_image_write"
+}
write_func :: proc "c" (ctx: rawptr, data: rawptr, size: c.int)
diff --git a/vendor/stb/rect_pack/stb_rect_pack.odin b/vendor/stb/rect_pack/stb_rect_pack.odin
index 3a2544b81..6c0b56378 100644
--- a/vendor/stb/rect_pack/stb_rect_pack.odin
+++ b/vendor/stb/rect_pack/stb_rect_pack.odin
@@ -4,10 +4,23 @@ import "core:c"
#assert(size_of(b32) == size_of(c.int))
- when ODIN_OS == .Windows { foreign import lib "../lib/stb_rect_pack.lib" }
-else when ODIN_OS == .Linux { foreign import lib "../lib/stb_rect_pack.a" }
-else when ODIN_OS == .Darwin { foreign import lib "../lib/darwin/stb_rect_pack.a" }
-else { foreign import lib "system:stb_rect_pack" }
+@(private)
+LIB :: (
+ "../lib/stb_rect_pack.lib" when ODIN_OS == .Windows
+ else "../lib/stb_rect_pack.a" when ODIN_OS == .Linux
+ else "../lib/darwin/stb_rect_pack.a" when ODIN_OS == .Darwin
+ else ""
+)
+
+when LIB != "" {
+ when !#exists(LIB) {
+ #panic("Could not find the compiled STB libraries, they can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/stb/src\"`")
+ }
+
+ foreign import lib { LIB }
+} else {
+ foreign import lib "system:stb_rect_pack"
+}
Coord :: distinct c.int
_MAXVAL :: max(Coord)
diff --git a/vendor/stb/src/Makefile b/vendor/stb/src/Makefile
index a6db3e297..1027cf8d2 100644
--- a/vendor/stb/src/Makefile
+++ b/vendor/stb/src/Makefile
@@ -14,7 +14,7 @@ unix:
$(AR) rcs ../lib/stb_image_resize.a stb_image_resize.o
$(AR) rcs ../lib/stb_truetype.a stb_truetype.o
$(AR) rcs ../lib/stb_rect_pack.a stb_rect_pack.o
- #$(AR) rcs ../lib/stb_vorbis_pack.a stb_vorbis_pack.o
+ $(AR) rcs ../lib/stb_vorbis.a stb_vorbis.o
#$(CC) -fPIC -shared -Wl,-soname=stb_image.so -o ../lib/stb_image.so stb_image.o
#$(CC) -fPIC -shared -Wl,-soname=stb_image_write.so -o ../lib/stb_image_write.so stb_image_write.o
#$(CC) -fPIC -shared -Wl,-soname=stb_image_resize.so -o ../lib/stb_image_resize.so stb_image_resize.o
diff --git a/vendor/stb/truetype/stb_truetype.odin b/vendor/stb/truetype/stb_truetype.odin
index 1600041de..876138c3a 100644
--- a/vendor/stb/truetype/stb_truetype.odin
+++ b/vendor/stb/truetype/stb_truetype.odin
@@ -3,11 +3,23 @@ package stb_truetype
import c "core:c"
import stbrp "vendor:stb/rect_pack"
- when ODIN_OS == .Windows { foreign import stbtt "../lib/stb_truetype.lib" }
-else when ODIN_OS == .Linux { foreign import stbtt "../lib/stb_truetype.a" }
-else when ODIN_OS == .Darwin { foreign import stbtt "../lib/darwin/stb_truetype.a" }
-else { foreign import stbtt "system:stb_truetype" }
-
+@(private)
+LIB :: (
+ "../lib/stb_truetype.lib" when ODIN_OS == .Windows
+ else "../lib/stb_truetype.a" when ODIN_OS == .Linux
+ else "../lib/darwin/stb_truetype.a" when ODIN_OS == .Darwin
+ else ""
+)
+
+when LIB != "" {
+ when !#exists(LIB) {
+ #panic("Could not find the compiled STB libraries, they can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/stb/src\"`")
+ }
+
+ foreign import stbtt { LIB }
+} else {
+ foreign import stbtt "system:stb_truetype"
+}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
diff --git a/vendor/stb/vorbis/stb_vorbis.odin b/vendor/stb/vorbis/stb_vorbis.odin
index 0c887a473..867ffb86d 100644
--- a/vendor/stb/vorbis/stb_vorbis.odin
+++ b/vendor/stb/vorbis/stb_vorbis.odin
@@ -2,13 +2,23 @@ package stb_vorbis
import c "core:c/libc"
-
- when ODIN_OS == .Windows { foreign import lib "../lib/stb_vorbis.lib" }
-else when ODIN_OS == .Linux { foreign import lib "../lib/stb_vorbis.a" }
-else when ODIN_OS == .Darwin { foreign import lib "../lib/darwin/stb_vorbis.a" }
-else { foreign import lib "system:stb_vorbis" }
-
-
+@(private)
+LIB :: (
+ "../lib/stb_vorbis.lib" when ODIN_OS == .Windows
+ else "../lib/stb_vorbis.a" when ODIN_OS == .Linux
+ else "../lib/darwin/stb_vorbis.a" when ODIN_OS == .Darwin
+ else ""
+)
+
+when LIB != "" {
+ when !#exists(LIB) {
+ #panic("Could not find the compiled STB libraries, they can be compiled by running `make -C \"" + ODIN_ROOT + "vendor/stb/src\"`")
+ }
+
+ foreign import lib { LIB }
+} else {
+ foreign import lib "system:stb_vorbis"
+}
/////////// THREAD SAFETY
diff --git a/vendor/wasm/js/runtime.js b/vendor/wasm/js/runtime.js
index 8b4ad157b..5c7f97fae 100644
--- a/vendor/wasm/js/runtime.js
+++ b/vendor/wasm/js/runtime.js
@@ -13,14 +13,18 @@ function stripNewline(str) {
return str.replace(/\n/, ' ')
}
-const INT_SIZE = 4; // NOTE: set to `8` if the target has 64 bit ints (`wasm64p32` for example).
-const STRING_SIZE = 2*INT_SIZE;
-
class WasmMemoryInterface {
constructor() {
this.memory = null;
this.exports = null;
this.listenerMap = {};
+
+ // Size (in bytes) of the integer type, should be 4 on `js_wasm32` and 8 on `js_wasm64p32`
+ this.intSize = 4;
+ }
+
+ setIntSize(size) {
+ this.intSize = size;
}
setMemory(memory) {
@@ -73,21 +77,21 @@ class WasmMemoryInterface {
loadF32(addr) { return this.mem.getFloat32(addr, true); }
loadF64(addr) { return this.mem.getFloat64(addr, true); }
loadInt(addr) {
- if (INT_SIZE == 8) {
+ if (this.intSize == 8) {
return this.loadI64(addr);
- } else if (INT_SIZE == 4) {
+ } else if (this.intSize == 4) {
return this.loadI32(addr);
} else {
- throw new Error('Unhandled `INT_SIZE`, expected `4` or `8`');
+ throw new Error('Unhandled `intSize`, expected `4` or `8`');
}
};
loadUint(addr) {
- if (INT_SIZE == 8) {
+ if (this.intSize == 8) {
return this.loadU64(addr);
- } else if (INT_SIZE == 4) {
+ } else if (this.intSize == 4) {
return this.loadU32(addr);
} else {
- throw new Error('Unhandled `INT_SIZE`, expected `4` or `8`');
+ throw new Error('Unhandled `intSize`, expected `4` or `8`');
}
};
loadPtr(addr) { return this.loadU32(addr); }
@@ -108,31 +112,43 @@ class WasmMemoryInterface {
storeU32(addr, value) { this.mem.setUint32 (addr, value, true); }
storeI32(addr, value) { this.mem.setInt32 (addr, value, true); }
storeU64(addr, value) {
- this.mem.setUint32(addr + 0, value, true);
- this.mem.setUint32(addr + 4, Math.floor(value / 4294967296), true);
+ this.mem.setUint32(addr + 0, Number(value), true);
+
+ let div = 4294967296;
+ if (typeof value == 'bigint') {
+ div = BigInt(div);
+ }
+
+ this.mem.setUint32(addr + 4, Math.floor(Number(value / div)), true);
}
storeI64(addr, value) {
- this.mem.setUint32(addr + 0, value, true);
- this.mem.setInt32 (addr + 4, Math.floor(value / 4294967296), true);
+ this.mem.setUint32(addr + 0, Number(value), true);
+
+ let div = 4294967296;
+ if (typeof value == 'bigint') {
+ div = BigInt(div);
+ }
+
+ this.mem.setInt32(addr + 4, Math.floor(Number(value / div)), true);
}
storeF32(addr, value) { this.mem.setFloat32(addr, value, true); }
storeF64(addr, value) { this.mem.setFloat64(addr, value, true); }
storeInt(addr, value) {
- if (INT_SIZE == 8) {
+ if (this.intSize == 8) {
this.storeI64(addr, value);
- } else if (INT_SIZE == 4) {
+ } else if (this.intSize == 4) {
this.storeI32(addr, value);
} else {
- throw new Error('Unhandled `INT_SIZE`, expected `4` or `8`');
+ throw new Error('Unhandled `intSize`, expected `4` or `8`');
}
}
storeUint(addr, value) {
- if (INT_SIZE == 8) {
+ if (this.intSize == 8) {
this.storeU64(addr, value);
- } else if (INT_SIZE == 4) {
+ } else if (this.intSize == 4) {
this.storeU32(addr, value);
} else {
- throw new Error('Unhandled `INT_SIZE`, expected `4` or `8`');
+ throw new Error('Unhandled `intSize`, expected `4` or `8`');
}
}
@@ -241,10 +257,11 @@ class WebGLInterface {
}
}
getSource(shader, strings_ptr, strings_length) {
+ const stringSize = this.mem.intSize*2;
let source = "";
for (let i = 0; i < strings_length; i++) {
- let ptr = this.mem.loadPtr(strings_ptr + i*STRING_SIZE);
- let len = this.mem.loadPtr(strings_ptr + i*STRING_SIZE + 4);
+ let ptr = this.mem.loadPtr(strings_ptr + i*stringSize);
+ let len = this.mem.loadPtr(strings_ptr + i*stringSize + 4);
let str = this.mem.loadString(ptr, len);
source += str;
}
@@ -1151,10 +1168,11 @@ class WebGLInterface {
},
TransformFeedbackVaryings: (program, varyings_ptr, varyings_len, bufferMode) => {
this.assertWebGL2();
+ const stringSize = this.mem.intSize*2;
let varyings = [];
for (let i = 0; i < varyings_len; i++) {
- let ptr = this.mem.loadPtr(varyings_ptr + i*STRING_SIZE + 0*4);
- let len = this.mem.loadPtr(varyings_ptr + i*STRING_SIZE + 1*4);
+ let ptr = this.mem.loadPtr(varyings_ptr + i*stringSize + 0*4);
+ let len = this.mem.loadPtr(varyings_ptr + i*stringSize + 1*4);
varyings.push(this.mem.loadString(ptr, len));
}
this.ctx.transformFeedbackVaryings(this.programs[program], varyings, bufferMode);
@@ -1393,7 +1411,7 @@ function odinSetupDefaultImports(wasmMemoryInterface, consoleElement) {
},
"odin_dom": {
init_event_raw: (ep) => {
- const W = 4;
+ const W = wasmMemoryInterface.intSize;
let offset = ep;
let off = (amount, alignment) => {
if (alignment === undefined) {
@@ -1407,6 +1425,13 @@ function odinSetupDefaultImports(wasmMemoryInterface, consoleElement) {
return x;
};
+ let align = (alignment) => {
+ const modulo = offset & (alignment-1);
+ if (modulo != 0) {
+ offset += alignment - modulo
+ }
+ };
+
let wmi = wasmMemoryInterface;
let e = event_temp_data.event;
@@ -1427,10 +1452,12 @@ function odinSetupDefaultImports(wasmMemoryInterface, consoleElement) {
wmi.storeU32(off(4), 0);
}
- wmi.storeUint(off(W), event_temp_data.id_ptr);
+ align(W);
+
+ wmi.storeI32(off(W), event_temp_data.id_ptr);
wmi.storeUint(off(W), event_temp_data.id_len);
- wmi.storeUint(off(W), 0); // padding
+ align(8);
wmi.storeF64(off(8), e.timeStamp*1e-3);
wmi.storeU8(off(1), e.eventPhase);
@@ -1442,8 +1469,13 @@ function odinSetupDefaultImports(wasmMemoryInterface, consoleElement) {
wmi.storeU8(off(1), !!e.isComposing);
wmi.storeU8(off(1), !!e.isTrusted);
- let base = off(0, 8);
- if (e instanceof MouseEvent) {
+ align(8);
+ if (e instanceof WheelEvent) {
+ wmi.storeF64(off(8), e.deltaX);
+ wmi.storeF64(off(8), e.deltaY);
+ wmi.storeF64(off(8), e.deltaZ);
+ wmi.storeU32(off(4), e.deltaMode);
+ } else if (e instanceof MouseEvent) {
wmi.storeI64(off(8), e.screenX);
wmi.storeI64(off(8), e.screenY);
wmi.storeI64(off(8), e.clientX);
@@ -1482,11 +1514,6 @@ function odinSetupDefaultImports(wasmMemoryInterface, consoleElement) {
wmi.storeI32(off(W), e.code.length)
wmi.storeString(off(16, 1), e.key);
wmi.storeString(off(16, 1), e.code);
- } else if (e instanceof WheelEvent) {
- wmi.storeF64(off(8), e.deltaX);
- wmi.storeF64(off(8), e.deltaY);
- wmi.storeF64(off(8), e.deltaZ);
- wmi.storeU32(off(4), e.deltaMode);
} else if (e.type === 'scroll') {
wmi.storeF64(off(8), window.scrollX);
wmi.storeF64(off(8), window.scrollY);
@@ -1689,8 +1716,15 @@ function odinSetupDefaultImports(wasmMemoryInterface, consoleElement) {
};
};
-async function runWasm(wasmPath, consoleElement, extraForeignImports) {
- let wasmMemoryInterface = new WasmMemoryInterface();
+/**
+ * @param {string} wasmPath - Path to the WASM module to run
+ * @param {?HTMLPreElement} consoleElement - Optional console/pre element to append output to, in addition to the console
+ * @param {any} extraForeignImports - Imports to provide to the module, in addition to the default runtime
+ * @param {?number} intSize - Size (in bytes) of the integer type, should be 4 on `js_wasm32` and 8 on `js_wasm64p32`
+ */
+async function runWasm(wasmPath, consoleElement, extraForeignImports, intSize = 4) {
+ const wasmMemoryInterface = new WasmMemoryInterface();
+ wasmMemoryInterface.setIntSize(intSize);
let imports = odinSetupDefaultImports(wasmMemoryInterface, consoleElement);
let exports = {};
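Existing `js_wasm32` callers need no changes; only code targeting `js_wasm64p32` would pass the new fourth argument, e.g. `runWasm("module.wasm", consoleElement, {}, 8)`, where the module path and console element here are placeholders.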
diff --git a/vendor/x11/xlib/xlib_const.odin b/vendor/x11/xlib/xlib_const.odin
index 910940dec..0b87ab9f7 100644
--- a/vendor/x11/xlib/xlib_const.odin
+++ b/vendor/x11/xlib/xlib_const.odin
@@ -17,6 +17,11 @@ AllTemporary :: 0
CurrentTime :: 0
NoSymbol :: 0
+PropModeReplace :: 0
+PropModePrepend :: 1
+PropModeAppend :: 2
+
+XA_ATOM :: Atom(4)
XA_WM_CLASS :: Atom(67)
XA_WM_CLIENT_MACHINE :: Atom(36)
XA_WM_COMMAND :: Atom(34)
diff --git a/vendor/x11/xlib/xlib_procs.odin b/vendor/x11/xlib/xlib_procs.odin
index 5e999519b..20ec5bb39 100644
--- a/vendor/x11/xlib/xlib_procs.odin
+++ b/vendor/x11/xlib/xlib_procs.odin
@@ -6,110 +6,117 @@ foreign xlib {
@(link_name="_Xdebug") _Xdebug: i32
}
+foreign import xcursor "system:Xcursor"
+@(default_calling_convention="c", link_prefix="X")
+foreign xcursor {
+ cursorGetTheme :: proc(display: ^Display) -> cstring ---
+ cursorGetDefaultSize :: proc(display: ^Display) -> i32 ---
+ cursorLibraryLoadImage :: proc(name: cstring, theme: cstring, size: i32) -> rawptr ---
+ cursorImageLoadCursor :: proc(display: ^Display, img: rawptr) -> Cursor ---
+ cursorImageDestroy :: proc(img: rawptr) ---
+}
+
/* ---- X11/Xlib.h ---------------------------------------------------------*/
-@(default_calling_convention="c")
+@(default_calling_convention="c", link_prefix="X")
foreign xlib {
// Free data allocated by Xlib
- XFree :: proc(ptr: rawptr) ---
+ Free :: proc(ptr: rawptr) ---
// Opening/closing a display
- XOpenDisplay :: proc(name: cstring) -> ^Display ---
- XCloseDisplay :: proc(display: ^Display) ---
- XSetCloseDownMode :: proc(display: ^Display, mode: CloseMode) ---
+ OpenDisplay :: proc(name: cstring) -> ^Display ---
+ CloseDisplay :: proc(display: ^Display) ---
+ SetCloseDownMode :: proc(display: ^Display, mode: CloseMode) ---
// Generate a no-op request
- XNoOp :: proc(display: ^Display) ---
+ NoOp :: proc(display: ^Display) ---
// Display macros (connection)
- XConnectionNumber :: proc(display: ^Display) -> i32 ---
- XExtendedMaxRequestSize ::
- proc(display: ^Display) -> int ---
- XMaxRequestSize :: proc(display: ^Display) -> int ---
- XLastKnownRequestProcessed ::
- proc(display: ^Display) -> uint ---
- XNextRequest :: proc(display: ^Display) -> uint ---
- XProtocolVersion :: proc(display: ^Display) -> i32 ---
- XProtocolRevision :: proc(display: ^Display) -> i32 ---
- XQLength :: proc(display: ^Display) -> i32 ---
- XServerVendor :: proc(display: ^Display) -> cstring ---
- XVendorRelease :: proc(display: ^Display) -> i32 ---
+ ConnectionNumber :: proc(display: ^Display) -> i32 ---
+ ExtendedMaxRequestSize :: proc(display: ^Display) -> int ---
+ MaxRequestSize :: proc(display: ^Display) -> int ---
+ LastKnownRequestProcessed :: proc(display: ^Display) -> uint ---
+ NextRequest :: proc(display: ^Display) -> uint ---
+ ProtocolVersion :: proc(display: ^Display) -> i32 ---
+ ProtocolRevision :: proc(display: ^Display) -> i32 ---
+ QLength :: proc(display: ^Display) -> i32 ---
+ ServerVendor :: proc(display: ^Display) -> cstring ---
+ VendorRelease :: proc(display: ^Display) -> i32 ---
// Display macros (display properties)
- XBlackPixel :: proc(display: ^Display, screen_no: i32) -> uint ---
- XWhitePixel :: proc(display: ^Display, screen_no: i32) -> uint ---
- XListDepths :: proc(display: ^Display, screen_no: i32, count: ^i32) -> [^]i32 ---
- XDisplayCells :: proc(display: ^Display, screen_no: i32) -> i32 ---
- XDisplayPlanes :: proc(display: ^Display, screen_no: i32) -> i32 ---
- XScreenOfDisplay :: proc(display: ^Display, screen_no: i32) -> ^Screen ---
- XDisplayString :: proc(display: ^Display) -> cstring ---
+ BlackPixel :: proc(display: ^Display, screen_no: i32) -> uint ---
+ WhitePixel :: proc(display: ^Display, screen_no: i32) -> uint ---
+ ListDepths :: proc(display: ^Display, screen_no: i32, count: ^i32) -> [^]i32 ---
+ DisplayCells :: proc(display: ^Display, screen_no: i32) -> i32 ---
+ DisplayPlanes :: proc(display: ^Display, screen_no: i32) -> i32 ---
+ ScreenOfDisplay :: proc(display: ^Display, screen_no: i32) -> ^Screen ---
+ DisplayString :: proc(display: ^Display) -> cstring ---
// Display macros (defaults)
- XDefaultColormap :: proc(display: ^Display, screen_no: i32) -> Colormap ---
- XDefaultDepth :: proc(display: ^Display) -> i32 ---
- XDefaultGC :: proc(display: ^Display, screen_no: i32) -> GC ---
- XDefaultRootWindow :: proc(display: ^Display) -> Window ---
- XDefaultScreen :: proc(display: ^Display) -> i32 ---
- XDefaultVisual :: proc(display: ^Display, screen_no: i32) -> ^Visual ---
- XDefaultScreenOfDisplay ::
- proc(display: ^Display) -> ^Screen ---
+ DefaultColormap :: proc(display: ^Display, screen_no: i32) -> Colormap ---
+ DefaultDepth :: proc(display: ^Display) -> i32 ---
+ DefaultGC :: proc(display: ^Display, screen_no: i32) -> GC ---
+ DefaultRootWindow :: proc(display: ^Display) -> Window ---
+ DefaultScreen :: proc(display: ^Display) -> i32 ---
+ DefaultVisual :: proc(display: ^Display, screen_no: i32) -> ^Visual ---
+ DefaultScreenOfDisplay :: proc(display: ^Display) -> ^Screen ---
// Display macros (other)
- XRootWindow :: proc(display: ^Display, screen_no: i32) -> Window ---
- XScreenCount :: proc(display: ^Display) -> i32 ---
+ RootWindow :: proc(display: ^Display, screen_no: i32) -> Window ---
+ ScreenCount :: proc(display: ^Display) -> i32 ---
// Display image format macros
- XListPixmapFormats :: proc(display: ^Display, count: ^i32) -> [^]XPixmapFormatValues ---
- XImageByteOrder :: proc(display: ^Display) -> ByteOrder ---
- XBitmapUnit :: proc(display: ^Display) -> i32 ---
- XBitmapBitOrder :: proc(display: ^Display) -> ByteOrder ---
- XBitmapPad :: proc(display: ^Display) -> i32 ---
- XDisplayHeight :: proc(display: ^Display, screen_no: i32) -> i32 ---
- XDisplayHeightMM :: proc(display: ^Display, screen_no: i32) -> i32 ---
- XDisplayWidth :: proc(display: ^Display, screen_no: i32) -> i32 ---
- XDisplayWidthMM :: proc(display: ^Display, screen_no: i32) -> i32 ---
+ ListPixmapFormats :: proc(display: ^Display, count: ^i32) -> [^]XPixmapFormatValues ---
+ ImageByteOrder :: proc(display: ^Display) -> ByteOrder ---
+ BitmapUnit :: proc(display: ^Display) -> i32 ---
+ BitmapBitOrder :: proc(display: ^Display) -> ByteOrder ---
+ BitmapPad :: proc(display: ^Display) -> i32 ---
+ DisplayHeight :: proc(display: ^Display, screen_no: i32) -> i32 ---
+ DisplayHeightMM :: proc(display: ^Display, screen_no: i32) -> i32 ---
+ DisplayWidth :: proc(display: ^Display, screen_no: i32) -> i32 ---
+ DisplayWidthMM :: proc(display: ^Display, screen_no: i32) -> i32 ---
// Screen macros
- XBlackPixelsOfScreen :: proc(screen: ^Screen) -> uint ---
- XWhitePixelsOfScreen :: proc(screen: ^Screen) -> uint ---
- XCellsOfScreen :: proc(screen: ^Screen) -> i32 ---
- XDefaultColormapOfScreen :: proc(screen: ^Screen) -> Colormap ---
- XDefaultDepthOfScreen :: proc(screen: ^Screen) -> i32 ---
- XDefaultGCOfScreen :: proc(screen: ^Screen) -> GC ---
- XDefaultVisualOfScreen :: proc(screen: ^Screen) -> ^Visual ---
- XDoesBackingStore :: proc(screen: ^Screen) -> BackingStore ---
- XDoesSaveUnders :: proc(screen: ^Screen) -> b32 ---
- XDisplayOfScreen :: proc(screen: ^Screen) -> ^Display ---
- XScreenNumberOfScreens :: proc(screen: ^Screen) -> i32 ---
- XEventMaskOfScreen :: proc(screen: ^Screen) -> EventMask ---
- XWidthOfScreen :: proc(screen: ^Screen) -> i32 ---
- XHeightOfScreen :: proc(screen: ^Screen) -> i32 ---
- XWidthMMOfScreen :: proc(screen: ^Screen) -> i32 ---
- XHeightMMOfScreen :: proc(screen: ^Screen) -> i32 ---
- XMaxCmapsOfScreen :: proc(screen: ^Screen) -> i32 ---
- XMinCmapsOfScreen :: proc(screen: ^Screen) -> i32 ---
- XPlanesOfScreen :: proc(screen: ^Screen) -> i32 ---
- XRootWindowOfScreen :: proc(screen: ^Screen) -> Window ---
+ BlackPixelsOfScreen :: proc(screen: ^Screen) -> uint ---
+ WhitePixelsOfScreen :: proc(screen: ^Screen) -> uint ---
+ CellsOfScreen :: proc(screen: ^Screen) -> i32 ---
+ DefaultColormapOfScreen :: proc(screen: ^Screen) -> Colormap ---
+ DefaultDepthOfScreen :: proc(screen: ^Screen) -> i32 ---
+ DefaultGCOfScreen :: proc(screen: ^Screen) -> GC ---
+ DefaultVisualOfScreen :: proc(screen: ^Screen) -> ^Visual ---
+ DoesBackingStore :: proc(screen: ^Screen) -> BackingStore ---
+ DoesSaveUnders :: proc(screen: ^Screen) -> b32 ---
+ DisplayOfScreen :: proc(screen: ^Screen) -> ^Display ---
+ ScreenNumberOfScreens :: proc(screen: ^Screen) -> i32 ---
+ EventMaskOfScreen :: proc(screen: ^Screen) -> EventMask ---
+ WidthOfScreen :: proc(screen: ^Screen) -> i32 ---
+ HeightOfScreen :: proc(screen: ^Screen) -> i32 ---
+ WidthMMOfScreen :: proc(screen: ^Screen) -> i32 ---
+ HeightMMOfScreen :: proc(screen: ^Screen) -> i32 ---
+ MaxCmapsOfScreen :: proc(screen: ^Screen) -> i32 ---
+ MinCmapsOfScreen :: proc(screen: ^Screen) -> i32 ---
+ PlanesOfScreen :: proc(screen: ^Screen) -> i32 ---
+ RootWindowOfScreen :: proc(screen: ^Screen) -> Window ---
// Threading functions
- XInitThreads :: proc() -> Status ---
- XLockDisplay :: proc(display: ^Display) ---
- XUnlockDisplay :: proc(display: ^Display) ---
+ InitThreads :: proc() -> Status ---
+ LockDisplay :: proc(display: ^Display) ---
+ UnlockDisplay :: proc(display: ^Display) ---
// Internal connections
- XAddConnectionWatch :: proc(
+ AddConnectionWatch :: proc(
display: ^Display,
procedure: XConnectionWatchProc,
data: rawptr,
) -> Status ---
- XRemoveConnectionWatch :: proc(
+ RemoveConnectionWatch :: proc(
display: ^Display,
procedure: XConnectionWatchProc,
data: rawptr,
) -> Status ---
- XProcessInternalConnections :: proc(
+ ProcessInternalConnections :: proc(
display: ^Display,
fd: i32,
) ---
- XInternalConnectionNumbers :: proc(
+ InternalConnectionNumbers :: proc(
display: ^Display,
fds: ^[^]i32,
count: ^i32,
) -> Status ---
// Windows functions
- XVisualIDFromVisual :: proc(visual: ^Visual) -> VisualID ---
+ VisualIDFromVisual :: proc(visual: ^Visual) -> VisualID ---
// Windows: creation/destruction
- XCreateWindow :: proc(
+ CreateWindow :: proc(
display: ^Display,
parent: Window,
x: i32,
@@ -123,7 +130,7 @@ foreign xlib {
attr_mask: WindowAttributeMask,
attr: ^XSetWindowAttributes,
) -> Window ---
- XCreateSimpleWindow :: proc(
+ CreateSimpleWindow :: proc(
display: ^Display,
parent: Window,
x: i32,
@@ -134,34 +141,34 @@ foreign xlib {
border: int,
bg: int,
) -> Window ---
- XDestroyWindow :: proc(display: ^Display, window: Window) ---
- XDestroySubwindows :: proc(display: ^Display, window: Window) ---
+ DestroyWindow :: proc(display: ^Display, window: Window) ---
+ DestroySubwindows :: proc(display: ^Display, window: Window) ---
// Windows: mapping/unmapping
- XMapWindow :: proc(display: ^Display, window: Window) ---
- XMapRaised :: proc(display: ^Display, window: Window) ---
- XMapSubwindows :: proc(display: ^Display, window: Window) ---
- XUnmapWindow :: proc(display: ^Display, window: Window) ---
- XUnmapSubwindows :: proc(display: ^Display, window: Window) ---
+ MapWindow :: proc(display: ^Display, window: Window) ---
+ MapRaised :: proc(display: ^Display, window: Window) ---
+ MapSubwindows :: proc(display: ^Display, window: Window) ---
+ UnmapWindow :: proc(display: ^Display, window: Window) ---
+ UnmapSubwindows :: proc(display: ^Display, window: Window) ---
// Windows: configuring
- XConfigureWindow :: proc(
+ ConfigureWindow :: proc(
display: ^Display,
window: Window,
mask: WindowChangesMask,
		values: ^XWindowChanges,
) ---
- XMoveWindow :: proc(
+ MoveWindow :: proc(
display: ^Display,
window: Window,
x: i32,
y: i32,
) ---
- XResizeWindow :: proc(
+ ResizeWindow :: proc(
display: ^Display,
window: Window,
width: u32,
height: u32,
) ---
- XMoveResizeWindow :: proc(
+ MoveResizeWindow :: proc(
display: ^Display,
window: Window,
x: i32,
@@ -169,51 +176,51 @@ foreign xlib {
width: u32,
height: u32,
) ---
- XSetWindowBorderWidth :: proc(
+ SetWindowBorderWidth :: proc(
display: ^Display,
window: Window,
width: u32,
) ---
// Window: changing stacking order
- XRaiseWindow :: proc(display: ^Display, window: Window) ---
- XLowerWindow :: proc(display: ^Display, window: Window) ---
- XCirculateSubwindows :: proc(display: ^Display, window: Window, direction: CirculationDirection) ---
- XCirculateSubwindowsUp :: proc(display: ^Display, window: Window) ---
- XCirculateSubwindowsDown :: proc(display: ^Display, window: Window) ---
- XRestackWindows :: proc(display: ^Display, windows: [^]Window, nwindows: i32) ---
+ RaiseWindow :: proc(display: ^Display, window: Window) ---
+ LowerWindow :: proc(display: ^Display, window: Window) ---
+ CirculateSubwindows :: proc(display: ^Display, window: Window, direction: CirculationDirection) ---
+ CirculateSubwindowsUp :: proc(display: ^Display, window: Window) ---
+ CirculateSubwindowsDown :: proc(display: ^Display, window: Window) ---
+ RestackWindows :: proc(display: ^Display, windows: [^]Window, nwindows: i32) ---
// Window: changing attributes
- XChangeWindowAttributes :: proc(
+ ChangeWindowAttributes :: proc(
display: ^Display,
window: Window,
attr_mask: WindowAttributeMask,
		attr: ^XSetWindowAttributes,
) ---
- XSetWindowBackground :: proc(
+ SetWindowBackground :: proc(
display: ^Display,
window: Window,
pixel: uint,
) ---
- XSetWindowBackgroundMap :: proc(
+ SetWindowBackgroundMap :: proc(
display: ^Display,
window: Window,
pixmap: Pixmap,
) ---
- XSetWindowColormap :: proc(
+ SetWindowColormap :: proc(
display: ^Display,
window: Window,
colormap: Colormap,
) ---
- XDefineCursor :: proc(
+ DefineCursor :: proc(
display: ^Display,
window: Window,
cursor: Cursor,
) ---
- XUndefineCursor :: proc(
+ UndefineCursor :: proc(
display: ^Display,
window: Window,
) ---
// Windows: querying information
- XQueryTree :: proc(
+ QueryTree :: proc(
display: ^Display,
window: Window,
root: ^Window,
@@ -221,12 +228,12 @@ foreign xlib {
children: ^[^]Window,
nchildren: ^u32,
) -> Status ---
- XGetWindowAttributes :: proc(
+ GetWindowAttributes :: proc(
display: ^Display,
window: Window,
attr: ^XWindowAttributes,
) ---
- XGetGeometry :: proc(
+ GetGeometry :: proc(
display: ^Display,
drawable: Drawable,
root: ^Window,
@@ -238,7 +245,7 @@ foreign xlib {
depth: ^u32,
) -> Status ---
// Windows: translating screen coordinates
- XTranslateCoordinates :: proc(
+ TranslateCoordinates :: proc(
display: ^Display,
src_window: Window,
dst_window: Window,
@@ -247,7 +254,7 @@ foreign xlib {
dst_x: ^i32,
dst_y: ^i32,
) -> b32 ---
- XQueryPointer :: proc(
+ QueryPointer :: proc(
display: ^Display,
window: Window,
root: ^Window,
@@ -259,29 +266,29 @@ foreign xlib {
mask: ^KeyMask,
) -> b32 ---
// Atoms
- XInternAtom :: proc(
+ InternAtom :: proc(
display: ^Display,
name: cstring,
existing: b32,
) -> Atom ---
- XInternAtoms :: proc(
+ InternAtoms :: proc(
display: ^Display,
names: [^]cstring,
count: i32,
atoms: [^]Atom,
) -> Status ---
- XGetAtomName :: proc(
+ GetAtomName :: proc(
display: ^Display,
atom: Atom,
) -> cstring ---
- XGetAtomNames :: proc(
+ GetAtomNames :: proc(
display: ^Display,
atoms: [^]Atom,
count: i32,
names: [^]cstring,
) -> Status ---
// Windows: Obtaining and changing properties
- XGetWindowProperty :: proc(
+ GetWindowProperty :: proc(
display: ^Display,
window: Window,
property: Atom,
@@ -295,12 +302,12 @@ foreign xlib {
bytes_after: [^]uint,
props: ^rawptr,
) -> i32 ---
- XListProperties :: proc(
+ ListProperties :: proc(
display: ^Display,
window: Window,
num: ^i32,
) -> [^]Atom ---
- XChangeProperty :: proc(
+ ChangeProperty :: proc(
display: ^Display,
window: Window,
property: Atom,
@@ -310,30 +317,30 @@ foreign xlib {
data: rawptr,
count: i32,
) ---
- XRotateWindowProperties :: proc(
+ RotateWindowProperties :: proc(
display: ^Display,
window: Window,
props: [^]Atom,
nprops: i32,
npos: i32,
) ---
- XDeleteProperty :: proc(
+ DeleteProperty :: proc(
display: ^Display,
window: Window,
prop: Atom,
) ---
// Selections
- XSetSelectionOwner :: proc(
+ SetSelectionOwner :: proc(
display: ^Display,
selection: Atom,
		owner: Window,
time: Time,
) ---
- XGetSelectionOwner :: proc(
+ GetSelectionOwner :: proc(
display: ^Display,
selection: Atom,
) -> Window ---
- XConvertSelection :: proc(
+ ConvertSelection :: proc(
display: ^Display,
selection: Atom,
target: Atom,
@@ -342,23 +349,23 @@ foreign xlib {
time: Time,
) ---
// Creating and freeing pixmaps
- XCreatePixmap :: proc(
+ CreatePixmap :: proc(
display: ^Display,
drawable: Drawable,
width: u32,
height: u32,
depth: u32,
) -> Pixmap ---
- XFreePixmap :: proc(
+ FreePixmap :: proc(
display: ^Display,
pixmap: Pixmap,
) ---
	// Creating, recoloring and freeing cursors
- XCreateFontCursor :: proc(
+ CreateFontCursor :: proc(
display: ^Display,
shape: CursorShape,
) -> Cursor ---
- XCreateGlyphCursor :: proc(
+ CreateGlyphCursor :: proc(
display: ^Display,
src_font: Font,
mask_font: Font,
@@ -367,7 +374,7 @@ foreign xlib {
fg: ^XColor,
bg: ^XColor,
) -> Cursor ---
- XCreatePixmapCursor :: proc(
+ CreatePixmapCursor :: proc(
display: ^Display,
source: Pixmap,
mask: Pixmap,
@@ -376,7 +383,7 @@ foreign xlib {
x: u32,
y: u32,
) -> Cursor ---
- XQueryBestCursor :: proc(
+ QueryBestCursor :: proc(
display: ^Display,
drawable: Drawable,
width: u32,
@@ -384,72 +391,50 @@ foreign xlib {
out_width: ^u32,
out_height: ^u32,
) -> Status ---
- XRecolorCursor :: proc(
+ RecolorCursor :: proc(
display: ^Display,
cursor: Cursor,
fg: ^XColor,
bg: ^XColor,
) ---
- XFreeCursor :: proc(display: ^Display, cursor: Cursor) ---
+ FreeCursor :: proc(display: ^Display, cursor: Cursor) ---
// Creation/destruction of colormaps
- XCreateColormap :: proc(
+ CreateColormap :: proc(
display: ^Display,
window: Window,
visual: ^Visual,
alloc: ColormapAlloc,
) -> Colormap ---
- XCopyColormapAndFree :: proc(
+ CopyColormapAndFree :: proc(
display: ^Display,
colormap: Colormap,
) -> Colormap ---
- XFreeColormap :: proc(
+ FreeColormap :: proc(
display: ^Display,
colormap: Colormap,
) ---
// Mapping color names to values
- XLookupColor :: proc(
+ LookupColor :: proc(
display: ^Display,
		colormap: Colormap,
name: cstring,
exact: ^XColor,
screen: ^XColor,
) -> Status ---
- XcmsLookupColor :: proc(
- display: ^Display,
- colormap: Colormap,
- name: cstring,
- exact: XcmsColor,
- screen: XcmsColor,
- format: XcmsColorFormat,
- ) -> Status ---
// Allocating and freeing color cells
- XAllocColor :: proc(
+ AllocColor :: proc(
display: ^Display,
colormap: Colormap,
screen: ^XColor,
) -> Status ---
- XcmsAllocColor :: proc(
- display: ^Display,
- colormap: Colormap,
- color: ^XcmsColor,
- format: XcmsColorFormat,
- ) -> Status ---
- XAllocNamedColor :: proc(
+ AllocNamedColor :: proc(
display: ^Display,
colormap: Colormap,
name: cstring,
screen: ^XColor,
exact: ^XColor,
) -> Status ---
- XcmsAllocNamedColor :: proc(
- display: ^Display,
- colormap: Colormap,
- name: cstring,
- screen: ^XcmsColor,
- exact: ^XcmsColor,
- format: XcmsColorFormat,
- ) -> Status ---
- XAllocColorCells :: proc(
+ AllocColorCells :: proc(
display: ^Display,
colormap: Colormap,
contig: b32,
@@ -458,7 +443,7 @@ foreign xlib {
pixels: [^]uint,
npixels: u32,
) -> Status ---
- XAllocColorPlanes :: proc(
+ AllocColorPlanes :: proc(
display: ^Display,
colormap: Colormap,
contig: b32,
@@ -471,7 +456,7 @@ foreign xlib {
gmask: [^]uint,
bmask: [^]uint,
) -> Status ---
- XFreeColors :: proc(
+ FreeColors :: proc(
display: ^Display,
colormap: Colormap,
pixels: [^]uint,
@@ -479,347 +464,47 @@ foreign xlib {
planes: uint,
) ---
// Modifying and querying colormap cells
- XStoreColor :: proc(
+ StoreColor :: proc(
display: ^Display,
colormap: Colormap,
color: ^XColor,
) ---
- XStoreColors :: proc(
+ StoreColors :: proc(
display: ^Display,
colormap: Colormap,
color: [^]XColor,
ncolors: i32,
) ---
- XcmsStoreColor :: proc(
- display: ^Display,
- colormap: Colormap,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsStoreColors :: proc(
- display: ^Display,
- colormap: Colormap,
- colors: [^]XcmsColor,
- ncolors: XcmsColor,
- cflags: [^]b32,
- ) -> Status ---
- XStoreNamedColor :: proc(
- display: ^Display,
- colormap: Colormap,
- name: cstring,
- pixel: uint,
- flags: ColorFlags,
- ) ---
- XQueryColor :: proc(
- display: ^Display,
- colormap: Colormap,
- color: ^XColor,
- ) ---
- XQueryColors :: proc(
- display: ^Display,
- colormap: Colormap,
- colors: [^]XColor,
- ncolors: i32,
- ) ---
- XQueryExtension :: proc(
- display: ^Display,
- name: cstring,
- major_opcode_return: ^i32,
- first_event_return: ^i32,
- first_error_return: ^i32,
- ) -> b32 ---
- XcmsQueryColor :: proc(
- display: ^Display,
- colormap: Colormap,
- color: ^XcmsColor,
- format: XcmsColorFormat,
- ) -> Status ---
- XcmsQueryColors :: proc(
- display: ^Display,
- colormap: Colormap,
- color: [^]XcmsColor,
- ncolors: i32,
- format: XcmsColorFormat,
- ) -> Status ---
- // Getting and setting the color conversion context (CCC) of a colormap
- XcmsCCCOfColormap :: proc(
- display: ^Display,
- colormap: Colormap,
- ) -> XcmsCCC ---
- XcmsSetCCCOfColormap :: proc(
- display: ^Display,
- colormap: Colormap,
- ccc: XcmsCCC) -> XcmsCCC ---
- XcmsDefaultCCC :: proc(display: ^Display, screen_no: i32) -> XcmsCCC ---
- // Color conversion context macros
- XcmsDisplayOfCCC :: proc(ccc: XcmsCCC) -> ^Display ---
- XcmsVisualOfCCC :: proc(ccc: XcmsCCC) -> ^Visual ---
- XcmsScreenNumberOfCCC ::
- proc(ccc: XcmsCCC) -> i32 ---
- XcmsScreenWhitePointOfCCC ::
- proc(ccc: XcmsCCC) -> XcmsColor ---
- XcmsClientWhitePointOfCCC ::
- proc(ccc: XcmsCCC) -> XcmsColor ---
- // Modifying the attributes of color conversion context
- XcmsSetWhitePoint :: proc(
- ccc: XcmsCCC,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsSetCompressionProc :: proc(
- ccc: XcmsCCC,
- cproc: XcmsCompressionProc,
- data: rawptr,
- ) -> XcmsCompressionProc ---
- XcmsSetWhiteAdjustProc :: proc(
- ccc: XcmsCCC,
- aproc: XcmsWhiteAdjustProc,
- data: rawptr,
- ) -> XcmsWhiteAdjustProc ---
- // Creating and freeing the color conversion context
- XcmsCreateCCC :: proc(
- display: ^Display,
- screen_no: i32,
- visual: ^Visual,
- white_point: ^XcmsColor,
- cproc: XcmsCompressionProc,
- cdata: rawptr,
- aproc: XcmsWhiteAdjustProc,
- adata: rawptr,
- ) -> XcmsCCC ---
- XcmsFreeCCC :: proc(ccc: XcmsCCC) ---
- // Converting between colorspaces
- XcmsConvertColors :: proc(
- ccc: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- format: XcmsColorFormat,
- cflags: [^]b32,
- ) -> Status ---
- // Pre-defined gamut compression callbacks
- XcmsCIELabClipL :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsCIELabClipab :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsCIELabClipLab :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsCIELuvClipL :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsCIELuvClipuv :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsCIELuvClipLuv :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsTekHVCClipV :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsTekHVCClipC :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- XcmsTekHVCClipVC :: proc(
- ctx: XcmsCCC,
- colors: [^]XcmsColor,
- ncolors: u32,
- index: u32,
- flags: [^]b32,
- ) -> Status ---
- // Pre-defined white-point adjustment procedures
- XcmsCIELabWhiteShiftColors :: proc(
- ctx: XcmsCCC,
- initial_white_point: ^XcmsColor,
- target_white_point: ^XcmsColor,
- target_format: XcmsColorFormat,
- colors: [^]XcmsColor,
- ncolors: u32,
- compression: [^]b32,
- ) -> Status ---
- XcmsCIELuvWhiteShiftColors :: proc(
- ctx: XcmsCCC,
- initial_white_point: ^XcmsColor,
- target_white_point: ^XcmsColor,
- target_format: XcmsColorFormat,
- colors: [^]XcmsColor,
- ncolors: u32,
- compression: [^]b32,
- ) -> Status ---
- XcmsTekHVCWhiteShiftColors :: proc(
- ctx: XcmsCCC,
- initial_white_point: ^XcmsColor,
- target_white_point: ^XcmsColor,
- target_format: XcmsColorFormat,
- colors: [^]XcmsColor,
- ncolors: u32,
- compression: [^]b32,
- ) -> Status ---
- // Color querying
- XcmsQueryBlack :: proc(
- ccc: XcmsCCC,
- format: XcmsColorFormat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsQueryBlue :: proc(
- ccc: XcmsCCC,
- format: XcmsColorFormat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsQueryGreen :: proc(
- ccc: XcmsCCC,
- format: XcmsColorFormat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsQueryRed :: proc(
- ccc: XcmsCCC,
- format: XcmsColorFormat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsQueryWhite :: proc(
- ccc: XcmsCCC,
- format: XcmsColorFormat,
- color: ^XcmsColor,
- ) -> Status ---
- // CIELab queries
- XcmsCIELabQueryMaxC :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- lstar: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsCIELabQueryMaxL :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- chroma: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsCIELabQueryMaxLC :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsCIELabQueryMinL :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- chroma: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- // CIEluv queries
- XcmsCIELuvQueryMaxC :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- lstar: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsCIELuvQueryMaxL :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- chroma: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsCIELuvQueryMaxLC :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsCIELuvQueryMinL :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- chroma: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- // TexHVX queries
- XcmsTekHVCQueryMaxC :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- value: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsTekHVCQueryMaxV :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- chroma: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsTekHVCQueryMaxVC :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
- XcmsTekHVCQueryMaxVSamples :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- colors: [^]XcmsColor,
- nsamples: u32,
- ) -> Status ---
- XcmsTekHVCQueryMinV :: proc(
- ccc: XcmsCCC,
- hue: XcmsFloat,
- chroma: XcmsFloat,
- color: ^XcmsColor,
- ) -> Status ---
// Graphics context functions
- XCreateGC :: proc(
+ CreateGC :: proc(
display: ^Display,
drawable: Drawable,
mask: GCAttributeMask,
attr: ^XGCValues,
) -> GC ---
- XCopyGC :: proc(
+ CopyGC :: proc(
display: ^Display,
src: GC,
dst: GC,
mask: GCAttributeMask,
) ---
- XChangeGC :: proc(
+ ChangeGC :: proc(
display: ^Display,
gc: GC,
mask: GCAttributeMask,
values: ^XGCValues,
) ---
- XGetGCValues :: proc(
+ GetGCValues :: proc(
display: ^Display,
gc: GC,
mask: GCAttributeMask,
values: ^XGCValues,
) -> Status ---
- XFreeGC :: proc(display: ^Display, gc: GC) ---
- XGCContextFromGC :: proc(gc: GC) -> GContext ---
- XFlushGC :: proc(display: ^Display, gc: GC) ---
+ FreeGC :: proc(display: ^Display, gc: GC) ---
+ GCContextFromGC :: proc(gc: GC) -> GContext ---
+ FlushGC :: proc(display: ^Display, gc: GC) ---
// Convenience routines for GC
- XSetState :: proc(
+ SetState :: proc(
display: ^Display,
gc: GC,
fg: uint,
@@ -827,27 +512,27 @@ foreign xlib {
fn: GCFunction,
pmask: uint,
) ---
- XSetForeground :: proc(
+ SetForeground :: proc(
display: ^Display,
gc: GC,
fg: uint,
) ---
- XSetBackground :: proc(
+ SetBackground :: proc(
display: ^Display,
gc: GC,
bg: uint,
) ---
- XSetFunction :: proc(
+ SetFunction :: proc(
display: ^Display,
gc: GC,
fn: GCFunction,
) ---
- XSetPlaneMask :: proc(
+ SetPlaneMask :: proc(
display: ^Display,
gc: GC,
pmask: uint,
) ---
- XSetLineAttributes :: proc(
+ SetLineAttributes :: proc(
display: ^Display,
gc: GC,
width: u32,
@@ -855,24 +540,24 @@ foreign xlib {
cap_style: CapStyle,
join_style: JoinStyle,
) ---
- XSetDashes :: proc(
+ SetDashes :: proc(
display: ^Display,
gc: GC,
dash_offs: i32,
dash_list: [^]i8,
n: i32,
) ---
- XSetFillStyle :: proc(
+ SetFillStyle :: proc(
display: ^Display,
gc: GC,
style: FillStyle,
) ---
- XSetFillRule :: proc(
+ SetFillRule :: proc(
display: ^Display,
gc: GC,
rule: FillRule,
) ---
- XQueryBestSize :: proc(
+ QueryBestSize :: proc(
display: ^Display,
class: i32,
which: Drawable,
@@ -881,7 +566,7 @@ foreign xlib {
out_width: ^u32,
out_height: ^u32,
) -> Status ---
- XQueryBestTile :: proc(
+ QueryBestTile :: proc(
display: ^Display,
which: Drawable,
width: u32,
@@ -889,7 +574,7 @@ foreign xlib {
out_width: ^u32,
out_height: ^u32,
) -> Status ---
- XQueryBestStripple :: proc(
+ QueryBestStripple :: proc(
display: ^Display,
which: Drawable,
width: u32,
@@ -897,13 +582,13 @@ foreign xlib {
		out_width: ^u32,
		out_height: ^u32,
) -> Status ---
- XSetTile :: proc(display: ^Display, gc: GC, tile: Pixmap) ---
- XSetStripple :: proc(display: ^Display, gc: GC, stripple: Pixmap) ---
- XSetTSOrigin :: proc(display: ^Display, gc: GC, x: i32, y: i32) ---
- XSetFont :: proc(display: ^Display, gc: GC, font: Font) ---
- XSetClipOrigin :: proc(display: ^Display, gc: GC, x: i32, y: i32) ---
- XSetClipMask :: proc(display: ^Display, gc: GC, pixmap: Pixmap) ---
- XSetClipRectangles :: proc(
+ SetTile :: proc(display: ^Display, gc: GC, tile: Pixmap) ---
+ SetStripple :: proc(display: ^Display, gc: GC, stripple: Pixmap) ---
+ SetTSOrigin :: proc(display: ^Display, gc: GC, x: i32, y: i32) ---
+ SetFont :: proc(display: ^Display, gc: GC, font: Font) ---
+ SetClipOrigin :: proc(display: ^Display, gc: GC, x: i32, y: i32) ---
+ SetClipMask :: proc(display: ^Display, gc: GC, pixmap: Pixmap) ---
+ SetClipRectangles :: proc(
display: ^Display,
gc: GC,
x: i32,
@@ -912,11 +597,11 @@ foreign xlib {
n: i32,
ordering: i32,
) ---
- XSetArcMode :: proc(display: ^Display, gc: GC, mode: ArcMode) ---
- XSetSubwindowMode :: proc(display: ^Display, gc: GC, mode: SubwindowMode) ---
- XSetGraphicsExposures :: proc(display: ^Display, gc: GC, exp: b32) ---
+ SetArcMode :: proc(display: ^Display, gc: GC, mode: ArcMode) ---
+ SetSubwindowMode :: proc(display: ^Display, gc: GC, mode: SubwindowMode) ---
+ SetGraphicsExposures :: proc(display: ^Display, gc: GC, exp: b32) ---
// Graphics functions
- XClearArea :: proc(
+ ClearArea :: proc(
display: ^Display,
window: Window,
x: i32,
@@ -925,11 +610,11 @@ foreign xlib {
height: u32,
exp: b32,
) ---
- XClearWindow :: proc(
+ ClearWindow :: proc(
display: ^Display,
window: Window,
) ---
- XCopyArea :: proc(
+ CopyArea :: proc(
display: ^Display,
src: Drawable,
dst: Drawable,
@@ -941,7 +626,7 @@ foreign xlib {
dst_x: i32,
dst_y: i32,
) ---
- XCopyPlane :: proc(
+ CopyPlane :: proc(
display: ^Display,
src: Drawable,
dst: Drawable,
@@ -955,14 +640,14 @@ foreign xlib {
plane: uint,
) ---
	// Drawing lines, points, rectangles and arcs
- XDrawPoint :: proc(
+ DrawPoint :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
x: i32,
y: i32,
) ---
- XDrawPoints :: proc(
+ DrawPoints :: proc(
		display: ^Display,
drawable: Drawable,
gc: GC,
@@ -970,7 +655,7 @@ foreign xlib {
npoints: i32,
mode: CoordMode,
) ---
- XDrawLine :: proc(
+ DrawLine :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -979,21 +664,21 @@ foreign xlib {
x2: i32,
y2: i32,
) ---
- XDrawLines :: proc(
+ DrawLines :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
points: [^]XPoint,
npoints: i32,
) ---
- XDrawSegments :: proc(
+ DrawSegments :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
segs: [^]XSegment,
nsegs: i32,
) ---
- XDrawRectangle :: proc(
+ DrawRectangle :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1002,14 +687,14 @@ foreign xlib {
width: u32,
height: u32,
) ---
- XDrawRectangles :: proc(
+ DrawRectangles :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
rects: [^]XRectangle,
nrects: i32,
) ---
- XDrawArc :: proc(
+ DrawArc :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1020,7 +705,7 @@ foreign xlib {
angle1: i32,
angle2: i32,
) ---
- XDrawArcs :: proc(
+ DrawArcs :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1028,7 +713,7 @@ foreign xlib {
narcs: i32,
) ---
// Filling areas
- XFillRectangle :: proc(
+ FillRectangle :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1037,14 +722,14 @@ foreign xlib {
width: u32,
height: u32,
) ---
- XFillRectangles :: proc(
+ FillRectangles :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
rects: [^]XRectangle,
nrects: i32,
) ---
- XFillPolygon :: proc(
+ FillPolygon :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1053,7 +738,7 @@ foreign xlib {
shape: Shape,
mode: CoordMode,
) ---
- XFillArc :: proc(
+ FillArc :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1064,7 +749,7 @@ foreign xlib {
angle1: i32,
angle2: i32,
) ---
- XFillArcs :: proc(
+ FillArcs :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1072,26 +757,26 @@ foreign xlib {
narcs: i32,
) ---
// Font metrics
- XLoadFont :: proc(display: ^Display, name: cstring) -> Font ---
- XQueryFont :: proc(display: ^Display, id: XID) -> ^XFontStruct ---
- XLoadQueryFont :: proc(display: ^Display, name: cstring) -> ^XFontStruct ---
- XFreeFont :: proc(display: ^Display, font_struct: ^XFontStruct) ---
- XGetFontProperty :: proc(font_struct: ^XFontStruct, atom: Atom, ret: ^uint) -> b32 ---
- XUnloadFont :: proc(display: ^Display, font: Font) ---
- XListFonts :: proc(display: ^Display, pat: cstring, max: i32, count: ^i32) -> [^]cstring ---
- XFreeFontNames :: proc(display: ^Display, list: [^]cstring) ---
- XListFontsWithInfo :: proc(
+ LoadFont :: proc(display: ^Display, name: cstring) -> Font ---
+ QueryFont :: proc(display: ^Display, id: XID) -> ^XFontStruct ---
+ LoadQueryFont :: proc(display: ^Display, name: cstring) -> ^XFontStruct ---
+ FreeFont :: proc(display: ^Display, font_struct: ^XFontStruct) ---
+ GetFontProperty :: proc(font_struct: ^XFontStruct, atom: Atom, ret: ^uint) -> b32 ---
+ UnloadFont :: proc(display: ^Display, font: Font) ---
+ ListFonts :: proc(display: ^Display, pat: cstring, max: i32, count: ^i32) -> [^]cstring ---
+ FreeFontNames :: proc(display: ^Display, list: [^]cstring) ---
+ ListFontsWithInfo :: proc(
display: ^Display,
pat: cstring,
max: i32,
count: ^i32,
info: ^[^]XFontStruct,
) -> [^]cstring ---
- XFreeFontInfo :: proc(names: [^]cstring, info: [^]XFontStruct, count: i32) ---
+ FreeFontInfo :: proc(names: [^]cstring, info: [^]XFontStruct, count: i32) ---
// Computing character string sizes
- XTextWidth :: proc(font_struct: ^XFontStruct, string: [^]u8, count: i32) -> i32 ---
- XTextWidth16 :: proc(font_struct: ^XFontStruct, string: [^]XChar2b, count: i32) -> i32 ---
- XTextExtents :: proc(
+ TextWidth :: proc(font_struct: ^XFontStruct, string: [^]u8, count: i32) -> i32 ---
+ TextWidth16 :: proc(font_struct: ^XFontStruct, string: [^]XChar2b, count: i32) -> i32 ---
+ TextExtents :: proc(
font_struct: ^XFontStruct,
string: [^]u8,
nchars: i32,
@@ -1100,7 +785,7 @@ foreign xlib {
descent: ^i32,
ret: ^XCharStruct,
) ---
- XTextExtents16 :: proc(
+ TextExtents16 :: proc(
font_struct: ^XFontStruct,
string: [^]XChar2b,
nchars: i32,
@@ -1109,7 +794,7 @@ foreign xlib {
descent: ^i32,
ret: ^XCharStruct,
) ---
- XQueryTextExtents :: proc(
+ QueryTextExtents :: proc(
display: ^Display,
font_id: XID,
string: [^]u8,
@@ -1119,7 +804,7 @@ foreign xlib {
descent: ^i32,
ret: ^XCharStruct,
) ---
- XQueryTextExtents16 :: proc(
+ QueryTextExtents16 :: proc(
display: ^Display,
font_id: XID,
string: [^]XChar2b,
@@ -1130,7 +815,7 @@ foreign xlib {
ret: ^XCharStruct,
) ---
// Drawing complex text
- XDrawText :: proc(
+ DrawText :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1139,7 +824,7 @@ foreign xlib {
		items: [^]XTextItem,
nitems: i32,
) ---
- XDrawText16 :: proc(
+ DrawText16 :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1149,7 +834,7 @@ foreign xlib {
nitems: i32,
) ---
// Drawing text characters
- XDrawString :: proc(
+ DrawString :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1158,7 +843,7 @@ foreign xlib {
string: [^]u8,
length: i32,
) ---
- XDrawString16 :: proc(
+ DrawString16 :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1167,7 +852,7 @@ foreign xlib {
string: [^]XChar2b,
length: i32,
) ---
- XDrawImageString :: proc(
+ DrawImageString :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1176,7 +861,7 @@ foreign xlib {
string: [^]u8,
length: i32,
) ---
- XDrawImageString16 :: proc(
+ DrawImageString16 :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1186,8 +871,8 @@ foreign xlib {
length: i32,
) ---
// Transferring images between client and server
- XInitImage :: proc(image: ^XImage) -> Status ---
- XPutImage :: proc(
+ InitImage :: proc(image: ^XImage) -> Status ---
+ PutImage :: proc(
display: ^Display,
drawable: Drawable,
gc: GC,
@@ -1199,7 +884,7 @@ foreign xlib {
width: u32,
height: u32,
) ---
- XGetImage :: proc(
+ GetImage :: proc(
display: ^Display,
drawable: Drawable,
x: i32,
@@ -1209,7 +894,7 @@ foreign xlib {
mask: uint,
format: ImageFormat,
) -> ^XImage ---
- XGetSubImage :: proc(
+ GetSubImage :: proc(
display: ^Display,
drawable: Drawable,
src_x: i32,
@@ -1223,51 +908,51 @@ foreign xlib {
dst_y: i32,
) -> ^XImage ---
// Window and session manager functions
- XReparentWindow :: proc(
+ ReparentWindow :: proc(
display: ^Display,
window: Window,
parent: Window,
x: i32,
y: i32,
) ---
- XChangeSaveSet :: proc(
+ ChangeSaveSet :: proc(
display: ^Display,
window: Window,
mode: SaveSetChangeMode,
) ---
- XAddToSaveSet :: proc(
+ AddToSaveSet :: proc(
display: ^Display,
window: Window,
) ---
- XRemoveFromSaveSet :: proc(
+ RemoveFromSaveSet :: proc(
display: ^Display,
window: Window,
) ---
// Managing installed colormaps
- XInstallColormap :: proc(display: ^Display, colormap: Colormap) ---
- XUninstallColormap :: proc(display: ^Display, colormap: Colormap) ---
- XListInstalledColormaps :: proc(display: ^Display, window: Window, n: ^i32) -> [^]Colormap ---
+ InstallColormap :: proc(display: ^Display, colormap: Colormap) ---
+ UninstallColormap :: proc(display: ^Display, colormap: Colormap) ---
+ ListInstalledColormaps :: proc(display: ^Display, window: Window, n: ^i32) -> [^]Colormap ---
// Setting and retrieving font search paths
- XSetFontPath :: proc(display: ^Display, dirs: [^]cstring, ndirs: i32) ---
- XGetFontPath :: proc(display: ^Display, npaths: ^i32) -> [^]cstring ---
- XFreeFontPath :: proc(list: [^]cstring) ---
+ SetFontPath :: proc(display: ^Display, dirs: [^]cstring, ndirs: i32) ---
+ GetFontPath :: proc(display: ^Display, npaths: ^i32) -> [^]cstring ---
+ FreeFontPath :: proc(list: [^]cstring) ---
// Grabbing the server
- XGrabServer :: proc(display: ^Display) ---
- XUngrabServer :: proc(display: ^Display) ---
+ GrabServer :: proc(display: ^Display) ---
+ UngrabServer :: proc(display: ^Display) ---
// Killing clients
- XKillClient :: proc(display: ^Display, resource: XID) ---
+ KillClient :: proc(display: ^Display, resource: XID) ---
// Controlling the screen saver
- XSetScreenSaver :: proc(
+ SetScreenSaver :: proc(
display: ^Display,
timeout: i32,
interval: i32,
blanking: ScreenSaverBlanking,
exposures: ScreenSavingExposures,
) ---
- XForceScreenSaver :: proc(display: ^Display, mode: ScreenSaverForceMode) ---
- XActivateScreenSaver :: proc(display: ^Display) ---
- XResetScreenSaver :: proc(display: ^Display) ---
- XGetScreenSaver :: proc(
+ ForceScreenSaver :: proc(display: ^Display, mode: ScreenSaverForceMode) ---
+ ActivateScreenSaver :: proc(display: ^Display) ---
+ ResetScreenSaver :: proc(display: ^Display) ---
+ GetScreenSaver :: proc(
display: ^Display,
timeout: ^i32,
interval: ^i32,
@@ -1275,85 +960,85 @@ foreign xlib {
exposures: ^ScreenSavingExposures,
) ---
// Controlling host address
- XAddHost :: proc(display: ^Display, addr: ^XHostAddress) ---
- XAddHosts :: proc(display: ^Display, hosts: [^]XHostAddress, nhosts: i32) ---
- XListHosts :: proc(display: ^Display, nhosts: ^i32, state: [^]b32) -> [^]XHostAddress ---
- XRemoveHost :: proc(display: ^Display, host: XHostAddress) ---
- XRemoveHosts :: proc(display: ^Display, hosts: [^]XHostAddress, nhosts: i32) ---
+ AddHost :: proc(display: ^Display, addr: ^XHostAddress) ---
+ AddHosts :: proc(display: ^Display, hosts: [^]XHostAddress, nhosts: i32) ---
+ ListHosts :: proc(display: ^Display, nhosts: ^i32, state: [^]b32) -> [^]XHostAddress ---
+ RemoveHost :: proc(display: ^Display, host: XHostAddress) ---
+ RemoveHosts :: proc(display: ^Display, hosts: [^]XHostAddress, nhosts: i32) ---
// Access control list
- XSetAccessControl :: proc(display: ^Display, mode: AccessControlMode) ---
- XEnableAccessControl :: proc(display: ^Display) ---
- XDisableAccessControl :: proc(display: ^Display) ---
+ SetAccessControl :: proc(display: ^Display, mode: AccessControlMode) ---
+ EnableAccessControl :: proc(display: ^Display) ---
+ DisableAccessControl :: proc(display: ^Display) ---
// Events
- XSelectInput :: proc(display: ^Display, window: Window, mask: EventMask) ---
- XFlush :: proc(display: ^Display) ---
- XSync :: proc(display: ^Display) ---
- XEventsQueued :: proc(display: ^Display, mode: EventQueueMode) -> i32 ---
- XPending :: proc(display: ^Display) -> i32 ---
- XNextEvent :: proc(display: ^Display, event: ^XEvent) ---
- XPeekEvent :: proc(display: ^Display, event: ^XEvent) ---
- XGetEventData :: proc(display: ^Display, cookie: ^XGenericEventCookie) -> b32 ---
- XFreeEventData :: proc(display: ^Display, cookie: ^XGenericEventCookie) ---
+ SelectInput :: proc(display: ^Display, window: Window, mask: EventMask) ---
+ Flush :: proc(display: ^Display) ---
+	Sync :: proc(display: ^Display, discard: b32) ---
+ EventsQueued :: proc(display: ^Display, mode: EventQueueMode) -> i32 ---
+ Pending :: proc(display: ^Display) -> i32 ---
+ NextEvent :: proc(display: ^Display, event: ^XEvent) ---
+ PeekEvent :: proc(display: ^Display, event: ^XEvent) ---
+ GetEventData :: proc(display: ^Display, cookie: ^XGenericEventCookie) -> b32 ---
+ FreeEventData :: proc(display: ^Display, cookie: ^XGenericEventCookie) ---
// Selecting events using a predicate procedure
- XIfEvent :: proc(
+ IfEvent :: proc(
display: ^Display,
event: ^XEvent,
predicate: #type proc "c" (display: ^Display, event: ^XEvent, ctx: rawptr) -> b32,
ctx: rawptr,
) ---
- XCheckIfEvent :: proc(
+ CheckIfEvent :: proc(
display: ^Display,
event: ^XEvent,
predicate: #type proc "c" (display: ^Display, event: ^XEvent, ctx: rawptr) -> b32,
arg: rawptr,
) -> b32 ---
- XPeekIfEvent :: proc(
+ PeekIfEvent :: proc(
display: ^Display,
event: ^XEvent,
predicate: #type proc "c" (display: ^Display, event: ^XEvent, ctx: rawptr) -> b32,
ctx: rawptr,
) ---
// Selecting events using a window or event mask
- XWindowEvent :: proc(
+ WindowEvent :: proc(
display: ^Display,
window: Window,
mask: EventMask,
event: ^XEvent,
) ---
- XCheckWindowEvent :: proc(
+ CheckWindowEvent :: proc(
display: ^Display,
window: Window,
mask: EventMask,
event: ^XEvent,
) -> b32 ---
- XMaskEvent :: proc(
+ MaskEvent :: proc(
display: ^Display,
mask: EventMask,
event: ^XEvent,
) ---
- XCheckMaskEvent :: proc(
+ CheckMaskEvent :: proc(
display: ^Display,
mask: EventMask,
event: ^XEvent,
) -> b32 ---
- XCheckTypedEvent :: proc(
+ CheckTypedEvent :: proc(
display: ^Display,
type: EventType,
event: ^XEvent,
) -> b32 ---
- XCheckTypedWindowEvent :: proc(
+ CheckTypedWindowEvent :: proc(
display: ^Display,
window: Window,
type: EventType,
event: ^XEvent,
) -> b32 ---
// Putting events back
- XPutBackEvent :: proc(
+ PutBackEvent :: proc(
display: ^Display,
event: ^XEvent,
) ---
// Sending events to other applications
- XSendEvent :: proc(
+ SendEvent :: proc(
display: ^Display,
window: Window,
propagate: b32,
@@ -1361,8 +1046,8 @@ foreign xlib {
event: ^XEvent,
) -> Status ---
// Getting the history of pointer motion
- XDisplayMotionBufferSize :: proc(display: ^Display) -> uint ---
- XGetMotionEvents :: proc(
+ DisplayMotionBufferSize :: proc(display: ^Display) -> uint ---
+ GetMotionEvents :: proc(
display: ^Display,
window: Window,
start: Time,
@@ -1370,25 +1055,25 @@ foreign xlib {
nevents: ^i32,
) -> [^]XTimeCoord ---
// Enabling or disabling synchronization
- XSetAfterFunction :: proc(
+ SetAfterFunction :: proc(
display: ^Display,
procedure: #type proc "c" (display: ^Display) -> i32,
) -> i32 ---
- XSynchronize :: proc(
+ Synchronize :: proc(
display: ^Display,
onoff: b32,
) -> i32 ---
// Error handling
- XSetErrorHandler :: proc(
+ SetErrorHandler :: proc(
handler: #type proc "c" (display: ^Display, event: ^XErrorEvent) -> i32,
) -> i32 ---
- XGetErrorText :: proc(
+ GetErrorText :: proc(
display: ^Display,
code: i32,
buffer: [^]u8,
size: i32,
) ---
- XGetErrorDatabaseText :: proc(
+ GetErrorDatabaseText :: proc(
display: ^Display,
name: cstring,
message: cstring,
@@ -1396,12 +1081,12 @@ foreign xlib {
buffer: [^]u8,
size: i32,
) ---
- XDisplayName :: proc(string: cstring) -> cstring ---
- XSetIOErrorHandler :: proc(
+ DisplayName :: proc(string: cstring) -> cstring ---
+ SetIOErrorHandler :: proc(
handler: #type proc "c" (display: ^Display) -> i32,
) -> i32 ---
// Pointer grabbing
- XGrabPointer :: proc(
+ GrabPointer :: proc(
display: ^Display,
grab_window: Window,
owner_events: b32,
@@ -1412,17 +1097,17 @@ foreign xlib {
cursor: Cursor,
time: Time,
) -> i32 ---
- XUngrabPointer :: proc(
+ UngrabPointer :: proc(
display: ^Display,
time: Time,
) -> i32 ---
- XChangeActivePointerGrab :: proc(
+ ChangeActivePointerGrab :: proc(
display: ^Display,
event_mask: EventMask,
cursor: Cursor,
time: Time,
) ---
- XGrabButton :: proc(
+ GrabButton :: proc(
display: ^Display,
button: u32,
modifiers: InputMask,
@@ -1434,13 +1119,13 @@ foreign xlib {
confine_to: Window,
cursor: Cursor,
) ---
- XUngrabButton :: proc(
+ UngrabButton :: proc(
display: ^Display,
button: u32,
modifiers: InputMask,
grab_window: Window,
) ---
- XGrabKeyboard :: proc(
+ GrabKeyboard :: proc(
display: ^Display,
grab_window: Window,
owner_events: b32,
@@ -1448,11 +1133,11 @@ foreign xlib {
keyboard_mode: GrabMode,
time: Time,
) -> i32 ---
- XUngrabKeyboard :: proc(
+ UngrabKeyboard :: proc(
display: ^Display,
time: Time,
) ---
- XGrabKey :: proc(
+ GrabKey :: proc(
display: ^Display,
keycode: i32,
modifiers: InputMask,
@@ -1461,16 +1146,16 @@ foreign xlib {
pointer_mode: GrabMode,
keyboard_mode: GrabMode,
) ---
- XUngrabKey :: proc(
+ UngrabKey :: proc(
display: ^Display,
keycode: i32,
modifiers: InputMask,
grab_window: Window,
) ---
// Resuming event processing
- XAllowEvents :: proc(display: ^Display, evend_mode: AllowEventsMode, time: Time) ---
+	AllowEvents :: proc(display: ^Display, event_mode: AllowEventsMode, time: Time) ---
// Moving the pointer
- XWarpPointer :: proc(
+ WarpPointer :: proc(
display: ^Display,
src_window: Window,
dst_window: Window,
@@ -1482,34 +1167,34 @@ foreign xlib {
dst_y: i32,
) ---
// Controlling input focus
- XSetInputFocus :: proc(
+ SetInputFocus :: proc(
display: ^Display,
focus: Window,
revert_to: FocusRevert,
time: Time,
) ---
- XGetInputFocus :: proc(
+ GetInputFocus :: proc(
display: ^Display,
focus: ^Window,
revert_to: ^FocusRevert,
) ---
// Manipulating the keyboard and pointer settings
- XChangeKeyboardControl :: proc(
+ ChangeKeyboardControl :: proc(
display: ^Display,
mask: KeyboardControlMask,
values: ^XKeyboardControl,
) ---
- XGetKeyboardControl :: proc(
+ GetKeyboardControl :: proc(
display: ^Display,
values: ^XKeyboardState,
) ---
- XAutoRepeatOn :: proc(display: ^Display) ---
- XAutoRepeatOff :: proc(display: ^Display) ---
- XBell :: proc(display: ^Display, percent: i32) ---
- XQueryKeymap :: proc(display: ^Display, keys: [^]u32) ---
- XSetPointerMapping :: proc(display: ^Display, map_should_not_be_a_keyword: [^]u8, nmap: i32) -> i32 ---
- XGetPointerMapping :: proc(display: ^Display, map_should_not_be_a_keyword: [^]u8, nmap: i32) -> i32 ---
- XChangePointerControl :: proc(
+ AutoRepeatOn :: proc(display: ^Display) ---
+ AutoRepeatOff :: proc(display: ^Display) ---
+ Bell :: proc(display: ^Display, percent: i32) ---
+ QueryKeymap :: proc(display: ^Display, keys: [^]u32) ---
+ SetPointerMapping :: proc(display: ^Display, map_should_not_be_a_keyword: [^]u8, nmap: i32) -> i32 ---
+ GetPointerMapping :: proc(display: ^Display, map_should_not_be_a_keyword: [^]u8, nmap: i32) -> i32 ---
+ ChangePointerControl :: proc(
display: ^Display,
do_accel: b32,
do_threshold: b32,
@@ -1517,57 +1202,57 @@ foreign xlib {
accel_denominator: i32,
threshold: i32,
) ---
- XGetPointerControl :: proc(
+ GetPointerControl :: proc(
display: ^Display,
accel_numerator: ^i32,
accel_denominator: ^i32,
threshold: ^i32,
) ---
// Manipulating the keyboard encoding
- XDisplayKeycodes :: proc(
+ DisplayKeycodes :: proc(
display: ^Display,
min_keycodes: ^i32,
max_keycodes: ^i32,
) ---
- XGetKeyboardMapping :: proc(
+ GetKeyboardMapping :: proc(
display: ^Display,
first: KeyCode,
count: i32,
keysyms_per: ^i32,
) -> ^KeySym ---
- XChangeKeyboardMapping :: proc(
+ ChangeKeyboardMapping :: proc(
display: ^Display,
first: KeyCode,
keysyms_per: i32,
keysyms: [^]KeySym,
num_codes: i32,
) ---
- XNewModifiermap :: proc(max_keys_per_mode: i32) -> ^XModifierKeymap ---
- XInsertModifiermapEntry :: proc(
+ NewModifiermap :: proc(max_keys_per_mode: i32) -> ^XModifierKeymap ---
+ InsertModifiermapEntry :: proc(
modmap: ^XModifierKeymap,
keycode_entry: KeyCode,
modifier: i32,
) -> ^XModifierKeymap ---
- XDeleteModifiermapEntry :: proc(
+ DeleteModifiermapEntry :: proc(
modmap: ^XModifierKeymap,
keycode_entry: KeyCode,
modifier: i32,
) -> ^XModifierKeymap ---
- XFreeModifiermap :: proc(modmap: ^XModifierKeymap) ---
- XSetModifierMapping :: proc(display: ^Display, modmap: ^XModifierKeymap) -> i32 ---
- XGetModifierMapping :: proc(display: ^Display) -> ^XModifierKeymap ---
+ FreeModifiermap :: proc(modmap: ^XModifierKeymap) ---
+ SetModifierMapping :: proc(display: ^Display, modmap: ^XModifierKeymap) -> i32 ---
+ GetModifierMapping :: proc(display: ^Display) -> ^XModifierKeymap ---
// Manipulating top-level windows
- XIconifyWindow :: proc(
+ IconifyWindow :: proc(
		display: ^Display,
window: Window,
screen_no: i32,
) -> Status ---
- XWithdrawWindow :: proc(
+ WithdrawWindow :: proc(
		display: ^Display,
window: Window,
screen_no: i32,
) -> Status ---
- XReconfigureWMWindow :: proc(
+ ReconfigureWMWindow :: proc(
		display: ^Display,
window: Window,
screen_no: i32,
@@ -1575,77 +1260,77 @@ foreign xlib {
changes: ^XWindowChanges,
) -> Status ---
// Getting and setting the WM_NAME property
- XSetWMName :: proc(
+ SetWMName :: proc(
display: ^Display,
window: Window,
prop: ^XTextProperty,
) ---
- XGetWMName :: proc(
+ GetWMName :: proc(
display: ^Display,
window: Window,
prop: ^XTextProperty,
) -> Status ---
- XStoreName :: proc(
+ StoreName :: proc(
display: ^Display,
window: Window,
name: cstring,
) ---
- XFetchName :: proc(
+ FetchName :: proc(
display: ^Display,
window: Window,
name: ^cstring,
) -> Status ---
- XSetWMIconName :: proc(
+ SetWMIconName :: proc(
display: ^Display,
window: Window,
prop: ^XTextProperty,
) ---
- XGetWMIconName :: proc(
+ GetWMIconName :: proc(
display: ^Display,
window: Window,
prop: ^XTextProperty,
) -> Status ---
- XSetIconName :: proc(
+ SetIconName :: proc(
display: ^Display,
window: Window,
name: cstring,
) ---
- XGetIconName :: proc(
+ GetIconName :: proc(
display: ^Display,
window: Window,
prop: ^cstring,
) -> Status ---
// Setting and reading WM_HINTS property
- XAllocWMHints :: proc() -> ^XWMHints ---
- XSetWMHints :: proc(
+ AllocWMHints :: proc() -> ^XWMHints ---
+ SetWMHints :: proc(
display: ^Display,
window: Window,
hints: ^XWMHints,
) ---
- XGetWMHints :: proc(
+ GetWMHints :: proc(
display: ^Display,
window: Window,
) -> ^XWMHints ---
	// Setting and reading WM_NORMAL_HINTS property
- XAllocSizeHints :: proc() -> ^XSizeHints ---
- XSetWMNormalHints :: proc(
+ AllocSizeHints :: proc() -> ^XSizeHints ---
+ SetWMNormalHints :: proc(
display: ^Display,
window: Window,
hints: ^XSizeHints,
) ---
- XGetWMNormalHints :: proc(
+ GetWMNormalHints :: proc(
display: ^Display,
window: Window,
hints: ^XSizeHints,
flags: ^SizeHints,
) -> Status ---
- XSetWMSizeHints :: proc(
+ SetWMSizeHints :: proc(
display: ^Display,
window: Window,
hints: ^XSizeHints,
prop: Atom,
) ---
- XGetWMSizeHints :: proc(
+ GetWMSizeHints :: proc(
display: ^Display,
window: Window,
hints: ^XSizeHints,
@@ -1653,70 +1338,70 @@ foreign xlib {
prop: Atom,
) -> Status ---
// Setting and reading the WM_CLASS property
- XAllocClassHint :: proc() -> ^XClassHint ---
- XSetClassHint :: proc(
+ AllocClassHint :: proc() -> ^XClassHint ---
+ SetClassHint :: proc(
display: ^Display,
window: Window,
hint: ^XClassHint,
) ---
- XGetClassHint :: proc(
+ GetClassHint :: proc(
display: ^Display,
window: Window,
hint: ^XClassHint,
) -> Status ---
// Setting and reading WM_TRANSIENT_FOR property
- XSetTransientForHint :: proc(
+ SetTransientForHint :: proc(
display: ^Display,
window: Window,
prop_window: Window,
) ---
- XGetTransientForHint :: proc(
+ GetTransientForHint :: proc(
display: ^Display,
window: Window,
prop_window: ^Window,
) -> Status ---
// Setting and reading the WM_PROTOCOLS property
- XSetWMProtocols :: proc(
+ SetWMProtocols :: proc(
display: ^Display,
window: Window,
protocols: [^]Atom,
count: i32,
) -> Status ---
- XGetWMProtocols :: proc(
+ GetWMProtocols :: proc(
display: ^Display,
window: Window,
protocols: ^[^]Atom,
count: ^i32,
) -> Status ---
// Setting and reading the WM_COLORMAP_WINDOWS property
- XSetWMColormapWindows :: proc(
+ SetWMColormapWindows :: proc(
display: ^Display,
window: Window,
colormap_windows: [^]Window,
count: i32,
) -> Status ---
- XGetWMColormapWindows :: proc(
+ GetWMColormapWindows :: proc(
display: ^Display,
window: Window,
colormap_windows: ^[^]Window,
count: ^i32,
) -> Status ---
	// Setting and reading the WM_ICON_SIZE property
- XAllocIconSize :: proc() -> ^XIconSize ---
- XSetIconSizes :: proc(
+ AllocIconSize :: proc() -> ^XIconSize ---
+ SetIconSizes :: proc(
display: ^Display,
window: Window,
size_list: [^]XIconSize,
count: i32,
) ---
- XGetIconSizes :: proc(
+ GetIconSizes :: proc(
display: ^Display,
window: Window,
size_list: ^[^]XIconSize,
count: ^i32,
) -> Status ---
// Using window manager convenience functions
- XmbSetWMProperties :: proc(
+ mbSetWMProperties :: proc(
display: ^Display,
window: Window,
window_name: cstring,
@@ -1727,7 +1412,7 @@ foreign xlib {
wm_hints: ^XWMHints,
class_hints: ^XClassHint,
) ---
- XSetWMProperties :: proc(
+ SetWMProperties :: proc(
display: ^Display,
window: Window,
window_name: ^XTextProperty,
@@ -1738,35 +1423,35 @@ foreign xlib {
		class_hints: ^XClassHint,
) ---
// Client to session manager communication
- XSetCommand :: proc(
+ SetCommand :: proc(
display: ^Display,
window: Window,
argv: [^]cstring,
argc: i32,
) ---
- XGetCommand :: proc(
+ GetCommand :: proc(
display: ^Display,
window: Window,
argv: ^[^]cstring,
argc: ^i32,
) -> Status ---
- XSetWMClientMachine :: proc(
+ SetWMClientMachine :: proc(
display: ^Display,
window: Window,
prop: ^XTextProperty,
) ---
- XGetWMClientMachine :: proc(
+ GetWMClientMachine :: proc(
display: ^Display,
window: Window,
prop: ^XTextProperty,
) -> Status ---
- XSetRGBColormaps :: proc(
+ SetRGBColormaps :: proc(
display: ^Display,
window: Window,
colormap: ^XStandardColormap,
prop: Atom,
) ---
- XGetRGBColormaps :: proc(
+ GetRGBColormaps :: proc(
display: ^Display,
window: Window,
colormap: ^[^]XStandardColormap,
@@ -1774,35 +1459,35 @@ foreign xlib {
prop: Atom,
) -> Status ---
// Keyboard utility functions
- XLookupKeysym :: proc(
+ LookupKeysym :: proc(
event: ^XKeyEvent,
index: i32,
) -> KeySym ---
- XKeycodeToKeysym :: proc(
+ KeycodeToKeysym :: proc(
display: ^Display,
keycode: KeyCode,
index: i32,
) -> KeySym ---
- XKeysymToKeycode :: proc(
+ KeysymToKeycode :: proc(
display: ^Display,
keysym: KeySym,
) -> KeyCode ---
- XRefreshKeyboardMapping :: proc(event_map: ^XMappingEvent) ---
- XConvertCase :: proc(
+ RefreshKeyboardMapping :: proc(event_map: ^XMappingEvent) ---
+ ConvertCase :: proc(
keysym: KeySym,
lower: ^KeySym,
upper: ^KeySym,
) ---
- XStringToKeysym :: proc(str: cstring) -> KeySym ---
- XKeysymToString :: proc(keysym: KeySym) -> cstring ---
- XLookupString :: proc(
+ StringToKeysym :: proc(str: cstring) -> KeySym ---
+ KeysymToString :: proc(keysym: KeySym) -> cstring ---
+ LookupString :: proc(
event: ^XKeyEvent,
buffer: [^]u8,
count: i32,
keysym: ^KeySym,
status: ^XComposeStatus,
) -> i32 ---
- XRebindKeysym :: proc(
+ RebindKeysym :: proc(
display: ^Display,
keysym: KeySym,
list: [^]KeySym,
@@ -1811,16 +1496,16 @@ foreign xlib {
num_bytes: i32,
) ---
// Allocating permanent storage
- XPermalloc :: proc(size: u32) -> rawptr ---
+ Permalloc :: proc(size: u32) -> rawptr ---
// Parsing the window geometry
- XParseGeometry :: proc(
+ ParseGeometry :: proc(
parsestring: cstring,
x_ret: ^i32,
y_ret: ^i32,
width: ^u32,
height: ^u32,
) -> i32 ---
- XWMGeometry :: proc(
+ WMGeometry :: proc(
display: ^Display,
screen_no: i32,
user_geom: cstring,
@@ -1834,45 +1519,45 @@ foreign xlib {
grav: ^Gravity,
) -> i32 ---
// Creating, copying and destroying regions
- XCreateRegion :: proc() -> Region ---
- XPolygonRegion :: proc(
+ CreateRegion :: proc() -> Region ---
+ PolygonRegion :: proc(
points: [^]XPoint,
n: i32,
fill: FillRule,
) -> Region ---
- XSetRegion :: proc(
+ SetRegion :: proc(
display: ^Display,
gc: GC,
region: Region,
) ---
- XDestroyRegion :: proc(r: Region) ---
+ DestroyRegion :: proc(r: Region) ---
// Moving or shrinking regions
- XOffsetRegion :: proc(region: Region, dx, dy: i32) ---
- XShrinkRegion :: proc(region: Region, dx, dy: i32) ---
+ OffsetRegion :: proc(region: Region, dx, dy: i32) ---
+ ShrinkRegion :: proc(region: Region, dx, dy: i32) ---
// Computing with regions
- XClipBox :: proc(region: Region, rect: ^XRectangle) ---
- XIntersectRegion :: proc(sra, srb, ret: Region) ---
- XUnionRegion :: proc(sra, srb, ret: Region) ---
- XUnionRectWithRegion :: proc(rect: ^XRectangle, src, dst: Region) ---
- XSubtractRegion :: proc(sra, srb, ret: Region) ---
- XXorRegion :: proc(sra, srb, ret: Region) ---
- XEmptyRegion :: proc(reg: Region) -> b32 ---
- XEqualRegion :: proc(a,b: Region) -> b32 ---
- XPointInRegion :: proc(reg: Region, x,y: i32) -> b32 ---
- XRectInRegion :: proc(reg: Region, x,y: i32, w,h: u32) -> b32 ---
+ ClipBox :: proc(region: Region, rect: ^XRectangle) ---
+ IntersectRegion :: proc(sra, srb, ret: Region) ---
+ UnionRegion :: proc(sra, srb, ret: Region) ---
+ UnionRectWithRegion :: proc(rect: ^XRectangle, src, dst: Region) ---
+ SubtractRegion :: proc(sra, srb, ret: Region) ---
+ XorRegion :: proc(sra, srb, ret: Region) ---
+ EmptyRegion :: proc(reg: Region) -> b32 ---
+ EqualRegion :: proc(a,b: Region) -> b32 ---
+ PointInRegion :: proc(reg: Region, x,y: i32) -> b32 ---
+ RectInRegion :: proc(reg: Region, x,y: i32, w,h: u32) -> b32 ---
// Using cut buffers
- XStoreBytes :: proc(display: ^Display, bytes: [^]u8, nbytes: i32) ---
- XStoreBuffer :: proc(display: ^Display, bytes: [^]u8, nbytes: i32, buffer: i32) ---
- XFetchBytes :: proc(display: ^Display, nbytes: ^i32) -> [^]u8 ---
- XFetchBuffer :: proc(display: ^Display, nbytes: ^i32, buffer: i32) -> [^]u8 ---
+ StoreBytes :: proc(display: ^Display, bytes: [^]u8, nbytes: i32) ---
+ StoreBuffer :: proc(display: ^Display, bytes: [^]u8, nbytes: i32, buffer: i32) ---
+ FetchBytes :: proc(display: ^Display, nbytes: ^i32) -> [^]u8 ---
+ FetchBuffer :: proc(display: ^Display, nbytes: ^i32, buffer: i32) -> [^]u8 ---
// Determining the appropriate visual types
- XGetVisualInfo :: proc(
+ GetVisualInfo :: proc(
display: ^Display,
mask: VisualInfoMask,
info: ^XVisualInfo,
nret: ^i32,
) -> [^]XVisualInfo ---
- XMatchVisualInfo :: proc(
+ MatchVisualInfo :: proc(
display: ^Display,
screen_no: i32,
depth: i32,
@@ -1880,7 +1565,7 @@ foreign xlib {
ret: ^XVisualInfo,
) -> Status ---
// Manipulating images
- XCreateImage :: proc(
+ CreateImage :: proc(
display: ^Display,
visual: ^Visual,
depth: u32,
@@ -1892,27 +1577,365 @@ foreign xlib {
pad: i32,
stride: i32,
) -> ^XImage ---
- XGetPixel :: proc(
+ GetPixel :: proc(
image: ^XImage,
x: i32,
y: i32,
) -> uint ---
- XPutPixel :: proc(
+ PutPixel :: proc(
image: ^XImage,
x: i32,
y: i32,
pixel: uint,
) ---
- XSubImage :: proc(
+ SubImage :: proc(
image: ^XImage,
x: i32,
y: i32,
w: u32,
h: u32,
) -> ^XImage ---
- XAddPixel :: proc(
+ AddPixel :: proc(
image: ^XImage,
value: int,
) ---
- XDestroyImage :: proc(image: ^XImage) ---
+ StoreNamedColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ name: cstring,
+ pixel: uint,
+ flags: ColorFlags,
+ ) ---
+ QueryColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ color: ^XColor,
+ ) ---
+ QueryColors :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ colors: [^]XColor,
+ ncolors: i32,
+ ) ---
+ QueryExtension :: proc(
+ display: ^Display,
+ name: cstring,
+ major_opcode_return: ^i32,
+ first_event_return: ^i32,
+ first_error_return: ^i32,
+ ) -> b32 ---
+ DestroyImage :: proc(image: ^XImage) ---
+ ResourceManagerString :: proc(display: ^Display) -> cstring ---
+ utf8SetWMProperties :: proc(
+ display: ^Display,
+ window: Window,
+ window_name: cstring,
+ icon_name: cstring,
+ argv: ^cstring,
+ argc: i32,
+ normal_hints: ^XSizeHints,
+ wm_hints: ^XWMHints,
+ class_hints: ^XClassHint,
+ ) ---
+}
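
Since every binding in this block loses its X prefix (presumably restored at link time via a link prefix on the foreign block, which lies outside this hunk), call sites drop the prefix as well. Below is a minimal, hedged sketch of what usage might look like after the rename. It assumes the package is imported as `xlib` from `vendor:x11/xlib`, that OpenDisplay/CloseDisplay bindings exist elsewhere in this file, and that EventMask is a bit_set and XEvent exposes a `type: EventType` field, as the surrounding signatures suggest; beyond that, only procedures whose signatures are visible in this hunk are used.

package xlib_example

import xlib "vendor:x11/xlib"

main :: proc() {
	// Assumed: OpenDisplay/CloseDisplay are bound elsewhere in this file.
	display := xlib.OpenDisplay(nil)
	if display == nil {
		return
	}
	defer xlib.CloseDisplay(display)

	screen := xlib.DefaultScreen(display)
	root   := xlib.RootWindow(display, screen)

	// CreateSimpleWindow takes the border and background pixels as plain ints
	// in this binding, so the uint pixel values are cast.
	window := xlib.CreateSimpleWindow(
		display, root,
		0, 0, 640, 480, 1,
		int(xlib.BlackPixel(display, screen)),
		int(xlib.WhitePixel(display, screen)),
	)
	defer xlib.DestroyWindow(display, window)

	// Assumed: EventMask is a bit_set with .Exposure/.KeyPress bits.
	xlib.SelectInput(display, window, {.Exposure, .KeyPress})
	xlib.MapWindow(display, window)

	// Assumed: XEvent carries a `type: EventType` field.
	event: xlib.XEvent
	for {
		xlib.NextEvent(display, &event)
		if event.type == .KeyPress {
			break
		}
	}
}
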
+
+@(default_calling_convention="c")
+foreign xlib {
+ XcmsLookupColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ name: cstring,
+		exact: ^XcmsColor,
+		screen: ^XcmsColor,
+ format: XcmsColorFormat,
+ ) -> Status ---
+ XcmsStoreColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsStoreColors :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ colors: [^]XcmsColor,
+		ncolors: u32,
+ cflags: [^]b32,
+ ) -> Status ---
+ XcmsQueryColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ color: ^XcmsColor,
+ format: XcmsColorFormat,
+ ) -> Status ---
+ XcmsQueryColors :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ color: [^]XcmsColor,
+ ncolors: i32,
+ format: XcmsColorFormat,
+ ) -> Status ---
+ // Getting and setting the color conversion context (CCC) of a colormap
+ XcmsCCCOfColormap :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ ) -> XcmsCCC ---
+ XcmsSetCCCOfColormap :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ ccc: XcmsCCC) -> XcmsCCC ---
+ XcmsDefaultCCC :: proc(display: ^Display, screen_no: i32) -> XcmsCCC ---
+ // Color conversion context macros
+ XcmsDisplayOfCCC :: proc(ccc: XcmsCCC) -> ^Display ---
+ XcmsVisualOfCCC :: proc(ccc: XcmsCCC) -> ^Visual ---
+ XcmsScreenNumberOfCCC :: proc(ccc: XcmsCCC) -> i32 ---
+ XcmsScreenWhitePointOfCCC :: proc(ccc: XcmsCCC) -> XcmsColor ---
+ XcmsClientWhitePointOfCCC :: proc(ccc: XcmsCCC) -> XcmsColor ---
+ // Modifying the attributes of color conversion context
+ XcmsSetWhitePoint :: proc(
+ ccc: XcmsCCC,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsSetCompressionProc :: proc(
+ ccc: XcmsCCC,
+ cproc: XcmsCompressionProc,
+ data: rawptr,
+ ) -> XcmsCompressionProc ---
+ XcmsSetWhiteAdjustProc :: proc(
+ ccc: XcmsCCC,
+ aproc: XcmsWhiteAdjustProc,
+ data: rawptr,
+ ) -> XcmsWhiteAdjustProc ---
+ // Creating and freeing the color conversion context
+ XcmsCreateCCC :: proc(
+ display: ^Display,
+ screen_no: i32,
+ visual: ^Visual,
+ white_point: ^XcmsColor,
+ cproc: XcmsCompressionProc,
+ cdata: rawptr,
+ aproc: XcmsWhiteAdjustProc,
+ adata: rawptr,
+ ) -> XcmsCCC ---
+ XcmsFreeCCC :: proc(ccc: XcmsCCC) ---
+ // Converting between colorspaces
+ XcmsConvertColors :: proc(
+ ccc: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ format: XcmsColorFormat,
+ cflags: [^]b32,
+ ) -> Status ---
+ // Pre-defined gamut compression callbacks
+ XcmsCIELabClipL :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsCIELabClipab :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsCIELabClipLab :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsCIELuvClipL :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsCIELuvClipuv :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsCIELuvClipLuv :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsTekHVCClipV :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsTekHVCClipC :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ XcmsTekHVCClipVC :: proc(
+ ctx: XcmsCCC,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ index: u32,
+ flags: [^]b32,
+ ) -> Status ---
+ // Pre-defined white-point adjustment procedures
+ XcmsCIELabWhiteShiftColors :: proc(
+ ctx: XcmsCCC,
+ initial_white_point: ^XcmsColor,
+ target_white_point: ^XcmsColor,
+ target_format: XcmsColorFormat,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ compression: [^]b32,
+ ) -> Status ---
+ XcmsCIELuvWhiteShiftColors :: proc(
+ ctx: XcmsCCC,
+ initial_white_point: ^XcmsColor,
+ target_white_point: ^XcmsColor,
+ target_format: XcmsColorFormat,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ compression: [^]b32,
+ ) -> Status ---
+ XcmsTekHVCWhiteShiftColors :: proc(
+ ctx: XcmsCCC,
+ initial_white_point: ^XcmsColor,
+ target_white_point: ^XcmsColor,
+ target_format: XcmsColorFormat,
+ colors: [^]XcmsColor,
+ ncolors: u32,
+ compression: [^]b32,
+ ) -> Status ---
+ // Color querying
+ XcmsQueryBlack :: proc(
+ ccc: XcmsCCC,
+ format: XcmsColorFormat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsQueryBlue :: proc(
+ ccc: XcmsCCC,
+ format: XcmsColorFormat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsQueryGreen :: proc(
+ ccc: XcmsCCC,
+ format: XcmsColorFormat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsQueryRed :: proc(
+ ccc: XcmsCCC,
+ format: XcmsColorFormat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsQueryWhite :: proc(
+ ccc: XcmsCCC,
+ format: XcmsColorFormat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ // CIELab queries
+ XcmsCIELabQueryMaxC :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ lstar: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsCIELabQueryMaxL :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ chroma: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsCIELabQueryMaxLC :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsCIELabQueryMinL :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ chroma: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ // CIELuv queries
+ XcmsCIELuvQueryMaxC :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ lstar: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsCIELuvQueryMaxL :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ chroma: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsCIELuvQueryMaxLC :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsCIELuvQueryMinL :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ chroma: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ // TekHVC queries
+ XcmsTekHVCQueryMaxC :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ value: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsTekHVCQueryMaxV :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ chroma: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsTekHVCQueryMaxVC :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsTekHVCQueryMaxVSamples :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ colors: [^]XcmsColor,
+ nsamples: u32,
+ ) -> Status ---
+ XcmsTekHVCQueryMinV :: proc(
+ ccc: XcmsCCC,
+ hue: XcmsFloat,
+ chroma: XcmsFloat,
+ color: ^XcmsColor,
+ ) -> Status ---
+ XcmsAllocNamedColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ name: cstring,
+ screen: ^XcmsColor,
+ exact: ^XcmsColor,
+ format: XcmsColorFormat,
+ ) -> Status ---
+ XcmsAllocColor :: proc(
+ display: ^Display,
+ colormap: Colormap,
+ color: ^XcmsColor,
+ format: XcmsColorFormat,
+ ) -> Status ---
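+ // X resource manager (Xrm) database access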
+ XrmInitialize :: proc() ---
+ XrmGetStringDatabase :: proc(data: cstring) -> XrmDatabase ---
+ XrmGetResource :: proc(db: XrmDatabase, name: cstring, class: cstring, type_return: ^cstring, val_return: ^XrmValue) -> b32 ---
}
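For orientation, a minimal usage sketch of the Xcms bindings declared above. It is illustrative only: it assumes the declarations live in the usual vendor:x11/xlib package, that Status behaves like the C Xlib type where zero means XcmsFailure, and query_white_point is a hypothetical helper, not part of this patch.

package xcms_example

import x11 "vendor:x11/xlib"

// Hypothetical helper: query the screen's white point through the default
// color conversion context for `screen_no`, in the caller-supplied `format`.
query_white_point :: proc(display: ^x11.Display, screen_no: i32, format: x11.XcmsColorFormat) -> (color: x11.XcmsColor, ok: bool) {
	// Default CCCs are owned by Xlib; XcmsFreeCCC ignores them, so no cleanup is needed here.
	ccc := x11.XcmsDefaultCCC(display, screen_no)
	status := x11.XcmsQueryWhite(ccc, format, &color)
	ok = status != x11.Status(0) // zero is XcmsFailure in C Xlib
	return
}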